Diffstat (limited to 'drivers/gpu/drm/i915')
261 files changed, 22954 insertions, 15329 deletions
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
index d9a77f3b59b2..81972dce1aff 100644
--- a/drivers/gpu/drm/i915/.gitignore
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
 *.hdrtest
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 907c4471f591..9afa5c4a6bf0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -42,16 +42,9 @@ config DRM_I915
 
 	  If "M" is selected, the module will be called i915.
 
-config DRM_I915_ALPHA_SUPPORT
-	bool "Enable alpha quality support for new Intel hardware by default"
-	depends on DRM_I915
-	help
-	  This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
-
 config DRM_I915_FORCE_PROBE
 	string "Force probe driver for selected new Intel hardware"
 	depends on DRM_I915
-	default "*" if DRM_I915_ALPHA_SUPPORT
 	help
 	  This is the default value for the i915.force_probe module
 	  parameter. Using the module parameter overrides this option.
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
 	  check the health of the GPU and undertake regular house-keeping of
 	  internal driver state.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
 	  May be 0 to disable heartbeats and therefore disable automatic GPU
 	  hang detection.
 
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
 	  expires, the HW will be reset to allow the more important context
 	  to execute.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
 	  May be 0 to disable the timeout.
 
-config DRM_I915_SPIN_REQUEST
-	int "Busywait for request completion (us)"
-	default 5 # microseconds
+	  The compiled in default may get overridden at driver probe time on
+	  certain platforms and certain engines which will be reflected in the
+	  sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+	int "Busywait for request completion limit (ns)"
+	default 8000 # nanoseconds
 	help
 	  Before sleeping waiting for a request (GPU operation) to complete,
 	  we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
 	  check if the request will complete in the time it would have taken
 	  us to enable the interrupt.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
 	  May be 0 to disable the initial spin. In practice, we estimate
 	  the cost of enabling the interrupt (if currently disabled) to be
 	  a few microseconds.
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
 	  that the reset itself may take longer and so be more disruptive to
 	  interactive or low latency workloads.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/stop_timeout_ms
+
 config DRM_I915_TIMESLICE_DURATION
 	int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
 	default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
 	  is scheduled for execution for the timeslice duration, before
 	  switching to the next context.
 
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
 	  May be 0 to disable timeslicing.
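The Kconfig.profile help texts above describe per-engine tunables exported through sysfs (the engine/ directory comes from the new gt/sysfs_engines.o object added to the Makefile below). The following is a minimal userspace sketch of reading and adjusting one such control; the card index (card0) and engine directory name (rcs0) are illustrative assumptions that vary per machine, so resolve the actual path under /sys/class/drm on the target system.

/* Sketch: query and bump one per-engine tunable via sysfs. */
#include <stdio.h>
#include <stdlib.h>

/* Example path only; "card0" and "rcs0" are assumptions. */
static const char *path =
	"/sys/class/drm/card0/engine/rcs0/preempt_timeout_ms";

int main(void)
{
	unsigned long ms;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%lu", &ms) != 1) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("preempt timeout: %lu ms\n", ms);

	/* Writes require root; 0 would disable the timeout entirely. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%lu", 2 * ms);	/* e.g. double the timeout */
	fclose(f);

	return EXIT_SUCCESS;
}

The same open/read/write pattern applies to heartbeat_interval_ms, max_busywait_duration_ns, stop_timeout_ms and timeslice_duration_ms, with 0 carrying the "disable" meaning only where the help text above says so.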
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a1f2411aa21b..6cd1f6253814 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -28,9 +28,6 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init) CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init) -subdir-ccflags-y += \ - $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) - subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! @@ -46,15 +43,16 @@ i915-y += i915_drv.o \ i915_switcheroo.o \ i915_sysfs.o \ i915_utils.o \ - intel_csr.o \ intel_device_info.o \ + intel_dram.o \ intel_memory_region.o \ intel_pch.o \ intel_pm.o \ intel_runtime_pm.o \ intel_sideband.o \ intel_uncore.o \ - intel_wakeref.o + intel_wakeref.o \ + vlv_suspend.o # core library code i915-y += \ @@ -66,7 +64,11 @@ i915-y += \ i915_user_extensions.o i915-$(CONFIG_COMPAT) += i915_ioc32.o -i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o +i915-$(CONFIG_DEBUG_FS) += \ + i915_debugfs.o \ + i915_debugfs_params.o \ + display/intel_display_debugfs.o \ + display/intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o # "Graphics Technology" (aka we talk to the gpu) @@ -75,9 +77,12 @@ gt-y += \ gt/debugfs_gt.o \ gt/debugfs_gt_pm.o \ gt/gen6_ppgtt.o \ + gt/gen7_renderclear.o \ gt/gen8_ppgtt.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ + gt/intel_context_param.o \ + gt/intel_context_sseu.o \ gt/intel_engine_cs.o \ gt/intel_engine_heartbeat.o \ gt/intel_engine_pm.o \ @@ -102,7 +107,8 @@ gt-y += \ gt/intel_rps.o \ gt/intel_sseu.o \ gt/intel_timeline.o \ - gt/intel_workarounds.o + gt/intel_workarounds.o \ + gt/sysfs_engines.o # autogenerated null render state gt-y += \ gt/gen6_renderstate.o \ @@ -179,6 +185,7 @@ i915-y += \ display/intel_color.o \ display/intel_combo_phy.o \ display/intel_connector.o \ + display/intel_csr.o \ display/intel_display.o \ display/intel_display_power.o \ display/intel_dpio_phy.o \ @@ -187,6 +194,7 @@ i915-y += \ display/intel_fbc.o \ display/intel_fifo_underrun.o \ display/intel_frontbuffer.o \ + display/intel_global_state.o \ display/intel_hdcp.o \ display/intel_hotplug.o \ display/intel_lpe_audio.o \ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index f8e882101396..17cee6f80d8b 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -39,14 +39,14 @@ static inline int header_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) { - return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) + return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) >> FREE_HEADER_CREDIT_SHIFT; } static inline int payload_credits_available(struct drm_i915_private *dev_priv, enum transcoder dsi_trans) { - return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) + return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) >> FREE_PLOAD_CREDIT_SHIFT; } @@ -55,7 +55,7 @@ static void wait_for_header_credits(struct drm_i915_private *dev_priv, { if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= MAX_HEADER_CREDIT, 100)) - DRM_ERROR("DSI header credits not released\n"); + drm_err(&dev_priv->drm, "DSI header credits not released\n"); } static void wait_for_payload_credits(struct drm_i915_private *dev_priv, @@ -63,7 +63,7 @@ static void wait_for_payload_credits(struct 
drm_i915_private *dev_priv, { if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= MAX_PLOAD_CREDIT, 100)) - DRM_ERROR("DSI payload credits not released\n"); + drm_err(&dev_priv->drm, "DSI payload credits not released\n"); } static enum transcoder dsi_port_to_transcoder(enum port port) @@ -97,7 +97,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) dsi->channel = 0; ret = mipi_dsi_dcs_nop(dsi); if (ret < 0) - DRM_ERROR("error sending DCS NOP command\n"); + drm_err(&dev_priv->drm, + "error sending DCS NOP command\n"); } /* wait for header credits to be released */ @@ -109,9 +110,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) & + if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & LPTX_IN_PROGRESS), 20)) - DRM_ERROR("LPTX bit not cleared\n"); + drm_err(&dev_priv->drm, "LPTX bit not cleared\n"); } } @@ -129,14 +130,15 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data, free_credits = payload_credits_available(dev_priv, dsi_trans); if (free_credits < 1) { - DRM_ERROR("Payload credit not available\n"); + drm_err(&dev_priv->drm, + "Payload credit not available\n"); return false; } for (j = 0; j < min_t(u32, len - i, 4); j++) tmp |= *data++ << 8 * j; - I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp); } return true; @@ -154,11 +156,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, /* check if header credit available */ free_credits = header_credits_available(dev_priv, dsi_trans); if (free_credits < 1) { - DRM_ERROR("send pkt header failed, not enough hdr credits\n"); + drm_err(&dev_priv->drm, + "send pkt header failed, not enough hdr credits\n"); return -1; } - tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans)); if (pkt.payload) tmp |= PAYLOAD_PRESENT; @@ -175,7 +178,7 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT); tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT); tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT); - I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp); return 0; } @@ -212,53 +215,55 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= 
SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { /* Bspec: must not use GRP register for write */ - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); + tmp = intel_de_read(dev_priv, + ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); + intel_de_write(dev_priv, + ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } } @@ -270,7 +275,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 dss_ctl1; - dss_ctl1 = I915_READ(DSS_CTL1); + dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ -286,20 +291,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap; if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH) - DRM_ERROR("DL buffer depth exceed max value\n"); + drm_err(&dev_priv->drm, + "DL buffer depth exceed max value\n"); dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - dss_ctl2 = I915_READ(DSS_CTL2); + dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - I915_WRITE(DSS_CTL2, dss_ctl2); + intel_de_write(dev_priv, DSS_CTL2, dss_ctl2); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - I915_WRITE(DSS_CTL1, dss_ctl1); + intel_de_write(dev_priv, DSS_CTL1, dss_ctl1); } /* aka DSI 8X clock */ @@ -330,15 +336,15 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder, esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(ICL_DSI_ESC_CLK_DIV(port), - esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - POSTING_READ(ICL_DSI_ESC_CLK_DIV(port)); + intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port)); } for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port), - esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); - POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port)); + intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port)); } } @@ -348,7 +354,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv, enum port port; for_each_dsi_port(port, 
intel_dsi->ports) { - WARN_ON(intel_dsi->io_wakeref[port]); + drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]); intel_dsi->io_wakeref[port] = intel_display_power_get(dev_priv, port == PORT_A ? @@ -365,9 +371,9 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); tmp |= COMBO_PHY_MODE_DSI; - I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); } get_dsi_io_power_domains(dev_priv, intel_dsi); @@ -394,40 +400,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); + tmp = intel_de_read(dev_priv, + ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~LOADGEN_SELECT; if (lane != 2) tmp |= LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); + intel_de_write(dev_priv, + ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); + intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) { - tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); + tmp = intel_de_read(dev_priv, + ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0); - I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + tmp); - tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); + tmp = intel_de_read(dev_priv, + ICL_PORT_PCS_DW1_LN0(phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); - I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), + tmp); } } @@ -442,12 +454,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* clear common keeper enable bit */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); + tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp); } /* @@ -456,19 +468,19 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * as part of lane phy sequence configuration */ 
for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_CL_DW5(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); tmp |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp); } /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); } /* Program swing and de-emphasis */ @@ -476,12 +488,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) /* Set training enable to trigger update */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); } } @@ -493,14 +505,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) enum port port; for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(DDI_BUF_CTL(port)); + tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); tmp |= DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), tmp); + intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); - if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) & + if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 500)) - DRM_ERROR("DDI port:%c buffer idle\n", port_name(port)); + drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n", + port_name(port)); } } @@ -516,28 +529,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port)); + tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port)); tmp &= ~MASTER_INIT_TIMER_MASK; tmp |= intel_dsi->init_count; - I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp); + intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); } /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); + intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port), + intel_dsi->dphy_reg); /* shadow register inside display core */ - I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); + intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port), + intel_dsi->dphy_reg); } /* Program DPHY data lanes timings */ for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(DPHY_DATA_TIMING_PARAM(port), - intel_dsi->dphy_data_lane_reg); + intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port), + intel_dsi->dphy_data_lane_reg); /* shadow register inside display core */ - I915_WRITE(DSI_DATA_TIMING_PARAM(port), - intel_dsi->dphy_data_lane_reg); + intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port), + intel_dsi->dphy_data_lane_reg); } /* @@ -549,25 +564,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if 
(IS_GEN(dev_priv, 11)) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(DPHY_TA_TIMING_PARAM(port)); + tmp = intel_de_read(dev_priv, + DPHY_TA_TIMING_PARAM(port)); tmp &= ~TA_SURE_MASK; tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp); + intel_de_write(dev_priv, + DPHY_TA_TIMING_PARAM(port), + tmp); /* shadow register inside display core */ - tmp = I915_READ(DSI_TA_TIMING_PARAM(port)); + tmp = intel_de_read(dev_priv, + DSI_TA_TIMING_PARAM(port)); tmp &= ~TA_SURE_MASK; tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); + intel_de_write(dev_priv, + DSI_TA_TIMING_PARAM(port), tmp); } } } if (IS_ELKHARTLAKE(dev_priv)) { for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = I915_READ(ICL_DPHY_CHKN(phy)); + tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; - I915_WRITE(ICL_DPHY_CHKN(phy), tmp); + intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp); } } } @@ -579,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(ICL_DPCLKA_CFGCR0); + mutex_lock(&dev_priv->dpll.lock); + tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll_lock); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&dev_priv->dpll.lock); } static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) @@ -595,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) u32 tmp; enum phy phy; - mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(ICL_DPCLKA_CFGCR0); + mutex_lock(&dev_priv->dpll.lock); + tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); - mutex_unlock(&dev_priv->dpll_lock); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp); + mutex_unlock(&dev_priv->dpll.lock); } static void gen11_dsi_map_pll(struct intel_encoder *encoder, @@ -613,14 +633,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, enum phy phy; u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); - val = I915_READ(ICL_DPCLKA_CFGCR0); + val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - I915_WRITE(ICL_DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { if (INTEL_GEN(dev_priv) >= 12) @@ -628,11 +648,11 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, else val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - I915_WRITE(ICL_DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - POSTING_READ(ICL_DPCLKA_CFGCR0); + intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void @@ -649,7 +669,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)); if (intel_dsi->eotp_pkt) tmp &= ~EOTP_DISABLED; @@ -726,16 +746,18 @@ 
gen11_dsi_configure_transcoder(struct intel_encoder *encoder, } } - I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp); } /* enable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL2(dsi_trans)); tmp |= PORT_SYNC_MODE_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); } /* configure stream splitting */ @@ -746,7 +768,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* select data lane width */ - tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); tmp &= ~DDI_PORT_WIDTH_MASK; tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); @@ -772,15 +794,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* enable DDI buffer */ tmp |= TRANS_DDI_FUNC_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); } /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) & - LINK_READY), 2500)) - DRM_ERROR("DSI link not ready\n"); + if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) & + LINK_READY), 2500)) + drm_err(&dev_priv->drm, "DSI link not ready\n"); } } @@ -836,17 +858,18 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* minimum hactive as per bspec: 256 pixels */ if (adjusted_mode->crtc_hdisplay < 256) - DRM_ERROR("hactive is less then 256 pixels\n"); + drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n"); /* if RGB666 format, then hactive must be multiple of 4 pixels */ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) - DRM_ERROR("hactive pixels are not multiple of 4\n"); + drm_err(&dev_priv->drm, + "hactive pixels are not multiple of 4\n"); /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - I915_WRITE(HTOTAL(dsi_trans), - (hactive - 1) | ((htotal - 1) << 16)); + intel_de_write(dev_priv, HTOTAL(dsi_trans), + (hactive - 1) | ((htotal - 1) << 16)); } /* TRANS_HSYNC register to be programmed only for video mode */ @@ -855,11 +878,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) { /* BSPEC: hsync size should be atleast 16 pixels */ if (hsync_size < 16) - DRM_ERROR("hsync size < 16 pixels\n"); + drm_err(&dev_priv->drm, + "hsync size < 16 pixels\n"); } if (hback_porch < 16) - DRM_ERROR("hback porch < 16 pixels\n"); + drm_err(&dev_priv->drm, "hback porch < 16 pixels\n"); if (intel_dsi->dual_link) { hsync_start /= 2; @@ -868,8 +892,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - I915_WRITE(HSYNC(dsi_trans), - (hsync_start - 1) | ((hsync_end - 1) << 16)); + intel_de_write(dev_priv, HSYNC(dsi_trans), + (hsync_start - 1) | ((hsync_end - 1) << 16)); } } @@ -882,21 +906,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
* For interlace mode: program required pixel minus 2 */ - I915_WRITE(VTOTAL(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, VTOTAL(dsi_trans), + (vactive - 1) | ((vtotal - 1) << 16)); } if (vsync_end < vsync_start || vsync_end > vtotal) - DRM_ERROR("Invalid vsync_end value\n"); + drm_err(&dev_priv->drm, "Invalid vsync_end value\n"); if (vsync_start < vactive) - DRM_ERROR("vsync_start less than vactive\n"); + drm_err(&dev_priv->drm, "vsync_start less than vactive\n"); /* program TRANS_VSYNC register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - I915_WRITE(VSYNC(dsi_trans), - (vsync_start - 1) | ((vsync_end - 1) << 16)); + intel_de_write(dev_priv, VSYNC(dsi_trans), + (vsync_start - 1) | ((vsync_end - 1) << 16)); } /* @@ -907,15 +931,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift); + intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift); } /* program TRANS_VBLANK register, should be same as vtotal programmed */ if (INTEL_GEN(dev_priv) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - I915_WRITE(VBLANK(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, VBLANK(dsi_trans), + (vactive - 1) | ((vtotal - 1) << 16)); } } } @@ -930,14 +954,15 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(PIPECONF(dsi_trans)); + tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); tmp |= PIPECONF_ENABLE; - I915_WRITE(PIPECONF(dsi_trans), tmp); + intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); /* wait for transcoder to be enabled */ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, 10)) - DRM_ERROR("DSI transcoder not enabled\n"); + drm_err(&dev_priv->drm, + "DSI transcoder not enabled\n"); } } @@ -968,26 +993,26 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - tmp = I915_READ(DSI_HSTX_TO(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans)); tmp &= ~HSTX_TIMEOUT_VALUE_MASK; tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); - I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)); tmp &= ~LPRX_TIMEOUT_VALUE_MASK; tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); - I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - tmp = I915_READ(DSI_TA_TO(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans)); tmp &= ~TA_TIMEOUT_VALUE_MASK; tmp |= TA_TIMEOUT_VALUE(ta_timeout); - I915_WRITE(DSI_TA_TO(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp); } } @@ -1041,14 +1066,15 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) * FIXME: This uses the number of DW's currently in the payload * receive queue. This is probably not what we want here. 
*/ - tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans)); tmp &= NUMBER_RX_PLOAD_DW_MASK; /* multiply "Number Rx Payload DW" by 4 to get max value */ tmp = tmp * 4; dsi = intel_dsi->dsi_hosts[port]->device; ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); if (ret < 0) - DRM_ERROR("error setting max return pkt size%d\n", tmp); + drm_err(&dev_priv->drm, + "error setting max return pkt size%d\n", tmp); } /* panel power on related mipi dsi vbt sequences */ @@ -1077,8 +1103,6 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - /* step3b */ gen11_dsi_map_pll(encoder, pipe_config); @@ -1092,13 +1116,24 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder, /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); +} + +static void gen11_dsi_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + + WARN_ON(crtc_state->has_pch_encoder); /* step6d: enable dsi transcoder */ gen11_dsi_enable_transcoder(encoder); /* step7: enable backlight */ - intel_panel_enable_backlight(pipe_config, conn_state); + intel_panel_enable_backlight(crtc_state, conn_state); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); + + intel_crtc_vblank_on(crtc_state); } static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) @@ -1113,14 +1148,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - tmp = I915_READ(PIPECONF(dsi_trans)); + tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); tmp &= ~PIPECONF_ENABLE; - I915_WRITE(PIPECONF(dsi_trans), tmp); + intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); /* wait for transcoder to be disabled */ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, 50)) - DRM_ERROR("DSI trancoder not disabled\n"); + drm_err(&dev_priv->drm, + "DSI trancoder not disabled\n"); } } @@ -1147,32 +1183,34 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* put dsi link in ULPS */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(DSI_LP_MSG(dsi_trans)); + tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)); tmp |= LINK_ENTER_ULPS; tmp &= ~LINK_ULPS_TYPE_LP11; - I915_WRITE(DSI_LP_MSG(dsi_trans), tmp); + intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp); - if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) & - LINK_IN_ULPS), + if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) & + LINK_IN_ULPS), 10)) - DRM_ERROR("DSI link not in ULPS\n"); + drm_err(&dev_priv->drm, "DSI link not in ULPS\n"); } /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); tmp &= ~TRANS_DDI_FUNC_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); } /* disable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = 
I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL2(dsi_trans)); tmp &= ~PORT_SYNC_MODE_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); } } } @@ -1186,15 +1224,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(DDI_BUF_CTL(port)); + tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); tmp &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), tmp); + intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); - if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) & + if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), 8)) - DRM_ERROR("DDI port:%c buffer not idle\n", - port_name(port)); + drm_err(&dev_priv->drm, + "DDI port:%c buffer not idle\n", + port_name(port)); } gen11_dsi_gate_clocks(encoder); } @@ -1219,9 +1258,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) /* set mode to DDI */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); tmp &= ~COMBO_PHY_MODE_DSI; - I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); } } @@ -1311,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder, static void gen11_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_dsc_get_config(encoder, pipe_config); /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? 
*/ - pipe_config->port_clock = - cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); + pipe_config->port_clock = intel_dpll_get_freq(i915, + pipe_config->shared_dpll); pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk; if (intel_dsi->dual_link) @@ -1357,11 +1396,13 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, return ret; /* DSI specific sanity checks on the common code */ - WARN_ON(vdsc_cfg->vbr_enable); - WARN_ON(vdsc_cfg->simple_422); - WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width); - WARN_ON(vdsc_cfg->slice_height < 8); - WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height); + drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable); + drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422); + drm_WARN_ON(&dev_priv->drm, + vdsc_cfg->pic_width % vdsc_cfg->slice_width); + drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8); + drm_WARN_ON(&dev_priv->drm, + vdsc_cfg->pic_height % vdsc_cfg->slice_height); ret = drm_dsc_compute_rc_parameters(vdsc_cfg); if (ret) @@ -1443,7 +1484,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: *pipe = PIPE_A; @@ -1458,11 +1499,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, *pipe = PIPE_D; break; default: - DRM_ERROR("Invalid PIPE input\n"); + drm_err(&dev_priv->drm, "Invalid PIPE input\n"); goto out; } - tmp = I915_READ(PIPECONF(dsi_trans)); + tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); ret = tmp & PIPECONF_ENABLE; } out: @@ -1582,7 +1623,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) */ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); if (prepare_cnt > ICL_PREPARE_CNT_MAX) { - DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); + drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n", + prepare_cnt); prepare_cnt = ICL_PREPARE_CNT_MAX; } @@ -1590,28 +1632,33 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - ths_prepare_ns, tlpx_ns); if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); + drm_dbg_kms(&dev_priv->drm, + "clk_zero_cnt out of range (%d)\n", clk_zero_cnt); clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; } /* trail cnt in escape clocks*/ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); if (trail_cnt > ICL_TRAIL_CNT_MAX) { - DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); + drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n", + trail_cnt); trail_cnt = ICL_TRAIL_CNT_MAX; } /* tclk pre count in escape clocks */ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { - DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); + drm_dbg_kms(&dev_priv->drm, + "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; } /* tclk post count in escape clocks */ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { - DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); + drm_dbg_kms(&dev_priv->drm, + "tclk_post_cnt out of range (%d)\n", + tclk_post_cnt); tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; } @@ -1619,14 +1666,17 @@ static void icl_dphy_param_init(struct 
intel_dsi *intel_dsi) hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - ths_prepare_ns, tlpx_ns); if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); + drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n", + hs_zero_cnt); hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; } /* hs exit zero cnt in escape clocks */ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt); + drm_dbg_kms(&dev_priv->drm, + "exit_zero_cnt out of range (%d)\n", + exit_zero_cnt); exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; } @@ -1668,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector) connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; - connector->base.display_info.panel_orientation = - intel_dsi_get_panel_orientation(connector); - drm_connector_init_panel_orientation_property(&connector->base, + drm_connector_set_panel_orientation_with_quirk(&connector->base, + intel_dsi_get_panel_orientation(connector), connector->panel.fixed_mode->hdisplay, connector->panel.fixed_mode->vdisplay); } @@ -1708,6 +1757,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->pre_pll_enable = gen11_dsi_pre_pll_enable; encoder->pre_enable = gen11_dsi_pre_enable; + encoder->enable = gen11_dsi_enable; encoder->disable = gen11_dsi_disable; encoder->post_disable = gen11_dsi_post_disable; encoder->port = port; @@ -1738,7 +1788,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) mutex_unlock(&dev->mode_config.mutex); if (!fixed_mode) { - DRM_ERROR("DSI fixed mode info missing\n"); + drm_err(&dev_priv->drm, "DSI fixed mode info missing\n"); goto err; } @@ -1764,7 +1814,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - DRM_DEBUG_KMS("no device found\n"); + drm_dbg_kms(&dev_priv->drm, "no device found\n"); goto err; } diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c index 3456d33feb46..e21fb14d5e07 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.c +++ b/drivers/gpu/drm/i915/display/intel_acpi.c @@ -10,6 +10,7 @@ #include "i915_drv.h" #include "intel_acpi.h" +#include "intel_display_types.h" #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ @@ -156,3 +157,91 @@ void intel_register_dsm_handler(void) void intel_unregister_dsm_handler(void) { } + +/* + * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices + * Attached to the Display Adapter). 
+ */ +#define ACPI_DISPLAY_INDEX_SHIFT 0 +#define ACPI_DISPLAY_INDEX_MASK (0xf << 0) +#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4 +#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4) +#define ACPI_DISPLAY_TYPE_SHIFT 8 +#define ACPI_DISPLAY_TYPE_MASK (0xf << 8) +#define ACPI_DISPLAY_TYPE_OTHER (0 << 8) +#define ACPI_DISPLAY_TYPE_VGA (1 << 8) +#define ACPI_DISPLAY_TYPE_TV (2 << 8) +#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8) +#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8) +#define ACPI_VENDOR_SPECIFIC_SHIFT 12 +#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12) +#define ACPI_BIOS_CAN_DETECT (1 << 16) +#define ACPI_DEPENDS_ON_VGA (1 << 17) +#define ACPI_PIPE_ID_SHIFT 18 +#define ACPI_PIPE_ID_MASK (7 << 18) +#define ACPI_DEVICE_ID_SCHEME (1ULL << 31) + +static u32 acpi_display_type(struct intel_connector *connector) +{ + u32 display_type; + + switch (connector->base.connector_type) { + case DRM_MODE_CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_DVIA: + display_type = ACPI_DISPLAY_TYPE_VGA; + break; + case DRM_MODE_CONNECTOR_Composite: + case DRM_MODE_CONNECTOR_SVIDEO: + case DRM_MODE_CONNECTOR_Component: + case DRM_MODE_CONNECTOR_9PinDIN: + case DRM_MODE_CONNECTOR_TV: + display_type = ACPI_DISPLAY_TYPE_TV; + break; + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_HDMIB: + display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_DSI: + display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL; + break; + case DRM_MODE_CONNECTOR_Unknown: + case DRM_MODE_CONNECTOR_VIRTUAL: + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + default: + MISSING_CASE(connector->base.connector_type); + display_type = ACPI_DISPLAY_TYPE_OTHER; + break; + } + + return display_type; +} + +void intel_acpi_device_id_update(struct drm_i915_private *dev_priv) +{ + struct drm_device *drm_dev = &dev_priv->drm; + struct intel_connector *connector; + struct drm_connector_list_iter conn_iter; + u8 display_index[16] = {}; + + /* Populate the ACPI IDs for all connectors for a given drm_device */ + drm_connector_list_iter_begin(drm_dev, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + u32 device_id, type; + + device_id = acpi_display_type(connector); + + /* Use display type specific display index. 
*/ + type = (device_id & ACPI_DISPLAY_TYPE_MASK) + >> ACPI_DISPLAY_TYPE_SHIFT; + device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT; + + connector->acpi_device_id = device_id; + } + drm_connector_list_iter_end(&conn_iter); +} diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h index 1c576b3fb712..e8b068661d22 100644 --- a/drivers/gpu/drm/i915/display/intel_acpi.h +++ b/drivers/gpu/drm/i915/display/intel_acpi.h @@ -6,12 +6,17 @@ #ifndef __INTEL_ACPI_H__ #define __INTEL_ACPI_H__ +struct drm_i915_private; + #ifdef CONFIG_ACPI void intel_register_dsm_handler(void); void intel_unregister_dsm_handler(void); +void intel_acpi_device_id_update(struct drm_i915_private *i915); #else static inline void intel_register_dsm_handler(void) { return; } static inline void intel_unregister_dsm_handler(void) { return; } +static inline +void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; } #endif /* CONFIG_ACPI */ #endif /* __INTEL_ACPI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index c362eecdd414..d043057d2fa0 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -35,7 +35,9 @@ #include <drm/drm_plane_helper.h> #include "intel_atomic.h" +#include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -64,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, else if (property == dev_priv->broadcast_rgb_property) *val = intel_conn_state->broadcast_rgb; else { - DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n", - property->base.id, property->name); + drm_dbg_atomic(&dev_priv->drm, + "Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } @@ -101,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, return 0; } - DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n", - property->base.id, property->name); + drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } @@ -178,6 +181,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector) /** * intel_connector_needs_modeset - check if connector needs a modeset + * @state: the atomic state corresponding to this modeset + * @connector: the connector */ bool intel_connector_needs_modeset(struct intel_atomic_state *state, @@ -314,7 +319,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta } } - if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx)) + if (drm_WARN(&dev_priv->drm, *scaler_id < 0, + "Cannot find scaler for %s:%d\n", name, idx)) return; /* set scaler mode */ @@ -357,8 +363,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta mode = SKL_PS_SCALER_MODE_DYN; } - DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", - intel_crtc->pipe, *scaler_id, name, idx); + drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", + intel_crtc->pipe, *scaler_id, name, idx); scaler_state->scalers[*scaler_id].mode = mode; } @@ -409,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, /* fail if required scalers > available scalers */ if (num_scalers_need > intel_crtc->num_scalers){ - DRM_DEBUG_KMS("Too many scaling requests %d > %d\n", - num_scalers_need, 
intel_crtc->num_scalers); + drm_dbg_kms(&dev_priv->drm, + "Too many scaling requests %d > %d\n", + num_scalers_need, intel_crtc->num_scalers); return -EINVAL; } @@ -455,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, plane = drm_plane_from_index(&dev_priv->drm, i); state = drm_atomic_get_plane_state(drm_state, plane); if (IS_ERR(state)) { - DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n", - plane->base.id); + drm_dbg_kms(&dev_priv->drm, + "Failed to add [PLANE:%d] to drm_state\n", + plane->base.id); return PTR_ERR(state); } } @@ -465,7 +473,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, idx = plane->base.id; /* plane on different crtc cannot be a scaler user of this crtc */ - if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) + if (drm_WARN_ON(&dev_priv->drm, + intel_plane->pipe != intel_crtc->pipe)) continue; plane_state = intel_atomic_get_new_plane_state(intel_state, @@ -494,18 +503,28 @@ intel_atomic_state_alloc(struct drm_device *dev) return &state->base; } +void intel_atomic_state_free(struct drm_atomic_state *_state) +{ + struct intel_atomic_state *state = to_intel_atomic_state(_state); + + drm_atomic_state_default_release(&state->base); + kfree(state->global_objs); + + i915_sw_fence_fini(&state->commit_ready); + + kfree(state); +} + void intel_atomic_state_clear(struct drm_atomic_state *s) { struct intel_atomic_state *state = to_intel_atomic_state(s); + drm_atomic_state_default_clear(&state->base); + intel_atomic_clear_global_state(state); + state->dpll_set = state->modeset = false; state->global_state_changed = false; state->active_pipes = 0; - memset(&state->min_cdclk, 0, sizeof(state->min_cdclk)); - memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level)); - memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical)); - memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual)); - state->cdclk.pipe = INVALID_PIPE; } struct intel_crtc_state * @@ -520,7 +539,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } -int intel_atomic_lock_global_state(struct intel_atomic_state *state) +int _intel_atomic_lock_global_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; @@ -539,7 +558,7 @@ int intel_atomic_lock_global_state(struct intel_atomic_state *state) return 0; } -int intel_atomic_serialize_global_state(struct intel_atomic_state *state) +int _intel_atomic_serialize_global_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 74c749dbfb4f..11146292b06f 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -45,6 +45,7 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc, void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state); void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state); struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev); +void intel_atomic_state_free(struct drm_atomic_state *state); void intel_atomic_state_clear(struct drm_atomic_state *state); struct intel_crtc_state * @@ -55,8 +56,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); -int intel_atomic_lock_global_state(struct intel_atomic_state 
*state); +int _intel_atomic_lock_global_state(struct intel_atomic_state *state); -int intel_atomic_serialize_global_state(struct intel_atomic_state *state); +int _intel_atomic_serialize_global_state(struct intel_atomic_state *state); #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 3e97af682b1b..457b258683d3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -37,6 +37,7 @@ #include "i915_trace.h" #include "intel_atomic_plane.h" +#include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_pm.h" #include "intel_sprite.h" @@ -132,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane, kfree(plane_state); } +unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int src_w, src_h, dst_w, dst_h; + unsigned int pixel_rate = crtc_state->pixel_rate; + + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + dst_w = drm_rect_width(&plane_state->uapi.dst); + dst_h = drm_rect_height(&plane_state->uapi.dst); + + /* Downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + dst_h = min(src_h, dst_h); + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h), + dst_w * dst_h); +} + unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int cpp; + unsigned int pixel_rate; if (!plane_state->uapi.visible) return 0; + pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); + cpp = fb->format->cpp[0]; /* @@ -152,45 +175,67 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, if (fb->format->is_yuv && fb->format->num_planes > 1) cpp *= 4; - return cpp * crtc_state->pixel_rate; + return pixel_rate * cpp; } -bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state, - struct intel_plane *plane) +int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane, + bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - struct intel_crtc_state *crtc_state; + const struct intel_cdclk_state *cdclk_state; + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state *new_crtc_state; if (!plane_state->uapi.visible || !plane->min_cdclk) - return false; + return 0; + + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + new_crtc_state->min_cdclk[plane->id] = + plane->min_cdclk(new_crtc_state, plane_state); - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + /* + * No need to check against the cdclk state if + * the min cdclk for the plane doesn't increase. + * + * Ie. we only ever increase the cdclk due to plane + * requirements. This can reduce back and forth + * display blinking due to constant cdclk changes. 
+ */ + if (new_crtc_state->min_cdclk[plane->id] <= + old_crtc_state->min_cdclk[plane->id]) + return 0; - crtc_state->min_cdclk[plane->id] = - plane->min_cdclk(crtc_state, plane_state); + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); /* - * Does the cdclk need to be bumbed up? + * No need to recalculate the cdclk state if + * the min cdclk for the pipe doesn't increase. * - * Note: we obviously need to be called before the new - * cdclk frequency is calculated so state->cdclk.logical - * hasn't been populated yet. Hence we look at the old - * cdclk state under dev_priv->cdclk.logical. This is - * safe as long we hold at least one crtc mutex (which - * must be true since we have crtc_state). + * Ie. we only ever increase the cdclk due to plane + * requirements. This can reduce back and forth + * display blinking due to constant cdclk changes. */ - if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) { - DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n", - plane->base.base.id, plane->base.name, - crtc_state->min_cdclk[plane->id], - dev_priv->cdclk.logical.cdclk); - return true; - } + if (new_crtc_state->min_cdclk[plane->id] <= + cdclk_state->min_cdclk[crtc->pipe]) + return 0; + + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n", + plane->base.base.id, plane->base.name, + new_crtc_state->min_cdclk[plane->id], + crtc->base.base.id, crtc->base.name, + cdclk_state->min_cdclk[crtc->pipe]); + *need_cdclk_calc = true; - return false; + return 0; } static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state) @@ -225,12 +270,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_plane_state *new_plane_state) { struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); - const struct drm_framebuffer *fb; + const struct drm_framebuffer *fb = new_plane_state->hw.fb; int ret; - intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); - fb = new_plane_state->hw.fb; - new_crtc_state->active_planes &= ~BIT(plane->id); new_crtc_state->nv12_planes &= ~BIT(plane->id); new_crtc_state->c8_planes &= ~BIT(plane->id); @@ -292,6 +334,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); new_plane_state->uapi.visible = false; if (!crtc) return 0; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 5cedafdddb55..a6bbf42bae1f 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -18,6 +18,9 @@ struct intel_plane_state; extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; +unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, @@ -46,7 +49,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *plane_state); -bool intel_plane_calc_min_cdclk(struct 
intel_atomic_state *state, - struct intel_plane *plane); +int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane, + bool *need_cdclk_calc); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index b18040793d9e..62f234f641de 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -30,6 +30,7 @@ #include "i915_drv.h" #include "intel_atomic.h" #include "intel_audio.h" +#include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ -148,6 +149,10 @@ static const struct { { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, + { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 }, + { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 }, + { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 }, + { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 }, }; /* HDMI N/CTS table */ @@ -233,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int i; @@ -242,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta break; } + if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500) + i = ARRAY_SIZE(hdmi_audio_clock); + if (i == ARRAY_SIZE(hdmi_audio_clock)) { DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", adjusted_mode->crtc_clock); @@ -291,18 +300,18 @@ static bool intel_eld_uptodate(struct drm_connector *connector, u32 tmp; int i; - tmp = I915_READ(reg_eldv); + tmp = intel_de_read(dev_priv, reg_eldv); tmp &= bits_eldv; if (!tmp) return false; - tmp = I915_READ(reg_elda); + tmp = intel_de_read(dev_priv, reg_elda); tmp &= ~bits_elda; - I915_WRITE(reg_elda, tmp); + intel_de_write(dev_priv, reg_elda, tmp); for (i = 0; i < drm_eld_size(eld) / 4; i++) - if (I915_READ(reg_edid) != *((const u32 *)eld + i)) + if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i)) return false; return true; @@ -315,18 +324,18 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 eldv, tmp; - DRM_DEBUG_KMS("Disable audio codec\n"); + drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n"); - tmp = I915_READ(G4X_AUD_VID_DID); + tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID); if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL) eldv = G4X_ELDV_DEVCL_DEVBLC; else eldv = G4X_ELDV_DEVCTG; /* Invalidate ELD */ - tmp = I915_READ(G4X_AUD_CNTL_ST); + tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST); tmp &= ~eldv; - I915_WRITE(G4X_AUD_CNTL_ST, tmp); + intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp); } static void g4x_audio_codec_enable(struct intel_encoder *encoder, @@ -340,9 +349,10 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder, u32 tmp; int len, i; - DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld)); + drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n", + drm_eld_size(eld)); - tmp = I915_READ(G4X_AUD_VID_DID); + tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID); if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL) eldv = 
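/*
 * Editor's sketch, not part of the patch: models the lookup in
 * audio_config_hdmi_pixel_clock() above. The patch adds the
 * 296703/297000/593407/594000 kHz entries, then forces the "not found"
 * fallback on pre-gen12 hardware for any clock above 148500 kHz, since
 * only gen12+ can use the new entries. The AUD_CONFIG encodings and the
 * fallback entry here are hypothetical placeholders.
 */
#include <stdio.h>

struct clk_map { int clock; unsigned int cfg; };

static const struct clk_map hdmi_audio_clock[] = {
	{ 148352, 0x8 }, { 148500, 0x9 },	/* pre-existing entries */
	{ 296703, 0xa }, { 297000, 0xb },	/* added by this patch */
	{ 593407, 0xc }, { 594000, 0xd },
};

#define N_CLOCKS (sizeof(hdmi_audio_clock) / sizeof(hdmi_audio_clock[0]))

static unsigned int pick_aud_config(int gen, int crtc_clock)
{
	unsigned int i;

	for (i = 0; i < N_CLOCKS; i++)
		if (hdmi_audio_clock[i].clock == crtc_clock)
			break;

	/* pre-gen12 cannot drive the >148.5 MHz entries: force the default */
	if (gen < 12 && crtc_clock > 148500)
		i = N_CLOCKS;

	if (i == N_CLOCKS) {
		printf("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
		       crtc_clock);
		i = 0;	/* hypothetical default entry */
	}

	return hdmi_audio_clock[i].cfg;
}

int main(void)
{
	pick_aud_config(11, 594000);	/* gen11: falls back */
	pick_aud_config(12, 594000);	/* gen12: uses the new entry */
	return 0;
}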
G4X_ELDV_DEVCL_DEVBLC; else @@ -354,19 +364,20 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder, G4X_HDMIW_HDMIEDID)) return; - tmp = I915_READ(G4X_AUD_CNTL_ST); + tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST); tmp &= ~(eldv | G4X_ELD_ADDR_MASK); len = (tmp >> 9) & 0x1f; /* ELD buffer size */ - I915_WRITE(G4X_AUD_CNTL_ST, tmp); + intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp); len = min(drm_eld_size(eld) / 4, len); - DRM_DEBUG_DRIVER("ELD size %d\n", len); + drm_dbg(&dev_priv->drm, "ELD size %d\n", len); for (i = 0; i < len; i++) - I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i)); + intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID, + *((const u32 *)eld + i)); - tmp = I915_READ(G4X_AUD_CNTL_ST); + tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST); tmp |= eldv; - I915_WRITE(G4X_AUD_CNTL_ST, tmp); + intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp); } static void @@ -384,11 +395,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, rate = acomp ? acomp->aud_sample_rate[port] : 0; nm = audio_config_dp_get_n_m(crtc_state, rate); if (nm) - DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n); + drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m, + nm->n); else - DRM_DEBUG_KMS("using automatic Maud, Naud\n"); + drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n"); - tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; @@ -400,9 +412,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, tmp |= AUD_CONFIG_N_PROG_ENABLE; } - I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp); + intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp); - tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); tmp &= ~AUD_CONFIG_M_MASK; tmp &= ~AUD_M_CTS_M_VALUE_INDEX; tmp &= ~AUD_M_CTS_M_PROG_ENABLE; @@ -413,7 +425,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, tmp |= AUD_M_CTS_M_PROG_ENABLE; } - I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); + intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); } static void @@ -429,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, rate = acomp ? 
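/*
 * Editor's sketch, not part of the patch: the ELD upload pattern used by
 * g4x_audio_codec_enable() above (and by the hsw path further down, which
 * caps the hardware buffer at 84 bytes). The ELD is streamed one dword at
 * a time after the write address is reset; memcpy() stands in for the
 * kernel's direct (const u32 *) cast, and write_dword() models
 * intel_de_write().
 */
#include <stdint.h>
#include <string.h>

#define HW_ELD_BUF_BYTES 84

static void write_dword(uint32_t val)
{
	(void)val;	/* would be intel_de_write(dev_priv, reg, val) */
}

static void push_eld(const uint8_t *eld, unsigned int eld_size)
{
	unsigned int len, i;

	/* Up to 84 bytes of hw ELD buffer */
	len = eld_size < HW_ELD_BUF_BYTES ? eld_size : HW_ELD_BUF_BYTES;

	for (i = 0; i < len / 4; i++) {
		uint32_t dword;

		memcpy(&dword, eld + i * 4, sizeof(dword));
		write_dword(dword);
	}
}

int main(void)
{
	uint8_t eld[128] = { 0 };

	push_eld(eld, sizeof(eld));	/* only the first 84 bytes go out */
	return 0;
}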
acomp->aud_sample_rate[port] : 0; - tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; @@ -437,25 +449,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, n = audio_config_hdmi_get_n(crtc_state, rate); if (n != 0) { - DRM_DEBUG_KMS("using N %d\n", n); + drm_dbg_kms(&dev_priv->drm, "using N %d\n", n); tmp &= ~AUD_CONFIG_N_MASK; tmp |= AUD_CONFIG_N(n); tmp |= AUD_CONFIG_N_PROG_ENABLE; } else { - DRM_DEBUG_KMS("using automatic N\n"); + drm_dbg_kms(&dev_priv->drm, "using automatic N\n"); } - I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp); + intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp); /* * Let's disable "Enable CTS or M Prog bit" * and let HW calculate the value */ - tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); tmp &= ~AUD_M_CTS_M_PROG_ENABLE; tmp &= ~AUD_M_CTS_M_VALUE_INDEX; - I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); + intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); } static void @@ -476,26 +488,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; u32 tmp; - DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n", - transcoder_name(cpu_transcoder)); + drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n", + transcoder_name(cpu_transcoder)); mutex_lock(&dev_priv->av_mutex); /* Disable timestamps */ - tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; if (intel_crtc_has_dp_encoder(old_crtc_state)) tmp |= AUD_CONFIG_N_VALUE_INDEX; - I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp); + intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp); /* Invalidate ELD */ - tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD); tmp &= ~AUDIO_ELD_VALID(cpu_transcoder); tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); - I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); mutex_unlock(&dev_priv->av_mutex); } @@ -511,16 +523,17 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, u32 tmp; int len, i; - DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n", - transcoder_name(cpu_transcoder), drm_eld_size(eld)); + drm_dbg_kms(&dev_priv->drm, + "Enable audio codec on transcoder %s, %u bytes ELD\n", + transcoder_name(cpu_transcoder), drm_eld_size(eld)); mutex_lock(&dev_priv->av_mutex); /* Enable audio presence detect, invalidate ELD */ - tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD); tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder); tmp &= ~AUDIO_ELD_VALID(cpu_transcoder); - I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); /* * FIXME: We're supposed to wait for vblank here, but we have vblanks @@ -530,19 +543,20 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, */ /* Reset ELD write address */ - tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)); tmp &= ~IBX_ELD_ADDRESS_MASK; - I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp); 
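/*
 * Editor's sketch, not part of the patch: the read-modify-write idiom in
 * hsw_hdmi_audio_config_update() above. All the converted call sites share
 * this shape: read, clear every field you own, conditionally set the
 * "program enable" override, write back. Register storage and field layout
 * here are hypothetical.
 */
#include <stdint.h>

static uint32_t fake_reg;	/* stands in for HSW_AUD_CFG(transcoder) */

static uint32_t de_read(void) { return fake_reg; }	/* intel_de_read() */
static void de_write(uint32_t v) { fake_reg = v; }	/* intel_de_write() */

#define AUD_N_MASK		0x00ffffffu
#define AUD_N_PROG_ENABLE	(1u << 28)

static void program_n(unsigned int n)
{
	uint32_t tmp = de_read();

	tmp &= ~(AUD_N_MASK | AUD_N_PROG_ENABLE);
	if (n != 0) {
		/* explicit N: program the value and enable the override */
		tmp |= (n & AUD_N_MASK) | AUD_N_PROG_ENABLE;
	}
	/* n == 0: override stays off and the HW derives N automatically */
	de_write(tmp);
}

int main(void)
{
	program_n(6144);	/* the standard HDMI N value for 48 kHz audio */
	program_n(0);		/* back to automatic N */
	return 0;
}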
+ intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp); /* Up to 84 bytes of hw ELD buffer */ len = min(drm_eld_size(eld), 84); for (i = 0; i < len / 4; i++) - I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i)); + intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder), + *((const u32 *)eld + i)); /* ELD valid */ - tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD); tmp |= AUDIO_ELD_VALID(cpu_transcoder); - I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); + intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); @@ -561,11 +575,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, u32 tmp, eldv; i915_reg_t aud_config, aud_cntrl_st2; - DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n", - encoder->base.base.id, encoder->base.name, - pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, + "Disable audio codec on [ENCODER:%d:%s], pipe %c\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe)); - if (WARN_ON(port == PORT_A)) + if (drm_WARN_ON(&dev_priv->drm, port == PORT_A)) return; if (HAS_PCH_IBX(dev_priv)) { @@ -580,21 +595,21 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, } /* Disable timestamps */ - tmp = I915_READ(aud_config); + tmp = intel_de_read(dev_priv, aud_config); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp |= AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_UPPER_N_MASK; tmp &= ~AUD_CONFIG_LOWER_N_MASK; if (intel_crtc_has_dp_encoder(old_crtc_state)) tmp |= AUD_CONFIG_N_VALUE_INDEX; - I915_WRITE(aud_config, tmp); + intel_de_write(dev_priv, aud_config, tmp); eldv = IBX_ELD_VALID(port); /* Invalidate ELD */ - tmp = I915_READ(aud_cntrl_st2); + tmp = intel_de_read(dev_priv, aud_cntrl_st2); tmp &= ~eldv; - I915_WRITE(aud_cntrl_st2, tmp); + intel_de_write(dev_priv, aud_cntrl_st2, tmp); } static void ilk_audio_codec_enable(struct intel_encoder *encoder, @@ -611,11 +626,12 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, int len, i; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; - DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n", - encoder->base.base.id, encoder->base.name, - pipe_name(pipe), drm_eld_size(eld)); + drm_dbg_kms(&dev_priv->drm, + "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe), drm_eld_size(eld)); - if (WARN_ON(port == PORT_A)) + if (drm_WARN_ON(&dev_priv->drm, port == PORT_A)) return; /* @@ -646,27 +662,28 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, eldv = IBX_ELD_VALID(port); /* Invalidate ELD */ - tmp = I915_READ(aud_cntrl_st2); + tmp = intel_de_read(dev_priv, aud_cntrl_st2); tmp &= ~eldv; - I915_WRITE(aud_cntrl_st2, tmp); + intel_de_write(dev_priv, aud_cntrl_st2, tmp); /* Reset ELD write address */ - tmp = I915_READ(aud_cntl_st); + tmp = intel_de_read(dev_priv, aud_cntl_st); tmp &= ~IBX_ELD_ADDRESS_MASK; - I915_WRITE(aud_cntl_st, tmp); + intel_de_write(dev_priv, aud_cntl_st, tmp); /* Up to 84 bytes of hw ELD buffer */ len = min(drm_eld_size(eld), 84); for (i = 0; i < len / 4; i++) - I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i)); + intel_de_write(dev_priv, hdmiw_hdmiedid, + *((const u32 *)eld + i)); /* ELD valid */ - tmp = I915_READ(aud_cntrl_st2); + tmp = intel_de_read(dev_priv, aud_cntrl_st2); tmp |= eldv; - I915_WRITE(aud_cntrl_st2, tmp); + intel_de_write(dev_priv, aud_cntrl_st2, tmp); /* Enable 
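/*
 * Editor's sketch, not part of the patch: the struct-device-aware logging
 * conversion applied throughout this series. DRM_DEBUG_KMS()/DRM_ERROR()
 * print with no indication of which GPU spoke; drm_dbg_kms()/drm_err()
 * take a struct drm_device so multi-GPU logs are attributable. Modeled
 * here with a plain tag instead of the kernel's drm_device.
 */
#include <stdio.h>

struct drm_device_model { const char *unique; };

/* old style: no device context */
#define DRM_DEBUG_KMS_OLD(fmt, ...) \
	printf("[drm:kms] " fmt, ##__VA_ARGS__)

/* new style: every message names its device */
#define drm_dbg_kms_model(drm, fmt, ...) \
	printf("[drm:%s:kms] " fmt, (drm)->unique, ##__VA_ARGS__)

int main(void)
{
	struct drm_device_model card0 = { .unique = "0000:00:02.0" };

	DRM_DEBUG_KMS_OLD("Disable audio codec\n");
	drm_dbg_kms_model(&card0, "Disable audio codec\n");
	return 0;
}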
timestamps */ - tmp = I915_READ(aud_config); + tmp = intel_de_read(dev_priv, aud_config); tmp &= ~AUD_CONFIG_N_VALUE_INDEX; tmp &= ~AUD_CONFIG_N_PROG_ENABLE; tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; @@ -674,7 +691,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, tmp |= AUD_CONFIG_N_VALUE_INDEX; else tmp |= audio_config_hdmi_pixel_clock(crtc_state); - I915_WRITE(aud_config, tmp); + intel_de_write(dev_priv, aud_config, tmp); } /** @@ -701,14 +718,15 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, /* FIXME precompute the ELD in .compute_config() */ if (!connector->eld[0]) - DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&dev_priv->drm, + "Bogus ELD on [CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); - DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", - connector->base.id, - connector->name, - encoder->base.base.id, - encoder->base.name); + drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, + connector->name, + encoder->base.base.id, + encoder->base.name); connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; @@ -800,37 +818,61 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv) } } +static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state, + struct intel_crtc *crtc, + bool enable) +{ + struct intel_cdclk_state *cdclk_state; + int ret; + + /* need to hold at least one crtc lock for the global state */ + ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx); + if (ret) + return ret; + + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); + + cdclk_state->force_min_cdclk_changed = true; + cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0; + + ret = intel_atomic_lock_global_state(&cdclk_state->base); + if (ret) + return ret; + + return drm_atomic_commit(&state->base); +} + static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv, bool enable) { struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; + struct intel_crtc *crtc; int ret; + crtc = intel_get_first_crtc(dev_priv); + if (!crtc) + return; + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(&dev_priv->drm); - if (WARN_ON(!state)) + if (drm_WARN_ON(&dev_priv->drm, !state)) return; state->acquire_ctx = &ctx; retry: - to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true; - to_intel_atomic_state(state)->cdclk.force_min_cdclk = - enable ? 
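/*
 * Editor's sketch, not part of the patch: the shape of the new
 * glk_force_audio_cdclk_commit() above -- fetch the global cdclk object
 * through the atomic state (which can fail with an ERR_PTR), mutate it,
 * then lock the global state and commit. The ERR_PTR/IS_ERR helpers are
 * re-modeled here for a standalone build; struct layout is hypothetical.
 */
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct cdclk_state {
	int force_min_cdclk;
	int force_min_cdclk_changed;
};

/* models intel_atomic_get_cdclk_state(); 'fail' fakes an allocation error */
static struct cdclk_state *get_cdclk_state(int fail)
{
	static struct cdclk_state state;

	return fail ? ERR_PTR(-ENOMEM) : &state;
}

static int force_audio_cdclk(int enable)
{
	struct cdclk_state *cdclk_state = get_cdclk_state(0);

	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	cdclk_state->force_min_cdclk_changed = 1;
	/* 2 * BCLK (96 MHz) while audio needs the clock, as in the patch */
	cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;

	/* here the real code locks the global state and commits */
	return 0;
}

int main(void)
{
	return force_audio_cdclk(1);
}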
2 * 96000 : 0; - - /* Protects dev_priv->cdclk.force_min_cdclk */ - ret = intel_atomic_lock_global_state(to_intel_atomic_state(state)); - if (!ret) - ret = drm_atomic_commit(state); - + ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc, + enable); if (ret == -EDEADLK) { drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } - WARN_ON(ret); + drm_WARN_ON(&dev_priv->drm, ret); drm_atomic_state_put(state); @@ -850,9 +892,11 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) if (dev_priv->audio_power_refcount++ == 0) { if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { - I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl); - DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n", - dev_priv->audio_freq_cntrl); + intel_de_write(dev_priv, AUD_FREQ_CNTRL, + dev_priv->audio_freq_cntrl); + drm_dbg_kms(&dev_priv->drm, + "restored AUD_FREQ_CNTRL to 0x%x\n", + dev_priv->audio_freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -860,9 +904,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) glk_force_audio_cdclk(dev_priv, true); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - I915_WRITE(AUD_PIN_BUF_CTL, - (I915_READ(AUD_PIN_BUF_CTL) | - AUD_PIN_BUF_ENABLE)); + intel_de_write(dev_priv, AUD_PIN_BUF_CTL, + (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE)); } return ret; @@ -897,15 +940,15 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, * Enable/disable generating the codec wake signal, overriding the * internal logic to generate the codec wake to controller. */ - tmp = I915_READ(HSW_AUD_CHICKENBIT); + tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT); tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL; - I915_WRITE(HSW_AUD_CHICKENBIT, tmp); + intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp); usleep_range(1000, 1500); if (enable) { - tmp = I915_READ(HSW_AUD_CHICKENBIT); + tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT); tmp |= SKL_AUD_CODEC_WAKE_SIGNAL; - I915_WRITE(HSW_AUD_CHICKENBIT, tmp); + intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp); usleep_range(1000, 1500); } @@ -917,7 +960,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv))) return -ENODEV; return dev_priv->cdclk.hw.cdclk; @@ -940,7 +983,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { - if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) + if (drm_WARN_ON(&dev_priv->drm, + pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) return NULL; encoder = dev_priv->av_enc_map[pipe]; @@ -992,7 +1036,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, /* 1. 
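/*
 * Editor's sketch, not part of the patch: the -EDEADLK backoff loop in
 * glk_force_audio_cdclk() above. When an atomic commit loses a modeset
 * lock race it returns -EDEADLK; the caller must clear its half-built
 * state, block until the contended lock is free, and retry from scratch.
 * try_commit()/clear_state()/backoff() are hypothetical stand-ins for
 * drm_atomic_commit()/drm_atomic_state_clear()/drm_modeset_backoff().
 */
#include <errno.h>

static int attempts;

static int try_commit(void)
{
	/* fail the first attempt to exercise the retry path */
	return attempts++ == 0 ? -EDEADLK : 0;
}

static void clear_state(void) { /* drop everything built so far */ }
static void backoff(void) { /* wait for the contended lock */ }

static int commit_with_backoff(void)
{
	int ret;

retry:
	ret = try_commit();
	if (ret == -EDEADLK) {
		clear_state();
		backoff();
		goto retry;
	}
	return ret;
}

int main(void)
{
	return commit_with_backoff();
}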
get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); if (!encoder || !encoder->base.crtc) { - DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port)); + drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", + port_name(port)); err = -ENODEV; goto unlock; } @@ -1023,7 +1068,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { - DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port)); + drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", + port_name(port)); mutex_unlock(&dev_priv->av_mutex); return ret; } @@ -1057,10 +1103,12 @@ static int i915_audio_component_bind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); int i; - if (WARN_ON(acomp->base.ops || acomp->base.dev)) + if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev)) return -EEXIST; - if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS))) + if (drm_WARN_ON(&dev_priv->drm, + !device_link_add(hda_kdev, i915_kdev, + DL_FLAG_STATELESS))) return -ENOMEM; drm_modeset_lock_all(&dev_priv->drm); @@ -1119,15 +1167,18 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) &i915_audio_component_bind_ops, I915_COMPONENT_AUDIO); if (ret < 0) { - DRM_ERROR("failed to add audio component (%d)\n", ret); + drm_err(&dev_priv->drm, + "failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ return; } if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { - dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL); - DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n", - dev_priv->audio_freq_cntrl); + dev_priv->audio_freq_cntrl = intel_de_read(dev_priv, + AUD_FREQ_CNTRL); + drm_dbg_kms(&dev_priv->drm, + "init value of AUD_FREQ_CNTRL of 0x%x\n", + dev_priv->audio_freq_cntrl); } dev_priv->audio_component_registered = true; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index ef4017a1baba..839124647202 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -26,7 +26,6 @@ */ #include <drm/drm_dp_helper.h> -#include <drm/i915_drm.h> #include "display/intel_display.h" #include "display/intel_display_types.h" @@ -228,17 +227,20 @@ parse_panel_options(struct drm_i915_private *dev_priv, ret = intel_opregion_get_panel_type(dev_priv); if (ret >= 0) { - WARN_ON(ret > 0xf); + drm_WARN_ON(&dev_priv->drm, ret > 0xf); panel_type = ret; - DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type); + drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n", + panel_type); } else { if (lvds_options->panel_type > 0xf) { - DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n", - lvds_options->panel_type); + drm_dbg_kms(&dev_priv->drm, + "Invalid VBT panel type 0x%x\n", + lvds_options->panel_type); return; } panel_type = lvds_options->panel_type; - DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type); + drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n", + panel_type); } dev_priv->vbt.panel_type = panel_type; @@ -253,15 +255,17 @@ parse_panel_options(struct drm_i915_private *dev_priv, switch (drrs_mode) { case 0: dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT; - DRM_DEBUG_KMS("DRRS supported mode is static\n"); + drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n"); break; case 2: dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT; - DRM_DEBUG_KMS("DRRS supported mode is seamless\n"); + drm_dbg_kms(&dev_priv->drm, + "DRRS supported mode is 
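/*
 * Editor's sketch, not part of the patch: the panel-type precedence in
 * parse_panel_options() above. An OpRegion-provided panel type wins over
 * the VBT's lvds_options->panel_type; both are sanity-checked against the
 * 16-entry (0..0xf) panel tables. get_opregion_panel_type() is a
 * hypothetical stand-in that returns a negative errno when absent.
 */
#include <stdio.h>

static int get_opregion_panel_type(void)
{
	return -1;	/* pretend the OpRegion doesn't provide one */
}

static int resolve_panel_type(int vbt_panel_type)
{
	int ret = get_opregion_panel_type();

	if (ret >= 0) {
		if (ret > 0xf)	/* the kernel WARNs here but keeps going */
			fprintf(stderr, "suspect OpRegion panel type %d\n", ret);
		printf("Panel type: %d (OpRegion)\n", ret);
		return ret;
	}

	if (vbt_panel_type > 0xf) {
		printf("Invalid VBT panel type 0x%x\n", vbt_panel_type);
		return -1;
	}

	printf("Panel type: %d (VBT)\n", vbt_panel_type);
	return vbt_panel_type;
}

int main(void)
{
	return resolve_panel_type(2) < 0;
}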
seamless\n"); break; default: dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED; - DRM_DEBUG_KMS("DRRS not supported (VBT input)\n"); + drm_dbg_kms(&dev_priv->drm, + "DRRS not supported (VBT input)\n"); break; } } @@ -298,7 +302,8 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv, dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; - DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n"); + drm_dbg_kms(&dev_priv->drm, + "Found panel mode in BIOS VBT legacy lfp table:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, @@ -309,8 +314,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv, if (fp_timing->x_res == panel_fixed_mode->hdisplay && fp_timing->y_res == panel_fixed_mode->vdisplay) { dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val; - DRM_DEBUG_KMS("VBT initial LVDS value %x\n", - dev_priv->vbt.bios_lvds_val); + drm_dbg_kms(&dev_priv->drm, + "VBT initial LVDS value %x\n", + dev_priv->vbt.bios_lvds_val); } } } @@ -329,20 +335,22 @@ parse_generic_dtd(struct drm_i915_private *dev_priv, return; if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) { - DRM_ERROR("GDTD size %u is too small.\n", - generic_dtd->gdtd_size); + drm_err(&dev_priv->drm, "GDTD size %u is too small.\n", + generic_dtd->gdtd_size); return; } else if (generic_dtd->gdtd_size != sizeof(struct generic_dtd_entry)) { - DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size); + drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n", + generic_dtd->gdtd_size); /* DTD has unknown fields, but keep going */ } num_dtd = (get_blocksize(generic_dtd) - sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size; if (dev_priv->vbt.panel_type >= num_dtd) { - DRM_ERROR("Panel type %d not found in table of %d DTD's\n", - dev_priv->vbt.panel_type, num_dtd); + drm_err(&dev_priv->drm, + "Panel type %d not found in table of %d DTD's\n", + dev_priv->vbt.panel_type, num_dtd); return; } @@ -385,7 +393,8 @@ parse_generic_dtd(struct drm_i915_private *dev_priv, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; - DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n"); + drm_dbg_kms(&dev_priv->drm, + "Found panel mode in BIOS VBT generic dtd table:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode; @@ -422,8 +431,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, return; if (backlight_data->entry_size != sizeof(backlight_data->data[0])) { - DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n", - backlight_data->entry_size); + drm_dbg_kms(&dev_priv->drm, + "Unsupported backlight data entry size %u\n", + backlight_data->entry_size); return; } @@ -431,8 +441,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; if (!dev_priv->vbt.backlight.present) { - DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n", - entry->type); + drm_dbg_kms(&dev_priv->drm, + "PWM backlight not present in VBT (type %u)\n", + entry->type); return; } @@ -449,13 +460,14 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; - DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " - "active %s, min brightness %u, level %u, controller %u\n", - dev_priv->vbt.backlight.pwm_freq_hz, - dev_priv->vbt.backlight.active_low_pwm ? 
"low" : "high", - dev_priv->vbt.backlight.min_brightness, - backlight_data->level[panel_type], - dev_priv->vbt.backlight.controller); + drm_dbg_kms(&dev_priv->drm, + "VBT backlight PWM modulation frequency %u Hz, " + "active %s, min brightness %u, level %u, controller %u\n", + dev_priv->vbt.backlight.pwm_freq_hz, + dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", + dev_priv->vbt.backlight.min_brightness, + backlight_data->level[panel_type], + dev_priv->vbt.backlight.controller); } /* Try to find sdvo panel data */ @@ -469,7 +481,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, index = i915_modparams.vbt_sdvo_panel_type; if (index == -2) { - DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); + drm_dbg_kms(&dev_priv->drm, + "Ignore SDVO panel mode from BIOS VBT tables.\n"); return; } @@ -495,7 +508,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode; - DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n"); + drm_dbg_kms(&dev_priv->drm, + "Found SDVO panel mode in BIOS VBT tables:\n"); drm_mode_debug_printmodeline(panel_fixed_mode); } @@ -540,13 +554,14 @@ parse_general_features(struct drm_i915_private *dev_priv, } else { dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; } - DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", - dev_priv->vbt.int_tv_support, - dev_priv->vbt.int_crt_support, - dev_priv->vbt.lvds_use_ssc, - dev_priv->vbt.lvds_ssc_freq, - dev_priv->vbt.display_clock_mode, - dev_priv->vbt.fdi_rx_polarity_inverted); + drm_dbg_kms(&dev_priv->drm, + "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", + dev_priv->vbt.int_tv_support, + dev_priv->vbt.int_crt_support, + dev_priv->vbt.lvds_use_ssc, + dev_priv->vbt.lvds_ssc_freq, + dev_priv->vbt.display_clock_mode, + dev_priv->vbt.fdi_rx_polarity_inverted); } static const struct child_device_config * @@ -568,7 +583,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) * accurate and doesn't have to be, as long as it's not too strict. */ if (!IS_GEN_RANGE(dev_priv, 3, 7)) { - DRM_DEBUG_KMS("Skipping SDVO device mapping\n"); + drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n"); return; } @@ -586,14 +601,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) if (child->dvo_port != DEVICE_PORT_DVOB && child->dvo_port != DEVICE_PORT_DVOC) { /* skip the incorrect SDVO port */ - DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); + drm_dbg_kms(&dev_priv->drm, + "Incorrect SDVO port. Skip it\n"); continue; } - DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" - " %s port\n", - child->slave_addr, - (child->dvo_port == DEVICE_PORT_DVOB) ? - "SDVOB" : "SDVOC"); + drm_dbg_kms(&dev_priv->drm, + "the SDVO device with slave addr %2x is found on" + " %s port\n", + child->slave_addr, + (child->dvo_port == DEVICE_PORT_DVOB) ? 
+ "SDVOB" : "SDVOC"); mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1]; if (!mapping->initialized) { mapping->dvo_port = child->dvo_port; @@ -602,28 +619,30 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version) mapping->ddc_pin = child->ddc_pin; mapping->i2c_pin = child->i2c_pin; mapping->initialized = 1; - DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", - mapping->dvo_port, - mapping->slave_addr, - mapping->dvo_wiring, - mapping->ddc_pin, - mapping->i2c_pin); + drm_dbg_kms(&dev_priv->drm, + "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", + mapping->dvo_port, mapping->slave_addr, + mapping->dvo_wiring, mapping->ddc_pin, + mapping->i2c_pin); } else { - DRM_DEBUG_KMS("Maybe one SDVO port is shared by " - "two SDVO device.\n"); + drm_dbg_kms(&dev_priv->drm, + "Maybe one SDVO port is shared by " + "two SDVO device.\n"); } if (child->slave2_addr) { /* Maybe this is a SDVO device with multiple inputs */ /* And the mapping info is not added */ - DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" - " is a SDVO device with multiple inputs.\n"); + drm_dbg_kms(&dev_priv->drm, + "there exists the slave2_addr. Maybe this" + " is a SDVO device with multiple inputs.\n"); } count++; } if (!count) { /* No SDVO device info is found */ - DRM_DEBUG_KMS("No SDVO device info is found in VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "No SDVO device info is found in VBT\n"); } } @@ -664,7 +683,8 @@ parse_driver_features(struct drm_i915_private *dev_priv, } if (bdb->version < 228) { - DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); + drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n", + driver->drrs_enabled); /* * If DRRS is not supported, drrs_type has to be set to 0. 
* This is because, VBT is configured in such a way that @@ -688,7 +708,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv, if (bdb->version < 228) return; - power = find_section(bdb, BDB_LVDS_POWER); + power = find_section(bdb, BDB_LFP_POWER); if (!power) return; @@ -742,8 +762,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.edp.rate = DP_LINK_BW_2_7; break; default: - DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", - edp_link_params->rate); + drm_dbg_kms(&dev_priv->drm, + "VBT has unknown eDP link rate value %u\n", + edp_link_params->rate); break; } @@ -758,8 +779,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.edp.lanes = 4; break; default: - DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", - edp_link_params->lanes); + drm_dbg_kms(&dev_priv->drm, + "VBT has unknown eDP lane count value %u\n", + edp_link_params->lanes); break; } @@ -777,8 +799,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; break; default: - DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", - edp_link_params->preemphasis); + drm_dbg_kms(&dev_priv->drm, + "VBT has unknown eDP pre-emphasis value %u\n", + edp_link_params->preemphasis); break; } @@ -796,8 +819,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; break; default: - DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", - edp_link_params->vswing); + drm_dbg_kms(&dev_priv->drm, + "VBT has unknown eDP voltage swing value %u\n", + edp_link_params->vswing); break; } @@ -824,7 +848,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) psr = find_section(bdb, BDB_PSR); if (!psr) { - DRM_DEBUG_KMS("No PSR BDB found.\n"); + drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n"); return; } @@ -851,8 +875,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT; break; default: - DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n", - psr_table->lines_to_wait); + drm_dbg_kms(&dev_priv->drm, + "VBT has unknown PSR lines to wait %u\n", + psr_table->lines_to_wait); break; } @@ -874,8 +899,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp1_wakeup_time_us = 0; break; default: - DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", - psr_table->tp1_wakeup_time); + drm_dbg_kms(&dev_priv->drm, + "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", + psr_table->tp1_wakeup_time); /* fallthrough */ case 2: dev_priv->vbt.psr.tp1_wakeup_time_us = 2500; @@ -893,8 +919,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0; break; default: - DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", - psr_table->tp2_tp3_wakeup_time); + drm_dbg_kms(&dev_priv->drm, + "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", + psr_table->tp2_tp3_wakeup_time); /* fallthrough */ case 2: dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500; @@ -1000,12 +1027,12 @@ parse_mipi_config(struct drm_i915_private *dev_priv, */ start = find_section(bdb, BDB_MIPI_CONFIG); if (!start) { - DRM_DEBUG_KMS("No 
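/*
 * Editor's sketch, not part of the patch: the deliberate switch
 * fall-through in parse_psr() above. Out-of-range VBT wakeup values log a
 * complaint and then fall into the "max value" case instead of duplicating
 * the assignment. The case 0/1 values come from the surrounding kernel
 * code, not from the hunks shown here.
 */
#include <stdio.h>

static int tp1_wakeup_time_us(int vbt_value)
{
	switch (vbt_value) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 0;
	default:
		printf("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
		       vbt_value);
		/* fallthrough */
	case 2:
		return 2500;
	}
}

int main(void)
{
	return tp1_wakeup_time_us(7) != 2500;
}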
MIPI config BDB found"); + drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found"); return; } - DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n", - panel_type); + drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n", + panel_type); /* * get hold of the correct configuration block and pps data as per @@ -1220,7 +1247,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv) const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; int index, len; - if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1)) + if (drm_WARN_ON(&dev_priv->drm, + !data || dev_priv->vbt.dsi.seq_version != 1)) return 0; /* index = 1 to skip sequence byte */ @@ -1273,7 +1301,8 @@ static void fixup_mipi_sequences(struct drm_i915_private *dev_priv) if (!len) return; - DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n"); + drm_dbg_kms(&dev_priv->drm, + "Using init OTP fragment to deassert reset\n"); /* Copy the fragment, update seq byte and terminate it */ init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; @@ -1308,18 +1337,21 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv, sequence = find_section(bdb, BDB_MIPI_SEQUENCE); if (!sequence) { - DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n"); + drm_dbg_kms(&dev_priv->drm, + "No MIPI Sequence found, parsing complete\n"); return; } /* Fail gracefully for forward incompatible sequence block. */ if (sequence->version >= 4) { - DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n", - sequence->version); + drm_err(&dev_priv->drm, + "Unable to parse MIPI Sequence Block v%u\n", + sequence->version); return; } - DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version); + drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n", + sequence->version); seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size); if (!seq_data) @@ -1336,13 +1368,15 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv, break; if (seq_id >= MIPI_SEQ_MAX) { - DRM_ERROR("Unknown sequence %u\n", seq_id); + drm_err(&dev_priv->drm, "Unknown sequence %u\n", + seq_id); goto err; } /* Log about presence of sequences we won't run. 
*/ if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF) - DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id); + drm_dbg_kms(&dev_priv->drm, + "Unsupported sequence %u\n", seq_id); dev_priv->vbt.dsi.sequence[seq_id] = data + index; @@ -1351,7 +1385,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv, else index = goto_next_sequence(data, index, seq_size); if (!index) { - DRM_ERROR("Invalid sequence %u\n", seq_id); + drm_err(&dev_priv->drm, "Invalid sequence %u\n", + seq_id); goto err; } } @@ -1362,7 +1397,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv, fixup_mipi_sequences(dev_priv); - DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); + drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n"); return; err: @@ -1387,13 +1422,15 @@ parse_compression_parameters(struct drm_i915_private *i915, if (params) { /* Sanity checks */ if (params->entry_size != sizeof(params->data[0])) { - DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n"); + drm_dbg_kms(&i915->drm, + "VBT: unsupported compression param entry size\n"); return; } block_size = get_blocksize(params); if (block_size < sizeof(*params)) { - DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n"); + drm_dbg_kms(&i915->drm, + "VBT: expected 16 compression param entries\n"); return; } } @@ -1405,12 +1442,14 @@ parse_compression_parameters(struct drm_i915_private *i915, continue; if (!params) { - DRM_DEBUG_KMS("VBT: compression params not available\n"); + drm_dbg_kms(&i915->drm, + "VBT: compression params not available\n"); continue; } if (child->compression_method_cps) { - DRM_DEBUG_KMS("VBT: CPS compression not supported\n"); + drm_dbg_kms(&i915->drm, + "VBT: CPS compression not supported\n"); continue; } @@ -1458,10 +1497,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin); if (p != PORT_NONE) { - DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " - "disabling port %c DVI/HDMI support\n", - port_name(port), info->alternate_ddc_pin, - port_name(p), port_name(p)); + drm_dbg_kms(&dev_priv->drm, + "port %c trying to use the same DDC pin (0x%x) as port %c, " + "disabling port %c DVI/HDMI support\n", + port_name(port), info->alternate_ddc_pin, + port_name(p), port_name(p)); /* * If we have multiple ports supposedly sharing the @@ -1509,10 +1549,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel); if (p != PORT_NONE) { - DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " - "disabling port %c DP support\n", - port_name(port), info->alternate_aux_channel, - port_name(p), port_name(p)); + drm_dbg_kms(&dev_priv->drm, + "port %c trying to use the same AUX CH (0x%x) as port %c, " + "disabling port %c DP support\n", + port_name(port), info->alternate_aux_channel, + port_name(p), port_name(p)); /* * If we have multiple ports supposedlt sharing the @@ -1572,8 +1613,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0) return ddc_pin_map[vbt_pin]; - DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", - vbt_pin); + drm_dbg_kms(&dev_priv->drm, + "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", + vbt_pin); return 0; } @@ -1624,8 +1666,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, info = &dev_priv->vbt.ddi_port_info[port]; 
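/*
 * Editor's sketch, not part of the patch: the bounds-checked translation in
 * map_ddc_pin() above. VBT DDC pin numbers index a per-platform table and
 * zero doubles as the "invalid" sentinel, so out-of-range and unmapped
 * pins both fall back to "ignore what the VBT claims". The table contents
 * here are hypothetical.
 */
#include <stdio.h>

static const unsigned char ddc_pin_map[] = {
	[1] = 0x1, [2] = 0x2, [4] = 0x4,	/* fake VBT->GMBUS pin map */
};

static unsigned char map_ddc_pin(unsigned int vbt_pin)
{
	const unsigned int n_entries =
		sizeof(ddc_pin_map) / sizeof(ddc_pin_map[0]);

	if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
		return ddc_pin_map[vbt_pin];

	printf("Ignoring alternate pin: VBT claims DDC pin %u, which is not valid for this platform\n",
	       vbt_pin);
	return 0;
}

int main(void)
{
	return map_ddc_pin(2) != 0x2 || map_ddc_pin(9) != 0;
}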
if (info->child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", - port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "More than one child device for port %c in VBT, using the first.\n", + port_name(port)); return; } @@ -1636,8 +1679,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) { - DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", - is_hdmi ? "/HDMI" : ""); + drm_dbg_kms(&dev_priv->drm, + "VBT claims port A supports DVI%s, ignoring\n", + is_hdmi ? "/HDMI" : ""); is_dvi = false; is_hdmi = false; } @@ -1653,11 +1697,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (bdb_version >= 209) info->supports_tbt = child->tbt; - DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", - port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, - HAS_LSPCON(dev_priv) && child->lspcon, - info->supports_typec_usb, info->supports_tbt, - devdata->dsc != NULL); + drm_dbg_kms(&dev_priv->drm, + "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", + port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, + HAS_LSPCON(dev_priv) && child->lspcon, + info->supports_typec_usb, info->supports_tbt, + devdata->dsc != NULL); if (is_dvi) { u8 ddc_pin; @@ -1667,9 +1712,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, info->alternate_ddc_pin = ddc_pin; sanitize_ddc_pin(dev_priv, port); } else { - DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " - "sticking to defaults\n", - port_name(port), ddc_pin); + drm_dbg_kms(&dev_priv->drm, + "Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), ddc_pin); } } @@ -1682,9 +1728,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, if (bdb_version >= 158) { /* The VBT HDMI level shift values match the table we have. 
*/ u8 hdmi_level_shift = child->hdmi_level_shifter_value; - DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", - port_name(port), - hdmi_level_shift); + drm_dbg_kms(&dev_priv->drm, + "VBT HDMI level shift for port %c: %d\n", + port_name(port), + hdmi_level_shift); info->hdmi_level_shift = hdmi_level_shift; info->hdmi_level_shift_set = true; } @@ -1708,19 +1755,22 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, } if (max_tmds_clock) - DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n", - port_name(port), max_tmds_clock); + drm_dbg_kms(&dev_priv->drm, + "VBT HDMI max TMDS clock for port %c: %d kHz\n", + port_name(port), max_tmds_clock); info->max_tmds_clock = max_tmds_clock; } /* Parse the I_boost config for SKL and above */ if (bdb_version >= 196 && child->iboost) { info->dp_boost_level = translate_iboost(child->dp_iboost_level); - DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n", - port_name(port), info->dp_boost_level); + drm_dbg_kms(&dev_priv->drm, + "VBT (e)DP boost level for port %c: %d\n", + port_name(port), info->dp_boost_level); info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level); - DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n", - port_name(port), info->hdmi_boost_level); + drm_dbg_kms(&dev_priv->drm, + "VBT HDMI boost level for port %c: %d\n", + port_name(port), info->hdmi_boost_level); } /* DP max link rate for CNL+ */ @@ -1740,8 +1790,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, info->dp_max_link_rate = 162000; break; } - DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n", - port_name(port), info->dp_max_link_rate); + drm_dbg_kms(&dev_priv->drm, + "VBT DP max link rate for port %c: %d\n", + port_name(port), info->dp_max_link_rate); } info->child = child; @@ -1775,19 +1826,21 @@ parse_general_definitions(struct drm_i915_private *dev_priv, defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); if (!defs) { - DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); + drm_dbg_kms(&dev_priv->drm, + "No general definition block is found, no devices defined.\n"); return; } block_size = get_blocksize(defs); if (block_size < sizeof(*defs)) { - DRM_DEBUG_KMS("General definitions block too small (%u)\n", - block_size); + drm_dbg_kms(&dev_priv->drm, + "General definitions block too small (%u)\n", + block_size); return; } bus_pin = defs->crt_ddc_gmbus_pin; - DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); + drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin); if (intel_gmbus_is_valid_pin(dev_priv, bus_pin)) dev_priv->vbt.crt_ddc_pin = bus_pin; @@ -1806,19 +1859,22 @@ parse_general_definitions(struct drm_i915_private *dev_priv, } else { expected_size = sizeof(*child); BUILD_BUG_ON(sizeof(*child) < 39); - DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n", - bdb->version, expected_size); + drm_dbg(&dev_priv->drm, + "Expected child device config size for VBT version %u not known; assuming %u\n", + bdb->version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ if (defs->child_dev_size != expected_size) - DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n", - defs->child_dev_size, expected_size, bdb->version); + drm_err(&dev_priv->drm, + "Unexpected child device config size %u (expected %u for VBT version %u)\n", + defs->child_dev_size, expected_size, bdb->version); /* The legacy sized child device config is the minimum we need. 
*/ if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { - DRM_DEBUG_KMS("Child device config size %u is too small.\n", - defs->child_dev_size); + drm_dbg_kms(&dev_priv->drm, + "Child device config size %u is too small.\n", + defs->child_dev_size); return; } @@ -1830,8 +1886,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (!child->device_type) continue; - DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n", - child->device_type); + drm_dbg_kms(&dev_priv->drm, + "Found VBT child device with type 0x%x\n", + child->device_type); devdata = kzalloc(sizeof(*devdata), GFP_KERNEL); if (!devdata) @@ -1849,7 +1906,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv, } if (list_empty(&dev_priv->vbt.display_devices)) - DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "no child dev is parsed from VBT\n"); } /* Common defaults which may be overridden by VBT. */ @@ -1882,7 +1940,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) */ dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv, !HAS_PCH_SPLIT(dev_priv)); - DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq); + drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n", + dev_priv->vbt.lvds_ssc_freq); } /* Defaults to initialize only if there is no VBT. */ @@ -1992,13 +2051,14 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv) goto err_unmap_oprom; if (sizeof(struct vbt_header) > size) { - DRM_DEBUG_DRIVER("VBT header incomplete\n"); + drm_dbg(&dev_priv->drm, "VBT header incomplete\n"); goto err_unmap_oprom; } vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size)); if (vbt_size > size) { - DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n"); + drm_dbg(&dev_priv->drm, + "VBT incomplete (vbt_size overflows)\n"); goto err_unmap_oprom; } @@ -2041,7 +2101,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv) INIT_LIST_HEAD(&dev_priv->vbt.display_devices); if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { - DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); + drm_dbg_kms(&dev_priv->drm, + "Skipping VBT init due to disabled display.\n"); return; } @@ -2055,13 +2116,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv) vbt = oprom_vbt; - DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); + drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n"); } bdb = get_bdb_header(vbt); - DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n", - (int)sizeof(vbt->signature), vbt->signature, bdb->version); + drm_dbg_kms(&dev_priv->drm, + "VBT signature \"%.*s\", BDB version %d\n", + (int)sizeof(vbt->signature), vbt->signature, bdb->version); /* Grab useful general definitions */ parse_general_features(dev_priv, bdb); @@ -2086,7 +2148,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv) out: if (!vbt) { - DRM_INFO("Failed to find VBIOS tables (VBT)\n"); + drm_info(&dev_priv->drm, + "Failed to find VBIOS tables (VBT)\n"); init_vbt_missing_defaults(dev_priv); } @@ -2238,13 +2301,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por const struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port]; - return port_info->supports_dp || - port_info->supports_dvi || - port_info->supports_hdmi; + return port_info->child; } /* FIXME maybe deal with port A as well? 
*/ - if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) + if (drm_WARN_ON(&dev_priv->drm, + port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) return false; list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) { @@ -2373,8 +2435,9 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, } else if (dvo_port == DVO_PORT_MIPIB || dvo_port == DVO_PORT_MIPIC || dvo_port == DVO_PORT_MIPID) { - DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", - port_name(dvo_port - DVO_PORT_MIPIA)); + drm_dbg_kms(&dev_priv->drm, + "VBT has unsupported DSI port %c\n", + port_name(dvo_port - DVO_PORT_MIPIA)); } } @@ -2493,7 +2556,7 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, const struct child_device_config *child = i915->vbt.ddi_port_info[port].child; - if (WARN_ON_ONCE(!IS_GEN9_LP(i915))) + if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915))) return false; return child && child->hpd_invert; @@ -2526,8 +2589,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, if (!info->alternate_aux_channel) { aux_ch = (enum aux_ch)port; - DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", - aux_ch_name(aux_ch), port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "using AUX %c for port %c (platform default)\n", + aux_ch_name(aux_ch), port_name(port)); return aux_ch; } @@ -2559,8 +2623,78 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, break; } - DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", - aux_ch_name(aux_ch), port_name(port)); + drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n", + aux_ch_name(aux_ch), port_name(port)); return aux_ch; } + +int intel_bios_max_tmds_clock(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock; +} + +int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct ddi_vbt_port_info *info = + &i915->vbt.ddi_port_info[encoder->port]; + + return info->hdmi_level_shift_set ? 
info->hdmi_level_shift : -1; +} + +int intel_bios_dp_boost_level(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.ddi_port_info[encoder->port].dp_boost_level; +} + +int intel_bios_hdmi_boost_level(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level; +} + +int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate; +} + +int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin; +} + +bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port) +{ + return i915->vbt.ddi_port_info[port].supports_dvi; +} + +bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port) +{ + return i915->vbt.ddi_port_info[port].supports_hdmi; +} + +bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port) +{ + return i915->vbt.ddi_port_info[port].supports_dp; +} + +bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, + enum port port) +{ + return i915->vbt.ddi_port_info[port].supports_typec_usb; +} + +bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port) +{ + return i915->vbt.ddi_port_info[port].supports_tbt; +} diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index d6a0c29d37ac..e29e79faa01b 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -32,8 +32,6 @@ #include <linux/types.h> -#include <drm/i915_drm.h> - struct drm_i915_private; struct intel_crtc_state; struct intel_encoder; @@ -247,5 +245,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc); +int intel_bios_max_tmds_clock(struct intel_encoder *encoder); +int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); +int intel_bios_dp_boost_level(struct intel_encoder *encoder); +int intel_bios_hdmi_boost_level(struct intel_encoder *encoder); +int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); +int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); +bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port); +bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port); +bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port); +bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); +bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index b228671d5a5d..58b264bc318d 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -122,7 +122,8 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, if (ret) return ret; - if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points))) + if (drm_WARN_ON(&dev_priv->drm, + qi->num_points > ARRAY_SIZE(qi->points))) qi->num_points = ARRAY_SIZE(qi->points); for (i = 0; i < qi->num_points; i++) { @@ 
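/*
 * Editor's sketch, not part of the patch: how a caller consumes the new
 * intel_bios_*() accessors declared above instead of dereferencing
 * i915->vbt.ddi_port_info[] directly. The sentinels match the accessor
 * bodies in the diff: 0 from the TMDS-clock accessor and -1 from the
 * level-shift accessor both mean "VBT didn't say, keep the driver
 * default". Everything else here is a hypothetical stand-in.
 */
#include <stdio.h>

struct port_info {
	int max_tmds_clock;		/* kHz, 0 if unset */
	int hdmi_level_shift;
	int hdmi_level_shift_set;
};

static struct port_info vbt_port = { .max_tmds_clock = 300000 };

static int bios_max_tmds_clock(void)	/* intel_bios_max_tmds_clock() */
{
	return vbt_port.max_tmds_clock;
}

static int bios_hdmi_level_shift(void)	/* intel_bios_hdmi_level_shift() */
{
	return vbt_port.hdmi_level_shift_set ? vbt_port.hdmi_level_shift : -1;
}

static int hdmi_port_clock_limit(int platform_max)
{
	int vbt_max = bios_max_tmds_clock();

	if (vbt_max && vbt_max < platform_max)
		return vbt_max;
	return platform_max;
}

int main(void)
{
	printf("TMDS limit %d kHz, level shift %d\n",
	       hdmi_port_clock_limit(594000), bios_hdmi_level_shift());
	return 0;
}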
-132,9 +133,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, if (ret) return ret; - DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", - i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras, - sp->t_rcd, sp->t_rc); + drm_dbg_kms(&dev_priv->drm, + "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", + i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras, + sp->t_rcd, sp->t_rc); } return 0; @@ -187,7 +189,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel ret = icl_get_qgv_points(dev_priv, &qi); if (ret) { - DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits"); + drm_dbg_kms(&dev_priv->drm, + "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } num_channels = qi.num_channels; @@ -228,8 +231,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel bi->deratedbw[j] = min(maxdebw, bw * 9 / 10); /* 90% */ - DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", - i, j, bi->num_planes, bi->deratedbw[j]); + drm_dbg_kms(&dev_priv->drm, + "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", + i, j, bi->num_planes, bi->deratedbw[j]); } if (bi->num_planes == 1) @@ -374,10 +378,9 @@ static struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct drm_private_state *bw_state; + struct intel_global_state *bw_state; - bw_state = drm_atomic_get_private_obj_state(&state->base, - &dev_priv->bw_obj); + bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); @@ -392,7 +395,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) unsigned int data_rate, max_data_rate; unsigned int num_active_planes; struct intel_crtc *crtc; - int i; + int i, ret; /* FIXME earlier gens need some checks too */ if (INTEL_GEN(dev_priv) < 11) @@ -424,15 +427,20 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) bw_state->data_rate[crtc->pipe] = new_data_rate; bw_state->num_active_planes[crtc->pipe] = new_active_planes; - DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n", - pipe_name(crtc->pipe), - bw_state->data_rate[crtc->pipe], - bw_state->num_active_planes[crtc->pipe]); + drm_dbg_kms(&dev_priv->drm, + "pipe %c data rate %u num active planes %u\n", + pipe_name(crtc->pipe), + bw_state->data_rate[crtc->pipe], + bw_state->num_active_planes[crtc->pipe]); } if (!bw_state) return 0; + ret = intel_atomic_lock_global_state(&bw_state->base); + if (ret) + return ret; + data_rate = intel_bw_data_rate(dev_priv, bw_state); num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state); @@ -441,15 +449,17 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) data_rate = DIV_ROUND_UP(data_rate, 1000); if (data_rate > max_data_rate) { - DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n", - data_rate, max_data_rate, num_active_planes); + drm_dbg_kms(&dev_priv->drm, + "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n", + data_rate, max_data_rate, num_active_planes); return -EINVAL; } return 0; } -static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj) +static struct intel_global_state * +intel_bw_duplicate_state(struct intel_global_obj *obj) { struct intel_bw_state *state; @@ -457,18 +467,16 @@ static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj if (!state) return 
NULL; - __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); - return &state->base; } -static void intel_bw_destroy_state(struct drm_private_obj *obj, - struct drm_private_state *state) +static void intel_bw_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) { kfree(state); } -static const struct drm_private_state_funcs intel_bw_funcs = { +static const struct intel_global_state_funcs intel_bw_funcs = { .atomic_duplicate_state = intel_bw_duplicate_state, .atomic_destroy_state = intel_bw_destroy_state, }; @@ -481,13 +489,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv) if (!state) return -ENOMEM; - drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj, - &state->base, &intel_bw_funcs); + intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj, + &state->base, &intel_bw_funcs); return 0; } - -void intel_bw_cleanup(struct drm_i915_private *dev_priv) -{ - drm_atomic_private_obj_fini(&dev_priv->bw_obj); -} diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 20b9ad241802..a8aa7624c5aa 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -9,13 +9,14 @@ #include <drm/drm_atomic.h> #include "intel_display.h" +#include "intel_global_state.h" struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; struct intel_bw_state { - struct drm_private_state base; + struct intel_global_state base; unsigned int data_rate[I915_MAX_PIPES]; u8 num_active_planes[I915_MAX_PIPES]; @@ -25,7 +26,6 @@ struct intel_bw_state { void intel_bw_init_hw(struct drm_i915_private *dev_priv); int intel_bw_init(struct drm_i915_private *dev_priv); -void intel_bw_cleanup(struct drm_i915_private *dev_priv); int intel_bw_atomic_check(struct intel_atomic_state *state); void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 0ce5926006ca..979a0241fdcb 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -55,43 +55,43 @@ */ static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; } static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 200000; + cdclk_config->cdclk = 200000; } static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 266667; + cdclk_config->cdclk = 266667; } static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 333333; + cdclk_config->cdclk = 333333; } static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 400000; + cdclk_config->cdclk = 400000; } static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 450000; } static void i85x_get_cdclk(struct 
drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; u16 hpllcc = 0; @@ -102,7 +102,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv, * FIXME is this the right way to detect 852GM/852GMV? */ if (pdev->revision == 0x1) { - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; return; } @@ -116,24 +116,24 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv, case GC_CLOCK_133_200: case GC_CLOCK_133_200_2: case GC_CLOCK_100_200: - cdclk_state->cdclk = 200000; + cdclk_config->cdclk = 200000; break; case GC_CLOCK_166_250: - cdclk_state->cdclk = 250000; + cdclk_config->cdclk = 250000; break; case GC_CLOCK_100_133: - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; break; case GC_CLOCK_133_266: case GC_CLOCK_133_266_2: case GC_CLOCK_166_266: - cdclk_state->cdclk = 266667; + cdclk_config->cdclk = 266667; break; } } static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; u16 gcfgc = 0; @@ -141,23 +141,23 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv, pci_read_config_word(pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) { - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; return; } switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_320_MHZ: - cdclk_state->cdclk = 333333; + cdclk_config->cdclk = 333333; break; default: case GC_DISPLAY_CLOCK_190_200_MHZ: - cdclk_state->cdclk = 190000; + cdclk_config->cdclk = 190000; break; } } static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; u16 gcfgc = 0; @@ -165,17 +165,17 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv, pci_read_config_word(pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) { - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; return; } switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_333_320_MHZ: - cdclk_state->cdclk = 320000; + cdclk_config->cdclk = 320000; break; default: case GC_DISPLAY_CLOCK_190_200_MHZ: - cdclk_state->cdclk = 200000; + cdclk_config->cdclk = 200000; break; } } @@ -237,20 +237,21 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) else return 0; - tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? - HPLLVCO_MOBILE : HPLLVCO); + tmp = intel_de_read(dev_priv, + IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? 
HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) - DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); + drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n", + tmp); else - DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); + drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco); return vco; } static void g33_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 }; @@ -261,7 +262,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_state->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); @@ -270,7 +271,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, if (cdclk_sel >= ARRAY_SIZE(div_3200)) goto fail; - switch (cdclk_state->vco) { + switch (cdclk_config->vco) { case 3200000: div_table = div_3200; break; @@ -287,18 +288,19 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv, goto fail; } - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, - div_table[cdclk_sel]); + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, + div_table[cdclk_sel]); return; fail: - DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", - cdclk_state->vco, tmp); - cdclk_state->cdclk = 190476; + drm_err(&dev_priv->drm, + "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", + cdclk_config->vco, tmp); + cdclk_config->cdclk = 190476; } static void pnv_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; u16 gcfgc = 0; @@ -307,31 +309,32 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_267_MHZ_PNV: - cdclk_state->cdclk = 266667; + cdclk_config->cdclk = 266667; break; case GC_DISPLAY_CLOCK_333_MHZ_PNV: - cdclk_state->cdclk = 333333; + cdclk_config->cdclk = 333333; break; case GC_DISPLAY_CLOCK_444_MHZ_PNV: - cdclk_state->cdclk = 444444; + cdclk_config->cdclk = 444444; break; case GC_DISPLAY_CLOCK_200_MHZ_PNV: - cdclk_state->cdclk = 200000; + cdclk_config->cdclk = 200000; break; default: - DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); + drm_err(&dev_priv->drm, + "Unknown pnv display core clock 0x%04x\n", gcfgc); /* fall through */ case GC_DISPLAY_CLOCK_133_MHZ_PNV: - cdclk_state->cdclk = 133333; + cdclk_config->cdclk = 133333; break; case GC_DISPLAY_CLOCK_167_MHZ_PNV: - cdclk_state->cdclk = 166667; + cdclk_config->cdclk = 166667; break; } } static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; static const u8 div_3200[] = { 16, 10, 8 }; @@ -341,7 +344,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, unsigned int cdclk_sel; u16 tmp = 0; - cdclk_state->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); @@ -350,7 +353,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, if (cdclk_sel >= ARRAY_SIZE(div_3200)) goto fail; - switch (cdclk_state->vco) { + switch (cdclk_config->vco) { case 3200000: div_table = div_3200; break; @@ -364,62 +367,64 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv, 
goto fail; } - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, - div_table[cdclk_sel]); + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, + div_table[cdclk_sel]); return; fail: - DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", - cdclk_state->vco, tmp); - cdclk_state->cdclk = 200000; + drm_err(&dev_priv->drm, + "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", + cdclk_config->vco, tmp); + cdclk_config->cdclk = 200000; } static void gm45_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int cdclk_sel; u16 tmp = 0; - cdclk_state->vco = intel_hpll_vco(dev_priv); + cdclk_config->vco = intel_hpll_vco(dev_priv); pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = (tmp >> 12) & 0x1; - switch (cdclk_state->vco) { + switch (cdclk_config->vco) { case 2666667: case 4000000: case 5333333: - cdclk_state->cdclk = cdclk_sel ? 333333 : 222222; + cdclk_config->cdclk = cdclk_sel ? 333333 : 222222; break; case 3200000: - cdclk_state->cdclk = cdclk_sel ? 320000 : 228571; + cdclk_config->cdclk = cdclk_sel ? 320000 : 228571; break; default: - DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", - cdclk_state->vco, tmp); - cdclk_state->cdclk = 222222; + drm_err(&dev_priv->drm, + "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", + cdclk_config->vco, tmp); + cdclk_config->cdclk = 222222; break; } } static void hsw_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - u32 lcpll = I915_READ(LCPLL_CTL); + u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) - cdclk_state->cdclk = 800000; - else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 800000; + else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 450000; else if (IS_HSW_ULT(dev_priv)) - cdclk_state->cdclk = 337500; + cdclk_config->cdclk = 337500; else - cdclk_state->cdclk = 540000; + cdclk_config->cdclk = 540000; } static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) @@ -462,17 +467,17 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) } static void vlv_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { u32 val; vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); - cdclk_state->vco = vlv_get_hpll_vco(dev_priv); - cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk", - CCK_DISPLAY_CLOCK_CONTROL, - cdclk_state->vco); + cdclk_config->vco = vlv_get_hpll_vco(dev_priv); + cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk", + CCK_DISPLAY_CLOCK_CONTROL, + cdclk_config->vco); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); @@ -480,10 +485,10 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT)); if (IS_VALLEYVIEW(dev_priv)) - cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >> + cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >> DSPFREQGUAR_SHIFT; else - cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >> + cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >> DSPFREQGUAR_SHIFT_CHV; } 
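The hunks above and below all follow one mechanical pattern that repeats throughout intel_cdclk.c: the implicit-dev_priv MMIO macros I915_READ()/I915_WRITE() become the explicit display-engine helpers intel_de_read()/intel_de_write(), the device-less DRM_ERROR()/DRM_DEBUG_KMS() macros become the device-aware drm_err()/drm_dbg_kms(), and the hardware snapshot struct intel_cdclk_state is renamed to struct intel_cdclk_config, freeing the old name for the atomic global-state wrapper introduced further down in this file. A minimal before/after sketch of that pattern; FOO_CTL, FOO_ENABLE and foo_get_cdclk() are hypothetical stand-ins, not names from the patch:

/* Before: the device is implicit in the macros and absent from the log.
 * FOO_CTL/FOO_ENABLE are illustrative only. */
static void foo_get_cdclk(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_state *cdclk_state)
{
        u32 val = I915_READ(FOO_CTL);   /* expands dev_priv behind the scenes */

        if ((val & FOO_ENABLE) == 0)
                DRM_ERROR("FOO PLL not enabled\n"); /* no hint which GPU */

        cdclk_state->cdclk = 200000;
}

/* After: the device is an explicit argument to both MMIO and logging. */
static void foo_get_cdclk(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_config *cdclk_config)
{
        u32 val = intel_de_read(dev_priv, FOO_CTL);

        if ((val & FOO_ENABLE) == 0)
                drm_err(&dev_priv->drm, "FOO PLL not enabled\n");

        cdclk_config->cdclk = 200000;
}

Threading the device through lets drm_err()/drm_dbg_kms() prefix each message with the originating device, which matters once more than one i915 device can be bound at the same time.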
@@ -510,25 +515,26 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) * WA - write default credits before re-programming * FIXME: should we also set the resend bit here? */ - I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | - default_credits); + intel_de_write(dev_priv, GCI_CONTROL, + VGA_FAST_MODE_DISABLE | default_credits); - I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | - credits | PFI_CREDIT_RESEND); + intel_de_write(dev_priv, GCI_CONTROL, + VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND); /* * FIXME is this guaranteed to clear * immediately or should we poll for it? */ - WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND); } static void vlv_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - int cdclk = cdclk_state->cdclk; - u32 val, cmd = cdclk_state->voltage_level; + int cdclk = cdclk_config->cdclk; + u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; switch (cdclk) { @@ -563,7 +569,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { - DRM_ERROR("timed out waiting for CDclk change\n"); + drm_err(&dev_priv->drm, + "timed out waiting for CDclk change\n"); } if (cdclk == 400000) { @@ -581,7 +588,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 50)) - DRM_ERROR("timed out waiting for CDclk change\n"); + drm_err(&dev_priv->drm, + "timed out waiting for CDclk change\n"); } /* adjust self-refresh exit latency value */ @@ -611,11 +619,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, } static void chv_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - int cdclk = cdclk_state->cdclk; - u32 val, cmd = cdclk_state->voltage_level; + int cdclk = cdclk_config->cdclk; + u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; switch (cdclk) { @@ -645,7 +653,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { - DRM_ERROR("timed out waiting for CDclk change\n"); + drm_err(&dev_priv->drm, + "timed out waiting for CDclk change\n"); } vlv_punit_put(dev_priv); @@ -685,68 +694,70 @@ static u8 bdw_calc_voltage_level(int cdclk) } static void bdw_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - u32 lcpll = I915_READ(LCPLL_CTL); + u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL); u32 freq = lcpll & LCPLL_CLK_FREQ_MASK; if (lcpll & LCPLL_CD_SOURCE_FCLK) - cdclk_state->cdclk = 800000; - else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 800000; + else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) + cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_450) - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 450000; else if (freq == LCPLL_CLK_FREQ_54O_BDW) - cdclk_state->cdclk = 540000; + cdclk_config->cdclk = 540000; else if (freq == LCPLL_CLK_FREQ_337_5_BDW) - 
cdclk_state->cdclk = 337500; + cdclk_config->cdclk = 337500; else - cdclk_state->cdclk = 675000; + cdclk_config->cdclk = 675000; /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ - cdclk_state->voltage_level = - bdw_calc_voltage_level(cdclk_state->cdclk); + cdclk_config->voltage_level = + bdw_calc_voltage_level(cdclk_config->cdclk); } static void bdw_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - int cdclk = cdclk_state->cdclk; + int cdclk = cdclk_config->cdclk; u32 val; int ret; - if (WARN((I915_READ(LCPLL_CTL) & - (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | - LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | - LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | - LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, - "trying to change cdclk frequency with cdclk not enabled\n")) + if (drm_WARN(&dev_priv->drm, + (intel_de_read(dev_priv, LCPLL_CTL) & + (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | + LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | + LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW | + LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK, + "trying to change cdclk frequency with cdclk not enabled\n")) return; ret = sandybridge_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { - DRM_ERROR("failed to inform pcode about cdclk change\n"); + drm_err(&dev_priv->drm, + "failed to inform pcode about cdclk change\n"); return; } - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val |= LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, LCPLL_CTL, val); /* * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. 
*/ - if (wait_for_us(I915_READ(LCPLL_CTL) & + if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 100)) - DRM_ERROR("Switching to FCLK failed\n"); + drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val &= ~LCPLL_CLK_FREQ_MASK; switch (cdclk) { @@ -767,20 +778,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, break; } - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, LCPLL_CTL, val); - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val &= ~LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, LCPLL_CTL, val); - if (wait_for_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) - DRM_ERROR("Switching back to LCPLL failed\n"); + if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_state->voltage_level); + cdclk_config->voltage_level); - I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); + intel_de_write(dev_priv, CDCLK_FREQ, + DIV_ROUND_CLOSEST(cdclk, 1000) - 1); intel_update_cdclk(dev_priv); } @@ -821,26 +833,27 @@ static u8 skl_calc_voltage_level(int cdclk) } static void skl_dpll0_update(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { u32 val; - cdclk_state->ref = 24000; - cdclk_state->vco = 0; + cdclk_config->ref = 24000; + cdclk_config->vco = 0; - val = I915_READ(LCPLL1_CTL); + val = intel_de_read(dev_priv, LCPLL1_CTL); if ((val & LCPLL_PLL_ENABLE) == 0) return; - if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) + if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0)) return; - val = I915_READ(DPLL_CTRL1); + val = intel_de_read(dev_priv, DPLL_CTRL1); - if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | - DPLL_CTRL1_SSC(SKL_DPLL0) | - DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != - DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) + if (drm_WARN_ON(&dev_priv->drm, + (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | + DPLL_CTRL1_SSC(SKL_DPLL0) | + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) return; switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { @@ -848,11 +861,11 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): - cdclk_state->vco = 8100000; + cdclk_config->vco = 8100000; break; case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): - cdclk_state->vco = 8640000; + cdclk_config->vco = 8640000; break; default: MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); @@ -861,32 +874,32 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv, } static void skl_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { u32 cdctl; - skl_dpll0_update(dev_priv, cdclk_state); + skl_dpll0_update(dev_priv, cdclk_config); - cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref; + cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref; - if (cdclk_state->vco == 0) + if (cdclk_config->vco == 0) goto out; - cdctl = I915_READ(CDCLK_CTL); + cdctl = intel_de_read(dev_priv, CDCLK_CTL); - 
if (cdclk_state->vco == 8640000) { + if (cdclk_config->vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: - cdclk_state->cdclk = 432000; + cdclk_config->cdclk = 432000; break; case CDCLK_FREQ_337_308: - cdclk_state->cdclk = 308571; + cdclk_config->cdclk = 308571; break; case CDCLK_FREQ_540: - cdclk_state->cdclk = 540000; + cdclk_config->cdclk = 540000; break; case CDCLK_FREQ_675_617: - cdclk_state->cdclk = 617143; + cdclk_config->cdclk = 617143; break; default: MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); @@ -895,16 +908,16 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv, } else { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: - cdclk_state->cdclk = 450000; + cdclk_config->cdclk = 450000; break; case CDCLK_FREQ_337_308: - cdclk_state->cdclk = 337500; + cdclk_config->cdclk = 337500; break; case CDCLK_FREQ_540: - cdclk_state->cdclk = 540000; + cdclk_config->cdclk = 540000; break; case CDCLK_FREQ_675_617: - cdclk_state->cdclk = 675000; + cdclk_config->cdclk = 675000; break; default: MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); @@ -917,8 +930,8 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv, * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ - cdclk_state->voltage_level = - skl_calc_voltage_level(cdclk_state->cdclk); + cdclk_config->voltage_level = + skl_calc_voltage_level(cdclk_config->cdclk); } /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ @@ -942,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { u32 val; - WARN_ON(vco != 8100000 && vco != 8640000); + drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); /* * We always enable DPLL0 with the lowest link rate possible, but still @@ -953,7 +966,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) * rate later on, with the constraint of choosing a frequency that * works with vco. 
*/ - val = I915_READ(DPLL_CTRL1); + val = intel_de_read(dev_priv, DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); @@ -965,13 +978,14 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0); - I915_WRITE(DPLL_CTRL1, val); - POSTING_READ(DPLL_CTRL1); + intel_de_write(dev_priv, DPLL_CTRL1, val); + intel_de_posting_read(dev_priv, DPLL_CTRL1); - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); + intel_de_write(dev_priv, LCPLL1_CTL, + intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5)) - DRM_ERROR("DPLL0 not locked\n"); + drm_err(&dev_priv->drm, "DPLL0 not locked\n"); dev_priv->cdclk.hw.vco = vco; @@ -981,19 +995,20 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) static void skl_dpll0_disable(struct drm_i915_private *dev_priv) { - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); + intel_de_write(dev_priv, LCPLL1_CTL, + intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1)) - DRM_ERROR("Couldn't disable DPLL0\n"); + drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n"); dev_priv->cdclk.hw.vco = 0; } static void skl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - int cdclk = cdclk_state->cdclk; - int vco = cdclk_state->vco; + int cdclk = cdclk_config->cdclk; + int vco = cdclk_config->vco; u32 freq_select, cdclk_ctl; int ret; @@ -1005,23 +1020,25 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, * use the corresponding VCO freq as that always leads to using the * minimum 308MHz CDCLK. 
*/ - WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000); + drm_WARN_ON_ONCE(&dev_priv->drm, + IS_SKYLAKE(dev_priv) && vco == 8640000); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) { - DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", - ret); + drm_err(&dev_priv->drm, + "Failed to inform PCU about cdclk change (%d)\n", ret); return; } /* Choose frequency for this cdclk */ switch (cdclk) { default: - WARN_ON(cdclk != dev_priv->cdclk.hw.bypass); - WARN_ON(vco != 0); + drm_WARN_ON(&dev_priv->drm, + cdclk != dev_priv->cdclk.hw.bypass); + drm_WARN_ON(&dev_priv->drm, vco != 0); /* fall through */ case 308571: case 337500: @@ -1044,38 +1061,38 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); - cdclk_ctl = I915_READ(CDCLK_CTL); + cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL); if (dev_priv->cdclk.hw.vco != vco) { /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - I915_WRITE(CDCLK_CTL, cdclk_ctl); + intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); } /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; - I915_WRITE(CDCLK_CTL, cdclk_ctl); - POSTING_READ(CDCLK_CTL); + intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(dev_priv, CDCLK_CTL); if (dev_priv->cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); - I915_WRITE(CDCLK_CTL, cdclk_ctl); + intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); - I915_WRITE(CDCLK_CTL, cdclk_ctl); + intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); /* Wa Display #1183: skl,kbl,cfl */ cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; - I915_WRITE(CDCLK_CTL, cdclk_ctl); - POSTING_READ(CDCLK_CTL); + intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl); + intel_de_posting_read(dev_priv, CDCLK_CTL); /* inform PCU of the change */ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); + cdclk_config->voltage_level); intel_update_cdclk(dev_priv); } @@ -1089,11 +1106,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * There is SWF18 scratchpad register defined which is set by the * pre-os which can be used by the OS drivers to check the status */ - if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) + if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ if (dev_priv->cdclk.hw.vco == 0 || @@ -1106,7 +1123,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. 
*/ - cdctl = I915_READ(CDCLK_CTL); + cdctl = intel_de_read(dev_priv, CDCLK_CTL); expected = (cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); if (cdctl == expected) @@ -1114,7 +1131,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) return; sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ dev_priv->cdclk.hw.cdclk = 0; @@ -1122,9 +1139,9 @@ sanitize: dev_priv->cdclk.hw.vco = -1; } -static void skl_init_cdclk(struct drm_i915_private *dev_priv) +static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_state cdclk_state; + struct intel_cdclk_config cdclk_config; skl_sanitize_cdclk(dev_priv); @@ -1140,26 +1157,26 @@ static void skl_init_cdclk(struct drm_i915_private *dev_priv) return; } - cdclk_state = dev_priv->cdclk.hw; + cdclk_config = dev_priv->cdclk.hw; - cdclk_state.vco = dev_priv->skl_preferred_vco_freq; - if (cdclk_state.vco == 0) - cdclk_state.vco = 8100000; - cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco); - cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk); + cdclk_config.vco = dev_priv->skl_preferred_vco_freq; + if (cdclk_config.vco == 0) + cdclk_config.vco = 8100000; + cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco); + cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } -static void skl_uninit_cdclk(struct drm_i915_private *dev_priv) +static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk); + cdclk_config.cdclk = cdclk_config.bypass; + cdclk_config.vco = 0; + cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk); - skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } static const struct intel_cdclk_vals bxt_cdclk_table[] = { @@ -1223,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) table[i].cdclk >= min_cdclk) return table[i].cdclk; - WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n", - min_cdclk, dev_priv->cdclk.hw.ref); + drm_WARN(&dev_priv->drm, 1, + "Cannot satisfy minimum cdclk %d with refclk %u\n", + min_cdclk, dev_priv->cdclk.hw.ref); return 0; } @@ -1241,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) table[i].cdclk == cdclk) return dev_priv->cdclk.hw.ref * table[i].ratio; - WARN(1, "cdclk %d not valid for refclk %u\n", - cdclk, dev_priv->cdclk.hw.ref); + drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); return 0; } @@ -1283,56 +1301,68 @@ static u8 ehl_calc_voltage_level(int cdclk) return 0; } +static u8 tgl_calc_voltage_level(int cdclk) +{ + if (cdclk > 556800) + return 3; + else if (cdclk > 326400) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; +} + static void cnl_readout_refclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) - cdclk_state->ref = 24000; + if 
(intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) + cdclk_config->ref = 24000; else - cdclk_state->ref = 19200; + cdclk_config->ref = 19200; } static void icl_readout_refclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { - u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; + u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; switch (dssm) { default: MISSING_CASE(dssm); /* fall through */ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: - cdclk_state->ref = 24000; + cdclk_config->ref = 24000; break; case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: - cdclk_state->ref = 19200; + cdclk_config->ref = 19200; break; case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: - cdclk_state->ref = 38400; + cdclk_config->ref = 38400; break; } } static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { u32 val, ratio; if (INTEL_GEN(dev_priv) >= 11) - icl_readout_refclk(dev_priv, cdclk_state); + icl_readout_refclk(dev_priv, cdclk_config); else if (IS_CANNONLAKE(dev_priv)) - cnl_readout_refclk(dev_priv, cdclk_state); + cnl_readout_refclk(dev_priv, cdclk_config); else - cdclk_state->ref = 19200; + cdclk_config->ref = 19200; - val = I915_READ(BXT_DE_PLL_ENABLE); + val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE); if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || (val & BXT_DE_PLL_LOCK) == 0) { /* * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but * setting it to zero is a way to signal that. */ - cdclk_state->vco = 0; + cdclk_config->vco = 0; return; } @@ -1343,47 +1373,49 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 10) ratio = val & CNL_CDCLK_PLL_RATIO_MASK; else - ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; + ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - cdclk_state->vco = ratio * cdclk_state->ref; + cdclk_config->vco = ratio * cdclk_config->ref; } static void bxt_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) + struct intel_cdclk_config *cdclk_config) { u32 divider; int div; - bxt_de_pll_readout(dev_priv, cdclk_state); + bxt_de_pll_readout(dev_priv, cdclk_config); if (INTEL_GEN(dev_priv) >= 12) - cdclk_state->bypass = cdclk_state->ref / 2; + cdclk_config->bypass = cdclk_config->ref / 2; else if (INTEL_GEN(dev_priv) >= 11) - cdclk_state->bypass = 50000; + cdclk_config->bypass = 50000; else - cdclk_state->bypass = cdclk_state->ref; + cdclk_config->bypass = cdclk_config->ref; - if (cdclk_state->vco == 0) { - cdclk_state->cdclk = cdclk_state->bypass; + if (cdclk_config->vco == 0) { + cdclk_config->cdclk = cdclk_config->bypass; goto out; } - divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; + divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: div = 2; break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, - "Unsupported divider\n"); + drm_WARN(&dev_priv->drm, + IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); div = 3; break; case BXT_CDCLK_CD2X_DIV_SEL_2: div = 4; break; case BXT_CDCLK_CD2X_DIV_SEL_4: - WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); + drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); div = 8; break; default: @@ -1391,25 +1423,25 @@ static void bxt_get_cdclk(struct 
drm_i915_private *dev_priv, return; } - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div); + cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div); out: /* * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ - cdclk_state->voltage_level = - dev_priv->display.calc_voltage_level(cdclk_state->cdclk); + cdclk_config->voltage_level = + dev_priv->display.calc_voltage_level(cdclk_config->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) { - I915_WRITE(BXT_DE_PLL_ENABLE, 0); + intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ if (intel_de_wait_for_clear(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL unlock\n"); + drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n"); dev_priv->cdclk.hw.vco = 0; } @@ -1419,17 +1451,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); u32 val; - val = I915_READ(BXT_DE_PLL_CTL); + val = intel_de_read(dev_priv, BXT_DE_PLL_CTL); val &= ~BXT_DE_PLL_RATIO_MASK; val |= BXT_DE_PLL_RATIO(ratio); - I915_WRITE(BXT_DE_PLL_CTL, val); + intel_de_write(dev_priv, BXT_DE_PLL_CTL, val); - I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL lock\n"); + drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n"); dev_priv->cdclk.hw.vco = vco; } @@ -1438,13 +1470,14 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(BXT_DE_PLL_ENABLE); + val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE); val &= ~BXT_DE_PLL_PLL_ENABLE; - I915_WRITE(BXT_DE_PLL_ENABLE, val); + intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) - DRM_ERROR("timeout waiting for CDCLK PLL unlock\n"); + if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + drm_err(&dev_priv->drm, + "timeout waiting for CDCLK PLL unlock\n"); dev_priv->cdclk.hw.vco = 0; } @@ -1455,14 +1488,15 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) u32 val; val = CNL_CDCLK_PLL_RATIO(ratio); - I915_WRITE(BXT_DE_PLL_ENABLE, val); + intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); val |= BXT_DE_PLL_PLL_ENABLE; - I915_WRITE(BXT_DE_PLL_ENABLE, val); + intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) - DRM_ERROR("timeout waiting for CDCLK PLL lock\n"); + if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + drm_err(&dev_priv->drm, + "timeout waiting for CDCLK PLL lock\n"); dev_priv->cdclk.hw.vco = vco; } @@ -1488,11 +1522,11 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe } static void bxt_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - int cdclk = cdclk_state->cdclk; - int vco = cdclk_state->vco; + int cdclk = cdclk_config->cdclk; + int vco = cdclk_config->vco; u32 val, divider; int ret; @@ -1512,30 +1546,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, 0x80000000, 150, 2); if (ret) { - DRM_ERROR("Failed to inform PCU 
about cdclk change (err %d, freq %d)\n", - ret, cdclk); + drm_err(&dev_priv->drm, + "Failed to inform PCU about cdclk change (err %d, freq %d)\n", + ret, cdclk); return; } /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: - WARN_ON(cdclk != dev_priv->cdclk.hw.bypass); - WARN_ON(vco != 0); + drm_WARN_ON(&dev_priv->drm, + cdclk != dev_priv->cdclk.hw.bypass); + drm_WARN_ON(&dev_priv->drm, vco != 0); /* fall through */ case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; break; case 3: - WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, - "Unsupported divider\n"); + drm_WARN(&dev_priv->drm, + IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; break; case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; break; case 8: - WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); + drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); divider = BXT_CDCLK_CD2X_DIV_SEL_4; break; } @@ -1566,14 +1604,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, */ if (IS_GEN9_LP(dev_priv) && cdclk >= 500000) val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - I915_WRITE(CDCLK_CTL, val); + intel_de_write(dev_priv, CDCLK_CTL, val); if (pipe != INVALID_PIPE) intel_wait_for_vblank(dev_priv, pipe); if (INTEL_GEN(dev_priv) >= 10) { ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); + cdclk_config->voltage_level); } else { /* * The timeout isn't specified, the 2ms used here is based on @@ -1583,13 +1621,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, */ ret = sandybridge_pcode_write_timeout(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_state->voltage_level, + cdclk_config->voltage_level, 150, 2); } if (ret) { - DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, cdclk); + drm_err(&dev_priv->drm, + "PCode CDCLK freq set failed, (err %d, freq %d)\n", + ret, cdclk); return; } @@ -1600,7 +1639,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * Can't read out the voltage level :( * Let's just assume everything is as expected. */ - dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; + dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level; } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) @@ -1609,7 +1648,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) int cdclk, vco; intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); if (dev_priv->cdclk.hw.vco == 0 || dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) @@ -1621,7 +1660,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, * so sanitize this register. 
*/ - cdctl = I915_READ(CDCLK_CTL); + cdctl = intel_de_read(dev_priv, CDCLK_CTL); /* * Let's ignore the pipe field, since BIOS could have configured the * dividers both synching to an active pipe, or asynchronously @@ -1672,7 +1711,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) return; sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); + drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n"); /* force cdclk programming */ dev_priv->cdclk.hw.cdclk = 0; @@ -1681,9 +1720,9 @@ sanitize: dev_priv->cdclk.hw.vco = -1; } -static void bxt_init_cdclk(struct drm_i915_private *dev_priv) +static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_state cdclk_state; + struct intel_cdclk_config cdclk_config; bxt_sanitize_cdclk(dev_priv); @@ -1691,35 +1730,35 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv) dev_priv->cdclk.hw.vco != 0) return; - cdclk_state = dev_priv->cdclk.hw; + cdclk_config = dev_priv->cdclk.hw; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. */ - cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); - cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); - cdclk_state.voltage_level = - dev_priv->display.calc_voltage_level(cdclk_state.cdclk); + cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0); + cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk); + cdclk_config.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } -static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) +static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv) { - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; + struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw; - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - cdclk_state.voltage_level = - dev_priv->display.calc_voltage_level(cdclk_state.cdclk); + cdclk_config.cdclk = cdclk_config.bypass; + cdclk_config.vco = 0; + cdclk_config.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_config.cdclk); - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE); } /** - * intel_cdclk_init - Initialize CDCLK + * intel_cdclk_init_hw - Initialize CDCLK hardware * @i915: i915 device * * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and @@ -1727,39 +1766,41 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) * during the display core initialization sequence, after which the DMC will * take care of turning CDCLK off/on as needed. */ -void intel_cdclk_init(struct drm_i915_private *i915) +void intel_cdclk_init_hw(struct drm_i915_private *i915) { if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) - bxt_init_cdclk(i915); + bxt_cdclk_init_hw(i915); else if (IS_GEN9_BC(i915)) - skl_init_cdclk(i915); + skl_cdclk_init_hw(i915); } /** - * intel_cdclk_uninit - Uninitialize CDCLK + * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware * @i915: i915 device * * Uninitialize CDCLK. This is done only during the display core * uninitialization sequence. 
*/ -void intel_cdclk_uninit(struct drm_i915_private *i915) +void intel_cdclk_uninit_hw(struct drm_i915_private *i915) { if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915)) - bxt_uninit_cdclk(i915); + bxt_cdclk_uninit_hw(i915); else if (IS_GEN9_BC(i915)) - skl_uninit_cdclk(i915); + skl_cdclk_uninit_hw(i915); } /** - * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes - * @a: first CDCLK state - * @b: second CDCLK state + * intel_cdclk_needs_modeset - Determine if changing between the CDCLK + * configurations requires a modeset on all pipes + * @a: first CDCLK configuration + * @b: second CDCLK configuration * * Returns: - * True if the CDCLK states require pipes to be off during reprogramming, false if not. + * True if changing between the two CDCLK configurations + * requires all pipes to be off, false if not. */ -bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) { return a->cdclk != b->cdclk || a->vco != b->vco || @@ -1767,17 +1808,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, } /** - * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update - * @dev_priv: Not a CDCLK state, it's the drm_i915_private! - * @a: first CDCLK state - * @b: second CDCLK state + * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK + * configurations requires only a cd2x divider update + * @dev_priv: i915 device + * @a: first CDCLK configuration + * @b: second CDCLK configuration * * Returns: - * True if the CDCLK states require just a cd2x divider update, false if not. + * True if changing between the two CDCLK configurations + * can be done with just a cd2x divider update, false if not. */ -static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) { /* Older hw doesn't have the capability */ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv)) @@ -1789,117 +1832,138 @@ static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, } /** - * intel_cdclk_changed - Determine if two CDCLK states are different - * @a: first CDCLK state - * @b: second CDCLK state + * intel_cdclk_changed - Determine if two CDCLK configurations are different + * @a: first CDCLK configuration + * @b: second CDCLK configuration * * Returns: - * True if the CDCLK states don't match, false if they do. + * True if the CDCLK configurations don't match, false if they do. */ -static bool intel_cdclk_changed(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_changed(const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b) { return intel_cdclk_needs_modeset(a, b) || a->voltage_level != b->voltage_level; } -/** - * intel_cdclk_swap_state - make atomic CDCLK configuration effective - * @state: atomic state - * - * This is the CDCLK version of drm_atomic_helper_swap_state() since the - * helper does not handle driver-specific global state. - * - * Similarly to the atomic helpers this function does a complete swap, - * i.e. it also puts the old state into @state. 
This is used by the commit - * code to determine how CDCLK has changed (for instance did it increase or - * decrease). - */ -void intel_cdclk_swap_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - - swap(state->cdclk.logical, dev_priv->cdclk.logical); - swap(state->cdclk.actual, dev_priv->cdclk.actual); -} - -void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, - const char *context) +void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, + const char *context) { DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", - context, cdclk_state->cdclk, cdclk_state->vco, - cdclk_state->ref, cdclk_state->bypass, - cdclk_state->voltage_level); + context, cdclk_config->cdclk, cdclk_config->vco, + cdclk_config->ref, cdclk_config->bypass, + cdclk_config->voltage_level); } /** - * intel_set_cdclk - Push the CDCLK state to the hardware + * intel_set_cdclk - Push the CDCLK configuration to the hardware * @dev_priv: i915 device - * @cdclk_state: new CDCLK state + * @cdclk_config: new CDCLK configuration * @pipe: pipe with which to synchronize the update * * Program the hardware based on the passed in CDCLK state, * if necessary. */ static void intel_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state)) + struct intel_encoder *encoder; + + if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config)) return; - if (WARN_ON_ONCE(!dev_priv->display.set_cdclk)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk)) return; - intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to"); + intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); - dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe); + /* + * Lock aux/gmbus while we change cdclk in case those + * functions use cdclk. Not all platforms/ports do, + * but we'll lock them all for simplicity. + */ + mutex_lock(&dev_priv->gmbus_mutex); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + mutex_lock_nest_lock(&intel_dp->aux.hw_mutex, + &dev_priv->gmbus_mutex); + } + + dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe); - if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state), - "cdclk state doesn't match!\n")) { - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]"); - intel_dump_cdclk_state(cdclk_state, "[sw state]"); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + mutex_unlock(&intel_dp->aux.hw_mutex); + } + mutex_unlock(&dev_priv->gmbus_mutex); + + if (drm_WARN(&dev_priv->drm, + intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), + "cdclk state doesn't match!\n")) { + intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]"); + intel_dump_cdclk_config(cdclk_config, "[sw state]"); } } /** * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware - * @dev_priv: i915 device - * @old_state: old CDCLK state - * @new_state: new CDCLK state - * @pipe: pipe with which to synchronize the update + * @state: intel atomic state * - * Program the hardware before updating the HW plane state based on the passed - * in CDCLK state, if necessary. + * Program the hardware before updating the HW plane state based on the + * new CDCLK state, if necessary. 
*/ void -intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *old_state, - const struct intel_cdclk_state *new_state, - enum pipe pipe) +intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state) { - if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk) - intel_set_cdclk(dev_priv, new_state, pipe); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state = + intel_atomic_get_old_cdclk_state(state); + const struct intel_cdclk_state *new_cdclk_state = + intel_atomic_get_new_cdclk_state(state); + enum pipe pipe = new_cdclk_state->pipe; + + if (!intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual)) + return; + + if (pipe == INVALID_PIPE || + old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) { + drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); + + intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); + } } /** * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware - * @dev_priv: i915 device - * @old_state: old CDCLK state - * @new_state: new CDCLK state - * @pipe: pipe with which to synchronize the update + * @state: intel atomic state * - * Program the hardware after updating the HW plane state based on the passed - * in CDCLK state, if necessary. + * Program the hardware after updating the HW plane state based on the + * new CDCLK state, if necessary. */ void -intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *old_state, - const struct intel_cdclk_state *new_state, - enum pipe pipe) +intel_set_cdclk_post_plane_update(struct intel_atomic_state *state) { - if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk) - intel_set_cdclk(dev_priv, new_state, pipe); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state = + intel_atomic_get_old_cdclk_state(state); + const struct intel_cdclk_state *new_cdclk_state = + intel_atomic_get_new_cdclk_state(state); + enum pipe pipe = new_cdclk_state->pipe; + + if (!intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual)) + return; + + if (pipe != INVALID_PIPE && + old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) { + drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed); + + intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe); + } } static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) @@ -2017,25 +2081,24 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); if (min_cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", - min_cdclk, dev_priv->max_cdclk_freq); + drm_dbg_kms(&dev_priv->drm, + "required cdclk (%d kHz) exceeds max (%d kHz)\n", + min_cdclk, dev_priv->max_cdclk_freq); return -EINVAL; } return min_cdclk; } -static int intel_compute_min_cdclk(struct intel_atomic_state *state) +static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state) { + struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int min_cdclk, i; enum pipe pipe; - memcpy(state->min_cdclk, dev_priv->min_cdclk, - sizeof(state->min_cdclk)); - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { int ret; @@ -2043,19 +2106,19 @@ static 
int intel_compute_min_cdclk(struct intel_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - if (state->min_cdclk[i] == min_cdclk) + if (cdclk_state->min_cdclk[i] == min_cdclk) continue; - state->min_cdclk[i] = min_cdclk; + cdclk_state->min_cdclk[i] = min_cdclk; - ret = intel_atomic_lock_global_state(state); + ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) return ret; } - min_cdclk = state->cdclk.force_min_cdclk; + min_cdclk = cdclk_state->force_min_cdclk; for_each_pipe(dev_priv, pipe) - min_cdclk = max(state->min_cdclk[pipe], min_cdclk); + min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk); return min_cdclk; } @@ -2073,8 +2136,9 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) * future platforms this code will need to be * adjusted. */ -static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) +static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state) { + struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; @@ -2082,9 +2146,6 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) int i; enum pipe pipe; - memcpy(state->min_voltage_level, dev_priv->min_voltage_level, - sizeof(state->min_voltage_level)); - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { int ret; @@ -2093,57 +2154,58 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) else min_voltage_level = 0; - if (state->min_voltage_level[i] == min_voltage_level) + if (cdclk_state->min_voltage_level[i] == min_voltage_level) continue; - state->min_voltage_level[i] = min_voltage_level; + cdclk_state->min_voltage_level[i] = min_voltage_level; - ret = intel_atomic_lock_global_state(state); + ret = intel_atomic_lock_global_state(&cdclk_state->base); if (ret) return ret; } min_voltage_level = 0; for_each_pipe(dev_priv, pipe) - min_voltage_level = max(state->min_voltage_level[pipe], + min_voltage_level = max(cdclk_state->min_voltage_level[pipe], min_voltage_level); return min_voltage_level; } -static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) +static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { + struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, cdclk; - min_cdclk = intel_compute_min_cdclk(state); + min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = + cdclk_state->logical.cdclk = cdclk; + cdclk_state->logical.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); - if (!state->active_pipes) { - cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + if (!cdclk_state->active_pipes) { + cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = + cdclk_state->actual.cdclk = cdclk; + cdclk_state->actual.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); } else { - state->cdclk.actual = state->cdclk.logical; + cdclk_state->actual = cdclk_state->logical; } return 0; } -static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) +static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk, cdclk; - min_cdclk = intel_compute_min_cdclk(state); + min_cdclk 
= intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; @@ -2153,31 +2215,32 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) */ cdclk = bdw_calc_cdclk(min_cdclk); - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = + cdclk_state->logical.cdclk = cdclk; + cdclk_state->logical.voltage_level = bdw_calc_voltage_level(cdclk); - if (!state->active_pipes) { - cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk); + if (!cdclk_state->active_pipes) { + cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk); - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = + cdclk_state->actual.cdclk = cdclk; + cdclk_state->actual.voltage_level = bdw_calc_voltage_level(cdclk); } else { - state->cdclk.actual = state->cdclk.logical; + cdclk_state->actual = cdclk_state->logical; } return 0; } -static int skl_dpll0_vco(struct intel_atomic_state *state) +static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state) { + struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; int vco, i; - vco = state->cdclk.logical.vco; + vco = cdclk_state->logical.vco; if (!vco) vco = dev_priv->skl_preferred_vco_freq; @@ -2206,15 +2269,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *state) return vco; } -static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) +static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk, cdclk, vco; - min_cdclk = intel_compute_min_cdclk(state); + min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; - vco = skl_dpll0_vco(state); + vco = skl_dpll0_vco(cdclk_state); /* * FIXME should also account for plane ratio @@ -2222,57 +2285,58 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) */ cdclk = skl_calc_cdclk(min_cdclk, vco); - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = + cdclk_state->logical.vco = vco; + cdclk_state->logical.cdclk = cdclk; + cdclk_state->logical.voltage_level = skl_calc_voltage_level(cdclk); - if (!state->active_pipes) { - cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco); + if (!cdclk_state->active_pipes) { + cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco); - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = + cdclk_state->actual.vco = vco; + cdclk_state->actual.cdclk = cdclk; + cdclk_state->actual.voltage_level = skl_calc_voltage_level(cdclk); } else { - state->cdclk.actual = state->cdclk.logical; + cdclk_state->actual = cdclk_state->logical; } return 0; } -static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) +static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { + struct intel_atomic_state *state = cdclk_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->base.dev); int min_cdclk, min_voltage_level, cdclk, vco; - min_cdclk = intel_compute_min_cdclk(state); + min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; - min_voltage_level = bxt_compute_min_voltage_level(state); + min_voltage_level = bxt_compute_min_voltage_level(cdclk_state); if (min_voltage_level < 0) return min_voltage_level; cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = 
cdclk; - state->cdclk.logical.voltage_level = + cdclk_state->logical.vco = vco; + cdclk_state->logical.cdclk = cdclk; + cdclk_state->logical.voltage_level = max_t(int, min_voltage_level, dev_priv->display.calc_voltage_level(cdclk)); - if (!state->active_pipes) { - cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + if (!cdclk_state->active_pipes) { + cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = + cdclk_state->actual.vco = vco; + cdclk_state->actual.cdclk = cdclk; + cdclk_state->actual.voltage_level = dev_priv->display.calc_voltage_level(cdclk); } else { - state->cdclk.actual = state->cdclk.logical; + cdclk_state->actual = cdclk_state->logical; } return 0; @@ -2317,7 +2381,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state) return 0; } -static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state) +static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state) { int min_cdclk; @@ -2326,54 +2390,113 @@ static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state) * check that the required minimum frequency doesn't exceed * the actual cdclk frequency. */ - min_cdclk = intel_compute_min_cdclk(state); + min_cdclk = intel_compute_min_cdclk(cdclk_state); if (min_cdclk < 0) return min_cdclk; return 0; } +static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj) +{ + struct intel_cdclk_state *cdclk_state; + + cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL); + if (!cdclk_state) + return NULL; + + cdclk_state->force_min_cdclk_changed = false; + cdclk_state->pipe = INVALID_PIPE; + + return &cdclk_state->base; +} + +static void intel_cdclk_destroy_state(struct intel_global_obj *obj, + struct intel_global_state *state) +{ + kfree(state); +} + +static const struct intel_global_state_funcs intel_cdclk_funcs = { + .atomic_duplicate_state = intel_cdclk_duplicate_state, + .atomic_destroy_state = intel_cdclk_destroy_state, +}; + +struct intel_cdclk_state * +intel_atomic_get_cdclk_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_global_state *cdclk_state; + + cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj); + if (IS_ERR(cdclk_state)) + return ERR_CAST(cdclk_state); + + return to_intel_cdclk_state(cdclk_state); +} + +int intel_cdclk_init(struct drm_i915_private *dev_priv) +{ + struct intel_cdclk_state *cdclk_state; + + cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL); + if (!cdclk_state) + return -ENOMEM; + + intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj, + &cdclk_state->base, &intel_cdclk_funcs); + + return 0; +} + int intel_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_cdclk_state *old_cdclk_state; + struct intel_cdclk_state *new_cdclk_state; enum pipe pipe; int ret; - ret = dev_priv->display.modeset_calc_cdclk(state); + new_cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(new_cdclk_state)) + return PTR_ERR(new_cdclk_state); + + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + + new_cdclk_state->active_pipes = + intel_calc_active_pipes(state, old_cdclk_state->active_pipes); + + ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state); if (ret) return ret; - /* - * Writes to 
dev_priv->cdclk.{actual,logical} must protected - * by holding all the crtc mutexes even if we don't end up - * touching the hardware - */ - if (intel_cdclk_changed(&dev_priv->cdclk.actual, - &state->cdclk.actual)) { + if (intel_cdclk_changed(&old_cdclk_state->actual, + &new_cdclk_state->actual)) { /* * Also serialize commits across all crtcs * if the actual hw needs to be poked. */ - ret = intel_atomic_serialize_global_state(state); + ret = intel_atomic_serialize_global_state(&new_cdclk_state->base); if (ret) return ret; - } else if (intel_cdclk_changed(&dev_priv->cdclk.logical, - &state->cdclk.logical)) { - ret = intel_atomic_lock_global_state(state); + } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes || + intel_cdclk_changed(&old_cdclk_state->logical, + &new_cdclk_state->logical)) { + ret = intel_atomic_lock_global_state(&new_cdclk_state->base); if (ret) return ret; } else { return 0; } - if (is_power_of_2(state->active_pipes) && - intel_cdclk_needs_cd2x_update(dev_priv, - &dev_priv->cdclk.actual, - &state->cdclk.actual)) { + if (is_power_of_2(new_cdclk_state->active_pipes) && + intel_cdclk_can_cd2x_update(dev_priv, + &old_cdclk_state->actual, + &new_cdclk_state->actual)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; - pipe = ilog2(state->active_pipes); + pipe = ilog2(new_cdclk_state->active_pipes); crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); @@ -2387,28 +2510,32 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state) } if (pipe != INVALID_PIPE) { - state->cdclk.pipe = pipe; + new_cdclk_state->pipe = pipe; - DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n", - pipe_name(pipe)); - } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, - &state->cdclk.actual)) { + drm_dbg_kms(&dev_priv->drm, + "Can change cdclk with pipe %c active\n", + pipe_name(pipe)); + } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual, + &new_cdclk_state->actual)) { /* All pipes must be switched off while we change the cdclk. 
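 * A cd2x divider-only update can be applied against a single running pipe: a one-hot active_pipes mask (e.g. BIT(PIPE_B)) passes the is_power_of_2() check above and ilog2() recovers that pipe for the update to be synchronized against. A change that has to retune the cdclk PLL itself cannot be done that way and lands here instead, forcing a modeset on every pipe.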
*/ ret = intel_modeset_all_pipes(state); if (ret) return ret; - state->cdclk.pipe = INVALID_PIPE; + new_cdclk_state->pipe = INVALID_PIPE; - DRM_DEBUG_KMS("Modeset required for cdclk change\n"); + drm_dbg_kms(&dev_priv->drm, + "Modeset required for cdclk change\n"); } - DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", - state->cdclk.logical.cdclk, - state->cdclk.actual.cdclk); - DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", - state->cdclk.logical.voltage_level, - state->cdclk.actual.voltage_level); + drm_dbg_kms(&dev_priv->drm, + "New cdclk calculated to be logical %u kHz, actual %u kHz\n", + new_cdclk_state->logical.cdclk, + new_cdclk_state->actual.cdclk); + drm_dbg_kms(&dev_priv->drm, + "New voltage level calculated to be logical %u, actual %u\n", + new_cdclk_state->logical.voltage_level, + new_cdclk_state->actual.voltage_level); return 0; } @@ -2453,11 +2580,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->max_cdclk_freq = 528000; } else if (IS_GEN9_BC(dev_priv)) { - u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; vco = dev_priv->skl_preferred_vco_freq; - WARN_ON(vco != 8100000 && vco != 8640000); + drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000); /* * Use the lower (vco 8640) cdclk values as a @@ -2485,7 +2612,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) * How can we know if extra cooling is * available? PCI ID, VTB, something else? */ - if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) + if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT) dev_priv->max_cdclk_freq = 450000; else if (IS_BDW_ULX(dev_priv)) dev_priv->max_cdclk_freq = 450000; @@ -2504,11 +2631,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv); - DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n", - dev_priv->max_cdclk_freq); + drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n", + dev_priv->max_cdclk_freq); - DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n", - dev_priv->max_dotclk_freq); + drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n", + dev_priv->max_dotclk_freq); } /** @@ -2528,8 +2655,8 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv) * generate GMBus clock. This will vary with the cdclk freq. 
*/ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - I915_WRITE(GMBUSFREQ_VLV, - DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); + intel_de_write(dev_priv, GMBUSFREQ_VLV, + DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000)); } static int cnp_rawclk(struct drm_i915_private *dev_priv) @@ -2537,7 +2664,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) u32 rawclk; int divider, fraction; - if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { + if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { /* 24 MHz */ divider = 24000; fraction = 0; @@ -2557,13 +2684,13 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) rawclk |= ICP_RAWCLK_NUM(numerator); } - I915_WRITE(PCH_RAWCLK_FREQ, rawclk); + intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk); return divider + fraction; } static int pch_rawclk(struct drm_i915_private *dev_priv) { - return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; + return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } static int vlv_hrawclk(struct drm_i915_private *dev_priv) @@ -2578,7 +2705,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) u32 clkcfg; /* hrawclock is 1/4 the FSB frequency */ - clkcfg = I915_READ(CLKCFG); + clkcfg = intel_de_read(dev_priv, CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: return 100000; @@ -2600,27 +2727,29 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) } /** - * intel_update_rawclk - Determine the current RAWCLK frequency + * intel_read_rawclk - Determine the current RAWCLK frequency * @dev_priv: i915 device * * Determine the current RAWCLK frequency. RAWCLK is a fixed * frequency clock so this needs to be done only once. */ -void intel_update_rawclk(struct drm_i915_private *dev_priv) +u32 intel_read_rawclk(struct drm_i915_private *dev_priv) { + u32 freq; + if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - dev_priv->rawclk_freq = cnp_rawclk(dev_priv); + freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) - dev_priv->rawclk_freq = pch_rawclk(dev_priv); + freq = pch_rawclk(dev_priv); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->rawclk_freq = vlv_hrawclk(dev_priv); + freq = vlv_hrawclk(dev_priv); else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv)) - dev_priv->rawclk_freq = g4x_hrawclk(dev_priv); + freq = g4x_hrawclk(dev_priv); else /* no rawclk on other platforms, or no need to know it */ - return; + return 0; - DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq); + return freq; } /** @@ -2629,7 +2758,12 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (IS_ELKHARTLAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12) { + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = tgl_calc_voltage_level; + dev_priv->cdclk.table = icl_cdclk_table; + } else if (IS_ELKHARTLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; @@ -2709,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) else if (IS_I845G(dev_priv)) dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk; else { /* 830 */ - WARN(!IS_I830(dev_priv), - "Unknown platform. Assuming 133 MHz CDCLK\n"); + drm_WARN(&dev_priv->drm, !IS_I830(dev_priv), + "Unknown platform. 
Assuming 133 MHz CDCLK\n"); dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk; } } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index cf71394cc79c..5731806e4cee 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -8,11 +8,12 @@ #include <linux/types.h> +#include "i915_drv.h" #include "intel_display.h" +#include "intel_global_state.h" struct drm_i915_private; struct intel_atomic_state; -struct intel_cdclk_state; struct intel_crtc_state; struct intel_cdclk_vals { @@ -22,28 +23,62 @@ struct intel_cdclk_vals { u8 ratio; }; +struct intel_cdclk_state { + struct intel_global_state base; + + /* + * Logical configuration of cdclk (used for all scaling, + * watermark, etc. calculations and checks). This is + * computed as if all enabled crtcs were active. + */ + struct intel_cdclk_config logical; + + /* + * Actual configuration of cdclk, can be different from the + * logical configuration only when all crtc's are DPMS off. + */ + struct intel_cdclk_config actual; + + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; + /* minimum acceptable voltage level for each pipe */ + u8 min_voltage_level[I915_MAX_PIPES]; + + /* pipe to which cd2x update is synchronized */ + enum pipe pipe; + + /* forced minimum cdclk for glk+ audio w/a */ + int force_min_cdclk; + bool force_min_cdclk_changed; + + /* bitmask of active pipes */ + u8 active_pipes; +}; + int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); -void intel_cdclk_init(struct drm_i915_private *i915); -void intel_cdclk_uninit(struct drm_i915_private *i915); +void intel_cdclk_init_hw(struct drm_i915_private *i915); +void intel_cdclk_uninit_hw(struct drm_i915_private *i915); void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); void intel_update_max_cdclk(struct drm_i915_private *dev_priv); void intel_update_cdclk(struct drm_i915_private *dev_priv); -void intel_update_rawclk(struct drm_i915_private *dev_priv); -bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b); -void intel_cdclk_swap_state(struct intel_atomic_state *state); -void -intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *old_state, - const struct intel_cdclk_state *new_state, - enum pipe pipe); -void -intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *old_state, - const struct intel_cdclk_state *new_state, - enum pipe pipe); -void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, - const char *context); +u32 intel_read_rawclk(struct drm_i915_private *dev_priv); +bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, + const struct intel_cdclk_config *b); +void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); +void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); +void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, + const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); +struct intel_cdclk_state * +intel_atomic_get_cdclk_state(struct intel_atomic_state *state); + +#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base) +#define intel_atomic_get_old_cdclk_state(state) \ + to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) +#define intel_atomic_get_new_cdclk_state(state) \ 
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj)) + +int intel_cdclk_init(struct drm_i915_private *dev_priv); + #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 3980e8b50c28..c1cce93a1c25 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -157,23 +157,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]); + intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]); + intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]); + intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]); - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); + intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe), + coeff[0] << 16 | coeff[1]); + intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); + intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe), + coeff[3] << 16 | coeff[4]); + intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); + intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe), + coeff[6] << 16 | coeff[7]); + intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); if (INTEL_GEN(dev_priv) >= 7) { - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]); + intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe), + postoff[0]); + intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe), + postoff[1]); + intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe), + postoff[2]); } } @@ -185,22 +191,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); - I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); - I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), + coeff[0] << 16 | coeff[1]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe), + coeff[2] << 16); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), + coeff[3] << 16 | coeff[4]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe), + coeff[5] << 16); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); - I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16); + intel_de_write(dev_priv, 
PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), + coeff[6] << 16 | coeff[7]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe), + coeff[8] << 16); - I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); - I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); - I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); + intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); } static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) @@ -297,14 +309,16 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) * LUT is needed but CSC is not we need to load an * identity matrix. */ - WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) && + !IS_GEMINILAKE(dev_priv)); ilk_update_pipe_csc(crtc, ilk_csc_off_zero, ilk_csc_coeff_identity, ilk_csc_off_zero); } - I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); + intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), + crtc_state->csc_mode); } static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) @@ -330,51 +344,74 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) ilk_csc_postoff_limited_range); } - I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); + intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe), + crtc_state->csc_mode); } -/* - * Set up the pipe CSC unit on CherryView. - */ -static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void chv_load_cgm_csc(struct intel_crtc *crtc, + const struct drm_property_blob *blob) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_ctm *ctm = blob->data; enum pipe pipe = crtc->pipe; + u16 coeffs[9]; + int i; - if (crtc_state->hw.ctm) { - const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data; - u16 coeffs[9] = {}; - int i; - - for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - u64 abs_coeff = - ((1ULL << 63) - 1) & ctm->matrix[i]; - - /* Round coefficient. */ - abs_coeff += 1 << (32 - 13); - /* Clamp to hardware limits. */ - abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); - - /* Write coefficients in S3.12 format. */ - if (ctm->matrix[i] & (1ULL << 63)) - coeffs[i] = 1 << 15; - coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; - coeffs[i] |= (abs_coeff >> 20) & 0xfff; - } + for (i = 0; i < ARRAY_SIZE(coeffs); i++) { + u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i]; + + /* Round coefficient. */ + abs_coeff += 1 << (32 - 13); + /* Clamp to hardware limits. */ + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); + + coeffs[i] = 0; + + /* Write coefficients in S3.12 format. 
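+	 * S3.12 here means: sign in bit 15, three integer bits in 14:12 and + * twelve fractional bits in 11:0. A CTM coefficient of 1.0, i.e. the + * S31.32 value 1ULL << 32, packs to 0x1000.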
*/ + if (ctm->matrix[i] & (1ULL << 63)) + coeffs[i] |= 1 << 15; - I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe), - coeffs[1] << 16 | coeffs[0]); - I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe), - coeffs[3] << 16 | coeffs[2]); - I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe), - coeffs[5] << 16 | coeffs[4]); - I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe), - coeffs[7] << 16 | coeffs[6]); - I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]); + coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; + coeffs[i] |= (abs_coeff >> 20) & 0xfff; } - I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe), + coeffs[1] << 16 | coeffs[0]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe), + coeffs[3] << 16 | coeffs[2]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe), + coeffs[5] << 16 | coeffs[4]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe), + coeffs[7] << 16 | coeffs[6]); + intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe), + coeffs[8]); +} + +/* convert hw value with given bit_precision to lut property val */ +static u32 intel_color_lut_pack(u32 val, int bit_precision) +{ + u32 max = 0xffff >> (16 - bit_precision); + + val = clamp_val(val, 0, max); + + if (bit_precision < 16) + val <<= 16 - bit_precision; + + return val; +} + +static u32 i9xx_lut_8(const struct drm_color_lut *color) +{ + return drm_color_lut_extract(color->red, 8) << 16 | + drm_color_lut_extract(color->green, 8) << 8 | + drm_color_lut_extract(color->blue, 8); +} + +static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val) +{ + entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8); + entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8); } /* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */ @@ -393,49 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color) (color->blue >> 8); } -static u32 ilk_lut_10(const struct drm_color_lut *color) +static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) { - return drm_color_lut_extract(color->red, 10) << 20 | - drm_color_lut_extract(color->green, 10) << 10 | - drm_color_lut_extract(color->blue, 10); + entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_RED_MASK, ldw); + entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_GREEN_MASK, ldw); + entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 | + REG_FIELD_GET(PALETTE_BLUE_MASK, ldw); } -/* Loads the legacy palette/gamma unit for the CRTC. 
*/ -static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state, - const struct drm_property_blob *blob) +static u16 i965_lut_11p6_max_pack(u32 val) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - int i; - - if (HAS_GMCH(dev_priv)) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - assert_dsi_pll_enabled(dev_priv); - else - assert_pll_enabled(dev_priv, pipe); - } - - if (blob) { - const struct drm_color_lut *lut = blob->data; - - for (i = 0; i < 256; i++) { - u32 word = - (drm_color_lut_extract(lut[i].red, 8) << 16) | - (drm_color_lut_extract(lut[i].green, 8) << 8) | - drm_color_lut_extract(lut[i].blue, 8); + /* PIPEGCMAX is 11.6, clamp to 10.6 */ + return clamp_val(val, 0, 0xffff); +} - if (HAS_GMCH(dev_priv)) - I915_WRITE(PALETTE(pipe, i), word); - else - I915_WRITE(LGC_PALETTE(pipe, i), word); - } - } +static u32 ilk_lut_10(const struct drm_color_lut *color) +{ + return drm_color_lut_extract(color->red, 10) << 20 | + drm_color_lut_extract(color->green, 10) << 10 | + drm_color_lut_extract(color->blue, 10); } -static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) +static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val) { - i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut); + entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10); + entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10); } static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) @@ -445,10 +467,10 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; u32 val; - val = I915_READ(PIPECONF(pipe)); + val = intel_de_read(dev_priv, PIPECONF(pipe)); val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX; val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); - I915_WRITE(PIPECONF(pipe), val); + intel_de_write(dev_priv, PIPECONF(pipe), val); } static void ilk_color_commit(const struct intel_crtc_state *crtc_state) @@ -458,10 +480,10 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state) enum pipe pipe = crtc->pipe; u32 val; - val = I915_READ(PIPECONF(pipe)); + val = intel_de_read(dev_priv, PIPECONF(pipe)); val &= ~PIPECONF_GAMMA_MODE_MASK_ILK; val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); - I915_WRITE(PIPECONF(pipe), val); + intel_de_write(dev_priv, PIPECONF(pipe), val); ilk_load_csc_matrix(crtc_state); } @@ -471,7 +493,8 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); + intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe), + crtc_state->gamma_mode); ilk_load_csc_matrix(crtc_state); } @@ -492,9 +515,10 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state) val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; if (crtc_state->csc_enable) val |= SKL_BOTTOM_COLOR_CSC_ENABLE; - I915_WRITE(SKL_BOTTOM_COLOR(pipe), val); + intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val); - I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); + intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe), + crtc_state->gamma_mode); if (INTEL_GEN(dev_priv) >= 11) icl_load_csc_matrix(crtc_state); @@ -502,6 +526,35 @@ static void skl_color_commit(const struct 
intel_crtc_state *crtc_state) ilk_load_csc_matrix(crtc_state); } +static void i9xx_load_lut_8(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut; + enum pipe pipe = crtc->pipe; + int i; + + if (!blob) + return; + + lut = blob->data; + + for (i = 0; i < 256; i++) + intel_de_write(dev_priv, PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); +} + +static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + + assert_pll_enabled(dev_priv, crtc->pipe); + + i9xx_load_lut_8(crtc, gamma_lut); +} + static void i965_load_lut_10p6(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -511,28 +564,52 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - I915_WRITE(PALETTE(pipe, 2 * i + 0), - i965_lut_10p6_ldw(&lut[i])); - I915_WRITE(PALETTE(pipe, 2 * i + 1), - i965_lut_10p6_udw(&lut[i])); + intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), + i965_lut_10p6_ldw(&lut[i])); + intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), + i965_lut_10p6_udw(&lut[i])); } - I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red); - I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green); - I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue); + intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); + intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); + intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) + assert_dsi_pll_enabled(dev_priv); + else + assert_pll_enabled(dev_priv, crtc->pipe); + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - i9xx_load_luts(crtc_state); + i9xx_load_lut_8(crtc, gamma_lut); else i965_load_lut_10p6(crtc, gamma_lut); } +static void ilk_load_lut_8(struct intel_crtc *crtc, + const struct drm_property_blob *blob) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_color_lut *lut; + enum pipe pipe = crtc->pipe; + int i; + + if (!blob) + return; + + lut = blob->data; + + for (i = 0; i < 256; i++) + intel_de_write(dev_priv, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); +} + static void ilk_load_lut_10(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -542,7 +619,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) - I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i])); + intel_de_write(dev_priv, PREC_PALETTE(pipe, i), + ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) @@ -551,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); else ilk_load_lut_10(crtc, gamma_lut); } @@ -584,15 +662,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc, const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 
1)]; - I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++); - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry)); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); + intel_de_write(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette from being * written properly. */ - I915_WRITE(PREC_PAL_INDEX(pipe), 0); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); } /* On BDW+ the index auto increment mode actually works */ @@ -606,22 +685,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; - I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | - PAL_PREC_AUTO_INCREMENT); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { /* We discard half the user entries in split gamma mode */ const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry)); + intel_de_write(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette from being * written properly. */ - I915_WRITE(PREC_PAL_INDEX(pipe), 0); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); } static void ivb_load_lut_ext_max(struct intel_crtc *crtc) @@ -659,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); @@ -682,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state) const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) { bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE | PAL_PREC_INDEX_VALUE(0)); @@ -703,17 +783,17 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data; - u32 i; /* * When setting the auto-increment bit, the hardware seems to * ignore the index bits, so we need to reset it to index 0 * separately. */ - I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0); - I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { /* @@ -729,12 +809,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ToDo: Extend to max 7.0. Enable a 32 bit input value * as compared to just 16 to achieve this. */ - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green); + intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), + lut[i].green); } /* Clamp values > 1.0. 
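 * Only the first lut_size entries cover the 0.0 - 1.0 input range; the remaining entries of the 35-entry hardware LUT are padded below with 1 << 16, the encoding of 1.0 here, so inputs beyond 1.0 saturate.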
*/ while (i++ < 35) - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); } static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) @@ -742,26 +823,26 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; - u32 i; + int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; /* * When setting the auto-increment bit, the hardware seems to * ignore the index bits, so we need to reset it to index 0 * separately. */ - I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0); - I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { u32 v = (i << 16) / (lut_size - 1); - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v); + intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); } /* Clamp values > 1.0. */ while (i++ < 35) - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) @@ -783,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state) glk_load_degamma_lut_linear(crtc_state); if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) { - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); } else { bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc); @@ -827,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) const struct drm_color_lut *lut = blob->data; struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; - u32 i; + int i; /* * Program Super Fine segment (let's call it seg1)... @@ -860,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) const struct drm_color_lut *entry; struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; - u32 i; + int i; /* * Program Fine segment (let's call it seg2)... 
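 * Like the super-fine segment above, these entries go through the display state buffer handle from intel_dsb_get() rather than immediate MMIO writes, letting the LUT be loaded as one batched sequence.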
@@ -919,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: - i9xx_load_luts(crtc_state); + ilk_load_lut_8(crtc, gamma_lut); break; case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: icl_program_gamma_superfine_segment(crtc_state); @@ -945,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color) return drm_color_lut_extract(color->red, 14); } +static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw) +{ + entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10); + entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10); + entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10); +} + static void chv_load_cgm_degamma(struct intel_crtc *crtc, const struct drm_property_blob *blob) { @@ -954,10 +1042,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), - chv_cgm_degamma_ldw(&lut[i])); - I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), - chv_cgm_degamma_udw(&lut[i])); + intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), + chv_cgm_degamma_ldw(&lut[i])); + intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), + chv_cgm_degamma_udw(&lut[i])); } } @@ -981,31 +1069,34 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), - chv_cgm_gamma_ldw(&lut[i])); - I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), - chv_cgm_gamma_udw(&lut[i])); + intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), + chv_cgm_gamma_ldw(&lut[i])); + intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), + chv_cgm_gamma_udw(&lut[i])); } } static void chv_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut; + const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut; + const struct drm_property_blob *ctm = crtc_state->hw.ctm; - cherryview_load_csc_matrix(crtc_state); + if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC) + chv_load_cgm_csc(crtc, ctm); - if (crtc_state_is_legacy_gamma(crtc_state)) { - i9xx_load_luts(crtc_state); - return; - } - - if (degamma_lut) + if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA) chv_load_cgm_degamma(crtc, degamma_lut); - if (gamma_lut) + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) chv_load_cgm_gamma(crtc, gamma_lut); + else + i965_load_luts(crtc_state); + + intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), + crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) @@ -1167,7 +1258,8 @@ static int check_luts(const struct intel_crtc_state *crtc_state) /* C8 relies on its palette being stored in the legacy LUT */ if (crtc_state->c8_planes) { - DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n"); + drm_dbg_kms(&dev_priv->drm, + "C8 pixelformat requires the legacy LUT\n"); return -EINVAL; } @@ -1630,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, return true; } -/* convert hw value with given bit_precision to lut property val */ -static u32 intel_color_lut_pack(u32 val, u32 bit_precision) +static struct 
drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) { - u32 max = 0xffff >> (16 - bit_precision); - - val = clamp_val(val, 0, max); - - if (bit_precision < 16) - val <<= 16 - bit_precision; - - return val; -} - -static struct drm_property_blob * -i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; + int i; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, @@ -1659,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - if (HAS_GMCH(dev_priv)) - val = I915_READ(PALETTE(pipe, i)); - else - val = I915_READ(LGC_PALETTE(pipe, i)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_RED_MASK, val), 8); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_GREEN_MASK, val), 8); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - LGC_PALETTE_BLUE_MASK, val), 8); + u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); + + i9xx_lut_8_pack(&lut[i], val); } return blob; @@ -1680,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) static void i9xx_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc); } -static struct drm_property_blob * -i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val1, val2; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1703,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - val1 = I915_READ(PALETTE(pipe, 2 * i + 0)); - val2 = I915_READ(PALETTE(pipe, 2 * i + 1)); - - blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_RED_MASK, val1); - blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_GREEN_MASK, val1); - blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 | - REG_FIELD_GET(PALETTE_BLUE_MASK, val1); + u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); + u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); + + i965_lut_10p6_pack(&lut[i], ldw, udw); } - blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - I915_READ(PIPEGCMAX(pipe, 0))); - blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - I915_READ(PIPEGCMAX(pipe, 1))); - blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, - I915_READ(PIPEGCMAX(pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, 
PIPEGCMAX(pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } static void i965_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state); + crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc); } -static struct drm_property_blob * -chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1755,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size; i++) { - val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0)); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_BLUE_MASK, val), 10); - - val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1)); - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - CGM_PIPE_GAMMA_RED_MASK, val), 10); + u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); + + chv_cgm_gamma_pack(&lut[i], ldw, udw); } return blob; @@ -1774,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) static void chv_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) - crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state); + crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc); else i965_read_luts(crtc_state); } -static struct drm_property_blob * -ilk_read_lut_10(const struct intel_crtc_state *crtc_state) +static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *lut; + int i; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, + NULL); + if (IS_ERR(blob)) + return NULL; + + lut = blob->data; + + for (i = 0; i < LEGACY_LUT_LENGTH; i++) { + u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); + + i9xx_lut_8_pack(&lut[i], val); + } + + return blob; +} + +static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; enum pipe pipe = crtc->pipe; struct 
drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * lut_size, @@ -1797,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; for (i = 0; i < lut_size; i++) { - val = I915_READ(PREC_PALETTE(pipe, i)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_RED_MASK, val), 10); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - PREC_PALETTE_BLUE_MASK, val), 10); + u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); + + ilk_lut_10_pack(&lut[i], val); } return blob; @@ -1815,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state) static void ilk_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; @@ -1822,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc); } -static struct drm_property_blob * -glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) +static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc, + u32 prec_index) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int hw_lut_size = ivb_lut_10_size(prec_index); + int i, hw_lut_size = ivb_lut_10_size(prec_index); enum pipe pipe = crtc->pipe; struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val; + struct drm_color_lut *lut; blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * hw_lut_size, @@ -1844,36 +1917,33 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) if (IS_ERR(blob)) return NULL; - blob_data = blob->data; + lut = blob->data; - I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | - PAL_PREC_AUTO_INCREMENT); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { - val = I915_READ(PREC_PAL_DATA(pipe)); - - blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_RED_MASK, val), 10); - blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_GREEN_MASK, val), 10); - blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( - PREC_PAL_DATA_BLUE_MASK, val), 10); + u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); + + ilk_lut_10_pack(&lut[i], val); } - I915_WRITE(PREC_PAL_INDEX(pipe), 0); + intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); return blob; } static void glk_read_luts(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + if (!crtc_state->gamma_enable) return; if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) - crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state); + crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc); else - crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); + crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0)); } void intel_color_init(struct 
intel_crtc *crtc) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 44bbc7e74fc3..9ff05ec12115 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -48,7 +48,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) const struct cnl_procmon *procmon; u32 val; - val = I915_READ(ICL_PORT_COMP_DW3(phy)); + val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy)); switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); @@ -81,26 +81,27 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, procmon = cnl_get_procmon_ref_values(dev_priv, phy); - val = I915_READ(ICL_PORT_COMP_DW1(phy)); + val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy)); val &= ~((0xff << 16) | 0xff); val |= procmon->dw1; - I915_WRITE(ICL_PORT_COMP_DW1(phy), val); + intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val); - I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9); - I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10); + intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9); + intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10); } static bool check_phy_reg(struct drm_i915_private *dev_priv, enum phy phy, i915_reg_t reg, u32 mask, u32 expected_val) { - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); if ((val & mask) != expected_val) { - DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: " - "current %08x mask %08x expected %08x\n", - phy_name(phy), - reg.reg, val, mask, expected_val); + drm_dbg(&dev_priv->drm, + "Combo PHY %c reg %08x state mismatch: " + "current %08x mask %08x expected %08x\n", + phy_name(phy), + reg.reg, val, mask, expected_val); return false; } @@ -127,8 +128,8 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv) { - return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) && - (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT); + return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) && + (intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT); } static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv) @@ -151,20 +152,20 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(CHICKEN_MISC_2); + val = intel_de_read(dev_priv, CHICKEN_MISC_2); val &= ~CNL_COMP_PWR_DOWN; - I915_WRITE(CHICKEN_MISC_2, val); + intel_de_write(dev_priv, CHICKEN_MISC_2, val); /* Dummy PORT_A to get the correct CNL register from the ICL macro */ cnl_set_procmon_ref_values(dev_priv, PHY_A); - val = I915_READ(CNL_PORT_COMP_DW0); + val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0); val |= COMP_INIT; - I915_WRITE(CNL_PORT_COMP_DW0, val); + intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val); - val = I915_READ(CNL_PORT_CL1CM_DW5); + val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5); val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(CNL_PORT_CL1CM_DW5, val); + intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val); } static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) @@ -172,11 +173,12 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) u32 val; if (!cnl_combo_phy_verify_state(dev_priv)) - DRM_WARN("Combo PHY HW state changed unexpectedly.\n"); + drm_warn(&dev_priv->drm, + "Combo PHY HW state changed unexpectedly.\n"); - val = I915_READ(CHICKEN_MISC_2); + val = intel_de_read(dev_priv, CHICKEN_MISC_2); val |= 
CNL_COMP_PWR_DOWN; - I915_WRITE(CHICKEN_MISC_2, val); + intel_de_write(dev_priv, CHICKEN_MISC_2, val); } static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, @@ -184,27 +186,65 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, { /* The PHY C added by EHL has no PHY_MISC register */ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) - return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT; + return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT; else - return !(I915_READ(ICL_PHY_MISC(phy)) & + return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) & ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && - (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT); + (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT); +} + +static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915) +{ + bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A); + bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D); + bool dsi_present = intel_bios_is_dsi_present(i915, NULL); + + /* + * VBT's 'dvo port' field for child devices references the DDI, not + * the PHY. So if combo PHY A is wired up to drive an external + * display, we should see a child device present on PORT_D and + * nothing on PORT_A and no DSI. + */ + if (ddi_d_present && !ddi_a_present && !dsi_present) + return true; + + /* + * If we encounter a VBT that claims to have an external display on + * DDI-D _and_ an internal display on DDI-A/DSI leave an error message + * in the log and let the internal display win. + */ + if (ddi_d_present) + drm_err(&i915->drm, + "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n"); + + return false; } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, enum phy phy) { bool ret; + u32 expected_val = 0; if (!icl_combo_phy_enabled(dev_priv, phy)) return false; ret = cnl_verify_procmon_ref_values(dev_priv, phy); - if (phy == PHY_A) + if (phy == PHY_A) { ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); + if (IS_ELKHARTLAKE(dev_priv)) { + if (ehl_vbt_ddi_d_present(dev_priv)) + expected_val = ICL_PHY_MISC_MUX_DDID; + + ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy), + ICL_PHY_MISC_MUX_DDID, + expected_val); + } + } + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy), CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); @@ -219,7 +259,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, u32 val; if (is_dsi) { - WARN_ON(lane_reversal); + drm_WARN_ON(&dev_priv->drm, lane_reversal); switch (lane_count) { case 1: @@ -257,36 +297,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, } } - val = I915_READ(ICL_PORT_CL_DW10(phy)); + val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; val |= lane_mask << PWR_DOWN_LN_SHIFT; - I915_WRITE(ICL_PORT_CL_DW10(phy), val); -} - -static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val) -{ - bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL; - bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL; - bool dsi_present = intel_bios_is_dsi_present(i915, NULL); - - /* - * VBT's 'dvo port' field for child devices references the DDI, not - * the PHY. So if combo PHY A is wired up to drive an external - * display, we should see a child device present on PORT_D and - * nothing on PORT_A and no DSI. 
- */ - if (ddi_d_present && !ddi_a_present && !dsi_present) - return val | ICL_PHY_MISC_MUX_DDID; - - /* - * If we encounter a VBT that claims to have an external display on - * DDI-D _and_ an internal display on DDI-A/DSI leave an error message - * in the log and let the internal display win. - */ - if (ddi_d_present) - DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n"); - - return val & ~ICL_PHY_MISC_MUX_DDID; + intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); } static void icl_combo_phys_init(struct drm_i915_private *dev_priv) @@ -297,8 +311,9 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) u32 val; if (icl_combo_phy_verify_state(dev_priv, phy)) { - DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n", - phy_name(phy)); + drm_dbg(&dev_priv->drm, + "Combo PHY %c already enabled, won't reprogram it.\n", + phy_name(phy)); continue; } @@ -318,28 +333,33 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) * based on whether our VBT indicates the presence of any * "internal" child devices. */ - val = I915_READ(ICL_PHY_MISC(phy)); - if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) - val = ehl_combo_phy_a_mux(dev_priv, val); + val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) { + val &= ~ICL_PHY_MISC_MUX_DDID; + + if (ehl_vbt_ddi_d_present(dev_priv)) + val |= ICL_PHY_MISC_MUX_DDID; + } + val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(phy), val); + intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); skip_phy_misc: cnl_set_procmon_ref_values(dev_priv, phy); if (phy == PHY_A) { - val = I915_READ(ICL_PORT_COMP_DW8(phy)); + val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy)); val |= IREFGEN; - I915_WRITE(ICL_PORT_COMP_DW8(phy), val); + intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val); } - val = I915_READ(ICL_PORT_COMP_DW0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); val |= COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(phy), val); + intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); - val = I915_READ(ICL_PORT_CL_DW5(phy)); + val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(ICL_PORT_CL_DW5(phy), val); + intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); } } @@ -352,7 +372,8 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) if (phy == PHY_A && !icl_combo_phy_verify_state(dev_priv, phy)) - DRM_WARN("Combo PHY %c HW state changed unexpectedly\n", + drm_warn(&dev_priv->drm, + "Combo PHY %c HW state changed unexpectedly\n", phy_name(phy)); /* @@ -363,14 +384,14 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) goto skip_phy_misc; - val = I915_READ(ICL_PHY_MISC(phy)); + val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(phy), val); + intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); skip_phy_misc: - val = I915_READ(ICL_PORT_COMP_DW0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); val &= ~COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(phy), val); + intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); } } diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 1133c4e97bb4..903e49659f56 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -153,7 +153,7 @@ void 
intel_connector_attach_encoder(struct intel_connector *connector, bool intel_connector_get_hw_state(struct intel_connector *connector) { enum pipe pipe = 0; - struct intel_encoder *encoder = connector->encoder; + struct intel_encoder *encoder = intel_attached_encoder(connector); return encoder->get_hw_state(encoder, &pipe); } @@ -162,7 +162,8 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector) { struct drm_device *dev = connector->base.dev; - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + drm_WARN_ON(dev, + !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); if (!connector->base.state->crtc) return INVALID_PIPE; diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index f976b800b245..78f9b6cde810 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -32,7 +32,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_connector.h" @@ -75,7 +74,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(adpa_reg); + val = intel_de_read(dev_priv, adpa_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) @@ -112,7 +111,7 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; - tmp = I915_READ(crt->adpa_reg); + tmp = intel_de_read(dev_priv, crt->adpa_reg); if (tmp & ADPA_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; @@ -184,7 +183,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, adpa |= ADPA_PIPE_SEL(crtc->pipe); if (!HAS_PCH_SPLIT(dev_priv)) - I915_WRITE(BCLRPAT(crtc->pipe), 0); + intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); switch (mode) { case DRM_MODE_DPMS_ON: @@ -201,7 +200,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, break; } - I915_WRITE(crt->adpa_reg, adpa); + intel_de_write(dev_priv, crt->adpa_reg, adpa); } static void intel_disable_crt(struct intel_encoder *encoder, @@ -230,7 +229,7 @@ static void hsw_disable_crt(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - WARN_ON(!old_crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } @@ -258,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state); - WARN_ON(!old_crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } @@ -269,7 +268,7 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - WARN_ON(!crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); } @@ -282,7 +281,7 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; - WARN_ON(!crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); @@ -299,7 +298,13 @@ static void hsw_enable_crt(struct intel_encoder 
*encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; - WARN_ON(!crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder); + + intel_enable_pipe(crtc_state); + + lpt_pch_enable(crtc_state); + + intel_crtc_vblank_on(crtc_state); intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON); @@ -414,7 +419,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, /* LPT FDI RX only supports 8bpc. */ if (HAS_PCH_LPT(dev_priv)) { if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) { - DRM_DEBUG_KMS("LPT only supports 24bpp\n"); + drm_dbg_kms(&dev_priv->drm, + "LPT only supports 24bpp\n"); return -EINVAL; } @@ -442,34 +448,37 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector) crt->force_hotplug_required = false; - save_adpa = adpa = I915_READ(crt->adpa_reg); - DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); + drm_dbg_kms(&dev_priv->drm, + "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; if (turn_off_dac) adpa &= ~ADPA_DAC_ENABLE; - I915_WRITE(crt->adpa_reg, adpa); + intel_de_write(dev_priv, crt->adpa_reg, adpa); if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) - DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + drm_dbg_kms(&dev_priv->drm, + "timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { - I915_WRITE(crt->adpa_reg, save_adpa); - POSTING_READ(crt->adpa_reg); + intel_de_write(dev_priv, crt->adpa_reg, save_adpa); + intel_de_posting_read(dev_priv, crt->adpa_reg); } } /* Check the status to see if both blue and green are on now */ - adpa = I915_READ(crt->adpa_reg); + adpa = intel_de_read(dev_priv, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; - DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); + drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n", + adpa, ret); return ret; } @@ -498,27 +507,30 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) */ reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); - save_adpa = adpa = I915_READ(crt->adpa_reg); - DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg); + drm_dbg_kms(&dev_priv->drm, + "trigger hotplug detect cycle: adpa=0x%x\n", adpa); adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; - I915_WRITE(crt->adpa_reg, adpa); + intel_de_write(dev_priv, crt->adpa_reg, adpa); if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) { - DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); - I915_WRITE(crt->adpa_reg, save_adpa); + drm_dbg_kms(&dev_priv->drm, + "timed out waiting for FORCE_TRIGGER"); + intel_de_write(dev_priv, crt->adpa_reg, save_adpa); } /* Check the status to see if both blue and green are on now */ - adpa = I915_READ(crt->adpa_reg); + adpa = intel_de_read(dev_priv, crt->adpa_reg); if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; - DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); + drm_dbg_kms(&dev_priv->drm, + "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); if (reenable_hpd) intel_hpd_enable(dev_priv, crt->base.hpd_pin); @@ -558,15 +570,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) /* wait for FORCE_DETECT to go off */ if 
(intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN, CRT_HOTPLUG_FORCE_DETECT, 1000)) - DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); + drm_dbg_kms(&dev_priv->drm, + "timed out waiting for FORCE_DETECT to go off"); } - stat = I915_READ(PORT_HOTPLUG_STAT); + stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT); if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE) ret = true; /* clear the interrupt we just generated, if any */ - I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); + intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); @@ -629,13 +642,16 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) * have to check the EDID input spec of the attached device. */ if (!is_digital) { - DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); + drm_dbg_kms(&dev_priv->drm, + "CRT detected via DDC:0x50 [EDID]\n"); ret = true; } else { - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); + drm_dbg_kms(&dev_priv->drm, + "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } } else { - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); + drm_dbg_kms(&dev_priv->drm, + "CRT not detected via DDC:0x50 [no valid EDID found]\n"); } kfree(edid); @@ -660,7 +676,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) u8 st00; enum drm_connector_status status; - DRM_DEBUG_KMS("starting load-detect on CRT\n"); + drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); bclrpat_reg = BCLRPAT(pipe); vtotal_reg = VTOTAL(pipe); @@ -706,7 +722,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - u32 vsync = I915_READ(vsync_reg); + u32 vsync = intel_de_read(dev_priv, vsync_reg); u32 vsync_start = (vsync & 0xffff) + 1; vblank_start = vsync_start; @@ -801,9 +817,9 @@ intel_crt_detect(struct drm_connector *connector, int status, ret; struct intel_load_detect_pipe tmp; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", - connector->base.id, connector->name, - force); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, connector->name, + force); if (i915_modparams.load_detect_test) { wakeref = intel_display_power_get(dev_priv, @@ -824,11 +840,13 @@ intel_crt_detect(struct drm_connector *connector, * only trust an assertion that the monitor is connected. 
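 * Editor's note: for orientation, the detection order in this function is
 * the ADPA hotplug force-trigger first, then the DDC/EDID probe, and the
 * analog load-detect path only as a last resort.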
*/ if (intel_crt_detect_hotplug(connector)) { - DRM_DEBUG_KMS("CRT detected via hotplug\n"); + drm_dbg_kms(&dev_priv->drm, + "CRT detected via hotplug\n"); status = connector_status_connected; goto out; } else - DRM_DEBUG_KMS("CRT not detected via hotplug\n"); + drm_dbg_kms(&dev_priv->drm, + "CRT not detected via hotplug\n"); } if (intel_crt_detect_ddc(connector)) { @@ -918,13 +936,13 @@ void intel_crt_reset(struct drm_encoder *encoder) if (INTEL_GEN(dev_priv) >= 5) { u32 adpa; - adpa = I915_READ(crt->adpa_reg); + adpa = intel_de_read(dev_priv, crt->adpa_reg); adpa &= ~ADPA_CRT_HOTPLUG_MASK; adpa |= ADPA_HOTPLUG_BITS; - I915_WRITE(crt->adpa_reg, adpa); - POSTING_READ(crt->adpa_reg); + intel_de_write(dev_priv, crt->adpa_reg, adpa); + intel_de_posting_read(dev_priv, crt->adpa_reg); - DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); + drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa); crt->force_hotplug_required = true; } @@ -969,7 +987,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv) else adpa_reg = ADPA; - adpa = I915_READ(adpa_reg); + adpa = intel_de_read(dev_priv, adpa_reg); if ((adpa & ADPA_DAC_ENABLE) == 0) { /* * On some machines (some IVB at least) CRT can be @@ -979,11 +997,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv) * take. So the only way to tell is attempt to enable * it and see what happens. */ - I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE | - ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); - if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0) + intel_de_write(dev_priv, adpa_reg, + adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); + if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0) return; - I915_WRITE(adpa_reg, adpa); + intel_de_write(dev_priv, adpa_reg, adpa); } crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); @@ -1027,6 +1045,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv) !dmi_check_system(intel_spurious_crt_detect)) { crt->base.hpd_pin = HPD_CRT; crt->base.hotplug = intel_encoder_hotplug; + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; + } else { + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; } if (HAS_DDI(dev_priv)) { @@ -1057,14 +1078,6 @@ void intel_crt_init(struct drm_i915_private *dev_priv) drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - if (!I915_HAS_HOTPLUG(dev_priv)) - intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; - - /* - * Configure the automatic hotplug detection stuff - */ - crt->force_hotplug_required = false; - /* * TODO: find a proper way to discover whether we need to set the * polarity and link reversal bits or not, instead of relying on the * BIOS. */ if (HAS_PCH_LPT(dev_priv)) { u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | FDI_RX_LINK_REVERSAL_OVERRIDE; - dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; + dev_priv->fdi_rx_config = intel_de_read(dev_priv, + FDI_RX_CTL(PIPE_A)) & fdi_config; } intel_crt_reset(&crt->base.base); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c index 09870a31b4f0..3112572cfb7d 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/display/intel_csr.c @@ -27,6 +27,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_csr.h" +#include "intel_de.h" /** * DOC: csr support for dmc @@ -39,8 +40,8 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE -#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin" -#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4) +#define
TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin" +#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6) #define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); @@ -276,11 +277,11 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) mask |= DC_STATE_DEBUG_MASK_CORES; /* The below bit doesn't need to be cleared ever afterwards */ - val = I915_READ(DC_STATE_DEBUG); + val = intel_de_read(dev_priv, DC_STATE_DEBUG); if ((val & mask) != mask) { val |= mask; - I915_WRITE(DC_STATE_DEBUG, val); - POSTING_READ(DC_STATE_DEBUG); + intel_de_write(dev_priv, DC_STATE_DEBUG, val); + intel_de_posting_read(dev_priv, DC_STATE_DEBUG); } } @@ -298,12 +299,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) u32 i, fw_size; if (!HAS_CSR(dev_priv)) { - DRM_ERROR("No CSR support available for this platform\n"); + drm_err(&dev_priv->drm, + "No CSR support available for this platform\n"); return; } if (!dev_priv->csr.dmc_payload) { - DRM_ERROR("Tried to program CSR with empty payload\n"); + drm_err(&dev_priv->drm, + "Tried to program CSR with empty payload\n"); return; } @@ -313,13 +316,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) preempt_disable(); for (i = 0; i < fw_size; i++) - I915_WRITE_FW(CSR_PROGRAM(i), payload[i]); + intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i), + payload[i]); preempt_enable(); for (i = 0; i < dev_priv->csr.mmio_count; i++) { - I915_WRITE(dev_priv->csr.mmioaddr[i], - dev_priv->csr.mmiodata[i]); + intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i], + dev_priv->csr.mmiodata[i]); } dev_priv->csr.dc_state = 0; @@ -607,7 +611,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv, static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv) { - WARN_ON(dev_priv->csr.wakeref); + drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref); dev_priv->csr.wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); } @@ -636,16 +640,16 @@ static void csr_load_work_fn(struct work_struct *work) intel_csr_load_program(dev_priv); intel_csr_runtime_pm_put(dev_priv); - DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n", - dev_priv->csr.fw_path, - CSR_VERSION_MAJOR(csr->version), + drm_info(&dev_priv->drm, + "Finished loading DMC firmware %s (v%u.%u)\n", + dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); } else { - dev_notice(dev_priv->drm.dev, + drm_notice(&dev_priv->drm, "Failed to load DMC firmware %s." 
" Disabling runtime power management.\n", csr->fw_path); - dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s", + drm_notice(&dev_priv->drm, "DMC firmware homepage: %s", INTEL_UC_FIRMWARE_URL); } @@ -712,7 +716,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (i915_modparams.dmc_firmware_path) { if (strlen(i915_modparams.dmc_firmware_path) == 0) { csr->fw_path = NULL; - DRM_INFO("Disabling CSR firmware and runtime PM\n"); + drm_info(&dev_priv->drm, + "Disabling CSR firmware and runtime PM\n"); return; } @@ -722,11 +727,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) } if (csr->fw_path == NULL) { - DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); + drm_dbg_kms(&dev_priv->drm, + "No known CSR firmware for platform, disabling runtime PM\n"); return; } - DRM_DEBUG_KMS("Loading %s\n", csr->fw_path); + drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path); schedule_work(&dev_priv->csr.work); } @@ -783,7 +789,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv) return; intel_csr_ucode_suspend(dev_priv); - WARN_ON(dev_priv->csr.wakeref); + drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref); kfree(dev_priv->csr.dmc_payload); } diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h index 03c64f8af7ab..03c64f8af7ab 100644 --- a/drivers/gpu/drm/i915/intel_csr.h +++ b/drivers/gpu/drm/i915/display/intel_csr.h diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index d9a61f341070..52db7852827b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -568,6 +568,20 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ }; +static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */ + { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + struct icl_mg_phy_ddi_buf_trans { u32 cri_txdeemph_override_5_0; u32 cri_txdeemph_override_11_6; @@ -622,6 +636,34 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = { { 0x0, 0x0, 0xA }, /* 10 Full -3 dB */ }; +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 
}, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ + { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ + { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -818,7 +860,7 @@ bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) static const struct cnl_ddi_buf_trans * cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) { - u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); @@ -839,7 +881,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) static const struct cnl_ddi_buf_trans * cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) { - u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); @@ -860,7 +902,7 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) static const struct cnl_ddi_buf_trans * cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { - u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (dev_priv->vbt.edp.low_vswing) { if (voltage == VOLTAGE_INFO_0_85V) { @@ -901,15 +943,43 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, return icl_combo_phy_ddi_translations_dp_hbr2; } -static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) +static const struct cnl_ddi_buf_trans * +ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) +{ + if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP && + rate > 270000) { + *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3); + return ehl_combo_phy_ddi_translations_hbr2_hbr3; + } + + return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); +} + +static const struct cnl_ddi_buf_trans * +tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) +{ + if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { + return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); + } else if (rate > 270000) { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); + return tgl_combo_phy_ddi_translations_dp_hbr2; + } + + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); + return tgl_combo_phy_ddi_translations_dp_hbr; +} +
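[Editor's note: a usage sketch of the selection logic added above; the values are illustrative only. For a combo-PHY DP link at HBR2 (540000 kHz, above the 270000 kHz cutoff in tgl_get_combo_buf_trans()):

	int n_entries;
	const struct cnl_ddi_buf_trans *trans =
		tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_DP,
					540000, &n_entries);
	/* -> tgl_combo_phy_ddi_translations_dp_hbr2, n_entries == 10 */

HDMI and eDP still fall back to the ICL tables, and DP rates at or below 270000 kHz take the new tgl_combo_phy_ddi_translations_dp_hbr table.]

+static int intel_ddi_hdmi_level(struct intel_encoder *encoder) { - struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port]; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries, level, default_entry; - enum phy phy = intel_port_to_phy(dev_priv, port); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(dev_priv,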
INTEL_OUTPUT_HDMI, + tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans); @@ -937,19 +1007,18 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); default_entry = 6; } else { - WARN(1, "ddi translation table missing\n"); + drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n"); return 0; } - if (WARN_ON_ONCE(n_entries == 0)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0)) return 0; - if (port_info->hdmi_level_shift_set) - level = port_info->hdmi_level_shift; - else + level = intel_bios_hdmi_level_shift(encoder); + if (level < 0) level = default_entry; - if (WARN_ON_ONCE(level >= n_entries)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; return level; @@ -980,15 +1049,14 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, &n_entries); /* If we're boosting the current, set bit 31 of trans1 */ - if (IS_GEN9_BC(dev_priv) && - dev_priv->vbt.ddi_port_info[port].dp_boost_level) + if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { - I915_WRITE(DDI_BUF_TRANS_LO(port, i), - ddi_translations[i].trans1 | iboost_bit); - I915_WRITE(DDI_BUF_TRANS_HI(port, i), - ddi_translations[i].trans2); + intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i), + ddi_translations[i].trans1 | iboost_bit); + intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i), + ddi_translations[i].trans2); } } @@ -1008,21 +1076,20 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); - if (WARN_ON_ONCE(!ddi_translations)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; - if (WARN_ON_ONCE(level >= n_entries)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; /* If we're boosting the current, set bit 31 of trans1 */ - if (IS_GEN9_BC(dev_priv) && - dev_priv->vbt.ddi_port_info[port].hdmi_boost_level) + if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ - I915_WRITE(DDI_BUF_TRANS_LO(port, 9), - ddi_translations[level].trans1 | iboost_bit); - I915_WRITE(DDI_BUF_TRANS_HI(port, 9), - ddi_translations[level].trans2); + intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9), + ddi_translations[level].trans1 | iboost_bit); + intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9), + ddi_translations[level].trans2); } static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, @@ -1033,7 +1100,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, for (i = 0; i < 16; i++) { udelay(1); - if (I915_READ(reg) & DDI_BUF_IS_IDLE) + if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE) return; } DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); @@ -1124,70 +1191,64 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, * * WaFDIAutoLinkSetTimingOverrride:hsw */ - I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) | - FDI_RX_PWRDN_LANE0_VAL(2) | - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); /* Enable the PCH Receiver FDI PLL */ rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | FDI_RX_PLL_ENABLE | 
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); - I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); - POSTING_READ(FDI_RX_CTL(PIPE_A)); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); udelay(220); /* Switch from Rawclk to PCDclk */ rx_ctl_val |= FDI_PCDCLK; - I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); /* Configure Port Clock Select */ ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll); - I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel); - WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL); + intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel); + drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL); /* Start the training iterating through available voltages and emphasis, * testing each value twice. */ for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) { /* Configure DP_TP_CTL with auto-training */ - I915_WRITE(DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_LINK_TRAIN_PAT1 | - DP_TP_CTL_ENABLE); + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE); /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. * DDI E does not support port reversal, the functionality is * achieved on the PCH side in FDI_RX_CTL, so no need to set the * port reversal bit */ - I915_WRITE(DDI_BUF_CTL(PORT_E), - DDI_BUF_CTL_ENABLE | - ((crtc_state->fdi_lanes - 1) << 1) | - DDI_BUF_TRANS_SELECT(i / 2)); - POSTING_READ(DDI_BUF_CTL(PORT_E)); + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), + DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2)); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); udelay(600); /* Program PCH FDI Receiver TU */ - I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); + intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); /* Enable PCH FDI Receiver with auto-training */ rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; - I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); - POSTING_READ(FDI_RX_CTL(PIPE_A)); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); /* Wait for FDI receiver lane calibration */ udelay(30); /* Unset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(FDI_RX_MISC(PIPE_A)); + temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - I915_WRITE(FDI_RX_MISC(PIPE_A), temp); - POSTING_READ(FDI_RX_MISC(PIPE_A)); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); /* Wait for FDI auto training time */ udelay(5); - temp = I915_READ(DP_TP_STATUS(PORT_E)); + temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E)); if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { DRM_DEBUG_KMS("FDI link training done on step %d\n", i); break; @@ -1203,37 +1264,34 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, } rx_ctl_val &= ~FDI_RX_ENABLE; - I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); - POSTING_READ(FDI_RX_CTL(PIPE_A)); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); + intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - temp = I915_READ(DDI_BUF_CTL(PORT_E)); + temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); temp &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(PORT_E), temp); - POSTING_READ(DDI_BUF_CTL(PORT_E)); + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp); + 
intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - temp = I915_READ(DP_TP_CTL(PORT_E)); + temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(PORT_E), temp); - POSTING_READ(DP_TP_CTL(PORT_E)); + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); + intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); intel_wait_ddi_buf_idle(dev_priv, PORT_E); /* Reset FDI_RX_MISC pwrdn lanes */ - temp = I915_READ(FDI_RX_MISC(PIPE_A)); + temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(FDI_RX_MISC(PIPE_A), temp); - POSTING_READ(FDI_RX_MISC(PIPE_A)); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); } /* Enable normal pixel sending for FDI */ - I915_WRITE(DP_TP_CTL(PORT_E), - DP_TP_CTL_FDI_AUTOTRAIN | - DP_TP_CTL_LINK_TRAIN_NORMAL | - DP_TP_CTL_ENHANCED_FRAME_ENABLE | - DP_TP_CTL_ENABLE); + intel_de_write(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE); } static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) @@ -1260,175 +1318,18 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) } if (num_encoders != 1) - WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders, - pipe_name(crtc->pipe)); + drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n", + num_encoders, + pipe_name(crtc->pipe)); BUG_ON(ret == NULL); return ret; } -static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - int refclk; - int n, p, r; - u32 wrpll; - - wrpll = I915_READ(reg); - switch (wrpll & WRPLL_REF_MASK) { - case WRPLL_REF_SPECIAL_HSW: - /* - * muxed-SSC for BDW. - * non-SSC for non-ULT HSW. Check FUSE_STRAP3 - * for the non-SSC reference frequency. - */ - if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { - if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT) - refclk = 24; - else - refclk = 135; - break; - } - /* fall through */ - case WRPLL_REF_PCH_SSC: - /* - * We could calculate spread here, but our checking - * code only cares about 5% accuracy, and spread is a max of - * 0.5% downspread. 
- */ - refclk = 135; - break; - case WRPLL_REF_LCPLL: - refclk = 2700; - break; - default: - MISSING_CASE(wrpll); - return 0; - } - - r = wrpll & WRPLL_DIVIDER_REF_MASK; - p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; - n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; - - /* Convert to KHz, p & r have a fixed point portion */ - return (refclk * n * 100) / (p * r); -} - -static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state) -{ - u32 p0, p1, p2, dco_freq; - - p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; - p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; - - if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) - p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; - else - p1 = 1; - - - switch (p0) { - case DPLL_CFGCR2_PDIV_1: - p0 = 1; - break; - case DPLL_CFGCR2_PDIV_2: - p0 = 2; - break; - case DPLL_CFGCR2_PDIV_3: - p0 = 3; - break; - case DPLL_CFGCR2_PDIV_7: - p0 = 7; - break; - } - - switch (p2) { - case DPLL_CFGCR2_KDIV_5: - p2 = 5; - break; - case DPLL_CFGCR2_KDIV_2: - p2 = 2; - break; - case DPLL_CFGCR2_KDIV_3: - p2 = 3; - break; - case DPLL_CFGCR2_KDIV_1: - p2 = 1; - break; - } - - dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) - * 24 * 1000; - - dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) - * 24 * 1000) / 0x8000; - - if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) - return 0; - - return dco_freq / (p0 * p1 * p2 * 5); -} - -int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *pll_state) -{ - u32 p0, p1, p2, dco_freq, ref_clock; - - p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; - p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; - - if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) - p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> - DPLL_CFGCR1_QDIV_RATIO_SHIFT; - else - p1 = 1; - - - switch (p0) { - case DPLL_CFGCR1_PDIV_2: - p0 = 2; - break; - case DPLL_CFGCR1_PDIV_3: - p0 = 3; - break; - case DPLL_CFGCR1_PDIV_5: - p0 = 5; - break; - case DPLL_CFGCR1_PDIV_7: - p0 = 7; - break; - } - - switch (p2) { - case DPLL_CFGCR1_KDIV_1: - p2 = 1; - break; - case DPLL_CFGCR1_KDIV_2: - p2 = 2; - break; - case DPLL_CFGCR1_KDIV_3: - p2 = 3; - break; - } - - ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - - dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) - * ref_clock; - - dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; - - if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) - return 0; - - return dco_freq / (p0 * p1 * p2 * 5); -} - static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, enum port port) { - u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; switch (val) { case DDI_CLK_SEL_NONE: @@ -1447,77 +1348,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } } -static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, - const struct intel_dpll_hw_state *pll_state) -{ - u32 m1, m2_int, m2_frac, div1, div2, ref_clock; - u64 tmp; - - ref_clock = dev_priv->cdclk.hw.ref; - - if (INTEL_GEN(dev_priv) >= 12) { - m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; - m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; - m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; - - if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { - m2_frac = pll_state->mg_pll_bias & - DKL_PLL_BIAS_FBDIV_FRAC_MASK; - m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; - 
} else { - m2_frac = 0; - } - } else { - m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - - if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { - m2_frac = pll_state->mg_pll_div0 & - MG_PLL_DIV0_FBDIV_FRAC_MASK; - m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; - } else { - m2_frac = 0; - } - } - - switch (pll_state->mg_clktop2_hsclkctl & - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: - div1 = 2; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: - div1 = 3; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: - div1 = 5; - break; - case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: - div1 = 7; - break; - default: - MISSING_CASE(pll_state->mg_clktop2_hsclkctl); - return 0; - } - - div2 = (pll_state->mg_clktop2_hsclkctl & - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; - - /* div2 value of 0 is same as 1 means no div */ - if (div2 == 0) - div2 = 1; - - /* - * Adjust the original formula to delay the division by 2^22 in order to - * minimize possible rounding errors. - */ - tmp = (u64)m1 * m2_int * ref_clock + - (((u64)m1 * m2_frac * ref_clock) >> 22); - tmp = div_u64(tmp, 5 * div1 * div2); - - return tmp; -} - static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; @@ -1543,214 +1373,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) pipe_config->hw.adjusted_mode.crtc_clock = dotclock; } -static void icl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - enum port port = encoder->port; - enum phy phy = intel_port_to_phy(dev_priv, port); - int link_clock; - - if (intel_phy_is_combo(dev_priv, phy)) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); - } else { - enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, - pipe_config->shared_dpll); - - if (pll_id == DPLL_ID_ICL_TBTPLL) - link_clock = icl_calc_tbt_pll_link(dev_priv, port); - else - link_clock = icl_calc_mg_pll_link(dev_priv, pll_state); - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void cnl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - int link_clock; - - if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); - } else { - link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; - - switch (link_clock) { - case DPLL_CFGCR0_LINK_RATE_810: - link_clock = 81000; - break; - case DPLL_CFGCR0_LINK_RATE_1080: - link_clock = 108000; - break; - case DPLL_CFGCR0_LINK_RATE_1350: - link_clock = 135000; - break; - case DPLL_CFGCR0_LINK_RATE_1620: - link_clock = 162000; - break; - case DPLL_CFGCR0_LINK_RATE_2160: - link_clock = 216000; - break; - case DPLL_CFGCR0_LINK_RATE_2700: - link_clock = 270000; - break; - case DPLL_CFGCR0_LINK_RATE_3240: - link_clock = 324000; - break; - case DPLL_CFGCR0_LINK_RATE_4050: - link_clock = 405000; - break; - default: - WARN(1, "Unsupported link rate\n"); - break; - } - link_clock *= 2; - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void skl_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state 
*pipe_config) -{ - struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; - int link_clock; - - /* - * ctrl1 register is already shifted for each pll, just use 0 to get - * the internal shift for each field - */ - if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) { - link_clock = skl_calc_wrpll_link(pll_state); - } else { - link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0); - link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0); - - switch (link_clock) { - case DPLL_CTRL1_LINK_RATE_810: - link_clock = 81000; - break; - case DPLL_CTRL1_LINK_RATE_1080: - link_clock = 108000; - break; - case DPLL_CTRL1_LINK_RATE_1350: - link_clock = 135000; - break; - case DPLL_CTRL1_LINK_RATE_1620: - link_clock = 162000; - break; - case DPLL_CTRL1_LINK_RATE_2160: - link_clock = 216000; - break; - case DPLL_CTRL1_LINK_RATE_2700: - link_clock = 270000; - break; - default: - WARN(1, "Unsupported link rate\n"); - break; - } - link_clock *= 2; - } - - pipe_config->port_clock = link_clock; - - ddi_dotclock_get(pipe_config); -} - -static void hsw_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - u32 val, pll; - - val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll); - switch (val & PORT_CLK_SEL_MASK) { - case PORT_CLK_SEL_LCPLL_810: - link_clock = 81000; - break; - case PORT_CLK_SEL_LCPLL_1350: - link_clock = 135000; - break; - case PORT_CLK_SEL_LCPLL_2700: - link_clock = 270000; - break; - case PORT_CLK_SEL_WRPLL1: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0)); - break; - case PORT_CLK_SEL_WRPLL2: - link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1)); - break; - case PORT_CLK_SEL_SPLL: - pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK; - if (pll == SPLL_FREQ_810MHz) - link_clock = 81000; - else if (pll == SPLL_FREQ_1350MHz) - link_clock = 135000; - else if (pll == SPLL_FREQ_2700MHz) - link_clock = 270000; - else { - WARN(1, "bad spll freq\n"); - return; - } - break; - default: - WARN(1, "bad port clock sel\n"); - return; - } - - pipe_config->port_clock = link_clock * 2; - - ddi_dotclock_get(pipe_config); -} - -static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state) -{ - struct dpll clock; - - clock.m1 = 2; - clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; - if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; - clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; - clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; - clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; - - return chv_calc_dpll_params(100000, &clock); -} - -static void bxt_ddi_clock_get(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) -{ - pipe_config->port_clock = - bxt_calc_pll_link(&pipe_config->dpll_hw_state); - - ddi_dotclock_get(pipe_config); -} -
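[Editor's note: worked form of the DCO arithmetic the removed skl/cnl helpers shared, which presumably now sits behind the intel_dpll_get_freq() call used below:

	dco_freq   = (DCO_integer + DCO_fraction / 2^15) * ref_clock
	port_clock = dco_freq / (p0 * p1 * p2 * 5)

With illustrative values DCO_integer = 421, DCO_fraction = 0x3800 (= 0.4375), ref_clock = 24000 kHz and p0 * p1 * p2 = 2: dco_freq = 421.4375 * 24000 = 10114500 kHz, so port_clock = 10114500 / 10 = 1011450 kHz.]

static void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (INTEL_GEN(dev_priv) >= 11) - icl_ddi_clock_get(encoder, pipe_config); - else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_clock_get(encoder, pipe_config); - else if (IS_GEN9_BC(dev_priv)) - skl_ddi_clock_get(encoder, pipe_config); - else if (INTEL_GEN(dev_priv) <= 8) - hsw_ddi_clock_get(encoder,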
pipe_config); + if (intel_phy_is_tc(dev_priv, phy) && + intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) == + DPLL_ID_ICL_TBTPLL) + pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv, + encoder->port); + else + pipe_config->port_clock = + intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll); + + ddi_dotclock_get(pipe_config); } void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, @@ -1764,7 +1402,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, if (!intel_crtc_has_dp_encoder(crtc_state)) return; - WARN_ON(transcoder_is_dsi(cpu_transcoder)); + drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)); temp = DP_MSA_MISC_SYNC_CLOCK; @@ -1787,8 +1425,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, } /* nonsense combination */ - WARN_ON(crtc_state->limited_color_range && - crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->limited_color_range) temp |= DP_MSA_MISC_COLOR_CEA_RGB; @@ -1810,7 +1448,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) temp |= DP_MSA_MISC_COLOR_VSC_SDP; - I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); + intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp); } /* @@ -1904,7 +1542,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) enum transcoder master; master = crtc_state->mst_master_transcoder; - WARN_ON(master == INVALID_TRANSCODER); + drm_WARN_ON(&dev_priv->drm, + master == INVALID_TRANSCODER); temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master); } } else { @@ -1925,7 +1564,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } /* @@ -1942,7 +1581,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); temp &= ~TRANS_DDI_FUNC_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state) @@ -1952,16 +1591,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val; - val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); val &= ~TRANS_DDI_FUNC_ENABLE; if (INTEL_GEN(dev_priv) >= 12) { - if (!intel_dp_mst_is_master_trans(crtc_state)) - val &= ~TGL_TRANS_DDI_PORT_MASK; + if (!intel_dp_mst_is_master_trans(crtc_state)) { + val &= ~(TGL_TRANS_DDI_PORT_MASK | + TRANS_DDI_MODE_SELECT_MASK); + } } else { - val &= ~TRANS_DDI_PORT_MASK; + val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { @@ -1983,20 +1624,21 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, 
wakeref = intel_display_power_get_if_enabled(dev_priv, intel_encoder->power_domain); - if (WARN_ON(!wakeref)) + if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; - if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) { + if (drm_WARN_ON(dev, + !intel_encoder->get_hw_state(intel_encoder, &pipe))) { ret = -EIO; goto out; } - tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe)); if (enable) tmp |= TRANS_DDI_HDCP_SIGNALLING; else tmp &= ~TRANS_DDI_HDCP_SIGNALLING; - I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp); + intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp); out: intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; @@ -2006,7 +1648,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_encoder *encoder = intel_connector->encoder; + struct intel_encoder *encoder = intel_attached_encoder(intel_connector); int type = intel_connector->base.connector_type; enum port port = encoder->port; enum transcoder cpu_transcoder; @@ -2030,7 +1672,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) else cpu_transcoder = (enum transcoder) pipe; - tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: @@ -2083,12 +1725,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if (!wakeref) return; - tmp = I915_READ(DDI_BUF_CTL(port)); + tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: @@ -2128,7 +1771,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, ddi_select = TRANS_DDI_SELECT_PORT(port); } - tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(cpu_transcoder)); intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder), trans_wakeref); @@ -2162,7 +1806,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, out: if (*pipe_mask && IS_GEN9_LP(dev_priv)) { - tmp = I915_READ(BXT_PHY_CTL(port)); + tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port)); if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) @@ -2221,11 +1865,16 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, * happen since fake-MST encoders don't set their get_power_domains() * hook. 
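 * Editor's note: the hunk below also skips grabbing the DDI IO power well
 * for type-C ports in TBT-ALT mode, presumably because the PHY IO is owned
 * by the Thunderbolt controller rather than the DDI in that mode.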
*/ - if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) + if (drm_WARN_ON(&dev_priv->drm, + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) return; dig_port = enc_to_dig_port(encoder); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC @@ -2254,11 +1903,13 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) if (cpu_transcoder != TRANSCODER_EDP) { if (INTEL_GEN(dev_priv) >= 12) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TGL_TRANS_CLK_SEL_PORT(port)); + intel_de_write(dev_priv, + TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_PORT(port)); else - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_PORT(port)); + intel_de_write(dev_priv, + TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_PORT(port)); } } @@ -2269,11 +1920,13 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) if (cpu_transcoder != TRANSCODER_EDP) { if (INTEL_GEN(dev_priv) >= 12) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TGL_TRANS_CLK_SEL_DISABLED); + intel_de_write(dev_priv, + TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_DISABLED); else - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); + intel_de_write(dev_priv, + TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_DISABLED); } } @@ -2282,13 +1935,13 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, { u32 tmp; - tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); + tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0); tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); if (iboost) tmp |= iboost << BALANCE_LEG_SHIFT(port); else tmp |= BALANCE_LEG_DISABLE(port); - I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); + intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp); } static void skl_ddi_set_iboost(struct intel_encoder *encoder, @@ -2300,9 +1953,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u8 iboost; if (type == INTEL_OUTPUT_HDMI) - iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; + iboost = intel_bios_hdmi_boost_level(encoder); else - iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; + iboost = intel_bios_dp_boost_level(encoder); if (iboost == 0) { const struct ddi_buf_trans *ddi_translations; @@ -2315,9 +1968,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, else ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); - if (WARN_ON_ONCE(!ddi_translations)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; - if (WARN_ON_ONCE(level >= n_entries)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; iboost = ddi_translations[level].i_boost; @@ -2350,9 +2003,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, else ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries); - if (WARN_ON_ONCE(!ddi_translations)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; - if (WARN_ON_ONCE(level >= n_entries)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; bxt_ddi_phy_set_signal_level(dev_priv, port, @@ -2372,12 +2025,15 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(dev_priv, encoder->type, + tgl_get_combo_buf_trans(dev_priv, 
encoder->type, intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans); } else if (INTEL_GEN(dev_priv) == 11) { - if (intel_phy_is_combo(dev_priv, phy)) + if (IS_ELKHARTLAKE(dev_priv)) + ehl_get_combo_buf_trans(dev_priv, encoder->type, + intel_dp->link_rate, &n_entries); + else if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else @@ -2399,9 +2055,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); } - if (WARN_ON(n_entries < 1)) + if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) n_entries = 1; - if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels))) + if (drm_WARN_ON(&dev_priv->drm, + n_entries > ARRAY_SIZE(index_to_dp_signal_levels))) n_entries = ARRAY_SIZE(index_to_dp_signal_levels); return index_to_dp_signal_levels[n_entries - 1] & @@ -2444,52 +2101,52 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, else ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries); - if (WARN_ON_ONCE(!ddi_translations)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; - if (WARN_ON_ONCE(level >= n_entries)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries)) level = n_entries - 1; /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */ - val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port)); val &= ~SCALING_MODE_SEL_MASK; val |= SCALING_MODE_SEL(2); - I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW2 */ - val = I915_READ(CNL_PORT_TX_DW2_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Rcomp scalar is fixed as 0x98 for every table entry */ val |= RCOMP_SCALAR(0x98); - I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val); /* Program PORT_TX_DW4 */ /* We cannot write to GRP. 
It would overwrite individual loadgen */ for (ln = 0; ln < 4; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val); } /* Program PORT_TX_DW5 */ /* All DW5 values are fixed for every table entry */ - val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port)); val &= ~RTERM_SELECT_MASK; val |= RTERM_SELECT(6); val |= TAP3_DISABLE; - I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); /* Program PORT_TX_DW7 */ - val = I915_READ(CNL_PORT_TX_DW7_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port)); val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); - I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val); } static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2515,12 +2172,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ - val = I915_READ(CNL_PORT_PCS_DW1_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port)); if (type != INTEL_OUTPUT_HDMI) val |= COMMON_KEEPER_EN; else val &= ~COMMON_KEEPER_EN; - I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val); /* 2. Program loadgen select */ /* @@ -2530,33 +2187,33 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = I915_READ(CNL_PORT_CL1CM_DW5); + val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5); val |= SUS_CLOCK_CONFIG; - I915_WRITE(CNL_PORT_CL1CM_DW5, val); + intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val); /* 4. Clear training enable to change swing values */ - val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port)); val &= ~TX_TRAINING_EN; - I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); /* 5. Program swing and de-emphasis */ cnl_ddi_vswing_program(encoder, level, type); /* 6.
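The "cannot write to GRP" comment above deserves a gloss: the *_GRP register instance broadcasts one value to all four lanes, while PORT_TX_DW4 carries a per-lane LOADGEN_SELECT bit programmed in step 2 of the sequence, so DW4 updates must go through the per-lane *_LN(ln) instances. A hedged sketch of just that constraint, with a hypothetical helper name:

static void cnl_update_dw4_per_lane_sketch(struct drm_i915_private *dev_priv,
					   enum port port, u32 clear, u32 set)
{
	int ln;

	/* A single _GRP write would clobber each lane's LOADGEN_SELECT. */
	for (ln = 0; ln < 4; ln++) {
		u32 val = intel_de_read(dev_priv,
					CNL_PORT_TX_DW4_LN(ln, port));

		val &= ~clear;
		val |= set;
		intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
	}
}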
Set training enable to trigger update */ - val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); + val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port)); val |= TX_TRAINING_EN; - I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); + intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, @@ -2567,8 +2224,15 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, u32 n_entries, val; int ln; - ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate, - &n_entries); + if (INTEL_GEN(dev_priv) >= 12) + ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate, + &n_entries); + else if (IS_ELKHARTLAKE(dev_priv)) + ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate, + &n_entries); + else + ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate, + &n_entries); if (!ddi_translations) return; @@ -2578,41 +2242,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, } /* Set PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); val |= TAP3_DISABLE; - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ - val = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ val |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); } /* Program PORT_TX_DW7 */ - val = I915_READ(ICL_PORT_TX_DW7_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy)); val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); - I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2641,12 +2305,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ - val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); if (type == INTEL_OUTPUT_HDMI) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); /* 2. 
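From here the combo-PHY vswing programming picks its buffer-translation table per platform rather than always using the ICL table. A condensed sketch of the dispatch, assuming the combo tables keep the cnl_ddi_buf_trans layout and a u32 n_entries out-parameter as in the caller above; the wrapper name is hypothetical:

static const struct cnl_ddi_buf_trans *
combo_buf_trans_sketch(struct drm_i915_private *dev_priv,
		       int type, int rate, u32 *n_entries)
{
	/* Newest platform first: TGL, then the EHL special case, then ICL. */
	if (INTEL_GEN(dev_priv) >= 12)
		return tgl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
	if (IS_ELKHARTLAKE(dev_priv))
		return ehl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
	return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
}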
Program loadgen select */ /* @@ -2656,33 +2320,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = I915_READ(ICL_PORT_CL_DW5(phy)); + val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); val |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(phy), val); + intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); /* 4. Clear training enable to change swing values */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); val &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate); /* 6. Set training enable to trigger update */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); + val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); val |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); + intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); } static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2706,33 +2370,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val); - val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); - val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2740,9 +2404,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( 
ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); - val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2750,7 +2414,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -2761,17 +2425,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_CLKHUB(ln, tc_port)); + val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); if (link_clock < 300000) val |= CFG_LOW_RATE_LKREN_EN; else val &= ~CFG_LOW_RATE_LKREN_EN; - I915_WRITE(MG_CLKHUB(ln, tc_port), val); + intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DCC(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2779,9 +2443,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX1_DCC(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val); - val = I915_READ(MG_TX2_DCC(ln, tc_port)); + val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2789,18 +2453,22 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX2_DCC(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port)); + val = intel_de_read(dev_priv, + MG_TX1_PISO_READLOAD(ln, tc_port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), + val); - val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port)); + val = intel_de_read(dev_priv, + MG_TX2_PISO_READLOAD(ln, tc_port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val); + intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), + val); } } @@ -2846,24 +2514,25 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control); for (ln = 0; ln < 2; ln++) { - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, ln)); - I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0); + intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); /* All the registers are RMW */ - val = I915_READ(DKL_TX_DPCNTL0(tc_port)); + val = intel_de_read(dev_priv, 
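In the MG PHY hunk above, the AMI clock-divider override in MG_TX_DCC is keyed off the DP link clock: at link clocks of 500000 and below (kHz, i.e. below HBR2) the hardware default divider is kept, while faster links force an override value of 1. A hedged sketch of that decision in isolation, as a pure function over the register value (helper name hypothetical):

static u32 mg_dcc_divider_sketch(u32 val, int link_clock)
{
	val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;

	if (link_clock <= 500000)	/* kHz; below HBR2 */
		val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;	/* keep HW default */
	else
		val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
		       CFG_AMI_CK_DIV_OVERRIDE_VAL(1);

	return val;
}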
DKL_TX_DPCNTL0(tc_port)); val &= ~dpcnt_mask; val |= dpcnt_val; - I915_WRITE(DKL_TX_DPCNTL0(tc_port), val); + intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val); - val = I915_READ(DKL_TX_DPCNTL1(tc_port)); + val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port)); val &= ~dpcnt_mask; val |= dpcnt_val; - I915_WRITE(DKL_TX_DPCNTL1(tc_port), val); + intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val); - val = I915_READ(DKL_TX_DPCNTL2(tc_port)); + val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port)); val &= ~DKL_TX_DP20BITMODE; - I915_WRITE(DKL_TX_DPCNTL2(tc_port), val); + intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val); } } @@ -2963,10 +2632,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); - val = I915_READ(ICL_DPCLKA_CFGCR0); - WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); + val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); + drm_WARN_ON(&dev_priv->drm, + (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); if (intel_phy_is_combo(dev_priv, phy)) { /* @@ -2981,14 +2651,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, */ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, val); - POSTING_READ(ICL_DPCLKA_CFGCR0); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); + intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0); } val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) @@ -2997,13 +2667,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); - val = I915_READ(ICL_DPCLKA_CFGCR0); + val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, @@ -3012,7 +2682,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, enum port port; u32 val; - val = I915_READ(ICL_DPCLKA_CFGCR0); + val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0); for_each_port_masked(port, port_mask) { enum phy phy = intel_port_to_phy(dev_priv, port); bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv, @@ -3025,13 +2695,13 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv, * Punt on the case now where clock is gated, but it would * be needed by the port. Something else is really broken then. */ - if (WARN_ON(ddi_clk_needed)) + if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed)) continue; DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", phy_name(phy)); val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); - I915_WRITE(ICL_DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); } } @@ -3057,7 +2727,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) * In the unlikely case that BIOS enables DP in MST mode, just * warn since our MST HW readout is incomplete. 
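Two things happen in the PLL-mapping hunks: dev_priv->dpll_lock becomes dev_priv->dpll.lock as the DPLL state moves into its own sub-struct, and every DPCLKA_CFGCR0 update remains a read-modify-write under that mutex. A minimal sketch of the ungate path; the helper name is hypothetical, icl_dpclka_cfgcr0_clk_off() is the driver's existing bit helper:

static void icl_ungate_ddi_clk_sketch(struct drm_i915_private *dev_priv,
				      enum phy phy)
{
	u32 val;

	mutex_lock(&dev_priv->dpll.lock);

	val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
	/* The "clock off" bit is active-high: clearing it ungates the DDI. */
	val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
	intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);

	mutex_unlock(&dev_priv->dpll.lock);
}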
*/ - if (WARN_ON(is_mst)) + if (drm_WARN_ON(&dev_priv->drm, is_mst)) return; } @@ -3076,7 +2746,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) if (other_encoder == encoder) continue; - if (WARN_ON(port_mask & BIT(other_encoder->port))) + if (drm_WARN_ON(&dev_priv->drm, + port_mask & BIT(other_encoder->port))) return; } /* @@ -3098,52 +2769,54 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, u32 val; const struct intel_shared_dpll *pll = crtc_state->shared_dpll; - if (WARN_ON(!pll)) + if (drm_WARN_ON(&dev_priv->drm, !pll)) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); if (INTEL_GEN(dev_priv) >= 11) { if (!intel_phy_is_combo(dev_priv, phy)) - I915_WRITE(DDI_CLK_SEL(port), - icl_pll_to_ddi_clk_sel(encoder, crtc_state)); + intel_de_write(dev_priv, DDI_CLK_SEL(port), + icl_pll_to_ddi_clk_sel(encoder, crtc_state)); else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C) /* * MG does not exist but the programming is required * to ungate DDIC and DDID */ - I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG); + intel_de_write(dev_priv, DDI_CLK_SEL(port), + DDI_CLK_SEL_MG); } else if (IS_CANNONLAKE(dev_priv)) { /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ - val = I915_READ(DPCLKA_CFGCR0); + val = intel_de_read(dev_priv, DPCLKA_CFGCR0); val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); - I915_WRITE(DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, DPCLKA_CFGCR0, val); /* * Configure DPCLKA_CFGCR0 to turn on the clock for the DDI. * This step and the step before must be done with separate * register writes. */ - val = I915_READ(DPCLKA_CFGCR0); + val = intel_de_read(dev_priv, DPCLKA_CFGCR0); val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); - I915_WRITE(DPCLKA_CFGCR0, val); + intel_de_write(dev_priv, DPCLKA_CFGCR0, val); } else if (IS_GEN9_BC(dev_priv)) { /* DDI -> PLL mapping */ - val = I915_READ(DPLL_CTRL2); + val = intel_de_read(dev_priv, DPLL_CTRL2); val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) | DPLL_CTRL2_DDI_CLK_SEL_MASK(port)); val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) | DPLL_CTRL2_DDI_SEL_OVERRIDE(port)); - I915_WRITE(DPLL_CTRL2, val); + intel_de_write(dev_priv, DPLL_CTRL2, val); } else if (INTEL_GEN(dev_priv) < 9) { - I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); + intel_de_write(dev_priv, PORT_CLK_SEL(port), + hsw_pll_to_ddi_pll_sel(pll)); } - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static void intel_ddi_clk_disable(struct intel_encoder *encoder) @@ -3155,15 +2828,17 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 11) { if (!intel_phy_is_combo(dev_priv, phy) || (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)) - I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); + intel_de_write(dev_priv, DDI_CLK_SEL(port), + DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { - I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) | - DPCLKA_CFGCR0_DDI_CLK_OFF(port)); + intel_de_write(dev_priv, DPCLKA_CFGCR0, + intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port)); } else if (IS_GEN9_BC(dev_priv)) { - I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) | - DPLL_CTRL2_DDI_CLK_OFF(port)); + intel_de_write(dev_priv, DPLL_CTRL2, + intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port)); } else if (INTEL_GEN(dev_priv) < 9) { - I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); + intel_de_write(dev_priv, PORT_CLK_SEL(port), + PORT_CLK_SEL_NONE); } } @@ -3180,13 +2855,15 @@ 
icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, return; if (INTEL_GEN(dev_priv) >= 12) { - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); - ln0 = I915_READ(DKL_DP_MODE(tc_port)); - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); - ln1 = I915_READ(DKL_DP_MODE(tc_port)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x0)); + ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x1)); + ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); } else { - ln0 = I915_READ(MG_DP_MODE(0, tc_port)); - ln1 = I915_READ(MG_DP_MODE(1, tc_port)); + ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port)); + ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); } ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE); @@ -3198,7 +2875,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, switch (pin_assignment) { case 0x0: - WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY); + drm_WARN_ON(&dev_priv->drm, + intel_dig_port->tc_mode != TC_PORT_LEGACY); if (width == 1) { ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { @@ -3243,13 +2921,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, } if (INTEL_GEN(dev_priv) >= 12) { - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); - I915_WRITE(DKL_DP_MODE(tc_port), ln0); - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); - I915_WRITE(DKL_DP_MODE(tc_port), ln1); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x0)); + intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x1)); + intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1); } else { - I915_WRITE(MG_DP_MODE(0, tc_port), ln0); - I915_WRITE(MG_DP_MODE(1, tc_port), ln1); + intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0); + intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1); } } @@ -3274,9 +2954,9 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, return; intel_dp = enc_to_intel_dp(encoder); - val = I915_READ(intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); val |= DP_TP_CTL_FEC_ENABLE; - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) @@ -3294,90 +2974,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, return; intel_dp = enc_to_intel_dp(encoder); - val = I915_READ(intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_FEC_ENABLE; - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); - POSTING_READ(intel_dp->regs.dp_tp_ctl); -} - -static void -tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) -{ - struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); - u32 val; - - if (!cstate->dc3co_exitline) - return; - - val = I915_READ(EXITLINE(cstate->cpu_transcoder)); - val &= ~(EXITLINE_MASK | EXITLINE_ENABLE); - I915_WRITE(EXITLINE(cstate->cpu_transcoder), val); -} - -static void -tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) -{ - u32 val, exit_scanlines; - struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); - - if (!cstate->dc3co_exitline) - return; - - exit_scanlines = cstate->dc3co_exitline; - exit_scanlines <<= EXITLINE_SHIFT; - val = 
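The FEC hunk above is the canonical enable-then-poll shape: set DP_TP_CTL_FEC_ENABLE, then wait for DP_TP_STATUS to report FEC live via intel_de_wait_for_set(), one of the wrappers added in intel_de.h below (timeout in milliseconds). A condensed sketch, assuming intel_dp->regs was initialized as in the later hunks; the helper name is hypothetical:

static void ddi_enable_fec_sketch(struct drm_i915_private *dev_priv,
				  struct intel_dp *intel_dp)
{
	u32 val;

	val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
	val |= DP_TP_CTL_FEC_ENABLE;
	intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);

	/* Poll for up to 1 ms for the FEC status to go live. */
	if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
		drm_err(&dev_priv->drm,
			"Timed out waiting for FEC Enable Status\n");
}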
I915_READ(EXITLINE(cstate->cpu_transcoder)); - val &= ~(EXITLINE_MASK | EXITLINE_ENABLE); - val |= exit_scanlines; - val |= EXITLINE_ENABLE; - I915_WRITE(EXITLINE(cstate->cpu_transcoder), val); -} - -static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, - struct intel_crtc_state *cstate) -{ - u32 exit_scanlines; - struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev); - u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay; - - cstate->dc3co_exitline = 0; - - if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) - return; - - /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ - if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A || - encoder->port != PORT_A) - return; - - if (!cstate->has_psr2 || !cstate->hw.active) - return; - - /* - * DC3CO Exit time 200us B.Spec 49196 - * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 - */ - exit_scanlines = - intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1; - - if (WARN_ON(exit_scanlines > crtc_vdisplay)) - return; - - cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines; - DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline); -} - -static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state) -{ - u32 val; - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - - if (INTEL_GEN(dev_priv) < 12) - return; - - val = I915_READ(EXITLINE(crtc_state->cpu_transcoder)); - - if (val & EXITLINE_ENABLE) - crtc_state->dc3co_exitline = val & EXITLINE_MASK; + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); + intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); } static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, @@ -3392,7 +2992,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, int level = intel_ddi_dp_level(intel_dp); enum transcoder transcoder = crtc_state->cpu_transcoder; - tgl_set_psr2_transcoder_exitline(crtc_state); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); @@ -3534,16 +3133,14 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, int level = intel_ddi_dp_level(intel_dp); if (INTEL_GEN(dev_priv) < 11) - WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + drm_WARN_ON(&dev_priv->drm, + is_mst && (port == PORT_A || port == PORT_E)); else - WARN_ON(is_mst && port == PORT_A); + drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A); intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); - intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); - intel_edp_panel_on(intel_dp); intel_ddi_clk_select(encoder, crtc_state); @@ -3607,8 +3204,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, /* MST will call a setting of MSA after an allocating of Virtual Channel * from MST encoder pre_enable callback. 
*/ - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { intel_ddi_set_dp_msa(crtc_state, conn_state); + + intel_dp_set_m_n(crtc_state, M1_N1); + } } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -3618,8 +3218,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; - int level = intel_ddi_hdmi_level(dev_priv, port); + int level = intel_ddi_hdmi_level(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); @@ -3673,7 +3272,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, * the DP link parameters */ - WARN_ON(crtc_state->has_pch_encoder); + drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder); if (INTEL_GEN(dev_priv) >= 11) icl_map_plls_to_ports(encoder, crtc_state); @@ -3706,20 +3305,20 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, bool wait = false; u32 val; - val = I915_READ(DDI_BUF_CTL(port)); + val = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (val & DDI_BUF_CTL_ENABLE) { val &= ~DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), val); + intel_de_write(dev_priv, DDI_BUF_CTL(port), val); wait = true; } if (intel_crtc_has_dp_encoder(crtc_state)) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - val = I915_READ(intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); } /* Disable FEC in DP Sink */ @@ -3751,9 +3350,13 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; u32 val; - val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - val &= ~TGL_TRANS_DDI_PORT_MASK; - I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val); + val = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(cpu_transcoder)); + val &= ~(TGL_TRANS_DDI_PORT_MASK | + TRANS_DDI_MODE_SELECT_MASK); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL(cpu_transcoder), + val); } } else { if (!is_mst) @@ -3779,7 +3382,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); - tgl_clear_psr2_transcoder_exitline(old_crtc_state); } static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, @@ -3816,7 +3418,8 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_ DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", transcoder_name(old_crtc_state->cpu_transcoder)); - I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0); } static void intel_ddi_post_disable(struct intel_encoder *encoder, @@ -3890,25 +3493,25 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, * step 13 is the correct place for it. Step 18 is where it was * originally before the BUN.
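intel_disable_ddi_buf() above pairs clearing DDI_BUF_CTL_ENABLE with dropping DP_TP_CTL back to training pattern 1, and its callers then wait for the buffer to go idle. A sketch of the disable half; the helper name is hypothetical, intel_wait_ddi_buf_idle() is the driver's existing poll loop:

static void ddi_buf_disable_sketch(struct drm_i915_private *dev_priv,
				   enum port port)
{
	u32 val = intel_de_read(dev_priv, DDI_BUF_CTL(port));

	if (val & DDI_BUF_CTL_ENABLE) {
		intel_de_write(dev_priv, DDI_BUF_CTL(port),
			       val & ~DDI_BUF_CTL_ENABLE);
		/* The buffer needs a moment before it reports idle. */
		intel_wait_ddi_buf_idle(dev_priv, port);
	}
}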
*/ - val = I915_READ(FDI_RX_CTL(PIPE_A)); + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); val &= ~FDI_RX_ENABLE; - I915_WRITE(FDI_RX_CTL(PIPE_A), val); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); intel_disable_ddi_buf(encoder, old_crtc_state); intel_ddi_clk_disable(encoder); - val = I915_READ(FDI_RX_MISC(PIPE_A)); + val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - I915_WRITE(FDI_RX_MISC(PIPE_A), val); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - val = I915_READ(FDI_RX_CTL(PIPE_A)); + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); val &= ~FDI_PCDCLK; - I915_WRITE(FDI_RX_CTL(PIPE_A), val); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - val = I915_READ(FDI_RX_CTL(PIPE_A)); + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); val &= ~FDI_RX_PLL_ENABLE; - I915_WRITE(FDI_RX_CTL(PIPE_A), val); + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); } static void intel_enable_ddi_dp(struct intel_encoder *encoder, @@ -3944,9 +3547,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, [PORT_E] = TRANSCODER_A, }; - WARN_ON(INTEL_GEN(dev_priv) < 9); + drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9); - if (WARN_ON(port < PORT_A || port > PORT_E)) + if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E)) port = PORT_A; return CHICKEN_TRANS(trans[port]); @@ -3964,8 +3567,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, if (!intel_hdmi_handle_sink_scrambling(encoder, connector, crtc_state->hdmi_high_tmds_clock_ratio, crtc_state->hdmi_scrambling)) - DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n", - connector->base.id, connector->name); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink " + "scrambling/TMDS bit clock ratio\n", + connector->base.id, connector->name); /* Display WA #1143: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) { @@ -3978,7 +3582,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if (port == PORT_E) val |= DDIE_TRAINING_OVERRIDE_ENABLE | @@ -3987,8 +3591,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, val |= DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); udelay(1); @@ -3999,15 +3603,15 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | DDI_TRAINING_OVERRIDE_VALUE); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides * enabling the port. 
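The Display WA #1143 hunk keeps its write/flush/delay rhythm through the conversion: intel_de_posting_read() stands in for POSTING_READ() as the read-back that flushes the preceding MMIO write before the dwell time starts. A hedged sketch of the idiom; the helper name is hypothetical, reg would be the CHICKEN_TRANS instance picked above:

static void training_override_pulse_sketch(struct drm_i915_private *dev_priv,
					   i915_reg_t reg, u32 bits)
{
	u32 val = intel_de_read(dev_priv, reg);

	intel_de_write(dev_priv, reg, val | bits);
	/* The posting read flushes the write before the dwell is timed. */
	intel_de_posting_read(dev_priv, reg);
	udelay(1);
	intel_de_write(dev_priv, reg, val & ~bits);
}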
*/ - I915_WRITE(DDI_BUF_CTL(port), - dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE); + intel_de_write(dev_priv, DDI_BUF_CTL(port), + dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE); if (crtc_state->has_audio) intel_audio_codec_enable(encoder, crtc_state, conn_state); @@ -4017,6 +3621,12 @@ static void intel_enable_ddi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { + WARN_ON(crtc_state->has_pch_encoder); + + intel_enable_pipe(crtc_state); + + intel_crtc_vblank_on(crtc_state); + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_enable_ddi_hdmi(encoder, crtc_state, conn_state); else @@ -4096,43 +3706,11 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct intel_connector *connector = - to_intel_connector(conn_state->connector); - struct intel_hdcp *hdcp = &connector->hdcp; - bool content_protection_type_changed = - (conn_state->hdcp_content_type != hdcp->content_type && - conn_state->content_protection != - DRM_MODE_CONTENT_PROTECTION_UNDESIRED); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); - /* - * During the HDCP encryption session if Type change is requested, - * disable the HDCP and reenable it with new TYPE value. - */ - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_UNDESIRED || - content_protection_type_changed) - intel_hdcp_disable(connector); - - /* - * Mark the hdcp state as DESIRED after the hdcp disable of type - * change procedure. - */ - if (content_protection_type_changed) { - mutex_lock(&hdcp->mutex); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - mutex_unlock(&hdcp->mutex); - } - - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED || - content_protection_type_changed) - intel_hdcp_enable(connector, - crtc_state->cpu_transcoder, - (u8)conn_state->hdcp_content_type); + intel_hdcp_update_pipe(encoder, crtc_state, conn_state); } static void @@ -4197,20 +3775,20 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) u32 dp_tp_ctl, ddi_buf_ctl; bool wait = false; - dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl); + dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); if (dp_tp_ctl & DP_TP_CTL_ENABLE) { - ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port)); + ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port)); if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) { - I915_WRITE(DDI_BUF_CTL(port), - ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE); + intel_de_write(dev_priv, DDI_BUF_CTL(port), + ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE); wait = true; } dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl); - POSTING_READ(intel_dp->regs.dp_tp_ctl); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl); + intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); @@ -4225,12 +3803,12 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl); - POSTING_READ(intel_dp->regs.dp_tp_ctl); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl); + intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl); intel_dp->DP |= 
DDI_BUF_CTL_ENABLE; - I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); - POSTING_READ(DDI_BUF_CTL(port)); + intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); + intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); udelay(600); } @@ -4244,14 +3822,16 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) return false; - return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) & + return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) & AUDIO_OUTPUT_ENABLE(cpu_transcoder); } void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { - if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000) + if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 2; + else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 3; else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; @@ -4265,15 +3845,21 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ - if (WARN_ON(transcoder_is_dsi(cpu_transcoder))) + if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; + if (INTEL_GEN(dev_priv) >= 12) { + intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder); + intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder); + } + intel_dsc_get_config(encoder, pipe_config); - temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; else @@ -4342,7 +3928,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder); pipe_config->fec_enable = - I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; + intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n", encoder->base.base.id, encoder->base.name, @@ -4365,9 +3951,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } - if (encoder->type == INTEL_OUTPUT_EDP) - tgl_dc3co_exitline_get_config(pipe_config); - pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -4449,7 +4032,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); } else { ret = intel_dp_compute_config(encoder, pipe_config, conn_state); - tgl_dc3co_exitline_compute_config(encoder, pipe_config); } if (ret) @@ -4470,6 +4052,112 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, return 0; } +static bool mode_equal(const struct drm_display_mode *mode1, + const struct drm_display_mode *mode2) +{ + return drm_mode_match(mode1, mode2, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS) && + mode1->clock == mode2->clock; /* we want an exact match */ +} + +static bool m_n_equal(const struct intel_link_m_n *m_n_1, + const struct intel_link_m_n *m_n_2) +{ + return m_n_1->tu == m_n_2->tu && + m_n_1->gmch_m == m_n_2->gmch_m && + m_n_1->gmch_n == m_n_2->gmch_n && + m_n_1->link_m == m_n_2->link_m && + m_n_1->link_n == 
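Scattered over several hunks here is one logical change: DP_TP_CTL/DP_TP_STATUS are now cached in intel_dp->regs rather than recomputed at each use, because on gen12 they moved from per-port to per-transcoder instances. Gathered into one hedged sketch (the helper name is hypothetical; the real assignments live in intel_ddi_get_config() above and intel_ddi_init_dp_connector() below):

static void cache_dp_tp_regs_sketch(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    enum transcoder cpu_transcoder,
				    enum port port)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		/* Gen12+: the DP_TP_* instances follow the transcoder. */
		intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(cpu_transcoder);
		intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(cpu_transcoder);
	} else {
		/* Through gen11: one instance per DDI port. */
		intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port);
		intel_dp->regs.dp_tp_status = DP_TP_STATUS(port);
	}
}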
m_n_2->link_n; +} + +static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1, + const struct intel_crtc_state *crtc_state2) +{ + return crtc_state1->hw.active && crtc_state2->hw.active && + crtc_state1->output_types == crtc_state2->output_types && + crtc_state1->output_format == crtc_state2->output_format && + crtc_state1->lane_count == crtc_state2->lane_count && + crtc_state1->port_clock == crtc_state2->port_clock && + mode_equal(&crtc_state1->hw.adjusted_mode, + &crtc_state2->hw.adjusted_mode) && + m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n); +} + +static u8 +intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state, + int tile_group_id) +{ + struct drm_connector *connector; + const struct drm_connector_state *conn_state; + struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev); + struct intel_atomic_state *state = + to_intel_atomic_state(ref_crtc_state->uapi.state); + u8 transcoders = 0; + int i; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP)) + return 0; + + for_each_new_connector_in_state(&state->base, connector, conn_state, i) { + struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc); + const struct intel_crtc_state *crtc_state; + + if (!crtc) + continue; + + if (!connector->has_tile || + connector->tile_group->id != + tile_group_id) + continue; + crtc_state = intel_atomic_get_new_crtc_state(state, + crtc); + if (!crtcs_port_sync_compatible(ref_crtc_state, + crtc_state)) + continue; + transcoders |= BIT(crtc_state->cpu_transcoder); + } + + return transcoders; +} + +static int intel_ddi_compute_config_late(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_connector *connector = conn_state->connector; + u8 port_sync_transcoders = 0; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]", + encoder->base.base.id, encoder->base.name, + crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name); + + if (connector->has_tile) + port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state, + connector->tile_group->id); + + /* + * EDP Transcoders cannot be enslaved, + * so always make them a master when present + */ + if (port_sync_transcoders & BIT(TRANSCODER_EDP)) + crtc_state->master_transcoder = TRANSCODER_EDP; + else + crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1; + + if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) { + crtc_state->master_transcoder = INVALID_TRANSCODER; + crtc_state->sync_mode_slaves_mask = + port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder); + } + + return 0; +} + static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); @@ -4488,6 +4176,7 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { static struct intel_connector * intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) { + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); struct intel_connector *connector; enum port port = intel_dig_port->base.port; @@ -4498,6 +4187,10 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; + if (INTEL_GEN(dev_priv) < 12) { + intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); + }
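The master pick in intel_ddi_compute_config_late() above is plain bit arithmetic on the mask of port-sync-compatible transcoders: eDP wins when present, otherwise the lowest set bit (ffs() is 1-based, hence the -1), and a CRTC that turns out to be its own master records the rest as slaves. A tiny worked illustration with made-up values:

static void port_sync_mask_sketch(void)
{
	/* Say transcoders A and C were found port-sync compatible: */
	u8 transcoders = BIT(TRANSCODER_A) | BIT(TRANSCODER_C);
	enum transcoder master = ffs(transcoders) - 1;	/* TRANSCODER_A */
	u8 slaves = transcoders & ~BIT(master);	/* just BIT(TRANSCODER_C) */

	(void)slaves;	/* illustration only */
}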
if (!intel_dp_init_connector(intel_dig_port, connector)) { kfree(connector); @@ -4569,7 +4262,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, crtc_state = to_intel_crtc_state(crtc->base.state); - WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); + drm_WARN_ON(&dev_priv->drm, + !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)); if (!crtc_state->hw.active) return 0; @@ -4636,7 +4330,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder, drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); - WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + drm_WARN(encoder->base.dev, ret, + "Acquiring modeset locks failed with %i\n", ret); /* * Unpowered type-c dongles can take some time to boot and be @@ -4716,7 +4411,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) return max_lanes; if (port == PORT_A || port == PORT_E) { - if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) max_lanes = port == PORT_A ? 4 : 0; else /* Both A and E share 2 lanes */ @@ -4739,15 +4434,14 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { - struct ddi_vbt_port_info *port_info = - &dev_priv->vbt.ddi_port_info[port]; struct intel_digital_port *intel_dig_port; struct intel_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; enum phy phy = intel_port_to_phy(dev_priv, port); - init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; - init_dp = port_info->supports_dp; + init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) || + intel_bios_port_supports_hdmi(dev_priv, port); + init_dp = intel_bios_port_supports_dp(dev_priv, port); if (intel_bios_is_lspcon_present(dev_priv, port)) { /* @@ -4779,6 +4473,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->hotplug = intel_ddi_hotplug; encoder->compute_output_type = intel_ddi_compute_output_type; encoder->compute_config = intel_ddi_compute_config; + encoder->compute_config_late = intel_ddi_compute_config_late; encoder->enable = intel_enable_ddi; encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; @@ -4797,10 +4492,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->pipe_mask = ~0; if (INTEL_GEN(dev_priv) >= 11) - intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + intel_dig_port->saved_port_bits = intel_de_read(dev_priv, + DDI_BUF_CTL(port)) & DDI_BUF_PORT_REVERSAL; else - intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & + intel_dig_port->saved_port_bits = intel_de_read(dev_priv, + DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); intel_dig_port->dp.output_reg = INVALID_MMIO_REG; @@ -4808,8 +4505,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); if (intel_phy_is_tc(dev_priv, phy)) { - bool is_legacy = !port_info->supports_typec_usb && - !port_info->supports_tbt; + bool is_legacy = + !intel_bios_port_supports_typec_usb(dev_priv, port) && + !intel_bios_port_supports_tbt(dev_priv, port); intel_tc_port_init(intel_dig_port, is_legacy); @@ -4817,7 +4515,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->update_complete = intel_ddi_update_complete; } - WARN_ON(port > PORT_I); + drm_WARN_ON(&dev_priv->drm, port > PORT_I); intel_dig_port->ddi_io_power_domain = 
POWER_DOMAIN_PORT_DDI_A_IO + port - PORT_A; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index 167c6579d972..55fd72b901fe 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -6,8 +6,6 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include <drm/i915_drm.h> - #include "intel_display.h" struct drm_connector_state; @@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); -int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - struct intel_dpll_hw_state *state); #endif /* __INTEL_DDI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h new file mode 100644 index 000000000000..00da10bf35f5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_de.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_DE_H__ +#define __INTEL_DE_H__ + +#include "i915_drv.h" +#include "i915_reg.h" +#include "intel_uncore.h" + +static inline u32 +intel_de_read(struct drm_i915_private *i915, i915_reg_t reg) +{ + return intel_uncore_read(&i915->uncore, reg); +} + +static inline void +intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg) +{ + intel_uncore_posting_read(&i915->uncore, reg); +} + +/* Note: read the warnings for intel_uncore_*_fw() functions! */ +static inline u32 +intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg) +{ + return intel_uncore_read_fw(&i915->uncore, reg); +} + +static inline void +intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val) +{ + intel_uncore_write(&i915->uncore, reg, val); +} + +/* Note: read the warnings for intel_uncore_*_fw() functions! 
*/ +static inline void +intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val) +{ + intel_uncore_write_fw(&i915->uncore, reg, val); +} + +static inline void +intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set) +{ + intel_uncore_rmw(&i915->uncore, reg, clear, set); +} + +static inline int +intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg, + u32 mask, u32 value, unsigned int timeout) +{ + return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout); +} + +static inline int +intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg, + u32 mask, unsigned int timeout) +{ + return intel_de_wait_for_register(i915, reg, mask, mask, timeout); +} + +static inline int +intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg, + u32 mask, unsigned int timeout) +{ + return intel_de_wait_for_register(i915, reg, mask, 0, timeout); +} + +#endif /* __INTEL_DE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index aa453953908b..346846609f45 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -41,7 +41,6 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> -#include <drm/i915_drm.h> #include "display/intel_crt.h" #include "display/intel_ddi.h" @@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, val = vlv_cck_read(dev_priv, reg); divider = val & CCK_FREQUENCY_VALUES; - WARN((val & CCK_FREQUENCY_STATUS) != - (divider << CCK_FREQUENCY_STATUS_SHIFT), - "%s change in progress\n", name); + drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) != + (divider << CCK_FREQUENCY_STATUS_SHIFT), + "%s change in progress\n", name); return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); } @@ -235,7 +234,8 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv) dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", CCK_CZ_CLOCK_CONTROL); - DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); + drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", + dev_priv->czclk_freq); } static inline u32 /* units of 100MHz */ @@ -518,13 +518,11 @@ static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - I915_WRITE(CLKGATE_DIS_PSL(pipe), - I915_READ(CLKGATE_DIS_PSL(pipe)) | - DUPS1_GATING_DIS | DUPS2_GATING_DIS); + intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), + intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); else - I915_WRITE(CLKGATE_DIS_PSL(pipe), - I915_READ(CLKGATE_DIS_PSL(pipe)) & - ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); + intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), + intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); } /* Wa_2006604312:icl */ @@ -533,11 +531,11 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - I915_WRITE(CLKGATE_DIS_PSL(pipe), - I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); + intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), + intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); else - I915_WRITE(CLKGATE_DIS_PSL(pipe), - I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); + intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), + intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); } static bool @@ -883,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int 
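The new intel_de.h is nothing but thin static-inline forwarding onto intel_uncore_*(), so display code can stop open-coding the uncore plumbing. One practical consequence: an open-coded read-modify-write like the skl_wa_827() conversion above can equally be spelled with the intel_de_rmw() wrapper. Both forms in this sketch have the same effect; the diff itself keeps the explicit read/write form:

static void skl_wa_827_rmw_sketch(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	/* Open-coded, as the diff does it: */
	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
		       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) |
		       DUPS1_GATING_DIS | DUPS2_GATING_DIS);

	/* The same effect via the wrapper: clear nothing, set both bits. */
	intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0,
		     DUPS1_GATING_DIS | DUPS2_GATING_DIS);
}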
target_freq, return calculated_clock->p > best_clock->p; } - if (WARN_ON_ONCE(!target_freq)) + if (drm_WARN_ON_ONCE(dev, !target_freq)) return false; *error_ppm = div_u64(1000000ULL * @@ -1049,9 +1047,9 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, else line_mask = DSL_LINEMASK_GEN3; - line1 = I915_READ(reg) & line_mask; + line1 = intel_de_read(dev_priv, reg) & line_mask; msleep(5); - line2 = I915_READ(reg) & line_mask; + line2 = intel_de_read(dev_priv, reg) & line_mask; return line1 != line2; } @@ -1063,8 +1061,9 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) /* Wait for the display line to settle/start moving */ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) - DRM_ERROR("pipe %c scanline %s wait timed out\n", - pipe_name(pipe), onoff(state)); + drm_err(&dev_priv->drm, + "pipe %c scanline %s wait timed out\n", + pipe_name(pipe), onoff(state)); } static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) @@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) /* Wait for the Pipe State to go off */ if (intel_de_wait_for_clear(dev_priv, reg, I965_PIPECONF_ACTIVE, 100)) - WARN(1, "pipe_off wait timed out\n"); + drm_WARN(&dev_priv->drm, 1, + "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); } @@ -1103,7 +1103,7 @@ void assert_pll(struct drm_i915_private *dev_priv, u32 val; bool cur_state; - val = I915_READ(DPLL(pipe)); + val = intel_de_read(dev_priv, DPLL(pipe)); cur_state = !!(val & DPLL_VCO_ENABLE); I915_STATE_WARN(cur_state != state, "PLL state assertion failure (expected %s, current %s)\n", @@ -1139,10 +1139,11 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, * so pipe->transcoder cast is fine here. 
*/ enum transcoder cpu_transcoder = (enum transcoder)pipe; - u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + u32 val = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(cpu_transcoder)); cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); } else { - u32 val = I915_READ(FDI_TX_CTL(pipe)); + u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); cur_state = !!(val & FDI_TX_ENABLE); } I915_STATE_WARN(cur_state != state, @@ -1158,7 +1159,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, u32 val; bool cur_state; - val = I915_READ(FDI_RX_CTL(pipe)); + val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); cur_state = !!(val & FDI_RX_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX state assertion failure (expected %s, current %s)\n", @@ -1180,7 +1181,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, if (HAS_DDI(dev_priv)) return; - val = I915_READ(FDI_TX_CTL(pipe)); + val = intel_de_read(dev_priv, FDI_TX_CTL(pipe)); I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); } @@ -1190,7 +1191,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, u32 val; bool cur_state; - val = I915_READ(FDI_RX_CTL(pipe)); + val = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); cur_state = !!(val & FDI_RX_PLL_ENABLE); I915_STATE_WARN(cur_state != state, "FDI RX PLL assertion failure (expected %s, current %s)\n", @@ -1204,14 +1205,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) enum pipe panel_pipe = INVALID_PIPE; bool locked = true; - if (WARN_ON(HAS_DDI(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv))) return; if (HAS_PCH_SPLIT(dev_priv)) { u32 port_sel; pp_reg = PP_CONTROL(0); - port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; + port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; switch (port_sel) { case PANEL_PORT_SELECT_LVDS: @@ -1238,13 +1239,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) u32 port_sel; pp_reg = PP_CONTROL(0); - port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; + port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; - WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS); + drm_WARN_ON(&dev_priv->drm, + port_sel != PANEL_PORT_SELECT_LVDS); intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); } - val = I915_READ(pp_reg); + val = intel_de_read(dev_priv, pp_reg); if (!(val & PANEL_POWER_ON) || ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) locked = false; @@ -1268,7 +1270,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (wakeref) { - u32 val = I915_READ(PIPECONF(cpu_transcoder)); + u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); cur_state = !!(val & PIPECONF_ENABLE); intel_display_power_put(dev_priv, power_domain, wakeref); @@ -1318,7 +1320,7 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, u32 val; bool enabled; - val = I915_READ(PCH_TRANSCONF(pipe)); + val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); enabled = !!(val & TRANS_ENABLE); I915_STATE_WARN(enabled, "transcoder assertion failed, should be off on pipe %c but is still active\n", @@ -1392,12 +1394,12 @@ static void _vlv_enable_pll(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); - 
POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); + intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - DRM_ERROR("DPLL %d failed to lock\n", pipe); + drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe); } static void vlv_enable_pll(struct intel_crtc *crtc, @@ -1414,8 +1416,9 @@ static void vlv_enable_pll(struct intel_crtc *crtc, if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) _vlv_enable_pll(crtc, pipe_config); - I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); - POSTING_READ(DPLL_MD(pipe)); + intel_de_write(dev_priv, DPLL_MD(pipe), + pipe_config->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } @@ -1442,11 +1445,11 @@ static void _chv_enable_pll(struct intel_crtc *crtc, udelay(1); /* Enable PLL */ - I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); + intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll); /* Check PLL is locked */ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1)) - DRM_ERROR("PLL %d failed to lock\n", pipe); + drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe); } static void chv_enable_pll(struct intel_crtc *crtc, @@ -1470,19 +1473,23 @@ static void chv_enable_pll(struct intel_crtc *crtc, * DPLLCMD is AWOL. Use chicken bits to propagate * the value from DPLLBMD to either pipe B or C. */ - I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); - I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md); - I915_WRITE(CBR4_VLV, 0); + intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); + intel_de_write(dev_priv, DPLL_MD(PIPE_B), + pipe_config->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, CBR4_VLV, 0); dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; /* * DPLLB VGA mode also seems to cause problems. * We should always have it disabled. */ - WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); + drm_WARN_ON(&dev_priv->drm, + (intel_de_read(dev_priv, DPLL(PIPE_B)) & + DPLL_VGA_MODE_DIS) == 0); } else { - I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); - POSTING_READ(DPLL_MD(pipe)); + intel_de_write(dev_priv, DPLL_MD(pipe), + pipe_config->dpll_hw_state.dpll_md); + intel_de_posting_read(dev_priv, DPLL_MD(pipe)); } } @@ -1513,29 +1520,29 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS); - I915_WRITE(reg, dpll); + intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS); + intel_de_write(dev_priv, reg, dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); if (INTEL_GEN(dev_priv) >= 4) { - I915_WRITE(DPLL_MD(crtc->pipe), - crtc_state->dpll_hw_state.dpll_md); + intel_de_write(dev_priv, DPLL_MD(crtc->pipe), + crtc_state->dpll_hw_state.dpll_md); } else { /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * * So write it again. 
*/ - I915_WRITE(reg, dpll); + intel_de_write(dev_priv, reg, dpll); } /* We do this three times for luck */ for (i = 0; i < 3; i++) { - I915_WRITE(reg, dpll); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, dpll); + intel_de_posting_read(dev_priv, reg); udelay(150); /* wait for warmup */ } } @@ -1553,8 +1560,8 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); - I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); - POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); + intel_de_posting_read(dev_priv, DPLL(pipe)); } static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1569,8 +1576,8 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; - I915_WRITE(DPLL(pipe), val); - POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), val); + intel_de_posting_read(dev_priv, DPLL(pipe)); } static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1586,8 +1593,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; - I915_WRITE(DPLL(pipe), val); - POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), val); + intel_de_posting_read(dev_priv, DPLL(pipe)); vlv_dpio_get(dev_priv); @@ -1626,9 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, if (intel_de_wait_for_register(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) - WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", - dport->base.base.base.id, dport->base.base.name, - I915_READ(dpll_reg) & port_mask, expected_mask); + drm_WARN(&dev_priv->drm, 1, + "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", + dport->base.base.base.id, dport->base.base.name, + intel_de_read(dev_priv, dpll_reg) & port_mask, + expected_mask); } static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) @@ -1648,7 +1657,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) if (HAS_PCH_CPT(dev_priv)) { reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); /* * Workaround: Set the timing override bit * before enabling the pch transcoder. 
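Most of this patch is a mechanical conversion: each I915_READ()/I915_WRITE() on a display register becomes intel_de_read()/intel_de_write() on the same register, POSTING_READ() becomes intel_de_posting_read(), and bare DRM_ERROR()/DRM_DEBUG_KMS() calls gain a device pointer via drm_err()/drm_dbg_kms(). For the recurring read-modify-write sequences, such as the TRANS_CHICKEN2 timing-override update in the ilk_enable_pch_transcoder() hunks here, the intel_de_rmw() helper declared in intel_de.h above could express the same update in a single call. A minimal sketch, not part of this patch (the wrapper name is hypothetical; the register and bit names are the ones used in the surrounding hunks):

static void
cpt_set_transcoder_timing_override(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	/*
	 * intel_de_rmw() reads the register, clears the bits in its
	 * third argument, ORs in the fourth and writes the result back,
	 * so this matches the open-coded read/modify/write sequence.
	 */
	intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe),
		     TRANS_CHICKEN2_FRAME_START_DELAY_MASK,
		     TRANS_CHICKEN2_TIMING_OVERRIDE |
		     TRANS_CHICKEN2_FRAME_START_DELAY(0));
}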
@@ -1657,12 +1666,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) /* Configure frame start delay to match the CPU */ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } reg = PCH_TRANSCONF(pipe); - val = I915_READ(reg); - pipeconf_val = I915_READ(PIPECONF(pipe)); + val = intel_de_read(dev_priv, reg); + pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); if (HAS_PCH_IBX(dev_priv)) { /* Configure frame start delay to match the CPU */ @@ -1692,9 +1701,10 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) val |= TRANS_PROGRESSIVE; } - I915_WRITE(reg, val | TRANS_ENABLE); + intel_de_write(dev_priv, reg, val | TRANS_ENABLE); if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); + drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", + pipe_name(pipe)); } static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, @@ -1706,16 +1716,16 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); assert_fdi_rx_enabled(dev_priv, PIPE_A); - val = I915_READ(TRANS_CHICKEN2(PIPE_A)); + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); /* Workaround: set timing override bit. */ val |= TRANS_CHICKEN2_TIMING_OVERRIDE; /* Configure frame start delay to match the CPU */ val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); - I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; - pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); + pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACED_ILK) @@ -1723,10 +1733,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, else val |= TRANS_PROGRESSIVE; - I915_WRITE(LPT_TRANSCONF, val); + intel_de_write(dev_priv, LPT_TRANSCONF, val); if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 100)) - DRM_ERROR("Failed to enable PCH transcoder\n"); + drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); } static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, @@ -1743,19 +1753,20 @@ static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, assert_pch_ports_disabled(dev_priv, pipe); reg = PCH_TRANSCONF(pipe); - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~TRANS_ENABLE; - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) - DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); + drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", + pipe_name(pipe)); if (HAS_PCH_CPT(dev_priv)) { /* Workaround: Clear the timing override chicken bit again. 
*/ reg = TRANS_CHICKEN2(pipe); - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } } @@ -1763,18 +1774,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(LPT_TRANSCONF); + val = intel_de_read(dev_priv, LPT_TRANSCONF); val &= ~TRANS_ENABLE; - I915_WRITE(LPT_TRANSCONF, val); + intel_de_write(dev_priv, LPT_TRANSCONF, val); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 50)) - DRM_ERROR("Failed to disable PCH transcoder\n"); + drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. */ - val = I915_READ(TRANS_CHICKEN2(PIPE_A)); + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); } enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) @@ -1807,7 +1818,7 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state return 0; /* Gen2 doesn't have a hardware frame counter */ } -static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) +void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1825,7 +1836,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) assert_vblank_disabled(&crtc->base); } -static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) +void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1834,7 +1845,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) i915_reg_t reg; u32 val; - DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); assert_planes_disabled(crtc); @@ -1862,15 +1873,15 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) trace_intel_pipe_enable(crtc); reg = PIPECONF(cpu_transcoder); - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if (val & PIPECONF_ENABLE) { /* we keep both pipes enabled on 830 */ - WARN_ON(!IS_I830(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); return; } - I915_WRITE(reg, val | PIPECONF_ENABLE); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); + intel_de_posting_read(dev_priv, reg); /* * Until the pipe starts PIPEDSL reads will return a stale value, @@ -1892,7 +1903,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) i915_reg_t reg; u32 val; - DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); /* * Make sure planes won't keep trying to pump pixels to us, @@ -1903,7 +1914,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) trace_intel_pipe_disable(crtc); reg = PIPECONF(cpu_transcoder); - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if ((val & PIPECONF_ENABLE) == 0) return; @@ -1918,7 +1929,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) if (!IS_I830(dev_priv)) val &= ~PIPECONF_ENABLE; - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); if ((val & 
PIPECONF_ENABLE) == 0) intel_wait_for_pipe_off(old_crtc_state); } @@ -2211,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int pinctl; u32 alignment; - if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) + if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj))) return ERR_PTR(-EINVAL); alignment = intel_surf_alignment(fb, 0); - if (WARN_ON(alignment && !is_power_of_2(alignment))) + if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment))) return ERR_PTR(-EINVAL); /* Note that the w/a also requires 64 PTE of padding following the @@ -2386,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, struct drm_i915_private *dev_priv = to_i915(fb->dev); unsigned int cpp = fb->format->cpp[color_plane]; - WARN_ON(new_offset > old_offset); + drm_WARN_ON(&dev_priv->drm, new_offset > old_offset); if (!is_surface_linear(fb, color_plane)) { unsigned int tile_size, tile_width, tile_height; @@ -2537,8 +2548,9 @@ static int intel_fb_offset_to_xy(int *x, int *y, alignment = 0; if (alignment != 0 && fb->offsets[color_plane] % alignment) { - DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n", - fb->offsets[color_plane], color_plane); + drm_dbg_kms(&dev_priv->drm, + "Misaligned offset 0x%08x for color plane %d\n", + fb->offsets[color_plane], color_plane); return -EINVAL; } @@ -2548,9 +2560,10 @@ static int intel_fb_offset_to_xy(int *x, int *y, /* Catch potential overflows early */ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]), fb->offsets[color_plane])) { - DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n", - fb->offsets[color_plane], fb->pitches[color_plane], - color_plane); + drm_dbg_kms(&dev_priv->drm, + "Bad offset 0x%08x or pitch %d for color plane %d\n", + fb->offsets[color_plane], fb->pitches[color_plane], + color_plane); return -ERANGE; } @@ -2706,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, /* * We assume the primary plane for pipe A has - * the highest stride limits of them all. + * the highest stride limits of them all; + * if pipe A is disabled, use the first pipe from pipe_mask.
*/ - crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + crtc = intel_get_first_crtc(dev_priv); if (!crtc) return 0; @@ -3034,8 +3048,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, ret = intel_fb_offset_to_xy(&x, &y, fb, i); if (ret) { - DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", - i, fb->offsets[i]); + drm_dbg_kms(&dev_priv->drm, + "bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); return ret; } @@ -3054,8 +3069,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, */ if (i == 0 && i915_gem_object_is_tiled(obj) && (x + width) * cpp > fb->pitches[i]) { - DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", - i, fb->offsets[i]); + drm_dbg_kms(&dev_priv->drm, + "bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); return -EINVAL; } @@ -3111,8 +3127,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, } if (mul_u32_u32(max_size, tile_size) > obj->base.size) { - DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n", - mul_u32_u32(max_size, tile_size), obj->base.size); + drm_dbg_kms(&dev_priv->drm, + "fb too big for bo (need %llu bytes, have %zu bytes)\n", + mul_u32_u32(max_size, tile_size), obj->base.size); return -EINVAL; } @@ -3143,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - WARN_ON(is_ccs_modifier(fb->modifier)); + drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ drm_rect_translate(&plane_state->uapi.src, @@ -3184,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) DRM_MODE_ROTATE_0, tile_size); offset /= tile_size; - WARN_ON(i >= ARRAY_SIZE(info->plane)); + drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane)); info->plane[i].offset = offset; info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); @@ -3377,6 +3394,67 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) } } +static struct i915_vma * +initial_plane_vma(struct drm_i915_private *i915, + struct intel_initial_plane_config *plane_config) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 base, size; + + if (plane_config->size == 0) + return NULL; + + base = round_down(plane_config->base, + I915_GTT_MIN_ALIGNMENT); + size = round_up(plane_config->base + plane_config->size, + I915_GTT_MIN_ALIGNMENT); + size -= base; + + /* + * If the FB is too big, just don't use it since fbdev is not very + * important and we should probably use that space with FBC or other + * features. 
+ */ + if (size * 2 > i915->stolen_usable_size) + return NULL; + + obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size); + if (IS_ERR(obj)) + return NULL; + + switch (plane_config->tiling) { + case I915_TILING_NONE: + break; + case I915_TILING_X: + case I915_TILING_Y: + obj->tiling_and_stride = + plane_config->fb->base.pitches[0] | + plane_config->tiling; + break; + default: + MISSING_CASE(plane_config->tiling); + goto err_obj; + } + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) + goto err_obj; + + if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base)) + goto err_obj; + + if (i915_gem_object_is_tiled(obj) && + !i915_vma_is_map_and_fenceable(vma)) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return NULL; +} + static bool intel_alloc_initial_plane_obj(struct intel_crtc *crtc, struct intel_initial_plane_config *plane_config) @@ -3385,22 +3463,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_framebuffer *fb = &plane_config->fb->base; - u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); - u32 size_aligned = round_up(plane_config->base + plane_config->size, - PAGE_SIZE); - struct drm_i915_gem_object *obj; - bool ret = false; - - size_aligned -= base_aligned; - - if (plane_config->size == 0) - return false; - - /* If the FB is too big, just don't use it since fbdev is not very - * important and we should probably use that space with FBC or other - * features. */ - if (size_aligned * 2 > dev_priv->stolen_usable_size) - return false; + struct i915_vma *vma; switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: @@ -3408,30 +3471,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, case I915_FORMAT_MOD_Y_TILED: break; default: - DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n", - fb->modifier); + drm_dbg(&dev_priv->drm, + "Unsupported modifier for initial FB: 0x%llx\n", + fb->modifier); return false; } - obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, - base_aligned, - base_aligned, - size_aligned); - if (IS_ERR(obj)) + vma = initial_plane_vma(dev_priv, plane_config); + if (!vma) return false; - switch (plane_config->tiling) { - case I915_TILING_NONE: - break; - case I915_TILING_X: - case I915_TILING_Y: - obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling; - break; - default: - MISSING_CASE(plane_config->tiling); - goto out; - } - mode_cmd.pixel_format = fb->format->format; mode_cmd.width = fb->width; mode_cmd.height = fb->height; @@ -3439,17 +3488,18 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.modifier[0] = fb->modifier; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; - if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { - DRM_DEBUG_KMS("intel fb init failed\n"); - goto out; + if (intel_framebuffer_init(to_intel_framebuffer(fb), + vma->obj, &mode_cmd)) { + drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n"); + goto err_vma; } + plane_config->vma = vma; + return true; - DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); - ret = true; -out: - i915_gem_object_put(obj); - return ret; +err_vma: + i915_vma_put(vma); + return false; } static void @@ -3493,9 +3543,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", - plane->base.base.id, plane->base.name, - 
crtc->base.base.id, crtc->base.name); + drm_dbg_kms(&dev_priv->drm, + "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", + plane->base.base.id, plane->base.name, + crtc->base.base.id, crtc->base.name); intel_set_plane_visible(crtc_state, plane_state, false); fixup_active_planes(crtc_state); @@ -3547,17 +3598,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_plane_state *intel_state = to_intel_plane_state(plane_state); struct drm_framebuffer *fb; + struct i915_vma *vma; if (!plane_config->fb) return; if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { fb = &plane_config->fb->base; + vma = plane_config->vma; goto valid_fb; } - kfree(plane_config->fb); - /* * Failed to alloc the obj, check to see if we should share * an fb with another CRTC instead @@ -3577,7 +3628,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, if (intel_plane_ggtt_offset(state) == plane_config->base) { fb = state->hw.fb; - drm_framebuffer_get(fb); + vma = state->vma; goto valid_fb; } } @@ -3600,21 +3651,11 @@ valid_fb: intel_state->color_plane[0].stride = intel_fb_pitch(fb, 0, intel_state->hw.rotation); - intel_state->vma = - intel_pin_and_fence_fb_obj(fb, - &intel_state->view, - intel_plane_uses_fence(intel_state), - &intel_state->flags); - if (IS_ERR(intel_state->vma)) { - DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", - intel_crtc->pipe, PTR_ERR(intel_state->vma)); - - intel_state->vma = NULL; - drm_framebuffer_put(fb); - return; - } - - intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); + __i915_vma_pin(vma); + intel_state->vma = i915_vma_get(vma); + if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0) + if (vma->fence) + intel_state->flags |= PLANE_HAS_FENCE; plane_state->src_x = 0; plane_state->src_y = 0; @@ -3633,9 +3674,13 @@ valid_fb: dev_priv->preserve_bios_swizzle = true; plane_state->fb = fb; + drm_framebuffer_get(fb); + plane_state->crtc = &intel_crtc->base; intel_plane_copy_uapi_to_hw_state(intel_state, intel_state); + intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); + atomic_or(to_intel_plane(primary)->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); } @@ -3798,15 +3843,16 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) max_height = skl_max_plane_height(); if (w > max_width || h > max_height) { - DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", - w, h, max_width, max_height); + drm_dbg_kms(&dev_priv->drm, + "requested Y/RGB source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); return -EINVAL; } intel_add_fb_offsets(&x, &y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); alignment = intel_surf_alignment(fb, 0); - if (WARN_ON(alignment && !is_power_of_2(alignment))) + if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) return -EINVAL; /* @@ -3829,7 +3875,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) while ((x + w) * cpp > plane_state->color_plane[0].stride) { if (offset == 0) { - DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); + drm_dbg_kms(&dev_priv->drm, + "Unable to find suitable display surface offset due to X-tiling\n"); return -EINVAL; } @@ -3854,7 +3901,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) if (x != plane_state->color_plane[aux_plane].x || y != plane_state->color_plane[aux_plane].y) { - DRM_DEBUG_KMS("Unable to find suitable display surface offset due to 
CCS\n"); + drm_dbg_kms(&dev_priv->drm, + "Unable to find suitable display surface offset due to CCS\n"); return -EINVAL; } } @@ -3875,6 +3923,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int uv_plane = 1; @@ -3892,8 +3941,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) /* FIXME not quite sure how/if these apply to the chroma plane */ if (w > max_width || h > max_height) { - DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", - w, h, max_width, max_height); + drm_dbg_kms(&i915->drm, + "CbCr source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); return -EINVAL; } @@ -3922,7 +3972,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) if (x != plane_state->color_plane[ccs_plane].x || y != plane_state->color_plane[ccs_plane].y) { - DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); + drm_dbg_kms(&i915->drm, + "Unable to find suitable display surface offset due to CCS\n"); return -EINVAL; } } @@ -4331,7 +4382,8 @@ static void i9xx_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); + intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), + plane_state->color_plane[0].stride); if (INTEL_GEN(dev_priv) < 4) { /* @@ -4339,21 +4391,26 @@ static void i9xx_update_plane(struct intel_plane *plane, * generator but let's assume we still need to * program whatever is there. */ - I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(DSPSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), + ((crtc_h - 1) << 16) | (crtc_w - 1)); } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { - I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(PRIMSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); - I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), + ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x); + intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), + (y << 16) | x); } else if (INTEL_GEN(dev_priv) >= 4) { - I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset); - I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x); + intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), + linear_offset); + intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), + (y << 16) | x); } /* @@ -4361,15 +4418,13 @@ static void i9xx_update_plane(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); + intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); if (INTEL_GEN(dev_priv) >= 4) - I915_WRITE_FW(DSPSURF(i9xx_plane), - intel_plane_ggtt_offset(plane_state) + - dspaddr_offset); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); else - I915_WRITE_FW(DSPADDR(i9xx_plane), - intel_plane_ggtt_offset(plane_state) + - dspaddr_offset); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -4396,11 +4451,11 @@ static void i9xx_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr); + intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); if (INTEL_GEN(dev_priv) >= 4) - I915_WRITE_FW(DSPSURF(i9xx_plane), 0); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); else - I915_WRITE_FW(DSPADDR(i9xx_plane), 0); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -4425,7 +4480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - val = I915_READ(DSPCNTR(i9xx_plane)); + val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); ret = val & DISPLAY_PLANE_ENABLE; @@ -4444,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); - I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); - I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); - I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } /* @@ -4791,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev, ret = drm_atomic_helper_commit_duplicated_state(state, ctx); - WARN_ON(ret == -EDEADLK); + drm_WARN_ON(dev, ret == -EDEADLK); return ret; } @@ -4819,7 +4879,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { - DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); + drm_dbg_kms(&dev_priv->drm, + "Modeset potentially stuck, unbreaking through wedging\n"); intel_gt_set_wedged(&dev_priv->gt); } @@ -4843,13 +4904,15 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) state = drm_atomic_helper_duplicate_state(dev, ctx); if (IS_ERR(state)) { ret = PTR_ERR(state); - DRM_ERROR("Duplicating state failed with %i\n", ret); + drm_err(&dev_priv->drm, "Duplicating state failed with %i\n", + ret); return; } ret = drm_atomic_helper_disable_all(dev, ctx); if (ret) { - DRM_ERROR("Suspending crtc's failed with %i\n", ret); + drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", + ret); drm_atomic_state_put(state); return; } @@ -4878,7 +4941,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) /* for testing only restore the display */ ret = __intel_display_resume(dev, state, ctx); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(&dev_priv->drm, + "Restoring old state failed 
with %i\n", ret); } else { /* * The display has been reset as well, @@ -4895,7 +4959,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) ret = __intel_display_resume(dev, state, ctx); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(&dev_priv->drm, + "Restoring old state failed with %i\n", ret); intel_hpd_init(dev_priv); } @@ -4915,7 +4980,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) enum pipe pipe = crtc->pipe; u32 tmp; - tmp = I915_READ(PIPE_CHICKEN(pipe)); + tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe)); /* * Display WA #1153: icl @@ -4930,7 +4995,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) * across pipe */ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; - I915_WRITE(PIPE_CHICKEN(pipe), tmp); + intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) @@ -4959,8 +5024,9 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state /* Enable Transcoder Port Sync */ trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; - I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), - trans_ddi_func_ctl2_val); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), + trans_ddi_func_ctl2_val); } static void intel_fdi_normal_train(struct intel_crtc *crtc) @@ -4973,7 +5039,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) /* enable normal train */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if (IS_IVYBRIDGE(dev_priv)) { temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; @@ -4981,10 +5047,10 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; } - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_NORMAL_CPT; @@ -4992,16 +5058,16 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_NONE; } - I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); /* wait one idle pattern time */ - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(1000); /* IVB wants error correction enabled */ if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | - FDI_FE_ERRC_ENABLE); + intel_de_write(dev_priv, reg, + intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } /* The FDI link training functions for ILK/Ibexpeak. 
*/ @@ -5020,81 +5086,83 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc, /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(reg, temp); - I915_READ(reg); + intel_de_write(dev_priv, reg, temp); + intel_de_read(dev_priv, reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(reg, temp | FDI_TX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(reg, temp | FDI_RX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); /* Ironlake workaround, enable clock pointer after FDI enable*/ - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | - FDI_RX_PHASE_SYNC_POINTER_EN); + intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), + FDI_RX_PHASE_SYNC_POINTER_OVR); + intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), + FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN); reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if ((temp & FDI_RX_BIT_LOCK)) { - DRM_DEBUG_KMS("FDI train 1 done.\n"); - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n"); + intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK); break; } } if (tries == 5) - DRM_ERROR("FDI train 1 fail!\n"); + drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done.\n"); + intel_de_write(dev_priv, reg, + temp | FDI_RX_SYMBOL_LOCK); + drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n"); break; } } if (tries == 5) - DRM_ERROR("FDI train 2 fail!\n"); + drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); - DRM_DEBUG_KMS("FDI train done\n"); + drm_dbg_kms(&dev_priv->drm, "FDI train done\n"); } @@ -5118,17 +5186,17 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); - temp = I915_READ(reg); + temp = 
intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp &= ~FDI_LINK_TRAIN_NONE; @@ -5136,13 +5204,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - I915_WRITE(reg, temp | FDI_TX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + intel_de_write(dev_priv, FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; @@ -5150,28 +5218,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - I915_WRITE(reg, temp | FDI_RX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(500); for (retry = 0; retry < 5; retry++) { reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done.\n"); + intel_de_write(dev_priv, reg, + temp | FDI_RX_BIT_LOCK); + drm_dbg_kms(&dev_priv->drm, + "FDI train 1 done.\n"); break; } udelay(50); @@ -5180,11 +5250,11 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, break; } if (i == 4) - DRM_ERROR("FDI train 1 fail!\n"); + drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; if (IS_GEN(dev_priv, 6)) { @@ -5192,10 +5262,10 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; } - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; @@ -5203,28 +5273,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; } - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); for (i = 0; i < 4; i++) { reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[i]; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + 
intel_de_posting_read(dev_priv, reg); udelay(500); for (retry = 0; retry < 5; retry++) { reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done.\n"); + intel_de_write(dev_priv, reg, + temp | FDI_RX_SYMBOL_LOCK); + drm_dbg_kms(&dev_priv->drm, + "FDI train 2 done.\n"); break; } udelay(50); @@ -5233,9 +5305,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, break; } if (i == 4) - DRM_ERROR("FDI train 2 fail!\n"); + drm_err(&dev_priv->drm, "FDI train 2 fail!\n"); - DRM_DEBUG_KMS("FDI train done.\n"); + drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); } /* Manual link training for Ivy Bridge A0 parts */ @@ -5251,111 +5323,117 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ reg = FDI_RX_IMR(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_RX_SYMBOL_LOCK; temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(150); - DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", - I915_READ(FDI_RX_IIR(pipe))); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n", + intel_de_read(dev_priv, FDI_RX_IIR(pipe))); /* Try each vswing and preemphasis setting twice before moving on */ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { /* disable first in case we need to retry */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); temp &= ~FDI_TX_ENABLE; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_AUTO; temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp &= ~FDI_RX_ENABLE; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_DP_PORT_WIDTH_MASK; temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; temp |= snb_b_fdi_train_param[j/2]; temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_TX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE); - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); + intel_de_write(dev_priv, FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_RX_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(1); /* should be 0.5us */ for (i = 0; i < 4; i++) { reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_BIT_LOCK || - (I915_READ(reg) & FDI_RX_BIT_LOCK)) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", - i); + (intel_de_read(dev_priv, 
reg) & FDI_RX_BIT_LOCK)) { + intel_de_write(dev_priv, reg, + temp | FDI_RX_BIT_LOCK); + drm_dbg_kms(&dev_priv->drm, + "FDI train 1 done, level %i.\n", + i); break; } udelay(1); /* should be 0.5us */ } if (i == 4) { - DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); + drm_dbg_kms(&dev_priv->drm, + "FDI train 1 fail on vswing %d\n", j / 2); continue; } /* Train 2 */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(2); /* should be 1.5us */ for (i = 0; i < 4; i++) { reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + temp = intel_de_read(dev_priv, reg); + drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp); if (temp & FDI_RX_SYMBOL_LOCK || - (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", - i); + (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) { + intel_de_write(dev_priv, reg, + temp | FDI_RX_SYMBOL_LOCK); + drm_dbg_kms(&dev_priv->drm, + "FDI train 2 done, level %i.\n", + i); goto train_done; } udelay(2); /* should be 1.5us */ } if (i == 4) - DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); + drm_dbg_kms(&dev_priv->drm, + "FDI train 2 fail on vswing %d\n", j / 2); } train_done: - DRM_DEBUG_KMS("FDI train done.\n"); + drm_dbg_kms(&dev_priv->drm, "FDI train done.\n"); } static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) @@ -5368,29 +5446,29 @@ static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); - temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; - I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); + temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(200); /* Switch from Rawclk to PCDclk */ - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_PCDCLK); + temp = intel_de_read(dev_priv, reg); + intel_de_write(dev_priv, reg, temp | FDI_PCDCLK); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(200); /* Enable CPU FDI TX PLL, always on for Ironlake */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if ((temp & FDI_TX_PLL_ENABLE) == 0) { - I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); + intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(100); } } @@ -5405,23 +5483,23 @@ static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc) /* Switch from PCDclk to Rawclk */ reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_PCDCLK); + temp = intel_de_read(dev_priv, reg); + intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK); /* Disable CPU FDI TX PLL */ reg = FDI_TX_CTL(pipe); - temp = 
I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); + temp = intel_de_read(dev_priv, reg); + intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(100); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); + temp = intel_de_read(dev_priv, reg); + intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE); /* Wait for the clocks to turn off. */ - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(100); } @@ -5434,32 +5512,33 @@ static void ilk_fdi_disable(struct intel_crtc *crtc) /* disable CPU FDI tx and PCH FDI rx */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp & ~FDI_TX_ENABLE); - POSTING_READ(reg); + temp = intel_de_read(dev_priv, reg); + intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); + intel_de_posting_read(dev_priv, reg); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~(0x7 << 16); - temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; - I915_WRITE(reg, temp & ~FDI_RX_ENABLE); + temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(100); /* Ironlake workaround, disable clock pointer after downing FDI */ if (HAS_PCH_IBX(dev_priv)) - I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); + intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe), + FDI_RX_PHASE_SYNC_POINTER_OVR); /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; @@ -5469,10 +5548,10 @@ static void ilk_fdi_disable(struct intel_crtc *crtc) } /* BPC in FDI rx is consistent with that in PIPECONF */ temp &= ~(0x07 << 16); - temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; - I915_WRITE(reg, temp); + temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + intel_de_write(dev_priv, reg, temp); - POSTING_READ(reg); + intel_de_posting_read(dev_priv, reg); udelay(100); } @@ -5505,7 +5584,7 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv) { u32 temp; - I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); mutex_lock(&dev_priv->sb_lock); @@ -5552,17 +5631,14 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) } /* This should not happen with any sane values */ - WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & - ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); - WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & - ~SBI_SSCDIVINTPHASE_INCVAL_MASK); - - DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", - clock, - auxdiv, - divsel, - phasedir, - phaseinc); + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK); + + drm_dbg_kms(&dev_priv->drm, + "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + clock, auxdiv, divsel, 
phasedir, phaseinc); mutex_lock(&dev_priv->sb_lock); @@ -5592,7 +5668,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) /* Wait for initialization time */ udelay(24); - I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); } int lpt_get_iclkip(struct drm_i915_private *dev_priv) @@ -5603,7 +5679,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) u32 desired_divisor; u32 temp; - if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) return 0; mutex_lock(&dev_priv->sb_lock); @@ -5639,41 +5715,46 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), - I915_READ(HTOTAL(cpu_transcoder))); - I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), - I915_READ(HBLANK(cpu_transcoder))); - I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), - I915_READ(HSYNC(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), + intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), + intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), + intel_de_read(dev_priv, HSYNC(cpu_transcoder))); - I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), - I915_READ(VTOTAL(cpu_transcoder))); - I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), - I915_READ(VBLANK(cpu_transcoder))); - I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), - I915_READ(VSYNC(cpu_transcoder))); - I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), - I915_READ(VSYNCSHIFT(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), + intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), + intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), + intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); } static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) { u32 temp; - temp = I915_READ(SOUTH_CHICKEN1); + temp = intel_de_read(dev_priv, SOUTH_CHICKEN1); if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) return; - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) & + FDI_RX_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) & + FDI_RX_ENABLE); temp &= ~FDI_BC_BIFURCATION_SELECT; if (enable) temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); - I915_WRITE(SOUTH_CHICKEN1, temp); - POSTING_READ(SOUTH_CHICKEN1); + drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n", + enable ? 
"en" : "dis"); + intel_de_write(dev_priv, SOUTH_CHICKEN1, temp); + intel_de_posting_read(dev_priv, SOUTH_CHICKEN1); } static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) @@ -5723,8 +5804,9 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, num_encoders++; } - WARN(num_encoders != 1, "%d encoders for pipe %c\n", - num_encoders, pipe_name(crtc->pipe)); + drm_WARN(encoder->base.dev, num_encoders != 1, + "%d encoders for pipe %c\n", + num_encoders, pipe_name(crtc->pipe)); return encoder; } @@ -5753,8 +5835,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state, /* Write the TU size bits before fdi link training, so that error * detection works. */ - I915_WRITE(FDI_RX_TUSIZE1(pipe), - I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); + intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe), + intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc, crtc_state); @@ -5764,7 +5846,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state, if (HAS_PCH_CPT(dev_priv)) { u32 sel; - temp = I915_READ(PCH_DPLL_SEL); + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); temp |= TRANS_DPLL_ENABLE(pipe); sel = TRANS_DPLLB_SEL(pipe); if (crtc_state->shared_dpll == @@ -5772,7 +5854,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state, temp |= sel; else temp &= ~sel; - I915_WRITE(PCH_DPLL_SEL, temp); + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); } /* XXX: pch pll's can be enabled any time before we enable the PCH @@ -5795,11 +5877,11 @@ static void ilk_pch_enable(const struct intel_atomic_state *state, intel_crtc_has_dp_encoder(crtc_state)) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); enum port port; - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_SYNC_MASK | TRANS_DP_BPC_MASK); @@ -5812,17 +5894,16 @@ static void ilk_pch_enable(const struct intel_atomic_state *state, temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; port = intel_get_crtc_new_encoder(state, crtc_state)->port; - WARN_ON(port < PORT_B || port > PORT_D); + drm_WARN_ON(dev, port < PORT_B || port > PORT_D); temp |= TRANS_DP_PORT_SEL(port); - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); } ilk_enable_pch_transcoder(crtc_state); } -static void lpt_pch_enable(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +void lpt_pch_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -5844,11 +5925,13 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv, i915_reg_t dslreg = PIPEDSL(pipe); u32 temp; - temp = I915_READ(dslreg); + temp = intel_de_read(dev_priv, dslreg); udelay(500); - if (wait_for(I915_READ(dslreg) != temp, 5)) { - if (wait_for(I915_READ(dslreg) != temp, 5)) - DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); + if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) { + if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) + drm_err(&dev_priv->drm, + "mode set failed: pipe %c stuck\n", + pipe_name(pipe)); } } @@ -5963,7 +6046,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, */ if 
(INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable && need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); + drm_dbg_kms(&dev_priv->drm, + "Pipe/Plane scaling not supported with IF-ID mode\n"); return -EINVAL; } @@ -5982,10 +6066,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, scaler_state->scaler_users &= ~(1 << scaler_user); scaler_state->scalers[*scaler_id].in_use = 0; - DRM_DEBUG_KMS("scaler_user index %u.%u: " - "Staged freeing scaler id %d scaler_users = 0x%x\n", - intel_crtc->pipe, scaler_user, *scaler_id, - scaler_state->scaler_users); + drm_dbg_kms(&dev_priv->drm, + "scaler_user index %u.%u: " + "Staged freeing scaler id %d scaler_users = 0x%x\n", + intel_crtc->pipe, scaler_user, *scaler_id, + scaler_state->scaler_users); *scaler_id = -1; } return 0; @@ -5993,7 +6078,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, if (format && intel_format_info_is_yuv_semiplanar(format, modifier) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { - DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); + drm_dbg_kms(&dev_priv->drm, + "Planar YUV: src dimensions not met\n"); return -EINVAL; } @@ -6006,18 +6092,20 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, (INTEL_GEN(dev_priv) < 11 && (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { - DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " - "size is out of scaler range\n", - intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); + drm_dbg_kms(&dev_priv->drm, + "scaler_user index %u.%u: src %ux%u dst %ux%u " + "size is out of scaler range\n", + intel_crtc->pipe, scaler_user, src_w, src_h, + dst_w, dst_h); return -EINVAL; } /* mark this plane as a scaler user in crtc_state */ scaler_state->scaler_users |= (1 << scaler_user); - DRM_DEBUG_KMS("scaler_user index %u.%u: " - "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", - intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, - scaler_state->scaler_users); + drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: " + "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", + intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, + scaler_state->scaler_users); return 0; } @@ -6036,7 +6124,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode; bool need_scaler = false; - if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || + state->pch_pfit.enabled) need_scaler = true; return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX, @@ -6088,9 +6177,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* check colorkey */ if (plane_state->ckey.flags) { - DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", - intel_plane->base.base.id, - intel_plane->base.name); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); return -EINVAL; } @@ -6128,9 +6218,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, break; /* fall through */ default: - DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, intel_plane->base.name, - fb->base.id, fb->format->format); + 
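/*
 * Editor's aside -- an illustrative sketch, not part of the commit.
 * skl_update_scaler() rejects scaling requests whose source or destination
 * size falls outside the hardware limits and returns -EINVAL. The bounds
 * below are made up; the real ones are the SKL_{MIN,MAX}_{SRC,DST}_{W,H}
 * constants in the driver:
 */
#include <errno.h>
#include <stdio.h>

#define FAKE_MIN_SRC 8
#define FAKE_MAX_SRC 4096
#define FAKE_MAX_DST 4096

static int fake_check_scaler(int src_w, int src_h, int dst_w, int dst_h)
{
	if (src_w < FAKE_MIN_SRC || src_h < FAKE_MIN_SRC ||
	    src_w > FAKE_MAX_SRC || src_h > FAKE_MAX_SRC ||
	    dst_w > FAKE_MAX_DST || dst_h > FAKE_MAX_DST) {
		fprintf(stderr, "src %dx%d dst %dx%d out of scaler range\n",
			src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("ok=%d\n", fake_check_scaler(1920, 1080, 3840, 2160));
	printf("bad=%d\n", fake_check_scaler(2, 2, 8192, 8192));
	return 0;
}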
drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->format->format); return -EINVAL; } @@ -6157,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) if (crtc_state->pch_pfit.enabled) { u16 uv_rgb_hphase, uv_rgb_vphase; int pfit_w, pfit_h, hscale, vscale; + unsigned long irqflags; int id; - if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) + if (drm_WARN_ON(&dev_priv->drm, + crtc_state->scaler_state.scaler_id < 0)) return; pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; @@ -6172,14 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state) uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); id = scaler_state->scaler_id; - I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | - PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); - I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), - PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), - PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); - I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN | + PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id), + crtc_state->pch_pfit.pos); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id), + crtc_state->pch_pfit.size); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } } @@ -6195,12 +6295,15 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state) * e.g. x201. */ if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | - PF_PIPE_SEL_IVB(pipe)); + intel_de_write(dev_priv, PF_CTL(pipe), + PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); else - I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); - I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); - I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); + intel_de_write(dev_priv, PF_CTL(pipe), + PF_ENABLE | PF_FILTER_MED_3x3); + intel_de_write(dev_priv, PF_WIN_POS(pipe), + crtc_state->pch_pfit.pos); + intel_de_write(dev_priv, PF_WIN_SZ(pipe), + crtc_state->pch_pfit.size); } } @@ -6218,25 +6321,26 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state) * This function is called from post_plane_update, which is run after * a vblank wait. */ - WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); + drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); if (IS_BROADWELL(dev_priv)) { - WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, - IPS_ENABLE | IPS_PCODE_CONTROL)); + drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, + IPS_ENABLE | IPS_PCODE_CONTROL)); /* Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the * mailbox." Moreover, the mailbox may return a bogus state, * so we need to just enable it and continue on. 
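/*
 * Editor's aside -- an illustrative sketch, not part of the commit. The
 * skl_pfit_enable() hunk switches to intel_de_write_fw() and brackets the
 * five scaler writes with one uncore spinlock (irqsave variant) so the
 * group is programmed atomically and per-write locking overhead is paid
 * once. Userspace sketch with a mutex standing in for the spinlock; all
 * fake_* names are invented:
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t fake_uncore_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fake_regs[16];

/* "_fw" flavour: the caller already holds the lock, no per-write locking. */
static void fake_write_fw(unsigned int reg, uint32_t val)
{
	fake_regs[reg] = val;
}

static void fake_program_scaler(uint32_t ctrl, uint32_t pos, uint32_t sz)
{
	pthread_mutex_lock(&fake_uncore_lock);   /* ~spin_lock_irqsave() */
	fake_write_fw(0, ctrl);
	fake_write_fw(1, pos);
	fake_write_fw(2, sz);
	pthread_mutex_unlock(&fake_uncore_lock); /* ~spin_unlock_irqrestore() */
}

int main(void)
{
	fake_program_scaler(0x80000000u, 0, (1920u << 16) | 1080u);
	printf("win_sz=0x%08x\n", fake_regs[2]);
	return 0;
}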
*/ } else { - I915_WRITE(IPS_CTL, IPS_ENABLE); + intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE); /* The bit only becomes 1 in the next vblank, so this wait here * is essentially intel_wait_for_vblank. If we don't have this * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50)) - DRM_ERROR("Timed out waiting for IPS enable\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for IPS enable\n"); } } @@ -6250,17 +6354,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) return; if (IS_BROADWELL(dev_priv)) { - WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); + drm_WARN_ON(dev, + sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100)) - DRM_ERROR("Timed out waiting for IPS disable\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for IPS disable\n"); } else { - I915_WRITE(IPS_CTL, 0); - POSTING_READ(IPS_CTL); + intel_de_write(dev_priv, IPS_CTL, 0); + intel_de_posting_read(dev_priv, IPS_CTL); } /* We need to wait for a vblank before we can disable the plane. */ @@ -6382,13 +6488,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_plane *primary = to_intel_plane(crtc->base.primary); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *new_primary_state = - intel_atomic_get_new_plane_state(state, primary); enum pipe pipe = crtc->pipe; intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits); @@ -6399,8 +6502,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state)) hsw_enable_ips(new_crtc_state); - if (new_primary_state) - intel_fbc_post_update(crtc); + intel_fbc_post_update(state, crtc); if (needs_nv12_wa(old_crtc_state) && !needs_nv12_wa(new_crtc_state)) @@ -6415,20 +6517,16 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_plane *primary = to_intel_plane(crtc->base.primary); const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *new_primary_state = - intel_atomic_get_new_plane_state(state, primary); enum pipe pipe = crtc->pipe; if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) hsw_disable_ips(old_crtc_state); - if (new_primary_state && - intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state)) + if (intel_fbc_pre_update(state, crtc)) intel_wait_for_vblank(dev_priv, pipe); /* Display WA 827 */ @@ -6770,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (WARN_ON(crtc->active)) + if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; /* @@ -6863,7 +6961,7 @@ static bool 
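/*
 * Editor's aside -- an illustrative sketch, not part of the commit.
 * hsw_enable_ips()/hsw_disable_ips() rely on intel_de_wait_for_set()/
 * intel_de_wait_for_clear(): poll a register until a bit appears (or
 * clears) or a millisecond timeout expires. A minimal clock_gettime()
 * model of that helper; the fake_* names are invented:
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_read(const volatile uint32_t *reg) { return *reg; }

static int fake_wait_for_set(const volatile uint32_t *reg, uint32_t mask,
			     unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (fake_read(reg) & mask)
			return 0;                      /* bit came up */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >=
		    (long)timeout_ms)
			return -1;                     /* timed out */
	}
}

int main(void)
{
	volatile uint32_t ips_ctl = 1u << 31;  /* pretend HW set the bit */

	if (fake_wait_for_set(&ips_ctl, 1u << 31, 50))
		fprintf(stderr, "Timed out waiting for IPS enable\n");
	else
		printf("IPS enabled\n");
	return 0;
}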
hsw_crtc_supports_ips(struct intel_crtc *crtc) static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, enum pipe pipe, bool apply) { - u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); + u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; if (apply) @@ -6871,7 +6969,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, else val &= ~mask; - I915_WRITE(CLKGATE_DIS_PSL(pipe), val); + intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); } static void icl_pipe_mbus_enable(struct intel_crtc *crtc) @@ -6890,7 +6988,17 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) val |= MBUS_DBOX_B_CREDIT(8); } - I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); + intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val); +} + +static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), + HSW_LINETIME(crtc_state->linetime) | + HSW_IPS_LINETIME(crtc_state->ips_linetime)); } static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) @@ -6900,10 +7008,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~HSW_FRAME_START_DELAY_MASK; val |= HSW_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } static void hsw_crtc_enable(struct intel_atomic_state *state, @@ -6916,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; bool psl_clkgate_wa; - if (WARN_ON(crtc->active)) + if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; intel_encoders_pre_pll_enable(state, crtc); @@ -6926,9 +7034,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); - if (!transcoder_is_dsi(cpu_transcoder)) intel_set_pipe_timings(new_crtc_state); @@ -6939,8 +7044,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(cpu_transcoder)) - I915_WRITE(PIPE_MULT(cpu_transcoder), - new_crtc_state->pixel_multiplier - 1); + intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + new_crtc_state->pixel_multiplier - 1); if (new_crtc_state->has_pch_encoder) intel_cpu_transcoder_set_m_n(new_crtc_state, @@ -6977,6 +7082,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) < 9) intel_disable_primary_plane(new_crtc_state); + hsw_set_linetime_wm(new_crtc_state); + if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(crtc); @@ -6989,15 +7096,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(crtc); - /* XXX: Do the pipe assertions at the right place for BXT DSI. 
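/*
 * Editor's aside -- an illustrative sketch, not part of the commit. The
 * newly added hsw_set_linetime_wm() packs two precomputed watermark fields
 * into a single WM_LINETIME register write. The field layout below (low
 * half / high half, 9-bit fields) is a hypothetical simplification purely
 * to show the packing idiom:
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_LINETIME(x)     ((uint32_t)(x) & 0x1ff)
#define FAKE_IPS_LINETIME(x) (((uint32_t)(x) & 0x1ff) << 16)

int main(void)
{
	uint32_t val = FAKE_LINETIME(130) | FAKE_IPS_LINETIME(144);

	printf("WM_LINETIME <- 0x%08x (linetime=%u, ips=%u)\n",
	       val, val & 0x1ff, (val >> 16) & 0x1ff);
	return 0;
}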
*/ - if (!transcoder_is_dsi(cpu_transcoder)) - intel_enable_pipe(new_crtc_state); - - if (new_crtc_state->has_pch_encoder) - lpt_pch_enable(state, new_crtc_state); - - intel_crtc_vblank_on(new_crtc_state); - intel_encoders_enable(state, crtc); if (psl_clkgate_wa) { @@ -7023,9 +7121,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) /* To avoid upsetting the power well on haswell only disable the pfit if * it's in use. The hw state code will make sure we get this right. */ if (old_crtc_state->pch_pfit.enabled) { - I915_WRITE(PF_CTL(pipe), 0); - I915_WRITE(PF_WIN_POS(pipe), 0); - I915_WRITE(PF_WIN_SZ(pipe), 0); + intel_de_write(dev_priv, PF_CTL(pipe), 0); + intel_de_write(dev_priv, PF_WIN_POS(pipe), 0); + intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0); } } @@ -7067,16 +7165,16 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, /* disable TRANS_DP_CTL */ reg = TRANS_DP_CTL(pipe); - temp = I915_READ(reg); + temp = intel_de_read(dev_priv, reg); temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); temp |= TRANS_DP_PORT_SEL_NONE; - I915_WRITE(reg, temp); + intel_de_write(dev_priv, reg, temp); /* disable DPLL_SEL */ - temp = I915_READ(PCH_DPLL_SEL); + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - I915_WRITE(PCH_DPLL_SEL, temp); + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); } ilk_fdi_pll_disable(crtc); @@ -7109,15 +7207,17 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) * The panel fitter should only be adjusted whilst the pipe is disabled, * according to register description and PRM. */ - WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE); assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder); - I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); - I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); + intel_de_write(dev_priv, PFIT_PGM_RATIOS, + crtc_state->gmch_pfit.pgm_ratios); + intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control); /* Border color in case we don't scale up to the full screen. Black by * default, change to something else for debugging. 
*/ - I915_WRITE(BCLRPAT(crtc->pipe), 0); + intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0); } bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) @@ -7304,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (WARN_ON(crtc->active)) + if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; if (intel_crtc_has_dp_encoder(new_crtc_state)) @@ -7314,8 +7414,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, intel_set_pipe_src_size(new_crtc_state); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { - I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); - I915_WRITE(CHV_CANVAS(pipe), 0); + intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); + intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); } i9xx_set_pipeconf(new_crtc_state); @@ -7356,8 +7456,10 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); - I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); + intel_de_write(dev_priv, FP0(crtc->pipe), + crtc_state->dpll_hw_state.fp0); + intel_de_write(dev_priv, FP1(crtc->pipe), + crtc_state->dpll_hw_state.fp1); } static void i9xx_crtc_enable(struct intel_atomic_state *state, @@ -7368,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (WARN_ON(crtc->active)) + if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; i9xx_set_pll_dividers(new_crtc_state); @@ -7418,9 +7520,9 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder); - DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n", - I915_READ(PFIT_CONTROL)); - I915_WRITE(PFIT_CONTROL, 0); + drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n", + intel_de_read(dev_priv, PFIT_CONTROL)); + intel_de_write(dev_priv, PFIT_CONTROL, 0); } static void i9xx_crtc_disable(struct intel_atomic_state *state, @@ -7477,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_bw_state *bw_state = to_intel_bw_state(dev_priv->bw_obj.state); + struct intel_cdclk_state *cdclk_state = + to_intel_cdclk_state(dev_priv->cdclk.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); enum intel_display_power_domain domain; @@ -7500,8 +7604,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) { - DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", - crtc->base.base.id, crtc->base.name); + drm_dbg_kms(&dev_priv->drm, + "failed to disable [CRTC:%d:%s], out of memory", + crtc->base.base.id, crtc->base.name); return; } @@ -7511,19 +7616,21 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); ret = drm_atomic_add_affected_connectors(state, &crtc->base); - WARN_ON(IS_ERR(temp_crtc_state) || ret); + drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret); dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc); drm_atomic_state_put(state); - DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was 
enabled, now disabled\n", - crtc->base.base.id, crtc->base.name); + drm_dbg_kms(&dev_priv->drm, + "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", + crtc->base.base.id, crtc->base.name); crtc->active = false; crtc->base.enabled = false; - WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); + drm_WARN_ON(&dev_priv->drm, + drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); crtc_state->uapi.active = false; crtc_state->uapi.connector_mask = 0; crtc_state->uapi.encoder_mask = 0; @@ -7543,8 +7650,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, crtc->enabled_power_domains = 0; dev_priv->active_pipes &= ~BIT(pipe); - dev_priv->min_cdclk[pipe] = 0; - dev_priv->min_voltage_level[pipe] = 0; + cdclk_state->min_cdclk[pipe] = 0; + cdclk_state->min_voltage_level[pipe] = 0; + cdclk_state->active_pipes &= ~BIT(pipe); bw_state->data_rate[pipe] = 0; bw_state->num_active_planes[pipe] = 0; @@ -7563,7 +7671,8 @@ int intel_display_suspend(struct drm_device *dev) state = drm_atomic_helper_suspend(dev); ret = PTR_ERR_OR_ZERO(state); if (ret) - DRM_ERROR("Suspending crtc's failed with %i\n", ret); + drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n", + ret); else dev_priv->modeset_restore_state = state; return ret; @@ -7583,13 +7692,13 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.base.id, - connector->base.name); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n", + connector->base.base.id, connector->base.name); if (connector->get_hw_state(connector)) { - struct intel_encoder *encoder = connector->encoder; + struct intel_encoder *encoder = intel_attached_encoder(connector); I915_STATE_WARN(!crtc_state, "connector enabled without attached crtc\n"); @@ -7632,18 +7741,21 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc *other_crtc; struct intel_crtc_state *other_crtc_state; - DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", - pipe_name(pipe), pipe_config->fdi_lanes); + drm_dbg_kms(&dev_priv->drm, + "checking fdi config on pipe %c, lanes %i\n", + pipe_name(pipe), pipe_config->fdi_lanes); if (pipe_config->fdi_lanes > 4) { - DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", - pipe_name(pipe), pipe_config->fdi_lanes); + drm_dbg_kms(&dev_priv->drm, + "invalid fdi lane config on pipe %c: %i lanes\n", + pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { if (pipe_config->fdi_lanes > 2) { - DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", - pipe_config->fdi_lanes); + drm_dbg_kms(&dev_priv->drm, + "only 2 lanes on haswell, required: %i lanes\n", + pipe_config->fdi_lanes); return -EINVAL; } else { return 0; @@ -7668,15 +7780,17 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return PTR_ERR(other_crtc_state); if (pipe_required_fdi_lanes(other_crtc_state) > 0) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", - pipe_name(pipe), pipe_config->fdi_lanes); + drm_dbg_kms(&dev_priv->drm, + "invalid shared fdi lane config on pipe %c: %i lanes\n", + pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } return 0; case PIPE_C: if (pipe_config->fdi_lanes > 2) { - DRM_DEBUG_KMS("only 2 lanes on pipe %c: 
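/*
 * Editor's aside -- an illustrative sketch, not part of the commit. The
 * disable-noatomic hunk stops poking dev_priv->min_cdclk[] and
 * min_voltage_level[] directly and instead clears the per-pipe entries
 * inside the intel_cdclk_state object, so all cdclk bookkeeping lives in
 * one atomic-state-tracked struct. Sketch of that data movement with
 * invented types:
 */
#include <stdio.h>

#define FAKE_MAX_PIPES 4

struct fake_cdclk_state {
	int min_cdclk[FAKE_MAX_PIPES];
	int min_voltage_level[FAKE_MAX_PIPES];
	unsigned int active_pipes;   /* bitmask, one bit per pipe */
};

static void fake_pipe_disabled(struct fake_cdclk_state *s, int pipe)
{
	s->min_cdclk[pipe] = 0;
	s->min_voltage_level[pipe] = 0;
	s->active_pipes &= ~(1u << pipe);
}

int main(void)
{
	struct fake_cdclk_state s = {
		.min_cdclk = { 337500, 0, 0, 0 },
		.min_voltage_level = { 1, 0, 0, 0 },
		.active_pipes = 1u << 0,
	};

	fake_pipe_disabled(&s, 0);
	printf("active_pipes=0x%x min_cdclk[0]=%d\n",
	       s.active_pipes, s.min_cdclk[0]);
	return 0;
}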
required %i lanes\n", - pipe_name(pipe), pipe_config->fdi_lanes); + drm_dbg_kms(&dev_priv->drm, + "only 2 lanes on pipe %c: required %i lanes\n", + pipe_name(pipe), pipe_config->fdi_lanes); return -EINVAL; } @@ -7687,7 +7801,8 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return PTR_ERR(other_crtc_state); if (pipe_required_fdi_lanes(other_crtc_state) > 2) { - DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); + drm_dbg_kms(&dev_priv->drm, + "fdi link B uses too many lanes to enable link C\n"); return -EINVAL; } return 0; @@ -7701,6 +7816,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *i915 = to_i915(dev); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int lane, link_bw, fdi_dotclock, ret; bool needs_recompute = false; @@ -7713,7 +7829,7 @@ retry: * Hence the bw of each lane in terms of the mode signal * is: */ - link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); + link_bw = intel_fdi_link_freq(i915, pipe_config); fdi_dotclock = adjusted_mode->crtc_clock; @@ -7731,8 +7847,9 @@ retry: if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { pipe_config->pipe_bpp -= 2*3; - DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", - pipe_config->pipe_bpp); + drm_dbg_kms(&i915->drm, + "fdi link bw constraint, reducing pipe bpp to %i\n", + pipe_config->pipe_bpp); needs_recompute = true; pipe_config->bw_constrained = true; @@ -7774,15 +7891,17 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state) return true; } -static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) +static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct intel_atomic_state *intel_state = + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); + crtc_state->ips_enabled = false; + if (!hsw_crtc_state_ips_capable(crtc_state)) - return false; + return 0; /* * When IPS gets enabled, the pipe CRC changes. Since IPS gets @@ -7791,18 +7910,27 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) * completely disable it. */ if (crtc_state->crc_enabled) - return false; + return 0; /* IPS should be fine as long as at least one plane is enabled. 
*/ if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR))) - return false; + return 0; - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && - crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100) - return false; + if (IS_BROADWELL(dev_priv)) { + const struct intel_cdclk_state *cdclk_state; - return true; + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) + return 0; + } + + crtc_state->ips_enabled = true; + + return 0; } static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) @@ -7884,9 +8012,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } if (adjusted_mode->crtc_clock > clock_limit) { - DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, - yesno(pipe_config->double_wide)); + drm_dbg_kms(&dev_priv->drm, + "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); return -EINVAL; } @@ -7898,7 +8027,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * for output conversion from RGB->YCBCR. So if CTM is already * applied we can't support YCBCR420 output. */ - DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); + drm_dbg_kms(&dev_priv->drm, + "YCBCR420 and CTM together are not possible\n"); return -EINVAL; } @@ -7910,13 +8040,15 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, */ if (pipe_config->pipe_src_w & 1) { if (pipe_config->double_wide) { - DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n"); + drm_dbg_kms(&dev_priv->drm, + "Odd pipe source width not supported with double wide pipe\n"); return -EINVAL; } if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev_priv)) { - DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); + drm_dbg_kms(&dev_priv->drm, + "Odd pipe source width not supported with dual link LVDS\n"); return -EINVAL; } } @@ -7997,13 +8129,15 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) * indicates as much. 
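/*
 * Editor's aside -- an illustrative sketch, not part of the commit.
 * hsw_compute_ips_config() now returns an errno-style int (the cdclk state
 * lookup can fail) and, on Broadwell, keeps IPS off whenever the pixel
 * rate exceeds 95% of the logical cdclk. The arithmetic in isolation:
 */
#include <stdbool.h>
#include <stdio.h>

static bool fake_bdw_ips_ok(int pixel_rate_khz, int cdclk_khz)
{
	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	return pixel_rate_khz <= cdclk_khz * 95 / 100;
}

int main(void)
{
	printf("540000 @ 540000 cdclk -> %d\n",
	       fake_bdw_ips_ok(540000, 540000));  /* 0: over 95% */
	printf("450000 @ 540000 cdclk -> %d\n",
	       fake_bdw_ips_ok(450000, 540000));  /* 1: fits */
	return 0;
}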
*/ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { - bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & + bool bios_lvds_use_ssc = intel_de_read(dev_priv, + PCH_DREF_CONTROL) & DREF_SSC1_ENABLE; if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { - DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", - enableddisabled(bios_lvds_use_ssc), - enableddisabled(dev_priv->vbt.lvds_use_ssc)); + drm_dbg_kms(&dev_priv->drm, + "SSC %s by BIOS, overriding VBT which says %s\n", + enableddisabled(bios_lvds_use_ssc), + enableddisabled(dev_priv->vbt.lvds_use_ssc)); dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; } } @@ -8090,10 +8224,11 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); - I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); - I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); - I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); + intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe), + TU_SIZE(m_n->tu) | m_n->gmch_m); + intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); + intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m); + intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n); } static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, @@ -8119,33 +8254,42 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta enum transcoder transcoder = crtc_state->cpu_transcoder; if (INTEL_GEN(dev_priv) >= 5) { - I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); - I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); - I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); - I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); + intel_de_write(dev_priv, PIPE_DATA_M1(transcoder), + TU_SIZE(m_n->tu) | m_n->gmch_m); + intel_de_write(dev_priv, PIPE_DATA_N1(transcoder), + m_n->gmch_n); + intel_de_write(dev_priv, PIPE_LINK_M1(transcoder), + m_n->link_m); + intel_de_write(dev_priv, PIPE_LINK_N1(transcoder), + m_n->link_n); /* * M2_N2 registers are set only if DRRS is supported * (to make sure the registers are not unnecessarily accessed). 
*/ if (m2_n2 && crtc_state->has_drrs && transcoder_has_m2_n2(dev_priv, transcoder)) { - I915_WRITE(PIPE_DATA_M2(transcoder), - TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); - I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); - I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); - I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); + intel_de_write(dev_priv, PIPE_DATA_M2(transcoder), + TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); + intel_de_write(dev_priv, PIPE_DATA_N2(transcoder), + m2_n2->gmch_n); + intel_de_write(dev_priv, PIPE_LINK_M2(transcoder), + m2_n2->link_m); + intel_de_write(dev_priv, PIPE_LINK_N2(transcoder), + m2_n2->link_n); } } else { - I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); - I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); - I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); - I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); + intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe), + TU_SIZE(m_n->tu) | m_n->gmch_m); + intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n); + intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m); + intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n); } } void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) { const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); if (m_n == M1_N1) { dp_m_n = &crtc_state->dp_m_n; @@ -8158,7 +8302,7 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s */ dp_m_n = &crtc_state->dp_m2_n2; } else { - DRM_ERROR("Unsupported divider value\n"); + drm_err(&i915->drm, "Unsupported divider value\n"); return; } @@ -8212,9 +8356,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, u32 coreclk, reg_val; /* Enable Refclk */ - I915_WRITE(DPLL(pipe), - pipe_config->dpll_hw_state.dpll & - ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); + intel_de_write(dev_priv, DPLL(pipe), + pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); /* No need to actually set up the DPLL with DSI */ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) @@ -8314,8 +8457,8 @@ static void chv_prepare_pll(struct intel_crtc *crtc, int vco; /* Enable Refclk and SSC */ - I915_WRITE(DPLL(pipe), - pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); + intel_de_write(dev_priv, DPLL(pipe), + pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); /* No need to actually set up the DPLL with DSI */ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) @@ -8614,27 +8757,22 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) } if (INTEL_GEN(dev_priv) > 3) - I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); - - I915_WRITE(HTOTAL(cpu_transcoder), - (adjusted_mode->crtc_hdisplay - 1) | - ((adjusted_mode->crtc_htotal - 1) << 16)); - I915_WRITE(HBLANK(cpu_transcoder), - (adjusted_mode->crtc_hblank_start - 1) | - ((adjusted_mode->crtc_hblank_end - 1) << 16)); - I915_WRITE(HSYNC(cpu_transcoder), - (adjusted_mode->crtc_hsync_start - 1) | - ((adjusted_mode->crtc_hsync_end - 1) << 16)); - - I915_WRITE(VTOTAL(cpu_transcoder), - (adjusted_mode->crtc_vdisplay - 1) | - ((crtc_vtotal - 1) << 16)); - I915_WRITE(VBLANK(cpu_transcoder), - (adjusted_mode->crtc_vblank_start - 1) | - ((crtc_vblank_end - 1) << 16)); - I915_WRITE(VSYNC(cpu_transcoder), - (adjusted_mode->crtc_vsync_start - 1) | - ((adjusted_mode->crtc_vsync_end - 1) << 16)); + intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), + vsyncshift); + + intel_de_write(dev_priv, 
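/*
 * Editor's aside -- an illustrative sketch, not part of the commit. The
 * M/N hunks keep writing TU_SIZE(m_n->tu) | m_n->gmch_m into the DATA_M
 * register: the transfer-unit size rides in the high bits above the data M
 * value. The shift used here mirrors the driver's TU_SIZE() macro as I
 * read it, but treat it as an assumption:
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_TU_SIZE(x)   ((uint32_t)((x) - 1) << 25)  /* tu=64 -> 63<<25 */
#define FAKE_TU_SIZE_MASK (0x3fu << 25)

int main(void)
{
	uint32_t gmch_m = 0x123456;             /* 24-bit data M value */
	uint32_t val = FAKE_TU_SIZE(64) | gmch_m;

	printf("DATA_M=0x%08x tu=%u m=0x%06x\n", val,
	       ((val & FAKE_TU_SIZE_MASK) >> 25) + 1,
	       val & ~FAKE_TU_SIZE_MASK);
	return 0;
}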
HTOTAL(cpu_transcoder), + (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); + intel_de_write(dev_priv, HBLANK(cpu_transcoder), + (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); + intel_de_write(dev_priv, HSYNC(cpu_transcoder), + (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); + + intel_de_write(dev_priv, VTOTAL(cpu_transcoder), + (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); + intel_de_write(dev_priv, VBLANK(cpu_transcoder), + (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); + intel_de_write(dev_priv, VSYNC(cpu_transcoder), + (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); /* Workaround: when the EDP input selection is B, the VTOTAL_B must be * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is @@ -8642,7 +8780,8 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) * bits. */ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && (pipe == PIPE_B || pipe == PIPE_C)) - I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, VTOTAL(pipe), + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); } @@ -8655,9 +8794,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) /* pipesrc controls the size that is scaled from, which should * always be the user's requested size. */ - I915_WRITE(PIPESRC(pipe), - ((crtc_state->pipe_src_w - 1) << 16) | - (crtc_state->pipe_src_h - 1)); + intel_de_write(dev_priv, PIPESRC(pipe), + ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1)); } static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) @@ -8670,9 +8808,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; + return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; else - return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; + return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; } static void intel_get_pipe_timings(struct intel_crtc *crtc, @@ -8683,33 +8821,33 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; u32 tmp; - tmp = I915_READ(HTOTAL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { - tmp = I915_READ(HBLANK(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; } - tmp = I915_READ(HSYNC(cpu_transcoder)); + tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; - tmp = I915_READ(VTOTAL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; if 
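/*
 * Editor's aside -- an illustrative sketch, not part of the commit. Both
 * the programming side ((active - 1) | ((total - 1) << 16)) and the
 * readout side ((tmp & 0xffff) + 1) of the transcoder timing registers
 * appear in these hunks; they are exact inverses. A round-trip check:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_timing(int active, int total)
{
	return (uint32_t)(active - 1) | ((uint32_t)(total - 1) << 16);
}

int main(void)
{
	uint32_t htotal = pack_timing(1920, 2200);
	int hdisplay = (htotal & 0xffff) + 1;
	int total = ((htotal >> 16) & 0xffff) + 1;

	printf("HTOTAL=0x%08x -> hdisplay=%d, htotal=%d\n",
	       htotal, hdisplay, total);
	return 0;
}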
(!transcoder_is_dsi(cpu_transcoder)) { - tmp = I915_READ(VBLANK(cpu_transcoder)); + tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; } - tmp = I915_READ(VSYNC(cpu_transcoder)); + tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; @@ -8727,7 +8865,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; - tmp = I915_READ(PIPESRC(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); pipe_config->pipe_src_h = (tmp & 0xffff) + 1; pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; @@ -8768,7 +8906,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; + pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; if (crtc_state->double_wide) pipeconf |= PIPECONF_DOUBLE_WIDE; @@ -8815,8 +8953,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) pipeconf |= PIPECONF_FRAME_START_DELAY(0); - I915_WRITE(PIPECONF(crtc->pipe), pipeconf); - POSTING_READ(PIPECONF(crtc->pipe)); + intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); + intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); } static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, @@ -8833,7 +8971,9 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + drm_dbg_kms(&dev_priv->drm, + "using SSC reference clock of %d kHz\n", + refclk); } limit = &intel_limits_i8xx_lvds; @@ -8846,7 +8986,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&dev_priv->drm, + "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -8868,7 +9009,9 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + drm_dbg_kms(&dev_priv->drm, + "using SSC reference clock of %d kHz\n", + refclk); } if (intel_is_dual_link_lvds(dev_priv)) @@ -8888,7 +9031,8 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&dev_priv->drm, + "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -8911,7 +9055,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + drm_dbg_kms(&dev_priv->drm, + "using SSC reference clock of %d kHz\n", + refclk); } limit = &pnv_limits_lvds; @@ -8922,7 
+9068,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&dev_priv->drm, + "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -8945,7 +9092,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { refclk = dev_priv->vbt.lvds_ssc_freq; - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + drm_dbg_kms(&dev_priv->drm, + "using SSC reference clock of %d kHz\n", + refclk); } limit = &intel_limits_i9xx_lvds; @@ -8956,7 +9105,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&dev_priv->drm, + "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -8970,6 +9120,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, { int refclk = 100000; const struct intel_limit *limit = &intel_limits_chv; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -8977,7 +9128,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -8991,6 +9142,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, { int refclk = 100000; const struct intel_limit *limit = &intel_limits_vlv; + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -8998,7 +9150,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -9025,7 +9177,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, if (!i9xx_has_pfit(dev_priv)) return; - tmp = I915_READ(PFIT_CONTROL); + tmp = intel_de_read(dev_priv, PFIT_CONTROL); if (!(tmp & PFIT_ENABLE)) return; @@ -9039,7 +9191,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, } pipe_config->gmch_pfit.control = tmp; - pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); + pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv, + PFIT_PGM_RATIOS); } static void vlv_crtc_clock_get(struct intel_crtc *crtc, @@ -9087,11 +9240,11 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, if (!plane->get_hw_state(plane, &pipe)) return; - WARN_ON(pipe != crtc->pipe); + drm_WARN_ON(dev, pipe != crtc->pipe); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { - DRM_DEBUG_KMS("failed to alloc fb\n"); + drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); return; } @@ -9099,7 +9252,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->dev = dev; - val = I915_READ(DSPCNTR(i9xx_plane)); + val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (INTEL_GEN(dev_priv) >= 4) { if (val 
& DISPPLANE_TILED) { @@ -9120,34 +9273,37 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->format = drm_format_info(fourcc); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - offset = I915_READ(DSPOFFSET(i9xx_plane)); - base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; + offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; } else if (INTEL_GEN(dev_priv) >= 4) { if (plane_config->tiling) - offset = I915_READ(DSPTILEOFF(i9xx_plane)); + offset = intel_de_read(dev_priv, + DSPTILEOFF(i9xx_plane)); else - offset = I915_READ(DSPLINOFF(i9xx_plane)); - base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000; + offset = intel_de_read(dev_priv, + DSPLINOFF(i9xx_plane)); + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; } else { - base = I915_READ(DSPADDR(i9xx_plane)); + base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); } plane_config->base = base; - val = I915_READ(PIPESRC(pipe)); + val = intel_de_read(dev_priv, PIPESRC(pipe)); fb->width = ((val >> 16) & 0xfff) + 1; fb->height = ((val >> 0) & 0xfff) + 1; - val = I915_READ(DSPSTRIDE(i9xx_plane)); + val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(fb, 0, fb->height); plane_config->size = fb->pitches[0] * aligned_height; - DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", - crtc->base.name, plane->base.name, fb->width, fb->height, - fb->format->cpp[0] * 8, base, fb->pitches[0], - plane_config->size); + drm_dbg_kms(&dev_priv->drm, + "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + crtc->base.name, plane->base.name, fb->width, fb->height, + fb->format->cpp[0] * 8, base, fb->pitches[0], + plane_config->size); plane_config->fb = intel_fb; } @@ -9192,11 +9348,12 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - tmp = I915_READ(PIPEMISC(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); if (tmp & PIPEMISC_YUV420_ENABLE) { /* We support 4:2:0 in full blend mode only */ - WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); + drm_WARN_ON(&dev_priv->drm, + (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); return INTEL_OUTPUT_FORMAT_YCBCR420; } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { @@ -9214,7 +9371,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; u32 tmp; - tmp = I915_READ(DSPCNTR(i9xx_plane)); + tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (tmp & DISPPLANE_GAMMA_ENABLE) crtc_state->gamma_enable = true; @@ -9245,7 +9402,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ret = false; - tmp = I915_READ(PIPECONF(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) goto out; @@ -9274,7 +9431,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, PIPECONF_GAMMA_MODE_SHIFT; if (IS_CHERRYVIEW(dev_priv)) - pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); + pipe_config->cgm_mode = intel_de_read(dev_priv, + CGM_PIPE_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); @@ -9292,14 +9450,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) tmp = dev_priv->chv_dpll_md[crtc->pipe]; else - tmp = I915_READ(DPLL_MD(crtc->pipe)); + tmp = intel_de_read(dev_priv, 
DPLL_MD(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { - tmp = I915_READ(DPLL(crtc->pipe)); + tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & SDVO_MULTIPLIER_MASK) >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; @@ -9309,10 +9467,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, * function. */ pipe_config->pixel_multiplier = 1; } - pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); + pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, + DPLL(crtc->pipe)); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); - pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); + pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, + FP0(crtc->pipe)); + pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, + FP1(crtc->pipe)); } else { /* Mask out read-only status bits. */ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | @@ -9381,8 +9542,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - u32 temp = I915_READ(PCH_DPLL(i)); + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); if (!(temp & DPLL_VCO_ENABLE)) continue; @@ -9394,15 +9555,16 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) } } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", - has_panel, has_lvds, has_ck505, using_ssc_source); + drm_dbg_kms(&dev_priv->drm, + "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. This is only under driver's control after * PCH B stepping, previous chipset stepping should be * ignoring this setting. 
*/ - val = I915_READ(PCH_DREF_CONTROL); + val = intel_de_read(dev_priv, PCH_DREF_CONTROL); /* As we must carefully and slowly disable/enable each source in turn, * compute the final state we want first and check if we need to @@ -9454,14 +9616,14 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* SSC must be turned on before enabling the CPU output */ if (intel_panel_use_ssc(dev_priv) && can_ssc) { - DRM_DEBUG_KMS("Using SSC on panel\n"); + drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); val |= DREF_SSC1_ENABLE; } else val &= ~DREF_SSC1_ENABLE; /* Get SSC going before enabling the outputs */ - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -9469,30 +9631,31 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* Enable CPU source on CPU attached eDP */ if (has_cpu_edp) { if (intel_panel_use_ssc(dev_priv) && can_ssc) { - DRM_DEBUG_KMS("Using SSC on eDP\n"); + drm_dbg_kms(&dev_priv->drm, + "Using SSC on eDP\n"); val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; } else val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling CPU source output\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; /* Turn off CPU output */ val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); if (!using_ssc_source) { - DRM_DEBUG_KMS("Disabling SSC source\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); /* Turn off the SSC source */ val &= ~DREF_SSC_SOURCE_MASK; @@ -9501,8 +9664,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) /* Turn off SSC1 */ val &= ~DREF_SSC1_ENABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); udelay(200); } } @@ -9514,21 +9677,21 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) { u32 tmp; - tmp = I915_READ(SOUTH_CHICKEN2); + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & + if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - DRM_ERROR("FDI mPHY reset assert timeout\n"); + drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - tmp = I915_READ(SOUTH_CHICKEN2); + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & + if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) - DRM_ERROR("FDI mPHY reset de-assert timeout\n"); + drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); } /* WaMPhyProgramming:hsw */ @@ -9617,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, { u32 reg, 
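/*
 * Editor's aside -- an illustrative sketch, not part of the commit.
 * lpt_reset_fdi_mphy() shows the classic assert/poll/deassert/poll reset
 * dance: set the reset-control bit, wait (up to 100us in the driver) for
 * the status bit, clear the control bit, wait for status to drop. Modeled
 * against a register where the "hardware" mirrors control into status:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RST_CTL    (1u << 31)
#define RST_STATUS (1u << 30)

static uint32_t chicken2;

static void hw_tick(void)   /* fake HW: status follows control */
{
	if (chicken2 & RST_CTL)
		chicken2 |= RST_STATUS;
	else
		chicken2 &= ~RST_STATUS;
}

static bool wait_status(bool set)
{
	for (int tries = 0; tries < 100; tries++) {   /* poll budget */
		hw_tick();
		if (!!(chicken2 & RST_STATUS) == set)
			return true;
	}
	return false;
}

int main(void)
{
	chicken2 |= RST_CTL;
	if (!wait_status(true))
		fprintf(stderr, "FDI mPHY reset assert timeout\n");

	chicken2 &= ~RST_CTL;
	if (!wait_status(false))
		fprintf(stderr, "FDI mPHY reset de-assert timeout\n");

	printf("reset pulsed, SOUTH_CHICKEN2=0x%08x\n", chicken2);
	return 0;
}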
tmp; - if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) + if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, + "FDI requires downspread\n")) with_spread = true; - if (WARN(HAS_PCH_LPT_LP(dev_priv) && - with_fdi, "LP PCH doesn't have FDI\n")) + if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && + with_fdi, "LP PCH doesn't have FDI\n")) with_fdi = false; mutex_lock(&dev_priv->sb_lock); @@ -9714,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) u32 tmp; int idx = BEND_IDX(steps); - if (WARN_ON(steps % 5 != 0)) + if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) return; - if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) + if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) return; mutex_lock(&dev_priv->sb_lock); @@ -9740,8 +9904,8 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) { - u32 fuse_strap = I915_READ(FUSE_STRAP); - u32 ctl = I915_READ(SPLL_CTL); + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, SPLL_CTL); if ((ctl & SPLL_PLL_ENABLE) == 0) return false; @@ -9760,8 +9924,8 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { - u32 fuse_strap = I915_READ(FUSE_STRAP); - u32 ctl = I915_READ(WRPLL_CTL(id)); + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); if ((ctl & WRPLL_PLL_ENABLE) == 0) return false; @@ -9810,17 +9974,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) dev_priv->pch_ssc_use = 0; if (spll_uses_pch_ssc(dev_priv)) { - DRM_DEBUG_KMS("SPLL using PCH SSC\n"); + drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { - DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); + drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); } if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { - DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); + drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); } @@ -9885,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) * This would end up with an odd purple hue over * the entire display. Make sure we don't do it. 
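[ The WARN()/WARN_ON() to drm_WARN()/drm_WARN_ON() conversions throughout these hunks buy device-qualified diagnostics: the drm_* variants prefix the splat with the driver and device name, which disambiguates logs on multi-GPU systems. Roughly the macro shape from drm_print.h, quoted from memory:

	#define drm_WARN(drm, condition, format, args...)		\
		WARN(condition, "%s %s: " format,			\
		     dev_driver_string((drm)->dev),			\
		     dev_name((drm)->dev), ## args)

	#define drm_WARN_ON(drm, x)					\
		drm_WARN((drm), (x), "%s",				\
			 "drm_WARN_ON(" __stringify(x) ")")
]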
*/ - WARN_ON(crtc_state->limited_color_range && - crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; @@ -9898,8 +10062,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) val |= PIPECONF_FRAME_START_DELAY(0); - I915_WRITE(PIPECONF(pipe), val); - POSTING_READ(PIPECONF(pipe)); + intel_de_write(dev_priv, PIPECONF(pipe), val); + intel_de_posting_read(dev_priv, PIPECONF(pipe)); } static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) @@ -9921,8 +10085,8 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state) crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; - I915_WRITE(PIPECONF(cpu_transcoder), val); - POSTING_READ(PIPECONF(cpu_transcoder)); + intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); } static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) @@ -9965,7 +10129,10 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) BIT(PLANE_CURSOR))) == 0) val |= PIPEMISC_HDR_MODE_PRECISION; - I915_WRITE(PIPEMISC(crtc->pipe), val); + if (INTEL_GEN(dev_priv) >= 12) + val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; + + intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); } int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) @@ -9973,7 +10140,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - tmp = I915_READ(PIPEMISC(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); switch (tmp & PIPEMISC_DITHER_BPC_MASK) { case PIPEMISC_DITHER_6_BPC: @@ -10126,8 +10293,9 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_panel_use_ssc(dev_priv)) { - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", - dev_priv->vbt.lvds_ssc_freq); + drm_dbg_kms(&dev_priv->drm, + "using SSC reference clock of %d kHz\n", + dev_priv->vbt.lvds_ssc_freq); refclk = dev_priv->vbt.lvds_ssc_freq; } @@ -10149,15 +10317,17 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc, if (!crtc_state->clock_set && !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &crtc_state->dpll)) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); + drm_err(&dev_priv->drm, + "Couldn't find PLL settings for mode!\n"); return -EINVAL; } ilk_compute_dpll(crtc, crtc_state, NULL); if (!intel_reserve_shared_dplls(state, crtc, NULL)) { - DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, + "failed to find PLL for pipe %c\n", + pipe_name(crtc->pipe)); return -EINVAL; } @@ -10171,12 +10341,12 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; - m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); - m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); - m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) + m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe)); + m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe)); + m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) & ~TU_SIZE_MASK; - m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); - m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) + m_n->gmch_n = 
intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe)); + m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; } @@ -10189,30 +10359,38 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; if (INTEL_GEN(dev_priv) >= 5) { - m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); - m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); - m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) + m_n->link_m = intel_de_read(dev_priv, + PIPE_LINK_M1(transcoder)); + m_n->link_n = intel_de_read(dev_priv, + PIPE_LINK_N1(transcoder)); + m_n->gmch_m = intel_de_read(dev_priv, + PIPE_DATA_M1(transcoder)) & ~TU_SIZE_MASK; - m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); - m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) + m_n->gmch_n = intel_de_read(dev_priv, + PIPE_DATA_N1(transcoder)); + m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { - m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); - m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); - m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) + m2_n2->link_m = intel_de_read(dev_priv, + PIPE_LINK_M2(transcoder)); + m2_n2->link_n = intel_de_read(dev_priv, + PIPE_LINK_N2(transcoder)); + m2_n2->gmch_m = intel_de_read(dev_priv, + PIPE_DATA_M2(transcoder)) & ~TU_SIZE_MASK; - m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); - m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) + m2_n2->gmch_n = intel_de_read(dev_priv, + PIPE_DATA_N2(transcoder)); + m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; } } else { - m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); - m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); - m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) + m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe)); + m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe)); + m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) & ~TU_SIZE_MASK; - m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); - m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) + m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe)); + m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; } } @@ -10247,12 +10425,14 @@ static void skl_get_pfit_config(struct intel_crtc *crtc, /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { - ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); + ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i)); if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { id = i; pipe_config->pch_pfit.enabled = true; - pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); - pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); + pipe_config->pch_pfit.pos = intel_de_read(dev_priv, + SKL_PS_WIN_POS(crtc->pipe, i)); + pipe_config->pch_pfit.size = intel_de_read(dev_priv, + SKL_PS_WIN_SZ(crtc->pipe, i)); scaler_state->scalers[i].in_use = true; break; } @@ -10284,11 +10464,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, if (!plane->get_hw_state(plane, &pipe)) return; - WARN_ON(pipe != crtc->pipe); + drm_WARN_ON(dev, pipe != crtc->pipe); intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { - DRM_DEBUG_KMS("failed to alloc fb\n"); + drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n"); return; } @@ -10296,7 +10476,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, fb->dev = 
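[ intel_cpu_transcoder_get_m_n() splits each DATA_M register into two fields: the transfer unit size sits in a high field stored as size-minus-one, and the remaining bits are the M value itself. An illustrative decode helper (not in the driver) using the driver's own mask names:

	/* Illustrative only: mirrors the open-coded decode above. */
	static void decode_data_m(u32 val, u32 *gmch_m, u32 *tu)
	{
		*gmch_m = val & ~TU_SIZE_MASK;			   /* M value */
		*tu = ((val & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; /* TU size */
	}
]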
dev; - val = I915_READ(PLANE_CTL(pipe, plane_id)); + val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); if (INTEL_GEN(dev_priv) >= 11) pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; @@ -10304,7 +10484,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, pixel_format = val & PLANE_CTL_FORMAT_MASK; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id)); + alpha = intel_de_read(dev_priv, + PLANE_COLOR_CTL(pipe, plane_id)); alpha &= PLANE_COLOR_ALPHA_MASK; } else { alpha = val & PLANE_CTL_ALPHA_MASK; @@ -10368,16 +10549,16 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, val & PLANE_CTL_FLIP_HORIZONTAL) plane_config->rotation |= DRM_MODE_REFLECT_X; - base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; + base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; plane_config->base = base; - offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); + offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); - val = I915_READ(PLANE_SIZE(pipe, plane_id)); + val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); fb->height = ((val >> 16) & 0xffff) + 1; fb->width = ((val >> 0) & 0xffff) + 1; - val = I915_READ(PLANE_STRIDE(pipe, plane_id)); + val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); fb->pitches[0] = (val & 0x3ff) * stride_mult; @@ -10385,10 +10566,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, plane_config->size = fb->pitches[0] * aligned_height; - DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", - crtc->base.name, plane->base.name, fb->width, fb->height, - fb->format->cpp[0] * 8, base, fb->pitches[0], - plane_config->size); + drm_dbg_kms(&dev_priv->drm, + "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", + crtc->base.name, plane->base.name, fb->width, fb->height, + fb->format->cpp[0] * 8, base, fb->pitches[0], + plane_config->size); plane_config->fb = intel_fb; return; @@ -10404,19 +10586,21 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; - tmp = I915_READ(PF_CTL(crtc->pipe)); + tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe)); if (tmp & PF_ENABLE) { pipe_config->pch_pfit.enabled = true; - pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); - pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); + pipe_config->pch_pfit.pos = intel_de_read(dev_priv, + PF_WIN_POS(crtc->pipe)); + pipe_config->pch_pfit.size = intel_de_read(dev_priv, + PF_WIN_SZ(crtc->pipe)); /* We currently do not free assignements of panel fitters on * ivb/hsw (since we don't use the higher upscaling modes which * differentiates them) so just WARN about this case for now. 
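[ skl_get_initial_plane_config() reconstructs the boot framebuffer purely from register readback: PLANE_SIZE packs both dimensions minus one into a single register, and PLANE_SURF is page-aligned so the low 12 bits are masked off. A worked decode with made-up readback values:

	/* Hypothetical values, decoded the way the code above does:
	 *
	 *   PLANE_SIZE = 0x0437077f -> width  = ((val >>  0) & 0xffff) + 1 = 1920
	 *                              height = ((val >> 16) & 0xffff) + 1 = 1080
	 *   PLANE_SURF = 0x12345abc -> base   = val & 0xfffff000 = 0x12345000
	 */
]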
*/ if (IS_GEN(dev_priv, 7)) { - WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != - PF_PIPE_SEL_IVB(crtc->pipe)); + drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) != + PF_PIPE_SEL_IVB(crtc->pipe)); } } } @@ -10441,7 +10625,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->master_transcoder = INVALID_TRANSCODER; ret = false; - tmp = I915_READ(PIPECONF(crtc->pipe)); + tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) goto out; @@ -10478,18 +10662,19 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> PIPECONF_GAMMA_MODE_SHIFT; - pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + pipe_config->csc_mode = intel_de_read(dev_priv, + PIPE_CSC_MODE(crtc->pipe)); i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); - if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { + if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; pipe_config->has_pch_encoder = true; - tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); + tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; @@ -10502,7 +10687,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, */ pll_id = (enum intel_dpll_id) crtc->pipe; } else { - tmp = I915_READ(PCH_DPLL_SEL); + tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) pll_id = DPLL_ID_PCH_PLL_B; else @@ -10513,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, intel_get_shared_dpll_by_id(dev_priv, pll_id); pll = pipe_config->shared_dpll; - WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); + drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); tmp = pipe_config->dpll_hw_state.dpll; pipe_config->pixel_multiplier = @@ -10552,8 +10737,9 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc, intel_get_crtc_new_encoder(state, crtc_state); if (!intel_reserve_shared_dplls(state, crtc, encoder)) { - DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, + "failed to find PLL for pipe %c\n", + pipe_name(crtc->pipe)); return -EINVAL; } } @@ -10567,10 +10753,10 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, enum intel_dpll_id id; u32 temp; - temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) + if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2)) return; pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); @@ -10585,24 +10771,25 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, u32 temp; if (intel_phy_is_combo(dev_priv, phy)) { - temp = I915_READ(ICL_DPCLKA_CFGCR0) & + temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); port_dpll_id = ICL_PORT_DPLL_DEFAULT; } else if (intel_phy_is_tc(dev_priv, phy)) { - u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; if (clk_sel == DDI_CLK_SEL_MG) { id = 
icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); port_dpll_id = ICL_PORT_DPLL_MG_PHY; } else { - WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); + drm_WARN_ON(&dev_priv->drm, + clk_sel < DDI_CLK_SEL_TBT_162); id = DPLL_ID_ICL_TBTPLL; port_dpll_id = ICL_PORT_DPLL_DEFAULT; } } else { - WARN(1, "Invalid port %x\n", port); + drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port); return; } @@ -10629,7 +10816,7 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, id = DPLL_ID_SKL_DPLL2; break; default: - DRM_ERROR("Incorrect port type\n"); + drm_err(&dev_priv->drm, "Incorrect port type\n"); return; } @@ -10642,10 +10829,10 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, enum intel_dpll_id id; u32 temp; - temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); + temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); id = temp >> (port * 3 + 1); - if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) + if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3)) return; pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); @@ -10655,7 +10842,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; - u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); + u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port)); switch (ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: @@ -10723,7 +10910,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, bool force_thru = false; enum pipe trans_pipe; - tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(panel_transcoder)); if (!(tmp & TRANS_DDI_FUNC_ENABLE)) continue; @@ -10738,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: - WARN(1, "unknown pipe linked to transcoder %s\n", - transcoder_name(panel_transcoder)); + drm_WARN(dev, 1, + "unknown pipe linked to transcoder %s\n", + transcoder_name(panel_transcoder)); /* fall through */ case TRANS_DDI_EDP_INPUT_A_ONOFF: force_thru = true; @@ -10767,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, /* * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1 */ - WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && - enabled_panel_transcoders != BIT(TRANSCODER_EDP)); + drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && + enabled_panel_transcoders != BIT(TRANSCODER_EDP)); power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); - WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); + drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); wf = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wf) @@ -10780,7 +10969,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, wakerefs[power_domain] = wf; *power_domain_mask |= BIT_ULL(power_domain); - tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); return tmp & PIPECONF_ENABLE; } @@ -10805,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, cpu_transcoder = TRANSCODER_DSI_C; power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - WARN_ON(*power_domain_mask & BIT_ULL(power_domain)); + drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); wf = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wf) @@ -10825,11 +11014,11 @@ static bool 
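[ skl_get_ddi_pll() relies on DPLL_CTRL2 carrying a small per-port field: three bits per DDI, with a select-override bit at the bottom and the 2-bit DPLL select above it, hence the (port * 3 + 1) shift. The layout here is inferred from the code rather than quoted from bspec; sketch:

	/* Inferred field layout; illustrative helper only. */
	static enum intel_dpll_id
	skl_decode_ddi_clk_sel(u32 dpll_ctrl2, enum port port)
	{
		u32 field = dpll_ctrl2 & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);

		return field >> (port * 3 + 1);	/* 2-bit DPLL select */
	}
]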
bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, break; /* XXX: this works for video mode only */ - tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); + tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); if (!(tmp & DPI_ENABLE)) continue; - tmp = I915_READ(MIPI_CTRL(port)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) continue; @@ -10853,7 +11042,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, port = (cpu_transcoder == TRANSCODER_DSI_A) ? PORT_A : PORT_B; } else { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); + tmp = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (INTEL_GEN(dev_priv) >= 12) port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); else @@ -10873,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, pll = pipe_config->shared_dpll; if (pll) { - WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll, + drm_WARN_ON(&dev_priv->drm, + !pll->info->funcs->get_hw_state(dev_priv, pll, &pipe_config->dpll_hw_state)); } @@ -10883,10 +11074,10 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc, * the PCH transcoder is on. */ if (INTEL_GEN(dev_priv) < 9 && - (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { + (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { pipe_config->has_pch_encoder = true; - tmp = I915_READ(FDI_RX_CTL(PIPE_A)); + tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; @@ -10899,7 +11090,8 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr { u32 trans_port_sync, master_select; - trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder)); + trans_port_sync = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL2(cpu_transcoder)); if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) return INVALID_TRANSCODER; @@ -10943,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) intel_display_power_put(dev_priv, power_domain, trans_wakeref); } - WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER && - crtc_state->sync_mode_slaves_mask); + drm_WARN_ON(&dev_priv->drm, + crtc_state->master_transcoder != INVALID_TRANSCODER && + crtc_state->sync_mode_slaves_mask); } static bool hsw_get_pipe_config(struct intel_crtc *crtc, @@ -10955,6 +11148,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, enum intel_display_power_domain power_domain; u64 power_domain_mask; bool active; + u32 tmp; pipe_config->master_transcoder = INVALID_TRANSCODER; @@ -10974,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (IS_GEN9_LP(dev_priv) && bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask, wakerefs)) { - WARN_ON(active); + drm_WARN_ON(&dev_priv->drm, active); active = true; } @@ -10990,7 +11184,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_src_size(crtc, pipe_config); if (IS_HASWELL(dev_priv)) { - u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); + u32 tmp = intel_de_read(dev_priv, + PIPECONF(pipe_config->cpu_transcoder)); if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; @@ -11013,12 +11208,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; } - pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); + pipe_config->gamma_mode = 
intel_de_read(dev_priv, + GAMMA_MODE(crtc->pipe)); - pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + pipe_config->csc_mode = intel_de_read(dev_priv, + PIPE_CSC_MODE(crtc->pipe)); if (INTEL_GEN(dev_priv) >= 9) { - u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); + tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe)); if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) pipe_config->gamma_enable = true; @@ -11031,8 +11228,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, intel_color_get_config(pipe_config); + tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe)); + pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp); + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + pipe_config->ips_linetime = + REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); + power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - WARN_ON(power_domain_mask & BIT_ULL(power_domain)); + drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain)); wf = intel_display_power_get_if_enabled(dev_priv, power_domain); if (wf) { @@ -11047,7 +11250,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (hsw_crtc_supports_ips(crtc)) { if (IS_HASWELL(dev_priv)) - pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE; + pipe_config->ips_enabled = intel_de_read(dev_priv, + IPS_CTL) & IPS_ENABLE; else { /* * We cannot readout IPS state on broadwell, set to @@ -11061,7 +11265,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = - I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; + intel_de_read(dev_priv, + PIPE_MULT(pipe_config->cpu_transcoder)) + 1; } else { pipe_config->pixel_multiplier = 1; } @@ -11150,7 +11355,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) plane_state, 0); if (src_x != 0 || src_y != 0) { - DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); + drm_dbg_kms(&dev_priv->drm, + "Arbitrary cursor panning not supported\n"); return -EINVAL; } @@ -11181,10 +11387,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); int ret; if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { - DRM_DEBUG_KMS("cursor cannot be tiled\n"); + drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); return -EINVAL; } @@ -11255,6 +11462,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); int ret; ret = intel_check_cursor(crtc_state, plane_state); @@ -11267,14 +11475,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i845_cursor_size_ok(plane_state)) { - DRM_DEBUG("Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst)); + drm_dbg_kms(&i915->drm, + "Cursor dimension %dx%d not supported\n", + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } - WARN_ON(plane_state->uapi.visible && - plane_state->color_plane[0].stride != fb->pitches[0]); + drm_WARN_ON(&i915->drm, plane_state->uapi.visible && + plane_state->color_plane[0].stride != fb->pitches[0]); 
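[ The new linetime readout leans on REG_FIELD_GET(), the i915 wrapper around the kernel's FIELD_GET(): given a mask, it extracts the field and shifts it down to bit 0 in one step. A small worked example, with a mask value invented purely for illustration:

	/* With an invented mask of 0x01ff0000 (a 9-bit field at bit 16)
	 * and a register readback of 0x00770077:
	 *
	 *   REG_FIELD_GET(0x01ff0000, 0x00770077)
	 *     == (0x00770077 & 0x01ff0000) >> 16 == 0x77
	 */
]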
switch (fb->pitches[0]) { case 256: @@ -11283,8 +11492,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, case 2048: break; default: - DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", - fb->pitches[0]); + drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", + fb->pitches[0]); return -EINVAL; } @@ -11322,17 +11531,17 @@ static void i845_update_cursor(struct intel_plane *plane, if (plane->cursor.base != base || plane->cursor.size != size || plane->cursor.cntl != cntl) { - I915_WRITE_FW(CURCNTR(PIPE_A), 0); - I915_WRITE_FW(CURBASE(PIPE_A), base); - I915_WRITE_FW(CURSIZE, size); - I915_WRITE_FW(CURPOS(PIPE_A), pos); - I915_WRITE_FW(CURCNTR(PIPE_A), cntl); + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); + intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); + intel_de_write_fw(dev_priv, CURSIZE, size); + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); plane->cursor.base = base; plane->cursor.size = size; plane->cursor.cntl = cntl; } else { - I915_WRITE_FW(CURPOS(PIPE_A), pos); + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -11357,7 +11566,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; + ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; *pipe = PIPE_A; @@ -11483,20 +11692,22 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i9xx_cursor_size_ok(plane_state)) { - DRM_DEBUG("Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst)); + drm_dbg(&dev_priv->drm, + "Cursor dimension %dx%d not supported\n", + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst)); return -EINVAL; } - WARN_ON(plane_state->uapi.visible && - plane_state->color_plane[0].stride != fb->pitches[0]); + drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && + plane_state->color_plane[0].stride != fb->pitches[0]); if (fb->pitches[0] != drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { - DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", - fb->pitches[0], - drm_rect_width(&plane_state->uapi.dst)); + drm_dbg_kms(&dev_priv->drm, + "Invalid cursor stride (%u) (cursor width %d)\n", + fb->pitches[0], + drm_rect_width(&plane_state->uapi.dst)); return -EINVAL; } @@ -11512,7 +11723,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, */ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { - DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); + drm_dbg_kms(&dev_priv->drm, + "CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -11573,17 +11785,18 @@ static void i9xx_update_cursor(struct intel_plane *plane, plane->cursor.size != fbc_ctl || plane->cursor.cntl != cntl) { if (HAS_CUR_FBC(dev_priv)) - I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); - I915_WRITE_FW(CURCNTR(pipe), cntl); - I915_WRITE_FW(CURPOS(pipe), pos); - I915_WRITE_FW(CURBASE(pipe), base); + intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), + fbc_ctl); + intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); + intel_de_write_fw(dev_priv, CURBASE(pipe), base); plane->cursor.base = base; plane->cursor.size = fbc_ctl; plane->cursor.cntl = cntl; } else { - 
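[ Note the cursor paths move to intel_de_write_fw() rather than intel_de_write(): the _fw variants skip the per-access locking (and forcewake) bookkeeping, so the caller batches several register writes under a single uncore.lock critical section, keeping the update cheap on the vblank-sensitive path. The shape, condensed from the surrounding hunks:

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* several raw writes amortise one lock transaction */
	intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
	intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
	intel_de_write_fw(dev_priv, CURBASE(pipe), base);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
]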
I915_WRITE_FW(CURPOS(pipe), pos); - I915_WRITE_FW(CURBASE(pipe), base); + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); + intel_de_write_fw(dev_priv, CURBASE(pipe), base); } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -11614,7 +11827,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - val = I915_READ(CURCNTR(plane->pipe)); + val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); ret = val & MCURSOR_MODE; @@ -11700,13 +11913,13 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_crtc_state *crtc_state; int ret, i = -1; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", - connector->base.id, connector->name, - encoder->base.id, encoder->name); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, connector->name, + encoder->base.id, encoder->name); old->restore_state = NULL; - WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); + drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex)); /* * Algorithm gets a little messy: @@ -11753,7 +11966,8 @@ int intel_get_load_detect_pipe(struct drm_connector *connector, * If we didn't find an unused CRTC, don't use any. */ if (!crtc) { - DRM_DEBUG_KMS("no pipe available for load-detect\n"); + drm_dbg_kms(&dev_priv->drm, + "no pipe available for load-detect\n"); ret = -ENODEV; goto fail; } @@ -11804,13 +12018,16 @@ found: if (!ret) ret = drm_atomic_add_affected_planes(restore_state, crtc); if (ret) { - DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); + drm_dbg_kms(&dev_priv->drm, + "Failed to create a copy of old state to restore: %i\n", + ret); goto fail; } ret = drm_atomic_commit(state); if (ret) { - DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); + drm_dbg_kms(&dev_priv->drm, + "failed to set mode on load-detect pipe\n"); goto fail; } @@ -11843,20 +12060,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, { struct intel_encoder *intel_encoder = intel_attached_encoder(to_intel_connector(connector)); + struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev); struct drm_encoder *encoder = &intel_encoder->base; struct drm_atomic_state *state = old->restore_state; int ret; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", - connector->base.id, connector->name, - encoder->base.id, encoder->name); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", + connector->base.id, connector->name, + encoder->base.id, encoder->name); if (!state) return; ret = drm_atomic_helper_commit_duplicated_state(state, ctx); if (ret) - DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); + drm_dbg_kms(&i915->drm, + "Couldn't release load detect pipe: %i\n", ret); drm_atomic_state_put(state); } @@ -11921,8 +12140,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 7 : 14; break; default: - DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " - "mode\n", (int)(dpll & DPLL_MODE_MASK)); + drm_dbg_kms(&dev_priv->drm, + "Unknown DPLL mode %08x in programmed " + "mode\n", (int)(dpll & DPLL_MODE_MASK)); return; } @@ -11931,7 +12151,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, else port_clock = i9xx_calc_dpll_params(refclk, &clock); } else { - u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); + u32 lvds = IS_I830(dev_priv) ? 
0 : intel_de_read(dev_priv, + LVDS); bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); if (is_lvds) { @@ -12142,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat was_visible = old_plane_state->uapi.visible; visible = plane_state->uapi.visible; - if (!was_crtc_enabled && WARN_ON(was_visible)) + if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible)) was_visible = false; /* @@ -12168,11 +12389,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", - crtc->base.base.id, crtc->base.name, - plane->base.base.id, plane->base.name, - was_visible, visible, - turn_off, turn_on, mode_changed); + drm_dbg_atomic(&dev_priv->drm, + "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + crtc->base.base.id, crtc->base.name, + plane->base.base.id, plane->base.name, + was_visible, visible, + turn_off, turn_on, mode_changed); if (turn_on) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) @@ -12349,8 +12571,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) } if (!linked_state) { - DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n", - hweight8(crtc_state->nv12_planes)); + drm_dbg_kms(&dev_priv->drm, + "Need %d free Y planes for planar YUV\n", + hweight8(crtc_state->nv12_planes)); return -EINVAL; } @@ -12361,7 +12584,8 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) linked_state->planar_linked_plane = plane; crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); - DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); + drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", + linked->base.name, plane->base.name); /* Copy parameters to slave plane */ linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; @@ -12398,120 +12622,74 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } -static bool -intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state) +static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = crtc_state->uapi.crtc; - struct drm_atomic_state *state = crtc_state->uapi.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int i; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; - for_each_new_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc) - continue; - if (connector->has_tile && - connector->tile_h_loc == connector->num_h_tile - 1 && - connector->tile_v_loc == connector->num_v_tile - 1) - return true; - } + if (!crtc_state->hw.enable) + return 0; - return false; + return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, + adjusted_mode->crtc_clock); } -static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state) +static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, + const struct intel_cdclk_state *cdclk_state) { - crtc_state->master_transcoder = INVALID_TRANSCODER; - crtc_state->sync_mode_slaves_mask = 0; + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + + if (!crtc_state->hw.enable) + return 0; + + return 
DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, + cdclk_state->logical.cdclk); } -static int icl_compute_port_sync_crtc_state(struct drm_connector *connector, - struct intel_crtc_state *crtc_state, - int num_tiled_conns) +static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state) { - struct drm_crtc *crtc = crtc_state->uapi.crtc; - struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct drm_connector *master_connector; - struct drm_connector_list_iter conn_iter; - struct drm_crtc *master_crtc = NULL; - struct drm_crtc_state *master_crtc_state; - struct intel_crtc_state *master_pipe_config; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + u16 linetime_wm; - if (INTEL_GEN(dev_priv) < 11) + if (!crtc_state->hw.enable) return 0; - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) - return 0; + linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8, + crtc_state->pixel_rate); - /* - * In case of tiled displays there could be one or more slaves but there is - * only one master. Lets make the CRTC used by the connector corresponding - * to the last horizonal and last vertical tile a master/genlock CRTC. - * All the other CRTCs corresponding to other tiles of the same Tile group - * are the slave CRTCs and hold a pointer to their genlock CRTC. - * If all tiles not present do not make master slave assignments. - */ - if (!connector->has_tile || - crtc_state->hw.mode.hdisplay != connector->tile_h_size || - crtc_state->hw.mode.vdisplay != connector->tile_v_size || - num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { - reset_port_sync_mode_state(crtc_state); - return 0; - } - /* Last Horizontal and last vertical tile connector is a master - * Master's crtc state is already populated in slave for port sync - */ - if (connector->tile_h_loc == connector->num_h_tile - 1 && - connector->tile_v_loc == connector->num_v_tile - 1) - return 0; + /* Display WA #1135: BXT:ALL GLK:ALL */ + if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) + linetime_wm /= 2; - /* Loop through all connectors and configure the Slave crtc_state - * to point to the correct master. 
- */ - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(master_connector, &conn_iter) { - struct drm_connector_state *master_conn_state = NULL; + return linetime_wm; +} - if (!(master_connector->has_tile && - master_connector->tile_group->id == connector->tile_group->id)) - continue; - if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || - master_connector->tile_v_loc != master_connector->num_v_tile - 1) - continue; +static int hsw_compute_linetime_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_cdclk_state *cdclk_state; - master_conn_state = drm_atomic_get_connector_state(&state->base, - master_connector); - if (IS_ERR(master_conn_state)) { - drm_connector_list_iter_end(&conn_iter); - return PTR_ERR(master_conn_state); - } - if (master_conn_state->crtc) { - master_crtc = master_conn_state->crtc; - break; - } - } - drm_connector_list_iter_end(&conn_iter); + if (INTEL_GEN(dev_priv) >= 9) + crtc_state->linetime = skl_linetime_wm(crtc_state); + else + crtc_state->linetime = hsw_linetime_wm(crtc_state); - if (!master_crtc) { - DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", - crtc->base.id); - return -EINVAL; - } + if (!hsw_crtc_supports_ips(crtc)) + return 0; - master_crtc_state = drm_atomic_get_crtc_state(&state->base, - master_crtc); - if (IS_ERR(master_crtc_state)) - return PTR_ERR(master_crtc_state); + cdclk_state = intel_atomic_get_cdclk_state(state); + if (IS_ERR(cdclk_state)) + return PTR_ERR(cdclk_state); - master_pipe_config = to_intel_crtc_state(master_crtc_state); - crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; - master_pipe_config->sync_mode_slaves_mask |= - BIT(crtc_state->cpu_transcoder); - DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", - transcoder_name(crtc_state->master_transcoder), - crtc->base.id, - master_pipe_config->sync_mode_slaves_mask); + crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state, + cdclk_state); return 0; } @@ -12531,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, if (mode_changed && crtc_state->hw.enable && dev_priv->display.crtc_compute_clock && - !WARN_ON(crtc_state->shared_dpll)) { + !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); if (ret) return ret; @@ -12551,17 +12729,18 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, return ret; } - ret = 0; if (dev_priv->display.compute_pipe_wm) { ret = dev_priv->display.compute_pipe_wm(crtc_state); if (ret) { - DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); + drm_dbg_kms(&dev_priv->drm, + "Target pipe watermarks are invalid\n"); return ret; } } if (dev_priv->display.compute_intermediate_wm) { - if (WARN_ON(!dev_priv->display.compute_pipe_wm)) + if (drm_WARN_ON(&dev_priv->drm, + !dev_priv->display.compute_pipe_wm)) return 0; /* @@ -12571,23 +12750,39 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, */ ret = dev_priv->display.compute_intermediate_wm(crtc_state); if (ret) { - DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); + drm_dbg_kms(&dev_priv->drm, + "No valid intermediate pipe watermarks are possible\n"); return ret; } } if (INTEL_GEN(dev_priv) >= 9) { - if (mode_changed || 
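[ The new hsw_linetime_wm()/skl_linetime_wm() helpers reduce to one formula: line time is htotal divided by the pixel clock, and the x8 scaling suggests the result is stored in 0.125 us units. A worked example with a standard 1080p mode:

	/* crtc_htotal = 2200, crtc_clock = 148500 (kHz), i.e. 1920x1080@60:
	 *
	 *   linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119
	 *
	 * 119 eighths of a microsecond ~= 14.8 us per scanline. The IPS
	 * variant substitutes the logical cdclk for the pixel clock. */
]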
crtc_state->update_pipe) + if (mode_changed || crtc_state->update_pipe) { ret = skl_update_scaler_crtc(crtc_state); - if (!ret) - ret = intel_atomic_setup_scalers(dev_priv, crtc, - crtc_state); + if (ret) + return ret; + } + + ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state); + if (ret) + return ret; } - if (HAS_IPS(dev_priv)) - crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); + if (HAS_IPS(dev_priv)) { + ret = hsw_compute_ips_config(crtc_state); + if (ret) + return ret; + } - return ret; + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + ret = hsw_compute_linetime_wm(state, crtc); + if (ret) + return ret; + + } + + return 0; } static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) @@ -12620,6 +12815,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, struct intel_crtc_state *pipe_config) { struct drm_connector *connector = conn_state->connector; + struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); const struct drm_display_info *info = &connector->display_info; int bpp; @@ -12641,11 +12837,13 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, } if (bpp < pipe_config->pipe_bpp) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " - "EDID bpp %d, requested bpp %d, max platform bpp %d\n", - connector->base.id, connector->name, - bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, - pipe_config->pipe_bpp); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " + "EDID bpp %d, requested bpp %d, max platform bpp %d\n", + connector->base.id, connector->name, + bpp, 3 * info->bpc, + 3 * conn_state->max_requested_bpc, + pipe_config->pipe_bpp); pipe_config->pipe_bpp = bpp; } @@ -12705,10 +12903,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, const char *id, unsigned int lane_count, const struct intel_link_m_n *m_n) { - DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - id, lane_count, - m_n->gmch_m, m_n->gmch_n, - m_n->link_m, m_n->link_n, m_n->tu); + struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); + + drm_dbg_kms(&i915->drm, + "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + id, lane_count, + m_n->gmch_m, m_n->gmch_n, + m_n->link_m, m_n->link_n, m_n->tu); } static void @@ -12784,27 +12985,31 @@ static const char *output_formats(enum intel_output_format format) static void intel_dump_plane_state(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *i915 = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_format_name_buf format_name; if (!fb) { - DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n", - plane->base.base.id, plane->base.name, - yesno(plane_state->uapi.visible)); + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] fb: [NOFB], visible: %s\n", + plane->base.base.id, plane->base.name, + yesno(plane_state->uapi.visible)); return; } - DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n", - plane->base.base.id, plane->base.name, - fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->format->format, &format_name), - yesno(plane_state->uapi.visible)); - DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n", - plane_state->hw.rotation, plane_state->scaler_id); + drm_dbg_kms(&i915->drm, + "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, 
visible: %s\n", + plane->base.base.id, plane->base.name, + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->format->format, &format_name), + yesno(plane_state->uapi.visible)); + drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n", + plane_state->hw.rotation, plane_state->scaler_id); if (plane_state->uapi.visible) - DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", - DRM_RECT_FP_ARG(&plane_state->uapi.src), - DRM_RECT_ARG(&plane_state->uapi.dst)); + drm_dbg_kms(&i915->drm, + "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n", + DRM_RECT_FP_ARG(&plane_state->uapi.src), + DRM_RECT_ARG(&plane_state->uapi.dst)); } static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, @@ -12818,22 +13023,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, char buf[64]; int i; - DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n", - crtc->base.base.id, crtc->base.name, - yesno(pipe_config->hw.enable), context); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n", + crtc->base.base.id, crtc->base.name, + yesno(pipe_config->hw.enable), context); if (!pipe_config->hw.enable) goto dump_planes; snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); - DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n", - yesno(pipe_config->hw.active), - buf, pipe_config->output_types, - output_formats(pipe_config->output_format)); + drm_dbg_kms(&dev_priv->drm, + "active: %s, output_types: %s (0x%x), output format: %s\n", + yesno(pipe_config->hw.active), + buf, pipe_config->output_types, + output_formats(pipe_config->output_format)); - DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", - transcoder_name(pipe_config->cpu_transcoder), - pipe_config->pipe_bpp, pipe_config->dither); + drm_dbg_kms(&dev_priv->drm, + "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", + transcoder_name(pipe_config->cpu_transcoder), + pipe_config->pipe_bpp, pipe_config->dither); if (pipe_config->has_pch_encoder) intel_dump_m_n_config(pipe_config, "fdi", @@ -12849,13 +13056,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, &pipe_config->dp_m2_n2); } - DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", - pipe_config->has_audio, pipe_config->has_infoframe, - pipe_config->infoframes.enable); + drm_dbg_kms(&dev_priv->drm, + "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n", + pipe_config->has_audio, pipe_config->has_infoframe, + pipe_config->infoframes.enable); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) - DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); + drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n", + pipe_config->infoframes.gcp); if (pipe_config->infoframes.enable & intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); @@ -12866,50 +13075,59 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); - DRM_DEBUG_KMS("requested mode:\n"); + drm_dbg_kms(&dev_priv->drm, "requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->hw.mode); - DRM_DEBUG_KMS("adjusted mode:\n"); + drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode); intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode); - DRM_DEBUG_KMS("port clock: %d, pipe src size: 
%dx%d, pixel rate %d\n", - pipe_config->port_clock, - pipe_config->pipe_src_w, pipe_config->pipe_src_h, - pipe_config->pixel_rate); + drm_dbg_kms(&dev_priv->drm, + "port clock: %d, pipe src size: %dx%d, pixel rate %d\n", + pipe_config->port_clock, + pipe_config->pipe_src_w, pipe_config->pipe_src_h, + pipe_config->pixel_rate); + + drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n", + pipe_config->linetime, pipe_config->ips_linetime); if (INTEL_GEN(dev_priv) >= 9) - DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", - crtc->num_scalers, - pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); + drm_dbg_kms(&dev_priv->drm, + "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", + crtc->num_scalers, + pipe_config->scaler_state.scaler_users, + pipe_config->scaler_state.scaler_id); if (HAS_GMCH(dev_priv)) - DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", - pipe_config->gmch_pfit.control, - pipe_config->gmch_pfit.pgm_ratios, - pipe_config->gmch_pfit.lvds_border_bits); + drm_dbg_kms(&dev_priv->drm, + "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", + pipe_config->gmch_pfit.control, + pipe_config->gmch_pfit.pgm_ratios, + pipe_config->gmch_pfit.lvds_border_bits); else - DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", - pipe_config->pch_pfit.pos, - pipe_config->pch_pfit.size, - enableddisabled(pipe_config->pch_pfit.enabled), - yesno(pipe_config->pch_pfit.force_thru)); + drm_dbg_kms(&dev_priv->drm, + "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n", + pipe_config->pch_pfit.pos, + pipe_config->pch_pfit.size, + enableddisabled(pipe_config->pch_pfit.enabled), + yesno(pipe_config->pch_pfit.force_thru)); - DRM_DEBUG_KMS("ips: %i, double wide: %i\n", - pipe_config->ips_enabled, pipe_config->double_wide); + drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n", + pipe_config->ips_enabled, pipe_config->double_wide); intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); if (IS_CHERRYVIEW(dev_priv)) - DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", - pipe_config->cgm_mode, pipe_config->gamma_mode, - pipe_config->gamma_enable, pipe_config->csc_enable); + drm_dbg_kms(&dev_priv->drm, + "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->cgm_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); else - DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", - pipe_config->csc_mode, pipe_config->gamma_mode, - pipe_config->gamma_enable, pipe_config->csc_enable); + drm_dbg_kms(&dev_priv->drm, + "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->csc_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); - DRM_DEBUG_KMS("MST master transcoder: %s\n", - transcoder_name(pipe_config->mst_master_transcoder)); + drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n", + transcoder_name(pipe_config->mst_master_transcoder)); dump_planes: if (!state) @@ -12957,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) encoder = to_intel_encoder(connector_state->best_encoder); - WARN_ON(!connector_state->crtc); + drm_WARN_ON(dev, !connector_state->crtc); switch (encoder->type) { - unsigned int port_mask; case INTEL_OUTPUT_DDI: - if (WARN_ON(!HAS_DDI(to_i915(dev)))) + if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev)))) break; /* else, fall through */ case 
INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: - port_mask = 1 << encoder->port; - /* the same port mustn't appear more than once */ - if (used_ports & port_mask) + if (used_ports & BIT(encoder->port)) ret = false; - used_ports |= port_mask; + used_ports |= BIT(encoder->port); break; case INTEL_OUTPUT_DP_MST: used_mst_ports |= @@ -13055,15 +13270,6 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state) if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) saved_state->wm = crtc_state->wm; - /* - * Save the slave bitmask which gets filled for master crtc state during - * slave atomic check call. For all other CRTCs reset the port sync variables - * crtc_state->master_transcoder needs to be set to INVALID - */ - reset_port_sync_mode_state(saved_state); - if (intel_atomic_is_master_connector(crtc_state)) - saved_state->sync_mode_slaves_mask = - crtc_state->sync_mode_slaves_mask; memcpy(crtc_state, saved_state, sizeof(*crtc_state)); kfree(saved_state); @@ -13078,11 +13284,10 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) { struct drm_crtc *crtc = pipe_config->uapi.crtc; struct drm_atomic_state *state = pipe_config->uapi.state; - struct intel_encoder *encoder; + struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); struct drm_connector *connector; struct drm_connector_state *connector_state; - int base_bpp, ret; - int i, tile_group_id = -1, num_tiled_conns = 0; + int base_bpp, ret, i; bool retry = true; pipe_config->cpu_transcoder = @@ -13121,13 +13326,15 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config) &pipe_config->pipe_src_h); for_each_new_connector_in_state(state, connector, connector_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(connector_state->best_encoder); + if (connector_state->crtc != crtc) continue; - encoder = to_intel_encoder(connector_state->best_encoder); - if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { - DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); + drm_dbg_kms(&i915->drm, + "rejecting invalid cloning configuration\n"); return -EINVAL; } @@ -13152,47 +13359,24 @@ encoder_retry: drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode, CRTC_STEREO_DOUBLE); - /* Get tile_group_id of tiled connector */ - for_each_new_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc == crtc && - connector->has_tile) { - tile_group_id = connector->tile_group->id; - break; - } - } - - /* Get total number of tiled connectors in state that belong to - * this tile group. - */ - for_each_new_connector_in_state(state, connector, connector_state, i) { - if (connector->has_tile && - connector->tile_group->id == tile_group_id) - num_tiled_conns++; - } - /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. 
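[ check_digital_port_conflicts() drops its local port_mask in favour of direct BIT() arithmetic: one bit per DDI port, and seeing the same bit twice means two encoders claim one port. The whole check reduces to the classic duplicate-detection idiom:

	/* Condensed shape of the check; ret starts out true. */
	if (used_ports & BIT(encoder->port))
		ret = false;	/* port already claimed by another encoder */
	used_ports |= BIT(encoder->port);
]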
*/ for_each_new_connector_in_state(state, connector, connector_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(connector_state->best_encoder); + if (connector_state->crtc != crtc) continue; - ret = icl_compute_port_sync_crtc_state(connector, pipe_config, - num_tiled_conns); - if (ret) { - DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", - ret); - return ret; - } - - encoder = to_intel_encoder(connector_state->best_encoder); ret = encoder->compute_config(encoder, pipe_config, connector_state); if (ret < 0) { if (ret != -EDEADLK) - DRM_DEBUG_KMS("Encoder config failure: %d\n", - ret); + drm_dbg_kms(&i915->drm, + "Encoder config failure: %d\n", + ret); return ret; } } @@ -13207,15 +13391,16 @@ encoder_retry: if (ret == -EDEADLK) return ret; if (ret < 0) { - DRM_DEBUG_KMS("CRTC fixup failed\n"); + drm_dbg_kms(&i915->drm, "CRTC fixup failed\n"); return ret; } if (ret == RETRY) { - if (WARN(!retry, "loop in pipe configuration computation\n")) + if (drm_WARN(&i915->drm, !retry, + "loop in pipe configuration computation\n")) return -EINVAL; - DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); + drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n"); retry = false; goto encoder_retry; } @@ -13226,8 +13411,9 @@ encoder_retry: */ pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && !pipe_config->dither_force_disable; - DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", - base_bpp, pipe_config->pipe_bpp, pipe_config->dither); + drm_dbg_kms(&i915->drm, + "hw max bpp: %i, pipe bpp: %i, dithering: %i\n", + base_bpp, pipe_config->pipe_bpp, pipe_config->dither); /* * Make drm_calc_timestamping_constants in @@ -13238,6 +13424,35 @@ encoder_retry: return 0; } +static int +intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state) +{ + struct intel_atomic_state *state = + to_intel_atomic_state(crtc_state->uapi.state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_connector_state *conn_state; + struct drm_connector *connector; + int i; + + for_each_new_connector_in_state(&state->base, connector, + conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + int ret; + + if (conn_state->crtc != &crtc->base || + !encoder->compute_config_late) + continue; + + ret = encoder->compute_config_late(encoder, crtc_state, + conn_state); + if (ret) + return ret; + } + + return 0; +} + bool intel_fuzzy_clock_check(int clock1, int clock2) { int diff; @@ -13316,16 +13531,17 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, if (!drm_debug_enabled(DRM_UT_KMS)) return; - DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); - DRM_DEBUG_KMS("expected:\n"); + drm_dbg_kms(&dev_priv->drm, + "fastset mismatch in %s infoframe\n", name); + drm_dbg_kms(&dev_priv->drm, "expected:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); - DRM_DEBUG_KMS("found:\n"); + drm_dbg_kms(&dev_priv->drm, "found:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); } else { - DRM_ERROR("mismatch in %s infoframe\n", name); - DRM_ERROR("expected:\n"); + drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name); + drm_err(&dev_priv->drm, "expected:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); - DRM_ERROR("found:\n"); + drm_err(&dev_priv->drm, "found:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); } } @@ -13334,6 +13550,7 @@ static void __printf(4, 5) pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, const char *name, const char *format, ...) 
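[ intel_modeset_pipe_config_late() introduces a second encoder pass: ->compute_config_late() runs only after every encoder's ->compute_config() has settled, giving encoders a place to derive cross-pipe state (the port-sync master/slave selection that the removed icl_compute_port_sync_crtc_state() used to do inline is the obvious client). A hypothetical encoder hook, names illustrative:

	/* Hypothetical implementation; the hook is optional and most
	 * encoders leave it NULL. */
	static int foo_compute_config_late(struct intel_encoder *encoder,
					   struct intel_crtc_state *crtc_state,
					   struct drm_connector_state *conn_state)
	{
		/* every pipe's compute_config() result is stable here, so
		 * cross-transcoder decisions (e.g. picking a sync master)
		 * can be made safely */
		return 0;
	}
]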
{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct va_format vaf; va_list args; @@ -13342,11 +13559,12 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, vaf.va = &args; if (fastset) - DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n", - crtc->base.base.id, crtc->base.name, name, &vaf); + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] fastset mismatch in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); else - DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n", - crtc->base.base.id, crtc->base.name, name, &vaf); + drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); va_end(args); } @@ -13382,7 +13600,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, !(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED); if (fixup_inherited && !fastboot_enabled(dev_priv)) { - DRM_DEBUG_KMS("initial modeset and fastboot not set\n"); + drm_dbg_kms(&dev_priv->drm, + "initial modeset and fastboot not set\n"); ret = false; } @@ -13584,7 +13803,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_I(output_format); - PIPE_CONF_CHECK_I(dc3co_exitline); PIPE_CONF_CHECK_BOOL(has_hdmi_sink); if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -13644,10 +13862,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(gamma_enable); PIPE_CONF_CHECK_BOOL(csc_enable); + PIPE_CONF_CHECK_I(linetime); + PIPE_CONF_CHECK_I(ips_linetime); + bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); if (bp_gamma) PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma); - } PIPE_CONF_CHECK_BOOL(double_wide); @@ -13703,7 +13923,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_INFOFRAME(hdmi); PIPE_CONF_CHECK_INFOFRAME(drm); - PIPE_CONF_CHECK_I(sync_mode_slaves_mask); + PIPE_CONF_CHECK_X(sync_mode_slaves_mask); PIPE_CONF_CHECK_I(master_transcoder); PIPE_CONF_CHECK_I(dsc.compression_enable); @@ -13737,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, * FDI already provided one idea for the dotclock. * Yell if the encoder disagrees. 
*/ - WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), - "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", - fdi_dotclock, dotclock); + drm_WARN(&dev_priv->drm, + !intel_fuzzy_clock_check(fdi_dotclock, dotclock), + "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", + fdi_dotclock, dotclock); } } @@ -13750,12 +13971,11 @@ static void verify_wm_state(struct intel_crtc *crtc, struct skl_hw_state { struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; - struct skl_ddb_allocation ddb; struct skl_pipe_wm wm; } *hw; - struct skl_ddb_allocation *sw_ddb; struct skl_pipe_wm *sw_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; + u8 hw_enabled_slices; const enum pipe pipe = crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); @@ -13771,14 +13991,14 @@ static void verify_wm_state(struct intel_crtc *crtc, skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); - skl_ddb_get_hw_state(dev_priv, &hw->ddb); - sw_ddb = &dev_priv->wm.skl_hw.ddb; + hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv); if (INTEL_GEN(dev_priv) >= 11 && - hw->ddb.enabled_slices != sw_ddb->enabled_slices) - DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", - sw_ddb->enabled_slices, - hw->ddb.enabled_slices); + hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask) + drm_err(&dev_priv->drm, + "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n", + dev_priv->enabled_dbuf_slices_mask, + hw_enabled_slices); /* planes */ for_each_universal_plane(dev_priv, pipe, plane) { @@ -13793,26 +14013,28 @@ static void verify_wm_state(struct intel_crtc *crtc, &sw_plane_wm->wm[level])) continue; - DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), plane + 1, level, - sw_plane_wm->wm[level].plane_en, - sw_plane_wm->wm[level].plane_res_b, - sw_plane_wm->wm[level].plane_res_l, - hw_plane_wm->wm[level].plane_en, - hw_plane_wm->wm[level].plane_res_b, - hw_plane_wm->wm[level].plane_res_l); + drm_err(&dev_priv->drm, + "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), plane + 1, level, + sw_plane_wm->wm[level].plane_en, + sw_plane_wm->wm[level].plane_res_b, + sw_plane_wm->wm[level].plane_res_l, + hw_plane_wm->wm[level].plane_en, + hw_plane_wm->wm[level].plane_res_b, + hw_plane_wm->wm[level].plane_res_l); } if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, &sw_plane_wm->trans_wm)) { - DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), plane + 1, - sw_plane_wm->trans_wm.plane_en, - sw_plane_wm->trans_wm.plane_res_b, - sw_plane_wm->trans_wm.plane_res_l, - hw_plane_wm->trans_wm.plane_en, - hw_plane_wm->trans_wm.plane_res_b, - hw_plane_wm->trans_wm.plane_res_l); + drm_err(&dev_priv->drm, + "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), plane + 1, + sw_plane_wm->trans_wm.plane_en, + sw_plane_wm->trans_wm.plane_res_b, + sw_plane_wm->trans_wm.plane_res_l, + hw_plane_wm->trans_wm.plane_en, + hw_plane_wm->trans_wm.plane_res_b, + hw_plane_wm->trans_wm.plane_res_l); } /* DDB */ @@ -13820,10 +14042,11 @@ static void verify_wm_state(struct intel_crtc *crtc, sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), plane + 1, - 
sw_ddb_entry->start, sw_ddb_entry->end, - hw_ddb_entry->start, hw_ddb_entry->end); + drm_err(&dev_priv->drm, + "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", + pipe_name(pipe), plane + 1, + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); } } @@ -13845,26 +14068,28 @@ static void verify_wm_state(struct intel_crtc *crtc, &sw_plane_wm->wm[level])) continue; - DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), level, - sw_plane_wm->wm[level].plane_en, - sw_plane_wm->wm[level].plane_res_b, - sw_plane_wm->wm[level].plane_res_l, - hw_plane_wm->wm[level].plane_en, - hw_plane_wm->wm[level].plane_res_b, - hw_plane_wm->wm[level].plane_res_l); + drm_err(&dev_priv->drm, + "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), level, + sw_plane_wm->wm[level].plane_en, + sw_plane_wm->wm[level].plane_res_b, + sw_plane_wm->wm[level].plane_res_l, + hw_plane_wm->wm[level].plane_en, + hw_plane_wm->wm[level].plane_res_b, + hw_plane_wm->wm[level].plane_res_l); } if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, &sw_plane_wm->trans_wm)) { - DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", - pipe_name(pipe), - sw_plane_wm->trans_wm.plane_en, - sw_plane_wm->trans_wm.plane_res_b, - sw_plane_wm->trans_wm.plane_res_l, - hw_plane_wm->trans_wm.plane_en, - hw_plane_wm->trans_wm.plane_res_b, - hw_plane_wm->trans_wm.plane_res_l); + drm_err(&dev_priv->drm, + "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), + sw_plane_wm->trans_wm.plane_en, + sw_plane_wm->trans_wm.plane_res_b, + sw_plane_wm->trans_wm.plane_res_l, + hw_plane_wm->trans_wm.plane_en, + hw_plane_wm->trans_wm.plane_res_b, + hw_plane_wm->trans_wm.plane_res_l); } /* DDB */ @@ -13872,10 +14097,11 @@ static void verify_wm_state(struct intel_crtc *crtc, sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { - DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), - sw_ddb_entry->start, sw_ddb_entry->end, - hw_ddb_entry->start, hw_ddb_entry->end); + drm_err(&dev_priv->drm, + "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", + pipe_name(pipe), + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); } } @@ -13919,9 +14145,9 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat bool enabled = false, found = false; enum pipe pipe; - DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", - encoder->base.base.id, - encoder->base.name); + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n", + encoder->base.base.id, + encoder->base.name); for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { @@ -13973,7 +14199,8 @@ verify_crtc_state(struct intel_crtc *crtc, intel_crtc_state_reset(old_crtc_state, crtc); old_crtc_state->uapi.state = state; - DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id, + crtc->base.name); active = dev_priv->display.get_pipe_config(crtc, pipe_config); @@ -14048,7 +14275,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); - DRM_DEBUG_KMS("%s\n", pll->info->name); + drm_dbg_kms(&dev_priv->drm, "%s\n", 
pll->info->name); active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state); @@ -14075,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, if (new_crtc_state->hw.active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); + pipe_name(crtc->pipe), pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); + pipe_name(crtc->pipe), pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -14108,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc, I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(&crtc->base))); + pipe_name(crtc->pipe)); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(&crtc->base))); + pipe_name(crtc->pipe)); } } @@ -14135,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv) { int i; - for (i = 0; i < dev_priv->num_shared_dpll; i++) - verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) + verify_single_dpll_state(dev_priv, + &dev_priv->dpll.shared_dplls[i], + NULL, NULL); } static void @@ -14280,35 +14509,35 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } -static int intel_modeset_checks(struct intel_atomic_state *state) +u8 intel_calc_active_pipes(struct intel_atomic_state *state, + u8 active_pipes) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *old_crtc_state, *new_crtc_state; + const struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; - int ret, i; + int i; - /* keep the current setting */ - if (!state->cdclk.force_min_cdclk_changed) - state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + if (crtc_state->hw.active) + active_pipes |= BIT(crtc->pipe); + else + active_pipes &= ~BIT(crtc->pipe); + } - state->modeset = true; - state->active_pipes = dev_priv->active_pipes; - state->cdclk.logical = dev_priv->cdclk.logical; - state->cdclk.actual = dev_priv->cdclk.actual; + return active_pipes; +} - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (new_crtc_state->hw.active) - state->active_pipes |= BIT(crtc->pipe); - else - state->active_pipes &= ~BIT(crtc->pipe); +static int intel_modeset_checks(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + int ret; - if (old_crtc_state->hw.active != new_crtc_state->hw.active) - state->active_pipe_changes |= BIT(crtc->pipe); - } + state->modeset = true; + state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes); + + state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes; if (state->active_pipe_changes) { - ret = intel_atomic_lock_global_state(state); + ret = _intel_atomic_lock_global_state(state); if (ret) return ret; } @@ -14399,7 +14628,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) } static int intel_atomic_check_planes(struct 
intel_atomic_state *state, - bool *need_modeset) + bool *need_cdclk_calc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; @@ -14415,8 +14644,9 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state, for_each_new_intel_plane_in_state(state, plane, plane_state, i) { ret = intel_plane_atomic_check(state, plane); if (ret) { - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", - plane->base.base.id, plane->base.name); + drm_dbg_atomic(&dev_priv->drm, + "[PLANE:%d:%s] atomic driver check failed\n", + plane->base.base.id, plane->base.name); return ret; } } @@ -14453,8 +14683,11 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state, * affected planes are part of the state. We can now * compute the minimum cdclk for each plane. */ - for_each_new_intel_plane_in_state(state, plane, plane_state, i) - *need_modeset |= intel_plane_calc_min_cdclk(state, plane); + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc); + if (ret) + return ret; + } return 0; } @@ -14467,9 +14700,11 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { int ret = intel_crtc_atomic_check(state, crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); if (ret) { - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", - crtc->base.base.id, crtc->base.name); + drm_dbg_atomic(&i915->drm, + "[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.base.id, crtc->base.name); return ret; } } @@ -14494,76 +14729,6 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, return false; } -static int -intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - int ret = 0; - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct drm_connector_state *conn_state; - struct drm_crtc_state *crtc_state; - - if (!connector->has_tile || - connector->tile_group->id != tile_grp_id) - continue; - conn_state = drm_atomic_get_connector_state(&state->base, - connector); - if (IS_ERR(conn_state)) { - ret = PTR_ERR(conn_state); - break; - } - - if (!conn_state->crtc) - continue; - - crtc_state = drm_atomic_get_crtc_state(&state->base, - conn_state->crtc); - if (IS_ERR(crtc_state)) { - ret = PTR_ERR(crtc_state); - break; - } - crtc_state->mode_changed = true; - ret = drm_atomic_add_affected_connectors(&state->base, - conn_state->crtc); - if (ret) - break; - } - drm_connector_list_iter_end(&conn_iter); - - return ret; -} - -static int -intel_atomic_check_tiled_conns(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct drm_connector *connector; - struct drm_connector_state *old_conn_state, *new_conn_state; - int i, ret; - - if (INTEL_GEN(dev_priv) < 11) - return 0; - - /* Is tiled, mark all other tiled CRTCs as needing a modeset */ - for_each_oldnew_connector_in_state(&state->base, connector, - old_conn_state, new_conn_state, i) { - if (!connector->has_tile) - continue; - if (!intel_connector_needs_modeset(state, connector)) - continue; - - ret = intel_modeset_all_tiles(state, connector->tile_group->id); - if (ret) - return ret; - } - - return 0; -} - 
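An editorial aside, not part of the patch: the intel_atomic_check_planes() hunk above moves intel_plane_calc_min_cdclk() from "return a bool and OR it into *need_modeset" to "return an errno and report through a bool out-parameter". A minimal standalone sketch of that calling convention, with hypothetical names:

#include <errno.h>
#include <stdbool.h>

static int plane_min_cdclk(int plane, bool *need_cdclk_calc)
{
	if (plane < 0)
		return -EINVAL;	/* hard failures propagate as a negative errno */

	if (plane == 0)		/* pretend this plane raises the cdclk floor */
		*need_cdclk_calc = true;

	return 0;		/* success; the out-parameter may or may not be set */
}

static int check_planes(int nplanes, bool *need_cdclk_calc)
{
	int i, ret;

	for (i = 0; i < nplanes; i++) {
		ret = plane_min_cdclk(i, need_cdclk_calc);
		if (ret)
			return ret;	/* abort on the first real error */
	}

	return 0;
}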
/** * intel_atomic_check - validate state object * @dev: drm device @@ -14575,6 +14740,7 @@ static int intel_atomic_check(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_cdclk_state *new_cdclk_state; struct intel_crtc *crtc; int ret, i; bool any_ms = false; @@ -14582,8 +14748,8 @@ static int intel_atomic_check(struct drm_device *dev, /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (new_crtc_state->hw.mode.private_flags != - old_crtc_state->hw.mode.private_flags) + if (new_crtc_state->uapi.mode.private_flags != + old_crtc_state->uapi.mode.private_flags) new_crtc_state->uapi.mode_changed = true; } @@ -14591,21 +14757,6 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - /** - * This check adds all the connectors in current state that belong to - * the same tile group to a full modeset. - * This function directly sets the mode_changed to true and we also call - * drm_atomic_add_affected_connectors(). Hence we are not explicitly - * calling drm_atomic_helper_check_modeset() after this. - * - * Fixme: Handle some corner cases where one of the - * tiled connectors gets disconnected and tile info is lost but since it - * was previously synced to other conn, we need to add that to the modeset. - */ - ret = intel_atomic_check_tiled_conns(state); - if (ret) - goto fail; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) { @@ -14615,18 +14766,26 @@ static int intel_atomic_check(struct drm_device *dev, continue; } - if (!new_crtc_state->uapi.enable) { - intel_crtc_copy_uapi_to_hw_state(new_crtc_state); - continue; - } - ret = intel_crtc_prepare_cleared_state(new_crtc_state); if (ret) goto fail; + if (!new_crtc_state->hw.enable) + continue; + ret = intel_modeset_pipe_config(new_crtc_state); if (ret) goto fail; + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!needs_modeset(new_crtc_state)) + continue; + + ret = intel_modeset_pipe_config_late(new_crtc_state); + if (ret) + goto fail; intel_crtc_check_fastset(old_crtc_state, new_crtc_state); } @@ -14656,8 +14815,10 @@ static int intel_atomic_check(struct drm_device *dev, } if (is_trans_port_sync_mode(new_crtc_state)) { - u8 trans = new_crtc_state->sync_mode_slaves_mask | - BIT(new_crtc_state->master_transcoder); + u8 trans = new_crtc_state->sync_mode_slaves_mask; + + if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) + trans |= BIT(new_crtc_state->master_transcoder); if (intel_cpu_transcoders_need_modeset(state, trans)) { new_crtc_state->uapi.mode_changed = true; @@ -14680,7 +14841,8 @@ static int intel_atomic_check(struct drm_device *dev, } if (any_ms && !check_digital_port_conflicts(state)) { - DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); + drm_dbg_kms(&dev_priv->drm, + "rejecting conflicting digital port configuration\n"); ret = -EINVAL; goto fail; } @@ -14689,18 +14851,32 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) goto fail; - any_ms |= state->cdclk.force_min_cdclk_changed; - ret = intel_atomic_check_planes(state, &any_ms); if (ret) goto fail; + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed) + any_ms = true;
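An editorial aside, not part of the patch: one hunk above replaces the unconditional BIT(master_transcoder) with a guarded OR. A sketch of why such a guard matters, assuming INVALID_TRANSCODER is -1 as in this driver: BIT(n) expands to a shift, and a shift by a negative count is undefined behaviour in C, so BIT(INVALID_TRANSCODER) must never be evaluated.

#define BIT(n) (1UL << (n))

enum transcoder { INVALID_TRANSCODER = -1, TRANSCODER_A, TRANSCODER_B };

static unsigned long sync_transcoder_mask(enum transcoder master,
					  unsigned long slaves_mask)
{
	unsigned long mask = slaves_mask;

	if (master != INVALID_TRANSCODER)	/* guard against a shift by -1 */
		mask |= BIT(master);

	return mask;
}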
+ + /* + * distrust_bios_wm will force a full dbuf recomputation + * but the hardware state will only get updated accordingly + * if state->modeset==true. Hence distrust_bios_wm==true && + * state->modeset==false is an invalid combination which + * would cause the hardware and software dbuf state to get + * out of sync. We must prevent that. + * + * FIXME clean up this mess and introduce better + * state tracking for dbuf. + */ + if (dev_priv->wm.distrust_bios_wm) + any_ms = true; + if (any_ms) { ret = intel_modeset_checks(state); if (ret) goto fail; - } else { - state->cdclk.logical = dev_priv->cdclk.logical; } ret = intel_atomic_check_crtcs(state); @@ -14806,6 +14982,18 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, ilk_pfit_disable(old_crtc_state); } + /* + * The register is supposedly single buffered so perhaps + * not 100% correct to do this here. But SKL+ calculate + * this based on the adjusted pixel rate so pfit changes do + * affect it and so it must be updated for fastsets. + * HSW/BDW only really need this here for fastboot, after + * that the value should not change without a full modeset. + */ + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + hsw_set_linetime_wm(new_crtc_state); + if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(crtc); } @@ -14848,9 +15036,6 @@ static void intel_update_crtc(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); bool modeset = needs_modeset(new_crtc_state); - struct intel_plane_state *new_plane_state = - intel_atomic_get_new_plane_state(state, - to_intel_plane(crtc->base.primary)); if (modeset) { intel_crtc_update_active_timings(new_crtc_state); @@ -14873,8 +15058,8 @@ static void intel_update_crtc(struct intel_crtc *crtc, if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) intel_fbc_disable(crtc); - else if (new_plane_state) - intel_fbc_enable(crtc, new_crtc_state, new_plane_state); + else + intel_fbc_enable(state, crtc);
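An editorial aside, not part of the patch: skl_commit_modeset_enables(), rewritten in the hunks that follow, commits a pipe only once its new ddb span no longer overlaps any other pipe's still-programmed span, repeating passes until every pipe is done. A self-contained sketch of that ordering, with hypothetical names:

#include <stdbool.h>

struct span { int start, end; };	/* stand-in for skl_ddb_entry */

static bool overlap(struct span a, struct span b)
{
	return a.start < b.end && b.start < a.end;
}

static bool conflicts(const struct span *cur, int n, int me, struct span next)
{
	int i;

	for (i = 0; i < n; i++)
		if (i != me && overlap(next, cur[i]))
			return true;

	return false;
}

static void commit_pipes(struct span *cur, const struct span *next,
			 int n, unsigned int todo)
{
	while (todo) {
		unsigned int before = todo;
		int i;

		/* each pass commits every pipe whose new span is already safe */
		for (i = 0; i < n; i++) {
			if (!(todo & (1u << i)))
				continue;
			if (conflicts(cur, n, i, next[i]))
				continue;	/* blocked; try again next pass */
			cur[i] = next[i];	/* the real code updates the crtc here */
			todo &= ~(1u << i);
		}

		if (before == todo)
			break;	/* no progress: the allocation itself is inconsistent */
	}
}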
/* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); @@ -14904,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev); enum transcoder slave_transcoder; - WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); + drm_WARN_ON(&dev_priv->drm, + !is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; return intel_get_crtc_for_pipe(dev_priv, @@ -15021,7 +15207,7 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, if (conn_state->crtc == &crtc->base) break; } - intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn))); + intel_dp = intel_attached_dp(to_intel_connector(conn)); intel_dp_stop_link_train(intel_dp); } @@ -15036,15 +15222,12 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, intel_atomic_get_new_crtc_state(state, crtc); struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); - struct intel_plane_state *new_plane_state = - intel_atomic_get_new_plane_state(state, - to_intel_plane(crtc->base.primary)); bool modeset = needs_modeset(new_crtc_state); if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) intel_fbc_disable(crtc); - else if (new_plane_state) - intel_fbc_enable(crtc, new_crtc_state, new_plane_state); + else + intel_fbc_enable(state, crtc); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); @@ -15068,18 +15251,20 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); struct intel_crtc_state *new_slave_crtc_state = intel_atomic_get_new_crtc_state(state, slave_crtc); struct intel_crtc_state *old_slave_crtc_state = intel_atomic_get_old_crtc_state(state, slave_crtc); - WARN_ON(!slave_crtc || !new_slave_crtc_state || - !old_slave_crtc_state); + drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state || + !old_slave_crtc_state); - DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", - crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id, - slave_crtc->base.name); + drm_dbg_kms(&i915->drm, + "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", + crtc->base.base.id, crtc->base.name, + slave_crtc->base.base.id, slave_crtc->base.name); /* Enable seq for slave with DP_TP_CTL left Idle until the * master is ready @@ -15109,35 +15294,53 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, state); } +static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; + u8 required_slices = state->enabled_dbuf_slices_mask; + u8 slices_union = hw_enabled_slices | required_slices; + + /* If a 2nd DBuf slice is required, enable it here */ + if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices) + icl_dbuf_slices_update(dev_priv, slices_union); +} + +static void icl_dbuf_slice_post_update(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask; + u8 required_slices = state->enabled_dbuf_slices_mask; + + /* If the 2nd DBuf slice is no longer required, disable it */ + if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices) + icl_dbuf_slices_update(dev_priv, required_slices); +} + static void skl_commit_modeset_enables(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; - u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - const u8 num_pipes = INTEL_NUM_PIPES(dev_priv); u8 update_pipes = 0, modeset_pipes = 0; int i; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; + if (!new_crtc_state->hw.active) continue; /* ignore allocations for crtcs that have been turned off. */
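An editorial aside, not part of the patch: the two helpers just above implement a grow-then-trim ordering. Before the pipes are reprogrammed the driver enables the union of the old and new slice masks, so no slice still in use is powered down under an active pipe; afterwards it trims back to exactly the new mask. A sketch under those assumptions, with hypothetical names:

typedef unsigned char u8;

static u8 hw_slices;	/* stand-in for the currently powered slice mask */

static void slices_update(u8 mask)	/* stand-in for icl_dbuf_slices_update() */
{
	hw_slices = mask;
}

static void commit_with_slices(u8 new_slices, void (*reprogram_pipes)(void))
{
	slices_update(hw_slices | new_slices);	/* pre: enable the union */
	reprogram_pipes();			/* pipes move to their new slices */
	slices_update(new_slices);		/* post: drop what is now unused */
}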
if (!needs_modeset(new_crtc_state)) { - entries[i] = old_crtc_state->wm.skl.ddb; - update_pipes |= BIT(crtc->pipe); + entries[pipe] = old_crtc_state->wm.skl.ddb; + update_pipes |= BIT(pipe); } else { - modeset_pipes |= BIT(crtc->pipe); + modeset_pipes |= BIT(pipe); } } - /* If 2nd DBuf slice required, enable it here */ - if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) - icl_dbuf_slices_update(dev_priv, required_slices); - /* * Whenever the number of active pipes changes, we need to make sure we * update the pipes in the right order so that their ddb allocations @@ -15156,10 +15359,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, i)) + entries, I915_MAX_PIPES, pipe)) continue; - entries[i] = new_crtc_state->wm.skl.ddb; + entries[pipe] = new_crtc_state->wm.skl.ddb; update_pipes &= ~BIT(pipe); intel_update_crtc(crtc, state, old_crtc_state, @@ -15193,10 +15396,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) is_trans_port_sync_slave(new_crtc_state)) continue; - WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, i)); + drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, + entries, I915_MAX_PIPES, pipe)); - entries[i] = new_crtc_state->wm.skl.ddb; + entries[pipe] = new_crtc_state->wm.skl.ddb; modeset_pipes &= ~BIT(pipe); if (is_trans_port_sync_mode(new_crtc_state)) { @@ -15228,20 +15431,17 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if ((modeset_pipes & BIT(pipe)) == 0) continue; - WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, - entries, num_pipes, i)); + drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, + entries, I915_MAX_PIPES, pipe)); - entries[i] = new_crtc_state->wm.skl.ddb; + entries[pipe] = new_crtc_state->wm.skl.ddb; modeset_pipes &= ~BIT(pipe); intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state); } - WARN_ON(modeset_pipes); + drm_WARN_ON(&dev_priv->drm, modeset_pipes); - /* If 2nd DBuf slice is no more required disable it */ - if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) - icl_dbuf_slices_update(dev_priv, required_slices); } static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) @@ -15338,10 +15538,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (state->modeset) { drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); - intel_set_cdclk_pre_plane_update(dev_priv, - &state->cdclk.actual, - &dev_priv->cdclk.actual, - state->cdclk.pipe); + intel_set_cdclk_pre_plane_update(state); /* * SKL workaround: bspec recommends we disable the SAGV when we @@ -15371,16 +15568,17 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (state->modeset) intel_encoders_update_prepare(state); + /* Enable any new DBuf slices we might need */ + if (state->modeset) + icl_dbuf_slice_pre_update(state); + /* Now enable the clocks, plane, pipe, and connectors that we set up.
*/ dev_priv->display.commit_modeset_enables(state); if (state->modeset) { intel_encoders_update_complete(state); - intel_set_cdclk_post_plane_update(dev_priv, - &state->cdclk.actual, - &dev_priv->cdclk.actual, - state->cdclk.pipe); + intel_set_cdclk_post_plane_update(state); } /* FIXME: We should call drm_atomic_helper_commit_hw_done() here @@ -15427,6 +15625,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) dev_priv->display.optimize_watermarks(state, crtc); } + /* Disable all slices, we don't need */ + if (state->modeset) + icl_dbuf_slice_post_update(state); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); @@ -15570,7 +15772,8 @@ static int intel_atomic_commit(struct drm_device *dev, ret = intel_atomic_prepare_commit(state); if (ret) { - DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); + drm_dbg_atomic(&dev_priv->drm, + "Preparing state failed with %i\n", ret); i915_sw_fence_commit(&state->commit_ready); intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; @@ -15579,6 +15782,8 @@ static int intel_atomic_commit(struct drm_device *dev, ret = drm_atomic_helper_setup_commit(&state->base, nonblock); if (!ret) ret = drm_atomic_helper_swap_state(&state->base, true); + if (!ret) + intel_atomic_swap_global_state(state); if (ret) { i915_sw_fence_commit(&state->commit_ready); @@ -15594,14 +15799,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (state->global_state_changed) { assert_global_state_locked(dev_priv); - memcpy(dev_priv->min_cdclk, state->min_cdclk, - sizeof(state->min_cdclk)); - memcpy(dev_priv->min_voltage_level, state->min_voltage_level, - sizeof(state->min_voltage_level)); dev_priv->active_pipes = state->active_pipes; - dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; - - intel_cdclk_swap_state(state); } drm_atomic_state_get(&state->base); @@ -15729,7 +15927,7 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) /** * intel_prepare_plane_fb - Prepare fb for usage on plane - * @plane: drm plane to prepare for + * @_plane: drm plane to prepare for * @_new_plane_state: the plane state being prepared * * Prepares a framebuffer for usage on a display plane. Generally this @@ -15740,23 +15938,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) * Returns 0 on success, negative error code on failure. 
*/ int -intel_prepare_plane_fb(struct drm_plane *plane, +intel_prepare_plane_fb(struct drm_plane *_plane, struct drm_plane_state *_new_plane_state) { + struct intel_plane *plane = to_intel_plane(_plane); struct intel_plane_state *new_plane_state = to_intel_plane_state(_new_plane_state); - struct intel_atomic_state *intel_state = + struct intel_atomic_state *state = to_intel_atomic_state(new_plane_state->uapi.state); - struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_framebuffer *fb = new_plane_state->hw.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); + struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); int ret; if (old_obj) { - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(intel_state, - to_intel_crtc(plane->state->crtc)); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, + to_intel_crtc(old_plane_state->hw.crtc)); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -15770,7 +15970,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, * can safely continue. */ if (needs_modeset(crtc_state)) { - ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, + ret = i915_sw_fence_await_reservation(&state->commit_ready, old_obj->base.resv, NULL, false, 0, GFP_KERNEL); @@ -15780,7 +15980,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, } if (new_plane_state->uapi.fence) { /* explicit fencing */ - ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, + ret = i915_sw_fence_await_dma_fence(&state->commit_ready, new_plane_state->uapi.fence, I915_FENCE_TIMEOUT, GFP_KERNEL); @@ -15807,12 +16007,12 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (!new_plane_state->uapi.fence) { /* implicit fencing */ struct dma_fence *fence; - ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, + ret = i915_sw_fence_await_reservation(&state->commit_ready, obj->base.resv, NULL, false, I915_FENCE_TIMEOUT, GFP_KERNEL); if (ret < 0) - return ret; + goto unpin_fb; fence = dma_resv_get_excl_rcu(obj->base.resv); if (fence) { @@ -15833,12 +16033,17 @@ intel_prepare_plane_fb(struct drm_plane *plane, * that are not quite steady state without resorting to forcing * maximum clocks following a vblank miss (see do_rps_boost()). 
*/ - if (!intel_state->rps_interactive) { + if (!state->rps_interactive) { intel_rps_mark_interactive(&dev_priv->gt.rps, true); - intel_state->rps_interactive = true; + state->rps_interactive = true; } return 0; + +unpin_fb: + intel_plane_unpin_fb(new_plane_state); + + return ret; } /** @@ -15854,13 +16059,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane, { struct intel_plane_state *old_plane_state = to_intel_plane_state(_old_plane_state); - struct intel_atomic_state *intel_state = + struct intel_atomic_state *state = to_intel_atomic_state(old_plane_state->uapi.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); + struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb); + + if (!obj) + return; - if (intel_state->rps_interactive) { + if (state->rps_interactive) { intel_rps_mark_interactive(&dev_priv->gt.rps, false); - intel_state->rps_interactive = false; + state->rps_interactive = false; } /* Should only be called after a successful intel_prepare_plane_fb()! */ @@ -16029,6 +16238,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane, new_plane_state->uapi.crtc_w = crtc_w; new_plane_state->uapi.crtc_h = crtc_h; + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state); + ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, old_plane_state, new_plane_state); if (ret) @@ -16113,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; - unsigned int possible_crtcs; const u32 *formats; int num_formats; int ret, zpos; @@ -16194,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; - possible_crtcs = BIT(pipe); - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, i9xx_format_modifiers, DRM_PLANE_TYPE_PRIMARY, @@ -16247,7 +16455,6 @@ static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { - unsigned int possible_crtcs; struct intel_plane *cursor; int ret, zpos; @@ -16280,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; - possible_crtcs = BIT(pipe); - ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, - possible_crtcs, &intel_cursor_plane_funcs, + 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), cursor_format_modifiers, @@ -16328,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = { .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = bdw_enable_vblank, .disable_vblank = bdw_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs ilk_crtc_funcs = { @@ -16336,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = { .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = ilk_enable_vblank, .disable_vblank = ilk_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs 
g4x_crtc_funcs = { @@ -16344,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = { .get_vblank_counter = g4x_get_vblank_counter, .enable_vblank = i965_enable_vblank, .disable_vblank = i965_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i965_crtc_funcs = { @@ -16352,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = { .get_vblank_counter = i915_get_vblank_counter, .enable_vblank = i965_enable_vblank, .disable_vblank = i965_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i915gm_crtc_funcs = { @@ -16360,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = { .get_vblank_counter = i915_get_vblank_counter, .enable_vblank = i915gm_enable_vblank, .disable_vblank = i915gm_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i915_crtc_funcs = { @@ -16368,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = { .get_vblank_counter = i915_get_vblank_counter, .enable_vblank = i8xx_enable_vblank, .disable_vblank = i8xx_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static const struct drm_crtc_funcs i8xx_crtc_funcs = { @@ -16376,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = { /* no hw vblank counter */ .enable_vblank = i8xx_enable_vblank, .disable_vblank = i8xx_disable_vblank, + .get_vblank_timestamp = intel_crtc_get_vblank_timestamp, }; static struct intel_crtc *intel_crtc_alloc(void) @@ -16405,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc) kfree(crtc); } +static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, + plane->pipe); + + plane->base.possible_crtcs = drm_crtc_mask(&crtc->base); + } +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *primary, *cursor; @@ -16483,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) intel_color_init(crtc); - WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe); + intel_crtc_crc_init(crtc); + + drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); return 0; @@ -16543,10 +16769,10 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) if (!IS_MOBILE(dev_priv)) return false; - if ((I915_READ(DP_A) & DP_DETECTED) == 0) + if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0) return false; - if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) + if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE)) return false; return true; @@ -16561,11 +16787,11 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv) return false; if (HAS_PCH_LPT_H(dev_priv) && - I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) + intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) return false; /* DDI E can't be used if DDI A requires 4 lanes */ - if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; if (!dev_priv->vbt.int_crt_support) @@ -16591,10 +16817,10 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) pps_num = 1; for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { - u32 val = I915_READ(PP_CONTROL(pps_idx)); + u32 val = 
intel_de_read(dev_priv, PP_CONTROL(pps_idx)); val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; - I915_WRITE(PP_CONTROL(pps_idx), val); + intel_de_write(dev_priv, PP_CONTROL(pps_idx), val); } } @@ -16674,14 +16900,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * On SKL pre-D0 the strap isn't connected, so we assume * it's there. */ - found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; + found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; /* WaIgnoreDDIAStrap: skl */ if (found || IS_GEN9_BC(dev_priv)) intel_ddi_init(dev_priv, PORT_A); /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP * register */ - found = I915_READ(SFUSE_STRAP); + found = intel_de_read(dev_priv, SFUSE_STRAP); if (found & SFUSE_STRAP_DDIB_DETECTED) intel_ddi_init(dev_priv, PORT_B); @@ -16714,25 +16940,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (ilk_has_edp_a(dev_priv)) intel_dp_init(dev_priv, DP_A, PORT_A); - if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { + if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); if (!found) intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); - if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) + if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED)) intel_dp_init(dev_priv, PCH_DP_B, PORT_B); } - if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) + if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED) intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); - if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) + if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED) intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); - if (I915_READ(PCH_DP_C) & DP_DETECTED) + if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED) intel_dp_init(dev_priv, PCH_DP_C, PORT_C); - if (I915_READ(PCH_DP_D) & DP_DETECTED) + if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED) intel_dp_init(dev_priv, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; @@ -16757,16 +16983,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) */ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) + if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); - if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) + if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) + if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); - if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) + if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); if (IS_CHERRYVIEW(dev_priv)) { @@ -16775,9 +17001,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * so no need to worry about it */ has_port = intel_bios_is_port_present(dev_priv, PORT_D); - if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) + if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port) intel_dp_init(dev_priv, CHV_DP_D, 
PORT_D); - if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) + if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port) intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); } @@ -16793,11 +17019,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_crt_init(dev_priv); - if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { - DRM_DEBUG_KMS("probing SDVOB\n"); + if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { + drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); if (!found && IS_G4X(dev_priv)) { - DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); + drm_dbg_kms(&dev_priv->drm, + "probing HDMI on SDVOB\n"); intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); } @@ -16807,22 +17034,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) /* Before G4X SDVOC doesn't have its own detect register */ - if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { - DRM_DEBUG_KMS("probing SDVOC\n"); + if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) { + drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n"); found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); } - if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { + if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) { if (IS_G4X(dev_priv)) { - DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); + drm_dbg_kms(&dev_priv->drm, + "probing HDMI on SDVOC\n"); intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); } if (IS_G4X(dev_priv)) intel_dp_init(dev_priv, DP_C, PORT_C); } - if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) + if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED)) intel_dp_init(dev_priv, DP_D, PORT_D); if (SUPPORTS_TV(dev_priv)) @@ -16864,9 +17092,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, unsigned int *handle) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct drm_i915_private *i915 = to_i915(obj->base.dev); if (obj->userptr.mm) { - DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); + drm_dbg(&i915->drm, + "attempting to use a userptr for a framebuffer, denied\n"); return -EINVAL; } @@ -16920,14 +17150,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, */ if (tiling != I915_TILING_NONE && tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { - DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); + drm_dbg_kms(&dev_priv->drm, + "tiling_mode doesn't match fb modifier\n"); goto err; } } else { if (tiling == I915_TILING_X) { mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; } else if (tiling == I915_TILING_Y) { - DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); + drm_dbg_kms(&dev_priv->drm, + "No Y tiling for legacy addfb\n"); goto err; } } @@ -16937,10 +17169,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, mode_cmd->modifier[0])) { struct drm_format_name_buf format_name; - DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name), - mode_cmd->modifier[0]); + drm_dbg_kms(&dev_priv->drm, + "unsupported pixel format %s / modifier 0x%llx\n", + drm_get_format_name(mode_cmd->pixel_format, + &format_name), + mode_cmd->modifier[0]); goto err; } @@ -16950,17 +17183,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, */ if (INTEL_GEN(dev_priv) < 4 && tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { - DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); + drm_dbg_kms(&dev_priv->drm, + 
"tiling_mode must match fb modifier exactly on gen2/3\n"); goto err; } max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format, mode_cmd->modifier[0]); if (mode_cmd->pitches[0] > max_stride) { - DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", - mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? - "tiled" : "linear", - mode_cmd->pitches[0], max_stride); + drm_dbg_kms(&dev_priv->drm, + "%s pitch (%u) must be at most %d\n", + mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? + "tiled" : "linear", + mode_cmd->pitches[0], max_stride); goto err; } @@ -16969,15 +17204,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, * the fb pitch and fence stride match. */ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { - DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", - mode_cmd->pitches[0], stride); + drm_dbg_kms(&dev_priv->drm, + "pitch (%d) must match tiling stride (%d)\n", + mode_cmd->pitches[0], stride); goto err; } /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ if (mode_cmd->offsets[0] != 0) { - DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n", - mode_cmd->offsets[0]); + drm_dbg_kms(&dev_priv->drm, + "plane 0 offset (0x%08x) must be 0\n", + mode_cmd->offsets[0]); goto err; } @@ -16987,14 +17224,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, u32 stride_alignment; if (mode_cmd->handles[i] != mode_cmd->handles[0]) { - DRM_DEBUG_KMS("bad plane %d handle\n", i); + drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n", + i); goto err; } stride_alignment = intel_fb_stride_alignment(fb, i); if (fb->pitches[i] & (stride_alignment - 1)) { - DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", - i, fb->pitches[i], stride_alignment); + drm_dbg_kms(&dev_priv->drm, + "plane %d pitch (%d) must be at least %u byte aligned\n", + i, fb->pitches[i], stride_alignment); goto err; } @@ -17002,9 +17241,10 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, int ccs_aux_stride = gen12_ccs_aux_stride(fb, i); if (fb->pitches[i] != ccs_aux_stride) { - DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n", - i, - fb->pitches[i], ccs_aux_stride); + drm_dbg_kms(&dev_priv->drm, + "ccs aux plane %d pitch (%d) must be %d\n", + i, + fb->pitches[i], ccs_aux_stride); goto err; } } @@ -17018,7 +17258,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); if (ret) { - DRM_ERROR("framebuffer init failed %d\n", ret); + drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); goto err; } @@ -17048,17 +17288,6 @@ intel_user_framebuffer_create(struct drm_device *dev, return fb; } -static void intel_atomic_state_free(struct drm_atomic_state *state) -{ - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - - drm_atomic_state_default_release(state); - - i915_sw_fence_fini(&intel_state->commit_ready); - - kfree(state); -} - static enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) @@ -17290,9 +17519,36 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) void intel_modeset_init_hw(struct drm_i915_private *i915) { + struct intel_cdclk_state *cdclk_state = + to_intel_cdclk_state(i915->cdclk.obj.state); + intel_update_cdclk(i915); - intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); - i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; + intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK"); + cdclk_state->logical = 
cdclk_state->actual = i915->cdclk.hw; +} + +static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_crtc *crtc; + + drm_for_each_crtc(crtc, state->dev) { + struct drm_crtc_state *crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + drm_for_each_plane(plane, state->dev) { + struct drm_plane_state *plane_state; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + } + + return 0; } /* @@ -17305,9 +17561,8 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) * through the atomic check code to calculate new watermark values in the * state object. */ -static void sanitize_watermarks(struct drm_device *dev) +static void sanitize_watermarks(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; struct intel_atomic_state *intel_state; struct intel_crtc *crtc; @@ -17320,26 +17575,17 @@ static void sanitize_watermarks(struct drm_device *dev) if (!dev_priv->display.optimize_watermarks) return; - /* - * We need to hold connection_mutex before calling duplicate_state so - * that the connector loop is protected. - */ - drm_modeset_acquire_init(&ctx, 0); -retry: - ret = drm_modeset_lock_all_ctx(dev, &ctx); - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - goto retry; - } else if (WARN_ON(ret)) { - goto fail; - } - - state = drm_atomic_helper_duplicate_state(dev, &ctx); - if (WARN_ON(IS_ERR(state))) - goto fail; + state = drm_atomic_state_alloc(&dev_priv->drm); + if (drm_WARN_ON(&dev_priv->drm, !state)) + return; intel_state = to_intel_atomic_state(state); + drm_modeset_acquire_init(&ctx, 0); + +retry: + state->acquire_ctx = &ctx; + /* * Hardware readout is the only time we don't want to calculate * intermediate watermarks (since we don't trust the current @@ -17348,22 +17594,13 @@ retry: if (!HAS_GMCH(dev_priv)) intel_state->skip_intermediate_wm = true; - ret = intel_atomic_check(dev, state); - if (ret) { - /* - * If we fail here, it means that the hardware appears to be - * programmed in a way that shouldn't be possible, given our - * understanding of watermark requirements. This might mean a - * mistake in the hardware readout code or a mistake in the - * watermark calculations for a given platform. Raise a WARN - * so that this is noticeable. - * - * If this actually happens, we'll have to just leave the - * BIOS-programmed watermarks untouched and hope for the best. - */ - WARN(true, "Could not determine valid watermarks for inherited state\n"); - goto put_state; - } + ret = sanitize_watermarks_add_affected(state); + if (ret) + goto fail; + + ret = intel_atomic_check(&dev_priv->drm, state); + if (ret) + goto fail; /* Write calculated watermark values back */ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { @@ -17373,9 +17610,29 @@ retry: to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } -put_state: - drm_atomic_state_put(state); fail: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + /* + * If we fail here, it means that the hardware appears to be + * programmed in a way that shouldn't be possible, given our + * understanding of watermark requirements. This might mean a + * mistake in the hardware readout code or a mistake in the + * watermark calculations for a given platform. Raise a WARN + * so that this is noticeable. 
+ * + * If this actually happens, we'll have to just leave the + * BIOS-programmed watermarks untouched and hope for the best. + */ + drm_WARN(&dev_priv->drm, ret, + "Could not determine valid watermarks for inherited state\n"); + + drm_atomic_state_put(state); + drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } @@ -17384,7 +17641,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) { if (IS_GEN(dev_priv, 5)) { u32 fdi_pll_clk = - I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; + intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK; dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000; } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) { @@ -17393,7 +17650,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) return; } - DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); + drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); } static int intel_initial_commit(struct drm_device *dev) @@ -17476,6 +17733,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915) struct drm_mode_config *mode_config = &i915->drm.mode_config; drm_mode_config_init(&i915->drm); + INIT_LIST_HEAD(&i915->global_obj_list); mode_config->min_width = 0; mode_config->min_height = 0; @@ -17517,11 +17775,31 @@ static void intel_mode_config_init(struct drm_i915_private *i915) } } -int intel_modeset_init(struct drm_i915_private *i915) +static void intel_mode_config_cleanup(struct drm_i915_private *i915) +{ + intel_atomic_global_obj_cleanup(i915); + drm_mode_config_cleanup(&i915->drm); +} + +static void plane_config_fini(struct intel_initial_plane_config *plane_config) +{ + if (plane_config->fb) { + struct drm_framebuffer *fb = &plane_config->fb->base; + + /* We may only have the stub and not a full framebuffer */ + if (drm_framebuffer_read_refcount(fb)) + drm_framebuffer_put(fb); + else + kfree(fb); + } + + if (plane_config->vma) + i915_vma_put(plane_config->vma); +} + +/* part #1: call before irq install */ +int intel_modeset_init_noirq(struct drm_i915_private *i915) { - struct drm_device *dev = &i915->drm; - enum pipe pipe; - struct intel_crtc *crtc; int ret; i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -17530,6 +17808,10 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_mode_config_init(i915); + ret = intel_cdclk_init(i915); + if (ret) + return ret; + ret = intel_bw_init(i915); if (ret) return ret; @@ -17542,26 +17824,38 @@ int intel_modeset_init(struct drm_i915_private *i915) intel_fbc_init(i915); + return 0; +} + +/* part #2: call after irq install */ +int intel_modeset_init(struct drm_i915_private *i915) +{ + struct drm_device *dev = &i915->drm; + enum pipe pipe; + struct intel_crtc *crtc; + int ret; + intel_init_pm(i915); intel_panel_sanitize_ssc(i915); intel_gmbus_setup(i915); - DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_NUM_PIPES(i915), - INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); + drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n", + INTEL_NUM_PIPES(i915), + INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { for_each_pipe(i915, pipe) { ret = intel_crtc_init(i915, pipe); if (ret) { - drm_mode_config_cleanup(dev); + intel_mode_config_cleanup(i915); return ret; } } } + intel_plane_possible_crtcs_init(i915); intel_shared_dpll_init(dev); intel_update_fdi_pll_freq(i915); @@ -17601,6 +17895,8 @@ int intel_modeset_init(struct drm_i915_private *i915) * just get the first one. 
*/ intel_find_initial_plane_obj(crtc, &plane_config); + + plane_config_fini(&plane_config); } /* @@ -17609,7 +17905,7 @@ int intel_modeset_init(struct drm_i915_private *i915) * since the watermark calculation done here will use pstate->fb. */ if (!HAS_GMCH(i915)) - sanitize_watermarks(dev); + sanitize_watermarks(i915); /* * Force all active planes to recompute their states. So that on @@ -17619,7 +17915,7 @@ int intel_modeset_init(struct drm_i915_private *i915) */ ret = intel_initial_commit(dev); if (ret) - DRM_DEBUG_KMS("Initial commit in probe failed.\n"); + drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n"); return 0; } @@ -17638,10 +17934,12 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) u32 dpll, fp; int i; - WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); + drm_WARN_ON(&dev_priv->drm, + i9xx_calc_dpll_params(48000, &clock) != 25154); - DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", - pipe_name(pipe), clock.vco, clock.dot); + drm_dbg_kms(&dev_priv->drm, + "enabling pipe %c due to force quirk (vco=%d dot=%d)\n", + pipe_name(pipe), clock.vco, clock.dot); fp = i9xx_dpll_compute_fp(&clock); dpll = DPLL_DVO_2X_MODE | @@ -17651,27 +17949,27 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - I915_WRITE(FP0(pipe), fp); - I915_WRITE(FP1(pipe), fp); + intel_de_write(dev_priv, FP0(pipe), fp); + intel_de_write(dev_priv, FP1(pipe), fp); - I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); - I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); - I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); - I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); - I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); - I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); - I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); + intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); + intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); + intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); + intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); + intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); + intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); + intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); - I915_WRITE(DPLL(pipe), dpll); + intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); + intel_de_write(dev_priv, DPLL(pipe), dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); + intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* The pixel multiplier can only be updated once the @@ -17679,17 +17977,18 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) * * So write it again. 
*/ - I915_WRITE(DPLL(pipe), dpll); + intel_de_write(dev_priv, DPLL(pipe), dpll); /* We do this three times for luck */ for (i = 0; i < 3 ; i++) { - I915_WRITE(DPLL(pipe), dpll); - POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), dpll); + intel_de_posting_read(dev_priv, DPLL(pipe)); udelay(150); /* wait for warmup */ } - I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); - POSTING_READ(PIPECONF(pipe)); + intel_de_write(dev_priv, PIPECONF(pipe), + PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); + intel_de_posting_read(dev_priv, PIPECONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); } @@ -17698,22 +17997,30 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", - pipe_name(pipe)); - - WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); - WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); - WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); - WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); - WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); - - I915_WRITE(PIPECONF(pipe), 0); - POSTING_READ(PIPECONF(pipe)); + drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n", + pipe_name(pipe)); + + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & + DISPLAY_PLANE_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & + DISPLAY_PLANE_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & + DISPLAY_PLANE_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE); + + intel_de_write(dev_priv, PIPECONF(pipe), 0); + intel_de_posting_read(dev_priv, PIPECONF(pipe)); intel_wait_for_pipe_scanline_stopped(crtc); - I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); - POSTING_READ(DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS); + intel_de_posting_read(dev_priv, DPLL(pipe)); } static void @@ -17736,8 +18043,9 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) if (pipe == crtc->pipe) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", - plane->base.base.id, plane->base.name); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", + plane->base.base.id, plane->base.name); plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); intel_plane_disable_noatomic(plane_crtc, plane); @@ -17787,18 +18095,18 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc if (transcoder_is_dsi(cpu_transcoder)) return; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~HSW_FRAME_START_DELAY_MASK; val |= HSW_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } else { i915_reg_t reg = PIPECONF(cpu_transcoder); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~PIPECONF_FRAME_START_DELAY_MASK; val |= PIPECONF_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } if (!crtc_state->has_pch_encoder) @@ -17808,19 +18116,19 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc i915_reg_t reg = PCH_TRANSCONF(crtc->pipe); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~TRANS_FRAME_START_DELAY_MASK; val |= 
TRANS_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } else { enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc); i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; val |= TRANS_CHICKEN2_FRAME_START_DELAY(0); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } } @@ -17852,9 +18160,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, * gamma and CSC to match how we program our planes. */ if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe), - SKL_BOTTOM_COLOR_GAMMA_ENABLE | - SKL_BOTTOM_COLOR_CSC_ENABLE); + intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe), + SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE); } /* Adjust the state of the output pipe according to whether we @@ -17926,16 +18233,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) crtc_state->hw.active; if (crtc_state && has_bogus_dpll_config(crtc_state)) { - DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, + "BIOS has misprogrammed the hardware. Disabling pipe %c\n", + pipe_name(crtc->pipe)); has_active_crtc = false; } connector = intel_encoder_find_connector(encoder); if (connector && !has_active_crtc) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", - encoder->base.base.id, - encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] has active connectors but no active pipe!\n", + encoder->base.base.id, + encoder->base.name); /* Connector is active, but has no active pipe. This is * fallout from our resume register restoring. Disable @@ -17943,9 +18252,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) if (crtc_state) { struct drm_encoder *best_encoder; - DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", - encoder->base.base.id, - encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] manually disabled\n", + encoder->base.base.id, + encoder->base.name); /* avoid oopsing in case the hooks consult best_encoder */ best_encoder = connector->base.state->best_encoder; @@ -17998,9 +18308,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) intel_set_plane_visible(crtc_state, plane_state, visible); - DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", - plane->base.base.id, plane->base.name, - enableddisabled(visible), pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] hw state readout: %s, pipe %c\n", + plane->base.base.id, plane->base.name, + enableddisabled(visible), pipe_name(pipe)); } for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -18014,14 +18325,14 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) static void intel_modeset_readout_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_cdclk_state *cdclk_state = + to_intel_cdclk_state(dev_priv->cdclk.obj.state); enum pipe pipe; struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; - int i; - - dev_priv->active_pipes = 0; + u8 active_pipes = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -18038,41 +18349,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->hw.active; if (crtc_state->hw.active) - dev_priv->active_pipes |= 
BIT(crtc->pipe); + active_pipes |= BIT(crtc->pipe); - DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", - crtc->base.base.id, crtc->base.name, - enableddisabled(crtc_state->hw.active)); + drm_dbg_kms(&dev_priv->drm, + "[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, + enableddisabled(crtc_state->hw.active)); } - readout_plane_state(dev_priv); - - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes; - pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, - &pll->state.hw_state); - - if (IS_ELKHARTLAKE(dev_priv) && pll->on && - pll->info->id == DPLL_ID_EHL_DPLL4) { - pll->wakeref = intel_display_power_get(dev_priv, - POWER_DOMAIN_DPLL_DC_OFF); - } - - pll->state.crtc_mask = 0; - for_each_intel_crtc(dev, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - if (crtc_state->hw.active && - crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; - } - pll->active_mask = pll->state.crtc_mask; + readout_plane_state(dev_priv); - DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", - pll->info->name, pll->state.crtc_mask, pll->on); - } + intel_dpll_readout_hw_state(dev_priv); for_each_intel_encoder(dev, encoder) { pipe = 0; @@ -18089,10 +18378,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) encoder->base.crtc = NULL; } - DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", - encoder->base.base.id, encoder->base.name, - enableddisabled(encoder->base.crtc), - pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", + encoder->base.base.id, encoder->base.name, + enableddisabled(encoder->base.crtc), + pipe_name(pipe)); } drm_connector_list_iter_begin(dev, &conn_iter); @@ -18103,7 +18393,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.dpms = DRM_MODE_DPMS_ON; - encoder = connector->encoder; + encoder = intel_attached_encoder(connector); connector->base.encoder = &encoder->base; crtc = to_intel_crtc(encoder->base.crtc); @@ -18124,9 +18414,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", - connector->base.base.id, connector->base.name, - enableddisabled(connector->base.encoder)); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] hw state readout: %s\n", + connector->base.base.id, connector->base.name, + enableddisabled(connector->base.encoder)); } drm_connector_list_iter_end(&conn_iter); @@ -18190,19 +18481,20 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc_state->min_cdclk[plane->id] = crtc_state->pixel_rate; } - DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n", - plane->base.base.id, plane->base.name, - crtc_state->min_cdclk[plane->id]); + drm_dbg_kms(&dev_priv->drm, + "[PLANE:%d:%s] min_cdclk %d kHz\n", + plane->base.base.id, plane->base.name, + crtc_state->min_cdclk[plane->id]); } if (crtc_state->hw.active) { min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); - if (WARN_ON(min_cdclk < 0)) + if (drm_WARN_ON(dev, min_cdclk < 0)) min_cdclk = 0; } - dev_priv->min_cdclk[crtc->pipe] = min_cdclk; - dev_priv->min_voltage_level[crtc->pipe] = + cdclk_state->min_cdclk[crtc->pipe] = min_cdclk; + cdclk_state->min_voltage_level[crtc->pipe] = crtc_state->min_voltage_level; 
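+
+ /*
+ * Note: the per-pipe min_cdclk and min_voltage_level are now read out
+ * into the global intel_cdclk_state object rather than stored directly
+ * in dev_priv. A minimal sketch of the lookup, as done at the top of
+ * this function:
+ *
+ *   struct intel_cdclk_state *cdclk_state =
+ *       to_intel_cdclk_state(dev_priv->cdclk.obj.state);
+ *
+ *   min_cdclk = cdclk_state->min_cdclk[crtc->pipe];
+ */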
intel_bw_crtc_update(bw_state, crtc_state); @@ -18241,53 +18533,55 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv) * Also known as Wa_14010480278. */ if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv)) - I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | - DARBF_GATING_DIS); + intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0, + intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); if (IS_HASWELL(dev_priv)) { /* * WaRsPkgCStateDisplayPMReq:hsw * System hang if this isn't done before disabling all planes! */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); + intel_de_write(dev_priv, CHICKEN_PAR1_1, + intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); } } static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, enum port port, i915_reg_t hdmi_reg) { - u32 val = I915_READ(hdmi_reg); + u32 val = intel_de_read(dev_priv, hdmi_reg); if (val & SDVO_ENABLE || (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) return; - DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n", - port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "Sanitizing transcoder select for HDMI %c\n", + port_name(port)); val &= ~SDVO_PIPE_SEL_MASK; val |= SDVO_PIPE_SEL(PIPE_A); - I915_WRITE(hdmi_reg, val); + intel_de_write(dev_priv, hdmi_reg, val); } static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, enum port port, i915_reg_t dp_reg) { - u32 val = I915_READ(dp_reg); + u32 val = intel_de_read(dev_priv, dp_reg); if (val & DP_PORT_EN || (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) return; - DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n", - port_name(port)); + drm_dbg_kms(&dev_priv->drm, + "Sanitizing transcoder select for DP %c\n", + port_name(port)); val &= ~DP_PIPE_SEL_MASK; val |= DP_PIPE_SEL(PIPE_A); - I915_WRITE(dp_reg, val); + intel_de_write(dev_priv, dp_reg, val); } static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) @@ -18324,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev, struct intel_encoder *encoder; struct intel_crtc *crtc; intel_wakeref_t wakeref; - int i; wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); @@ -18377,18 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_update_connector_atomic_state(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - - if (!pll->on || pll->active_mask) - continue; - - DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", - pll->info->name); - - pll->info->funcs->disable(dev_priv, pll); - pll->on = false; - } + intel_dpll_sanitize_state(dev_priv); if (IS_G4X(dev_priv)) { g4x_wm_get_hw_state(dev_priv); @@ -18408,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, u64 put_domains; put_domains = modeset_get_crtc_power_domains(crtc_state); - if (WARN_ON(put_domains)) + if (drm_WARN_ON(dev, put_domains)) modeset_put_power_domains(dev_priv, put_domains); } @@ -18444,7 +18726,8 @@ void intel_display_resume(struct drm_device *dev) drm_modeset_acquire_fini(&ctx); if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_err(&dev_priv->drm, + "Restoring old state failed with %i\n", ret); if (state) drm_atomic_state_put(state); } @@ -18467,21 +18750,19 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); } +/* part #1: call before irq uninstall */ void intel_modeset_driver_remove(struct drm_i915_private *i915) 
{ flush_workqueue(i915->flip_wq); flush_workqueue(i915->modeset_wq); flush_work(&i915->atomic_helper.free_work); - WARN_ON(!llist_empty(&i915->atomic_helper.free_list)); - - /* - * Interrupts and polling as the first thing to avoid creating havoc. - * Too much stuff here (turning of connectors, ...) would - * experience fancy races otherwise. - */ - intel_irq_uninstall(i915); + drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list)); +} +/* part #2: call after irq uninstall */ +void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) +{ /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. @@ -18507,14 +18788,12 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915) intel_hdcp_component_fini(i915); - drm_mode_config_cleanup(&i915->drm); + intel_mode_config_cleanup(i915); intel_overlay_cleanup(i915); intel_gmbus_teardown(i915); - intel_bw_cleanup(i915); - destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); @@ -18523,6 +18802,15 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915) #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) +static bool +has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) +{ + if (cpu_transcoder == TRANSCODER_EDP) + return HAS_TRANSCODER_EDP(dev_priv); + else + return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder); +} + struct intel_display_error_state { u32 power_well_driver; @@ -18589,7 +18877,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) return NULL; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2); + error->power_well_driver = intel_de_read(dev_priv, + HSW_PWR_WELL_CTL2); for_each_pipe(dev_priv, i) { error->pipe[i].power_domain_on = @@ -18598,33 +18887,39 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) if (!error->pipe[i].power_domain_on) continue; - error->cursor[i].control = I915_READ(CURCNTR(i)); - error->cursor[i].position = I915_READ(CURPOS(i)); - error->cursor[i].base = I915_READ(CURBASE(i)); + error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i)); + error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i)); + error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i)); - error->plane[i].control = I915_READ(DSPCNTR(i)); - error->plane[i].stride = I915_READ(DSPSTRIDE(i)); + error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i)); + error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i)); if (INTEL_GEN(dev_priv) <= 3) { - error->plane[i].size = I915_READ(DSPSIZE(i)); - error->plane[i].pos = I915_READ(DSPPOS(i)); + error->plane[i].size = intel_de_read(dev_priv, + DSPSIZE(i)); + error->plane[i].pos = intel_de_read(dev_priv, + DSPPOS(i)); } if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) - error->plane[i].addr = I915_READ(DSPADDR(i)); + error->plane[i].addr = intel_de_read(dev_priv, + DSPADDR(i)); if (INTEL_GEN(dev_priv) >= 4) { - error->plane[i].surface = I915_READ(DSPSURF(i)); - error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); + error->plane[i].surface = intel_de_read(dev_priv, + DSPSURF(i)); + error->plane[i].tile_offset = intel_de_read(dev_priv, + DSPTILEOFF(i)); } - error->pipe[i].source = I915_READ(PIPESRC(i)); + error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i)); if (HAS_GMCH(dev_priv)) - error->pipe[i].stat = I915_READ(PIPESTAT(i)); + error->pipe[i].stat = intel_de_read(dev_priv, + PIPESTAT(i)); } for (i = 
0; i < ARRAY_SIZE(error->transcoder); i++) { enum transcoder cpu_transcoder = transcoders[i]; - if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) + if (!has_transcoder(dev_priv, cpu_transcoder)) continue; error->transcoder[i].available = true; @@ -18636,13 +18931,20 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) error->transcoder[i].cpu_transcoder = cpu_transcoder; - error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); - error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); - error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); - error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); - error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); - error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); - error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); + error->transcoder[i].conf = intel_de_read(dev_priv, + PIPECONF(cpu_transcoder)); + error->transcoder[i].htotal = intel_de_read(dev_priv, + HTOTAL(cpu_transcoder)); + error->transcoder[i].hblank = intel_de_read(dev_priv, + HBLANK(cpu_transcoder)); + error->transcoder[i].hsync = intel_de_read(dev_priv, + HSYNC(cpu_transcoder)); + error->transcoder[i].vtotal = intel_de_read(dev_priv, + VTOTAL(cpu_transcoder)); + error->transcoder[i].vblank = intel_de_read(dev_priv, + VBLANK(cpu_transcoder)); + error->transcoder[i].vsync = intel_de_read(dev_priv, + VSYNC(cpu_transcoder)); } return error; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 028aab728514..adb1225a3480 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -26,7 +26,6 @@ #define _INTEL_DISPLAY_H_ #include <drm/drm_util.h> -#include <drm/i915_drm.h> enum link_m_n_set; struct dpll; @@ -40,12 +39,15 @@ struct drm_framebuffer; struct drm_i915_error_state_buf; struct drm_i915_gem_object; struct drm_i915_private; +struct drm_mode_fb_cmd2; struct drm_modeset_acquire_ctx; struct drm_plane; struct drm_plane_state; struct i915_ggtt_view; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_crtc_state; struct intel_digital_port; struct intel_dp; struct intel_encoder; @@ -54,7 +56,6 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; -struct intel_crtc_state; enum i915_gpio { GPIOA, @@ -312,10 +313,11 @@ enum phy_fia { }; #define for_each_pipe(__dev_priv, __p) \ - for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) + for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \ + for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p)) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ - for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \ + for_each_pipe(__dev_priv, __p) \ for_each_if((__mask) & BIT(__p)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ @@ -469,6 +471,8 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +u8 intel_calc_active_pipes(struct intel_atomic_state *state, + u8 active_pipes); void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, @@ -486,6 +490,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); void intel_plane_destroy(struct drm_plane 
*plane); +void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state); void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state); void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -495,6 +500,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, const char *name, u32 reg); +void lpt_pch_enable(const struct intel_crtc_state *crtc_state); void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); void lpt_disable_iclkip(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv); @@ -520,6 +526,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); +void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state); void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); @@ -608,8 +615,10 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); +int intel_modeset_init_noirq(struct drm_i915_private *i915); int intel_modeset_init(struct drm_i915_private *i915); void intel_modeset_driver_remove(struct drm_i915_private *i915); +void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c new file mode 100644 index 000000000000..1e6eb7f2f72d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -0,0 +1,2134 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <drm/drm_debugfs.h> +#include <drm/drm_fourcc.h> + +#include "i915_debugfs.h" +#include "intel_csr.h" +#include "intel_display_debugfs.h" +#include "intel_display_types.h" +#include "intel_dp.h" +#include "intel_fbc.h" +#include "intel_hdcp.h" +#include "intel_hdmi.h" +#include "intel_pm.h" +#include "intel_psr.h" +#include "intel_sideband.h" + +static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) +{ + return to_i915(node->minor->dev); +} + +static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + + seq_printf(m, "FB tracking busy bits: 0x%08x\n", + dev_priv->fb_tracking.busy_bits); + + seq_printf(m, "FB tracking flip bits: 0x%08x\n", + dev_priv->fb_tracking.flip_bits); + + return 0; +} + +static int i915_fbc_status(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_fbc *fbc = &dev_priv->fbc; + intel_wakeref_t wakeref; + + if (!HAS_FBC(dev_priv)) + return -ENODEV; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + mutex_lock(&fbc->lock); + + if (intel_fbc_is_active(dev_priv)) + seq_puts(m, "FBC enabled\n"); + else + seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); + + if (intel_fbc_is_active(dev_priv)) { + u32 mask; + + if (INTEL_GEN(dev_priv) >= 8) + mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & 
BDW_FBC_COMP_SEG_MASK; + else if (INTEL_GEN(dev_priv) >= 7) + mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; + else if (INTEL_GEN(dev_priv) >= 5) + mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; + else if (IS_G4X(dev_priv)) + mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; + else + mask = intel_de_read(dev_priv, FBC_STATUS) & + (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); + + seq_printf(m, "Compressing: %s\n", yesno(mask)); + } + + mutex_unlock(&fbc->lock); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static int i915_fbc_false_color_get(void *data, u64 *val) +{ + struct drm_i915_private *dev_priv = data; + + if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) + return -ENODEV; + + *val = dev_priv->fbc.false_color; + + return 0; +} + +static int i915_fbc_false_color_set(void *data, u64 val) +{ + struct drm_i915_private *dev_priv = data; + u32 reg; + + if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) + return -ENODEV; + + mutex_lock(&dev_priv->fbc.lock); + + reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); + dev_priv->fbc.false_color = val; + + intel_de_write(dev_priv, ILK_DPFC_CONTROL, + val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); + + mutex_unlock(&dev_priv->fbc.lock); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, + i915_fbc_false_color_get, i915_fbc_false_color_set, + "%llu\n"); + +static int i915_ips_status(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; + + if (!HAS_IPS(dev_priv)) + return -ENODEV; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + seq_printf(m, "Enabled by kernel parameter: %s\n", + yesno(i915_modparams.enable_ips)); + + if (INTEL_GEN(dev_priv) >= 8) { + seq_puts(m, "Currently: unknown\n"); + } else { + if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE) + seq_puts(m, "Currently: enabled\n"); + else + seq_puts(m, "Currently: disabled\n"); + } + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static int i915_sr_status(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; + bool sr_enabled = false; + + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + + if (INTEL_GEN(dev_priv) >= 9) + /* no global SR status; inspect per-plane WM */; + else if (HAS_PCH_SPLIT(dev_priv)) + sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN; + else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || + IS_I945G(dev_priv) || IS_I945GM(dev_priv)) + sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN; + else if (IS_I915GM(dev_priv)) + sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN; + else if (IS_PINEVIEW(dev_priv)) + sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); + + seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); + + return 0; +} + +static int i915_opregion(struct seq_file *m, void *unused) +{ + struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; + + if (opregion->header) + seq_write(m, opregion->header, OPREGION_SIZE); + + return 0; +} + +static int i915_vbt(struct seq_file *m, void *unused) +{ + struct intel_opregion *opregion = 
&node_to_i915(m->private)->opregion; + + if (opregion->vbt) + seq_write(m, opregion->vbt, opregion->vbt_size); + + return 0; +} + +static int i915_gem_framebuffer_info(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + struct intel_framebuffer *fbdev_fb = NULL; + struct drm_framebuffer *drm_fb; + +#ifdef CONFIG_DRM_FBDEV_EMULATION + if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { + fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); + + seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fbdev_fb->base.width, + fbdev_fb->base.height, + fbdev_fb->base.format->depth, + fbdev_fb->base.format->cpp[0] * 8, + fbdev_fb->base.modifier, + drm_framebuffer_read_refcount(&fbdev_fb->base)); + i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base)); + seq_putc(m, '\n'); + } +#endif + + mutex_lock(&dev->mode_config.fb_lock); + drm_for_each_fb(drm_fb, dev) { + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + if (fb == fbdev_fb) + continue; + + seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", + fb->base.width, + fb->base.height, + fb->base.format->depth, + fb->base.format->cpp[0] * 8, + fb->base.modifier, + drm_framebuffer_read_refcount(&fb->base)); + i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base)); + seq_putc(m, '\n'); + } + mutex_unlock(&dev->mode_config.fb_lock); + + return 0; +} + +static int i915_psr_sink_status_show(struct seq_file *m, void *data) +{ + u8 val; + static const char * const sink_status[] = { + "inactive", + "transition to active, capture and display", + "active, display from RFB", + "active, capture and display on sink device timings", + "transition to inactive, capture and display, timing re-sync", + "reserved", + "reserved", + "sink internal error", + }; + struct drm_connector *connector = m->private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_dp *intel_dp = + intel_attached_dp(to_intel_connector(connector)); + int ret; + + if (!CAN_PSR(dev_priv)) { + seq_puts(m, "PSR Unsupported\n"); + return -ENODEV; + } + + if (connector->status != connector_status_connected) + return -ENODEV; + + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); + + if (ret == 1) { + const char *str = "unknown"; + + val &= DP_PSR_SINK_STATE_MASK; + if (val < ARRAY_SIZE(sink_status)) + str = sink_status[val]; + seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); + } else { + return ret; + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); + +static void +psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) +{ + u32 val, status_val; + const char *status = "unknown"; + + if (dev_priv->psr.psr2_enabled) { + static const char * const live_status[] = { + "IDLE", + "CAPTURE", + "CAPTURE_FS", + "SLEEP", + "BUFON_FW", + "ML_UP", + "SU_STANDBY", + "FAST_SLEEP", + "DEEP_SLEEP", + "BUF_ON", + "TG_ON" + }; + val = intel_de_read(dev_priv, + EDP_PSR2_STATUS(dev_priv->psr.transcoder)); + status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; + } else { + static const char * const live_status[] = { + "IDLE", + "SRDONACK", + "SRDENT", + "BUFOFF", + "BUFON", + "AUXACK", + "SRDOFFACK", + "SRDENT_ON", + }; + val = intel_de_read(dev_priv, + EDP_PSR_STATUS(dev_priv->psr.transcoder)); + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> + 
EDP_PSR_STATUS_STATE_SHIFT; + if (status_val < ARRAY_SIZE(live_status)) + status = live_status[status_val]; + } + + seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); +} + +static int i915_edp_psr_status(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct i915_psr *psr = &dev_priv->psr; + intel_wakeref_t wakeref; + const char *status; + bool enabled; + u32 val; + + if (!HAS_PSR(dev_priv)) + return -ENODEV; + + seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); + if (psr->dp) + seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); + seq_puts(m, "\n"); + + if (!psr->sink_support) + return 0; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + mutex_lock(&psr->lock); + + if (psr->enabled) + status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; + else + status = "disabled"; + seq_printf(m, "PSR mode: %s\n", status); + + if (!psr->enabled) { + seq_printf(m, "PSR sink not reliable: %s\n", + yesno(psr->sink_not_reliable)); + + goto unlock; + } + + if (psr->psr2_enabled) { + val = intel_de_read(dev_priv, + EDP_PSR2_CTL(dev_priv->psr.transcoder)); + enabled = val & EDP_PSR2_ENABLE; + } else { + val = intel_de_read(dev_priv, + EDP_PSR_CTL(dev_priv->psr.transcoder)); + enabled = val & EDP_PSR_ENABLE; + } + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", + enableddisabled(enabled), val); + psr_source_status(dev_priv, m); + seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", + psr->busy_frontbuffer_bits); + + /* + * SKL+ Perf counter is reset to 0 every time DC state is entered + */ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + val = intel_de_read(dev_priv, + EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); + val &= EDP_PSR_PERF_CNT_MASK; + seq_printf(m, "Performance counter: %u\n", val); + } + + if (psr->debug & I915_PSR_DEBUG_IRQ) { + seq_printf(m, "Last attempted entry at: %lld\n", + psr->last_entry_attempt); + seq_printf(m, "Last exit at: %lld\n", psr->last_exit); + } + + if (psr->psr2_enabled) { + u32 su_frames_val[3]; + int frame; + + /* + * Reading all 3 registers beforehand to minimize crossing a + * frame boundary between register reads + */ + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { + val = intel_de_read(dev_priv, + PSR2_SU_STATUS(dev_priv->psr.transcoder, frame)); + su_frames_val[frame / 3] = val; + } + + seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); + + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { + u32 su_blocks; + + su_blocks = su_frames_val[frame / 3] & + PSR2_SU_STATUS_MASK(frame); + su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); + seq_printf(m, "%d\t%d\n", frame, su_blocks); + } + } + +unlock: + mutex_unlock(&psr->lock); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static int +i915_edp_psr_debug_set(void *data, u64 val) +{ + struct drm_i915_private *dev_priv = data; + intel_wakeref_t wakeref; + int ret; + + if (!CAN_PSR(dev_priv)) + return -ENODEV; + + drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + ret = intel_psr_debug_set(dev_priv, val); + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return ret; +} + +static int +i915_edp_psr_debug_get(void *data, u64 *val) +{ + struct drm_i915_private *dev_priv = data; + + if (!CAN_PSR(dev_priv)) + return -ENODEV; + + *val = READ_ONCE(dev_priv->psr.debug); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, + i915_edp_psr_debug_get, i915_edp_psr_debug_set, + "%llu\n"); + +static
int i915_power_domain_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct i915_power_domains *power_domains = &dev_priv->power_domains; + int i; + + mutex_lock(&power_domains->lock); + + seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); + for (i = 0; i < power_domains->power_well_count; i++) { + struct i915_power_well *power_well; + enum intel_display_power_domain power_domain; + + power_well = &power_domains->power_wells[i]; + seq_printf(m, "%-25s %d\n", power_well->desc->name, + power_well->count); + + for_each_power_domain(power_domain, power_well->desc->domains) + seq_printf(m, " %-23s %d\n", + intel_display_power_domain_str(power_domain), + power_domains->domain_use_count[power_domain]); + } + + mutex_unlock(&power_domains->lock); + + return 0; +} + +static int i915_dmc_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + intel_wakeref_t wakeref; + struct intel_csr *csr; + i915_reg_t dc5_reg, dc6_reg = {}; + + if (!HAS_CSR(dev_priv)) + return -ENODEV; + + csr = &dev_priv->csr; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); + seq_printf(m, "path: %s\n", csr->fw_path); + + if (!csr->dmc_payload) + goto out; + + seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), + CSR_VERSION_MINOR(csr->version)); + + if (INTEL_GEN(dev_priv) >= 12) { + dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; + dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; + /* + * NOTE: DMC_DEBUG3 is a general purpose reg. + * According to B.Specs:49196 DMC f/w reuses DC5/6 counter + * reg for DC3CO debugging and validation, + * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. + */ + seq_printf(m, "DC3CO count: %d\n", + intel_de_read(dev_priv, DMC_DEBUG3)); + } else { + dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : + SKL_CSR_DC3_DC5_COUNT; + if (!IS_GEN9_LP(dev_priv)) + dc6_reg = SKL_CSR_DC5_DC6_COUNT; + } + + seq_printf(m, "DC3 -> DC5 count: %d\n", + intel_de_read(dev_priv, dc5_reg)); + if (dc6_reg.reg) + seq_printf(m, "DC5 -> DC6 count: %d\n", + intel_de_read(dev_priv, dc6_reg)); + +out: + seq_printf(m, "program base: 0x%08x\n", + intel_de_read(dev_priv, CSR_PROGRAM(0))); + seq_printf(m, "ssp base: 0x%08x\n", + intel_de_read(dev_priv, CSR_SSP_BASE)); + seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL)); + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static void intel_seq_print_mode(struct seq_file *m, int tabs, + const struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < tabs; i++) + seq_putc(m, '\t'); + + seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); +} + +static void intel_encoder_info(struct seq_file *m, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + + seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", + encoder->base.base.id, encoder->base.name); + + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + const struct drm_connector_state *conn_state = + connector->state; + + if (conn_state->best_encoder != &encoder->base) + continue; + + seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + } + drm_connector_list_iter_end(&conn_iter); +} + +static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) +{ + const struct drm_display_mode *mode = panel->fixed_mode; + + seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); +} + +static void intel_hdcp_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + bool hdcp_cap, hdcp2_cap; + + hdcp_cap = intel_hdcp_capable(intel_connector); + hdcp2_cap = intel_hdcp2_capable(intel_connector); + + if (hdcp_cap) + seq_puts(m, "HDCP1.4 "); + if (hdcp2_cap) + seq_puts(m, "HDCP2.2 "); + + if (!hdcp_cap && !hdcp2_cap) + seq_puts(m, "None"); + + seq_puts(m, "\n"); +} + +static void intel_dp_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); + + seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); + seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); + if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + intel_panel_info(m, &intel_connector->panel); + + drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, + &intel_dp->aux); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } +} + +static void intel_dp_mst_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); + struct intel_dp_mst_encoder *intel_mst = + enc_to_mst(intel_encoder); + struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &intel_dig_port->dp; + bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, + intel_connector->port); + + seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); +} + +static void intel_hdmi_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + struct 
intel_encoder *intel_encoder = intel_attached_encoder(intel_connector); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder); + + seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } +} + +static void intel_lvds_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + intel_panel_info(m, &intel_connector->panel); +} + +static void intel_connector_info(struct seq_file *m, + struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + const struct drm_connector_state *conn_state = connector->state; + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + const struct drm_display_mode *mode; + + seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", + connector->base.id, connector->name, + drm_get_connector_status_name(connector->status)); + + if (connector->status == connector_status_disconnected) + return; + + seq_printf(m, "\tphysical dimensions: %dx%dmm\n", + connector->display_info.width_mm, + connector->display_info.height_mm); + seq_printf(m, "\tsubpixel order: %s\n", + drm_get_subpixel_order_name(connector->display_info.subpixel_order)); + seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); + + if (!encoder) + return; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + if (encoder->type == INTEL_OUTPUT_DP_MST) + intel_dp_mst_info(m, intel_connector); + else + intel_dp_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_LVDS: + if (encoder->type == INTEL_OUTPUT_LVDS) + intel_lvds_info(m, intel_connector); + break; + case DRM_MODE_CONNECTOR_HDMIA: + if (encoder->type == INTEL_OUTPUT_HDMI || + encoder->type == INTEL_OUTPUT_DDI) + intel_hdmi_info(m, intel_connector); + break; + default: + break; + } + + seq_printf(m, "\tmodes:\n"); + list_for_each_entry(mode, &connector->modes, head) + intel_seq_print_mode(m, 2, mode); +} + +static const char *plane_type(enum drm_plane_type type) +{ + switch (type) { + case DRM_PLANE_TYPE_OVERLAY: + return "OVL"; + case DRM_PLANE_TYPE_PRIMARY: + return "PRI"; + case DRM_PLANE_TYPE_CURSOR: + return "CUR"; + /* + * Deliberately omitting default: to generate compiler warnings + * when a new drm_plane_type gets added. + */ + } + + return "unknown"; +} + +static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) +{ + /* + * According to doc only one DRM_MODE_ROTATE_ is allowed but this + * will print them all to visualize if the values are misused + */ + snprintf(buf, bufsize, + "%s%s%s%s%s%s(0x%08x)", + (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", + (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", + (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", + (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", + (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", + (rotation & DRM_MODE_REFLECT_Y) ? 
"FLIPY " : "", + rotation); +} + +static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) +{ + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + const struct drm_framebuffer *fb = plane_state->uapi.fb; + struct drm_format_name_buf format_name; + struct drm_rect src, dst; + char rot_str[48]; + + src = drm_plane_state_src(&plane_state->uapi); + dst = drm_plane_state_dest(&plane_state->uapi); + + if (fb) + drm_get_format_name(fb->format->format, &format_name); + + plane_rotation(rot_str, sizeof(rot_str), + plane_state->uapi.rotation); + + seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + fb ? fb->base.id : 0, fb ? format_name.str : "n/a", + fb ? fb->width : 0, fb ? fb->height : 0, + DRM_RECT_FP_ARG(&src), + DRM_RECT_ARG(&dst), + rot_str); +} + +static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) +{ + const struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_format_name_buf format_name; + char rot_str[48]; + + if (!fb) + return; + + drm_get_format_name(fb->format->format, &format_name); + + plane_rotation(rot_str, sizeof(rot_str), + plane_state->hw.rotation); + + seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", + fb->base.id, format_name.str, + fb->width, fb->height, + yesno(plane_state->uapi.visible), + DRM_RECT_FP_ARG(&plane_state->uapi.src), + DRM_RECT_ARG(&plane_state->uapi.dst), + rot_str); +} + +static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", + plane->base.base.id, plane->base.name, + plane_type(plane->base.type)); + intel_plane_uapi_info(m, plane); + intel_plane_hw_info(m, plane); + } +} + +static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int num_scalers = crtc->num_scalers; + int i; + + /* Not all platformas have a scaler */ + if (num_scalers) { + seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", + num_scalers, + crtc_state->scaler_state.scaler_users, + crtc_state->scaler_state.scaler_id); + + for (i = 0; i < num_scalers; i++) { + const struct intel_scaler *sc = + &crtc_state->scaler_state.scalers[i]; + + seq_printf(m, ", scalers[%d]: use=%s, mode=%x", + i, yesno(sc->in_use), sc->mode); + } + seq_puts(m, "\n"); + } else { + seq_puts(m, "\tNo scalers available on this platform\n"); + } +} + +static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + const struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_encoder *encoder; + + seq_printf(m, "[CRTC:%d:%s]:\n", + crtc->base.base.id, crtc->base.name); + + seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", + yesno(crtc_state->uapi.enable), + yesno(crtc_state->uapi.active), + DRM_MODE_ARG(&crtc_state->uapi.mode)); + + if (crtc_state->hw.enable) { + seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", + yesno(crtc_state->hw.active), + DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); + + seq_printf(m, "\tpipe src 
size=%dx%d, dither=%s, bpp=%d\n", + crtc_state->pipe_src_w, crtc_state->pipe_src_h, + yesno(crtc_state->dither), crtc_state->pipe_bpp); + + intel_scaler_info(m, crtc); + } + + for_each_intel_encoder_mask(&dev_priv->drm, encoder, + crtc_state->uapi.encoder_mask) + intel_encoder_info(m, crtc, encoder); + + intel_plane_info(m, crtc); + + seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", + yesno(!crtc->cpu_fifo_underrun_disabled), + yesno(!crtc->pch_fifo_underrun_disabled)); +} + +static int i915_display_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + struct intel_crtc *crtc; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + intel_wakeref_t wakeref; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + drm_modeset_lock_all(dev); + + seq_printf(m, "CRTC info\n"); + seq_printf(m, "---------\n"); + for_each_intel_crtc(dev, crtc) + intel_crtc_info(m, crtc); + + seq_printf(m, "\n"); + seq_printf(m, "Connector info\n"); + seq_printf(m, "--------------\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) + intel_connector_info(m, connector); + drm_connector_list_iter_end(&conn_iter); + + drm_modeset_unlock_all(dev); + + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + + return 0; +} + +static int i915_shared_dplls_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + int i; + + drm_modeset_lock_all(dev); + + seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n", + dev_priv->dpll.ref_clks.nssc, + dev_priv->dpll.ref_clks.ssc); + + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; + + seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, + pll->info->id); + seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", + pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); + seq_printf(m, " tracked hardware state:\n"); + seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); + seq_printf(m, " dpll_md: 0x%08x\n", + pll->state.hw_state.dpll_md); + seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); + seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); + seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); + seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); + seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); + seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", + pll->state.hw_state.mg_refclkin_ctl); + seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", + pll->state.hw_state.mg_clktop2_coreclkctl1); + seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", + pll->state.hw_state.mg_clktop2_hsclkctl); + seq_printf(m, " mg_pll_div0: 0x%08x\n", + pll->state.hw_state.mg_pll_div0); + seq_printf(m, " mg_pll_div1: 0x%08x\n", + pll->state.hw_state.mg_pll_div1); + seq_printf(m, " mg_pll_lf: 0x%08x\n", + pll->state.hw_state.mg_pll_lf); + seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", + pll->state.hw_state.mg_pll_frac_lock); + seq_printf(m, " mg_pll_ssc: 0x%08x\n", + pll->state.hw_state.mg_pll_ssc); + seq_printf(m, " mg_pll_bias: 0x%08x\n", + pll->state.hw_state.mg_pll_bias); + seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", + pll->state.hw_state.mg_pll_tdc_coldst_bias); + } + drm_modeset_unlock_all(dev); + + return 0; +} + +static int i915_ipc_status_show(struct seq_file *m, void *data) +{ + 
struct drm_i915_private *dev_priv = m->private; + + seq_printf(m, "Isochronous Priority Control: %s\n", + yesno(dev_priv->ipc_enabled)); + return 0; +} + +static int i915_ipc_status_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (!HAS_IPC(dev_priv)) + return -ENODEV; + + return single_open(file, i915_ipc_status_show, dev_priv); +} + +static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + intel_wakeref_t wakeref; + bool enable; + int ret; + + ret = kstrtobool_from_user(ubuf, len, &enable); + if (ret < 0) + return ret; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { + if (!dev_priv->ipc_enabled && enable) + drm_info(&dev_priv->drm, + "Enabling IPC: WM will be proper only after next commit\n"); + dev_priv->wm.distrust_bios_wm = true; + dev_priv->ipc_enabled = enable; + intel_enable_ipc(dev_priv); + } + + return len; +} + +static const struct file_operations i915_ipc_status_fops = { + .owner = THIS_MODULE, + .open = i915_ipc_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_ipc_status_write +}; + +static int i915_ddb_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + struct skl_ddb_entry *entry; + struct intel_crtc *crtc; + + if (INTEL_GEN(dev_priv) < 9) + return -ENODEV; + + drm_modeset_lock_all(dev); + + seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + + seq_printf(m, "Pipe %c\n", pipe_name(pipe)); + + for_each_plane_id_on_crtc(crtc, plane_id) { + entry = &crtc_state->wm.skl.plane_ddb_y[plane_id]; + seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1, + entry->start, entry->end, + skl_ddb_entry_size(entry)); + } + + entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; + seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, + entry->end, skl_ddb_entry_size(entry)); + } + + drm_modeset_unlock_all(dev); + + return 0; +} + +static void drrs_status_per_crtc(struct seq_file *m, + struct drm_device *dev, + struct intel_crtc *intel_crtc) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_drrs *drrs = &dev_priv->drrs; + int vrefresh = 0; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->state->crtc != &intel_crtc->base) + continue; + + seq_printf(m, "%s:\n", connector->name); + } + drm_connector_list_iter_end(&conn_iter); + + seq_puts(m, "\n"); + + if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { + struct intel_panel *panel; + + mutex_lock(&drrs->mutex); + /* DRRS Supported */ + seq_puts(m, "\tDRRS Supported: Yes\n"); + + /* disable_drrs() will make drrs->dp NULL */ + if (!drrs->dp) { + seq_puts(m, "Idleness DRRS: Disabled\n"); + if (dev_priv->psr.enabled) + seq_puts(m, + "\tAs PSR is enabled, DRRS is not enabled\n"); + mutex_unlock(&drrs->mutex); + return; + } + + panel = &drrs->dp->attached_connector->panel; + seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", + drrs->busy_frontbuffer_bits); + + seq_puts(m, "\n\t\t"); + if 
(drrs->refresh_rate_type == DRRS_HIGH_RR) { + seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); + vrefresh = panel->fixed_mode->vrefresh; + } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { + seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); + vrefresh = panel->downclock_mode->vrefresh; + } else { + seq_printf(m, "DRRS_State: Unknown(%d)\n", + drrs->refresh_rate_type); + mutex_unlock(&drrs->mutex); + return; + } + seq_printf(m, "\t\tVrefresh: %d", vrefresh); + + seq_puts(m, "\n\t\t"); + mutex_unlock(&drrs->mutex); + } else { + /* DRRS not supported. Print the VBT parameter*/ + seq_puts(m, "\tDRRS Supported : No"); + } + seq_puts(m, "\n"); +} + +static int i915_drrs_status(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + struct intel_crtc *intel_crtc; + int active_crtc_cnt = 0; + + drm_modeset_lock_all(dev); + for_each_intel_crtc(dev, intel_crtc) { + if (intel_crtc->base.state->active) { + active_crtc_cnt++; + seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); + + drrs_status_per_crtc(m, dev, intel_crtc); + } + } + drm_modeset_unlock_all(dev); + + if (!active_crtc_cnt) + seq_puts(m, "No active crtc found\n"); + + return 0; +} + +static int i915_dp_mst_info(struct seq_file *m, void *unused) +{ + struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_device *dev = &dev_priv->drm; + struct intel_encoder *intel_encoder; + struct intel_digital_port *intel_dig_port; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + continue; + + intel_encoder = intel_attached_encoder(to_intel_connector(connector)); + if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + intel_dig_port = enc_to_dig_port(intel_encoder); + if (!intel_dig_port->dp.can_mst) + continue; + + seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); + drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static ssize_t i915_displayport_test_active_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + char *input_buffer; + int status = 0; + struct drm_device *dev; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + int val = 0; + + dev = ((struct seq_file *)file->private_data)->private; + + if (len == 0) + return 0; + + input_buffer = memdup_user_nul(ubuf, len); + if (IS_ERR(input_buffer)) + return PTR_ERR(input_buffer); + + drm_dbg(&to_i915(dev)->drm, + "Copied %d bytes from user\n", (unsigned int)len); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + status = kstrtoint(input_buffer, 10, &val); + if (status < 0) + break; + drm_dbg(&to_i915(dev)->drm, + "Got %d for test active\n", val); + /* To prevent erroneous activation of the compliance + * testing code, only accept an actual value of 1 
here + */ + if (val == 1) + intel_dp->compliance.test_active = true; + else + intel_dp->compliance.test_active = false; + } + } + drm_connector_list_iter_end(&conn_iter); + kfree(input_buffer); + if (status < 0) + return status; + + *offp += len; + return len; +} + +static int i915_displayport_test_active_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_active) + seq_puts(m, "1"); + else + seq_puts(m, "0"); + } else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} + +static int i915_displayport_test_active_open(struct inode *inode, + struct file *file) +{ + return single_open(file, i915_displayport_test_active_show, + inode->i_private); +} + +static const struct file_operations i915_displayport_test_active_fops = { + .owner = THIS_MODULE, + .open = i915_displayport_test_active_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_displayport_test_active_write +}; + +static int i915_displayport_test_data_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + if (intel_dp->compliance.test_type == + DP_TEST_LINK_EDID_READ) + seq_printf(m, "%lx", + intel_dp->compliance.test_data.edid); + else if (intel_dp->compliance.test_type == + DP_TEST_LINK_VIDEO_PATTERN) { + seq_printf(m, "hdisplay: %d\n", + intel_dp->compliance.test_data.hdisplay); + seq_printf(m, "vdisplay: %d\n", + intel_dp->compliance.test_data.vdisplay); + seq_printf(m, "bpc: %u\n", + intel_dp->compliance.test_data.bpc); + } + } else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); + +static int i915_displayport_test_type_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct intel_dp *intel_dp; + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + + if (connector->connector_type != + DRM_MODE_CONNECTOR_DisplayPort) + continue; + + encoder = to_intel_encoder(connector->encoder); + if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) + 
continue; + + if (encoder && connector->status == connector_status_connected) { + intel_dp = enc_to_intel_dp(encoder); + seq_printf(m, "%02lx", intel_dp->compliance.test_type); + } else + seq_puts(m, "0"); + } + drm_connector_list_iter_end(&conn_iter); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); + +static void wm_latency_show(struct seq_file *m, const u16 wm[8]) +{ + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; + int level; + int num_levels; + + if (IS_CHERRYVIEW(dev_priv)) + num_levels = 3; + else if (IS_VALLEYVIEW(dev_priv)) + num_levels = 1; + else if (IS_G4X(dev_priv)) + num_levels = 3; + else + num_levels = ilk_wm_max_level(dev_priv) + 1; + + drm_modeset_lock_all(dev); + + for (level = 0; level < num_levels; level++) { + unsigned int latency = wm[level]; + + /* + * - WM1+ latency values in 0.5us units + * - latencies are in us on gen9/vlv/chv + */ + if (INTEL_GEN(dev_priv) >= 9 || + IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_G4X(dev_priv)) + latency *= 10; + else if (level > 0) + latency *= 5; + + seq_printf(m, "WM%d %u (%u.%u usec)\n", + level, wm[level], latency / 10, latency % 10); + } + + drm_modeset_unlock_all(dev); +} + +static int pri_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.pri_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int spr_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.spr_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int cur_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.cur_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int pri_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) + return -ENODEV; + + return single_open(file, pri_wm_latency_show, dev_priv); +} + +static int spr_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, spr_wm_latency_show, dev_priv); +} + +static int cur_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, cur_wm_latency_show, dev_priv); +} + +static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp, u16 wm[8]) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + struct drm_device *dev = &dev_priv->drm; + u16 new[8] = { 0 }; + int num_levels; + int level; + int ret; + char tmp[32]; + + if (IS_CHERRYVIEW(dev_priv)) + num_levels = 3; + else if (IS_VALLEYVIEW(dev_priv)) + num_levels = 1; + else if (IS_G4X(dev_priv)) + num_levels = 3; + else + num_levels = ilk_wm_max_level(dev_priv) + 1; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if 
(copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", + &new[0], &new[1], &new[2], &new[3], + &new[4], &new[5], &new[6], &new[7]); + if (ret != num_levels) + return -EINVAL; + + drm_modeset_lock_all(dev); + + for (level = 0; level < num_levels; level++) + wm[level] = new[level]; + + drm_modeset_unlock_all(dev); + + return len; +} + + +static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.pri_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.spr_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (INTEL_GEN(dev_priv) >= 9) + latencies = dev_priv->wm.skl_latency; + else + latencies = dev_priv->wm.cur_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static const struct file_operations i915_pri_wm_latency_fops = { + .owner = THIS_MODULE, + .open = pri_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pri_wm_latency_write +}; + +static const struct file_operations i915_spr_wm_latency_fops = { + .owner = THIS_MODULE, + .open = spr_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = spr_wm_latency_write +}; + +static const struct file_operations i915_cur_wm_latency_fops = { + .owner = THIS_MODULE, + .open = cur_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = cur_wm_latency_write +}; + +static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + struct i915_hotplug *hotplug = &dev_priv->hotplug; + + /* Synchronize with everything first in case there's been an HPD + * storm, but we haven't finished handling it in the kernel yet + */ + intel_synchronize_irq(dev_priv); + flush_work(&dev_priv->hotplug.dig_port_work); + flush_delayed_work(&dev_priv->hotplug.hotplug_work); + + seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); + seq_printf(m, "Detected: %s\n", + yesno(delayed_work_pending(&hotplug->reenable_work))); + + return 0; +} + +static ssize_t i915_hpd_storm_ctl_write(struct file *file, + const char __user *ubuf, size_t len, + loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + struct i915_hotplug *hotplug = &dev_priv->hotplug; + unsigned int new_threshold; + int i; + char *newline; + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + /* Strip newline, if any */ + newline = strchr(tmp, '\n'); + if (newline) + *newline = '\0'; + + if (strcmp(tmp, 
"reset") == 0) + new_threshold = HPD_STORM_DEFAULT_THRESHOLD; + else if (kstrtouint(tmp, 10, &new_threshold) != 0) + return -EINVAL; + + if (new_threshold > 0) + drm_dbg_kms(&dev_priv->drm, + "Setting HPD storm detection threshold to %d\n", + new_threshold); + else + drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); + + spin_lock_irq(&dev_priv->irq_lock); + hotplug->hpd_storm_threshold = new_threshold; + /* Reset the HPD storm stats so we don't accidentally trigger a storm */ + for_each_hpd_pin(i) + hotplug->stats[i].count = 0; + spin_unlock_irq(&dev_priv->irq_lock); + + /* Re-enable hpd immediately if we were in an irq storm */ + flush_delayed_work(&dev_priv->hotplug.reenable_work); + + return len; +} + +static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); +} + +static const struct file_operations i915_hpd_storm_ctl_fops = { + .owner = THIS_MODULE, + .open = i915_hpd_storm_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_hpd_storm_ctl_write +}; + +static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + + seq_printf(m, "Enabled: %s\n", + yesno(dev_priv->hotplug.hpd_short_storm_enabled)); + + return 0; +} + +static int +i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_hpd_short_storm_ctl_show, + inode->i_private); +} + +static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + struct i915_hotplug *hotplug = &dev_priv->hotplug; + char *newline; + char tmp[16]; + int i; + bool new_state; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + /* Strip newline, if any */ + newline = strchr(tmp, '\n'); + if (newline) + *newline = '\0'; + + /* Reset to the "default" state for this system */ + if (strcmp(tmp, "reset") == 0) + new_state = !HAS_DP_MST(dev_priv); + else if (kstrtobool(tmp, &new_state) != 0) + return -EINVAL; + + drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", + new_state ? 
"En" : "Dis"); + + spin_lock_irq(&dev_priv->irq_lock); + hotplug->hpd_short_storm_enabled = new_state; + /* Reset the HPD storm stats so we don't accidentally trigger a storm */ + for_each_hpd_pin(i) + hotplug->stats[i].count = 0; + spin_unlock_irq(&dev_priv->irq_lock); + + /* Re-enable hpd immediately if we were in an irq storm */ + flush_delayed_work(&dev_priv->hotplug.reenable_work); + + return len; +} + +static const struct file_operations i915_hpd_short_storm_ctl_fops = { + .owner = THIS_MODULE, + .open = i915_hpd_short_storm_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_hpd_short_storm_ctl_write, +}; + +static int i915_drrs_ctl_set(void *data, u64 val) +{ + struct drm_i915_private *dev_priv = data; + struct drm_device *dev = &dev_priv->drm; + struct intel_crtc *crtc; + + if (INTEL_GEN(dev_priv) < 7) + return -ENODEV; + + for_each_intel_crtc(dev, crtc) { + struct drm_connector_list_iter conn_iter; + struct intel_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_crtc_commit *commit; + int ret; + + ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(crtc->base.state); + + if (!crtc_state->hw.active || + !crtc_state->has_drrs) + goto out; + + commit = crtc_state->uapi.commit; + if (commit) { + ret = wait_for_completion_interruptible(&commit->hw_done); + if (ret) + goto out; + } + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct intel_encoder *encoder; + struct intel_dp *intel_dp; + + if (!(crtc_state->uapi.connector_mask & + drm_connector_mask(connector))) + continue; + + encoder = intel_attached_encoder(to_intel_connector(connector)); + if (encoder->type != INTEL_OUTPUT_EDP) + continue; + + drm_dbg(&dev_priv->drm, + "Manually %sabling DRRS. %llu\n", + val ? 
"en" : "dis", val); + + intel_dp = enc_to_intel_dp(encoder); + if (val) + intel_edp_drrs_enable(intel_dp, + crtc_state); + else + intel_edp_drrs_disable(intel_dp, + crtc_state); + } + drm_connector_list_iter_end(&conn_iter); + +out: + drm_modeset_unlock(&crtc->base.mutex); + if (ret) + return ret; + } + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); + +static ssize_t +i915_fifo_underrun_reset_write(struct file *filp, + const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct drm_i915_private *dev_priv = filp->private_data; + struct intel_crtc *intel_crtc; + struct drm_device *dev = &dev_priv->drm; + int ret; + bool reset; + + ret = kstrtobool_from_user(ubuf, cnt, &reset); + if (ret) + return ret; + + if (!reset) + return cnt; + + for_each_intel_crtc(dev, intel_crtc) { + struct drm_crtc_commit *commit; + struct intel_crtc_state *crtc_state; + + ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex); + if (ret) + return ret; + + crtc_state = to_intel_crtc_state(intel_crtc->base.state); + commit = crtc_state->uapi.commit; + if (commit) { + ret = wait_for_completion_interruptible(&commit->hw_done); + if (!ret) + ret = wait_for_completion_interruptible(&commit->flip_done); + } + + if (!ret && crtc_state->hw.active) { + drm_dbg_kms(&dev_priv->drm, + "Re-arming FIFO underruns on pipe %c\n", + pipe_name(intel_crtc->pipe)); + + intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state); + } + + drm_modeset_unlock(&intel_crtc->base.mutex); + + if (ret) + return ret; + } + + ret = intel_fbc_reset_underrun(dev_priv); + if (ret) + return ret; + + return cnt; +} + +static const struct file_operations i915_fifo_underrun_reset_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = i915_fifo_underrun_reset_write, + .llseek = default_llseek, +}; + +static const struct drm_info_list intel_display_debugfs_list[] = { + {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, + {"i915_fbc_status", i915_fbc_status, 0}, + {"i915_ips_status", i915_ips_status, 0}, + {"i915_sr_status", i915_sr_status, 0}, + {"i915_opregion", i915_opregion, 0}, + {"i915_vbt", i915_vbt, 0}, + {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, + {"i915_edp_psr_status", i915_edp_psr_status, 0}, + {"i915_power_domain_info", i915_power_domain_info, 0}, + {"i915_dmc_info", i915_dmc_info, 0}, + {"i915_display_info", i915_display_info, 0}, + {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, + {"i915_dp_mst_info", i915_dp_mst_info, 0}, + {"i915_ddb_info", i915_ddb_info, 0}, + {"i915_drrs_status", i915_drrs_status, 0}, +}; + +static const struct { + const char *name; + const struct file_operations *fops; +} intel_display_debugfs_files[] = { + {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, + {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, + {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, + {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, + {"i915_fbc_false_color", &i915_fbc_false_color_fops}, + {"i915_dp_test_data", &i915_displayport_test_data_fops}, + {"i915_dp_test_type", &i915_displayport_test_type_fops}, + {"i915_dp_test_active", &i915_displayport_test_active_fops}, + {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, + {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, + {"i915_ipc_status", &i915_ipc_status_fops}, + {"i915_drrs_ctl", &i915_drrs_ctl_fops}, + {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, +}; + +int intel_display_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = 
i915->drm.primary; + int i; + + for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { + debugfs_create_file(intel_display_debugfs_files[i].name, + S_IRUGO | S_IWUSR, + minor->debugfs_root, + to_i915(minor->dev), + intel_display_debugfs_files[i].fops); + } + + return drm_debugfs_create_files(intel_display_debugfs_list, + ARRAY_SIZE(intel_display_debugfs_list), + minor->debugfs_root, minor); +} + +static int i915_panel_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct intel_dp *intel_dp = + intel_attached_dp(to_intel_connector(connector)); + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "Panel power up delay: %d\n", + intel_dp->panel_power_up_delay); + seq_printf(m, "Panel power down delay: %d\n", + intel_dp->panel_power_down_delay); + seq_printf(m, "Backlight on delay: %d\n", + intel_dp->backlight_on_delay); + seq_printf(m, "Backlight off delay: %d\n", + intel_dp->backlight_off_delay); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_panel); + +static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct intel_connector *intel_connector = to_intel_connector(connector); + + if (connector->status != connector_status_connected) + return -ENODEV; + + /* HDCP is supported by connector */ + if (!intel_connector->hdcp.shim) + return -EINVAL; + + seq_printf(m, "%s:%d HDCP version: ", connector->name, + connector->base.id); + intel_hdcp_info(m, intel_connector); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); + +static int i915_dsc_fec_support_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc; + struct intel_dp *intel_dp; + struct drm_modeset_acquire_ctx ctx; + struct intel_crtc_state *crtc_state = NULL; + int ret = 0; + bool try_again = false; + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + + do { + try_again = false; + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + &ctx); + if (ret) { + if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { + try_again = true; + continue; + } + break; + } + crtc = connector->state->crtc; + if (connector->status != connector_status_connected || !crtc) { + ret = -ENODEV; + break; + } + ret = drm_modeset_lock(&crtc->mutex, &ctx); + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) { + try_again = true; + continue; + } + break; + } else if (ret) { + break; + } + intel_dp = intel_attached_dp(to_intel_connector(connector)); + crtc_state = to_intel_crtc_state(crtc->state); + seq_printf(m, "DSC_Enabled: %s\n", + yesno(crtc_state->dsc.compression_enable)); + seq_printf(m, "DSC_Sink_Support: %s\n", + yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); + seq_printf(m, "Force_DSC_Enable: %s\n", + yesno(intel_dp->force_dsc_en)); + if (!intel_dp_is_edp(intel_dp)) + seq_printf(m, "FEC_Sink_Support: %s\n", + yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable))); + } while (try_again); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + +static ssize_t i915_dsc_fec_support_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + bool dsc_enable = false; + int ret; + struct drm_connector *connector = + ((struct seq_file *)file->private_data)->private; + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); + struct drm_i915_private 
*i915 = to_i915(encoder->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + + if (len == 0) + return 0; + + drm_dbg(&i915->drm, + "Copied %zu bytes from user to force DSC\n", len); + + ret = kstrtobool_from_user(ubuf, len, &dsc_enable); + if (ret < 0) + return ret; + + drm_dbg(&i915->drm, "Got %s for DSC Enable\n", + (dsc_enable) ? "true" : "false"); + intel_dp->force_dsc_en = dsc_enable; + + *offp += len; + return len; +} + +static int i915_dsc_fec_support_open(struct inode *inode, + struct file *file) +{ + return single_open(file, i915_dsc_fec_support_show, + inode->i_private); +} + +static const struct file_operations i915_dsc_fec_support_fops = { + .owner = THIS_MODULE, + .open = i915_dsc_fec_support_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = i915_dsc_fec_support_write +}; + +/** + * intel_connector_debugfs_add - add i915 specific connector debugfs files + * @connector: pointer to a registered drm_connector + * + * Cleanup will be done by drm_connector_unregister() through a call to + * drm_debugfs_connector_remove(). + * + * Returns 0 on success, negative error codes on error. + */ +int intel_connector_debugfs_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct drm_i915_private *dev_priv = to_i915(connector->dev); + + /* The connector must have been registered beforehand. */ + if (!root) + return -ENODEV; + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + debugfs_create_file("i915_panel_timings", S_IRUGO, root, + connector, &i915_panel_fops); + debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, + connector, &i915_psr_sink_status_fops); + } + + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { + debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, + connector, &i915_hdcp_sink_capability_fops); + } + + if (INTEL_GEN(dev_priv) >= 10 && + (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP)) + debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, + connector, &i915_dsc_fec_support_fops); + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h new file mode 100644 index 000000000000..a3bea1ce04c2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_DEBUGFS_H__ +#define __INTEL_DISPLAY_DEBUGFS_H__ + +struct drm_connector; +struct drm_i915_private; + +#ifdef CONFIG_DEBUG_FS +int intel_display_debugfs_register(struct drm_i915_private *i915); +int intel_connector_debugfs_add(struct drm_connector *connector); +#else +static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; } +static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } +#endif + +#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 46c40db992dd..84ecf8e58523 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -15,6 +15,7 @@ #include "intel_display_types.h" #include "intel_dpio_phy.h" #include "intel_hotplug.h"
+#include "intel_pm.h" #include "intel_sideband.h" #include "intel_tc.h" #include "intel_vga.h" @@ -159,7 +160,7 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) static void intel_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name); + drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name); power_well->desc->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } @@ -167,7 +168,7 @@ static void intel_power_well_enable(struct drm_i915_private *dev_priv, static void intel_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name); + drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name); power_well->hw_enabled = false; power_well->desc->ops->disable(dev_priv, power_well); } @@ -182,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv, static void intel_power_well_put(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - WARN(!power_well->count, "Use count on power well %s is already zero", - power_well->desc->name); + drm_WARN(&dev_priv->drm, !power_well->count, + "Use count on power well %s is already zero", + power_well->desc->name); if (!--power_well->count) intel_power_well_disable(dev_priv, power_well); @@ -289,11 +291,11 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ if (intel_de_wait_for_set(dev_priv, regs->driver, HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { - DRM_DEBUG_KMS("%s power well enable timeout\n", - power_well->desc->name); + drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", + power_well->desc->name); /* An AUX timeout is expected if the TBT DP tunnel is down. */ - WARN_ON(!power_well->desc->hsw.is_tc_tbt); + drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt); } } @@ -304,11 +306,11 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); u32 ret; - ret = I915_READ(regs->bios) & req_mask ? 1 : 0; - ret |= I915_READ(regs->driver) & req_mask ? 2 : 0; + ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; + ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; if (regs->kvmr.reg) - ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0; - ret |= I915_READ(regs->debug) & req_mask ? 8 : 0; + ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; + ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0; return ret; } @@ -330,23 +332,25 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, * Skip the wait in case any of the request bits are set and print a * diagnostic message. 
*/ - wait_for((disabled = !(I915_READ(regs->driver) & + wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & HSW_PWR_WELL_CTL_STATE(pw_idx))) || (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); if (disabled) return; - DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", - power_well->desc->name, - !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); + drm_dbg_kms(&dev_priv->drm, + "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", + power_well->desc->name, + !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); } static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, enum skl_power_gate pg) { /* Timeout 5us for PG#0, for other PGs 1us */ - WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, - SKL_FUSE_PG_DIST_STATUS(pg), 1)); + drm_WARN_ON(&dev_priv->drm, + intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, + SKL_FUSE_PG_DIST_STATUS(pg), 1)); } static void hsw_power_well_enable(struct drm_i915_private *dev_priv, @@ -372,17 +376,18 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well); /* Display WA #1178: cnl */ if (IS_CANNONLAKE(dev_priv) && pw_idx >= GLK_PW_CTL_IDX_AUX_B && pw_idx <= CNL_PW_CTL_IDX_AUX_F) { - val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx)); + val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx)); val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val); + intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val); } if (wait_fuses) @@ -403,8 +408,9 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_power_well_pre_disable(dev_priv, power_well->desc->hsw.irq_pipe_mask); - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -419,14 +425,16 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - WARN_ON(!IS_ICELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val | HSW_PWR_WELL_CTL_REQ(pw_idx)); if (INTEL_GEN(dev_priv) < 12) { - val = I915_READ(ICL_PORT_CL_DW12(phy)); - I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val | ICL_LANE_ENABLE_AUX); } hsw_wait_for_power_well_enable(dev_priv, power_well); @@ -434,9 +442,9 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { - val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); + val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); + 
intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); } } @@ -449,13 +457,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - WARN_ON(!IS_ICELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = I915_READ(ICL_PORT_CL_DW12(phy)); - I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); + val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); + intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), + val & ~ICL_LANE_ENABLE_AUX); - val = I915_READ(regs->driver); - I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + val = intel_de_read(dev_priv, regs->driver); + intel_de_write(dev_priv, regs->driver, + val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -485,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv, int refs = hweight64(power_well->desc->domains & async_put_domains_mask(&dev_priv->power_domains)); - WARN_ON(refs > power_well->count); + drm_WARN_ON(&dev_priv->drm, refs > power_well->count); return refs; } @@ -515,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, continue; dig_port = enc_to_dig_port(encoder); - if (WARN_ON(!dig_port)) + if (drm_WARN_ON(&dev_priv->drm, !dig_port)) continue; if (dig_port->aux_ch != aux_ch) { @@ -526,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, break; } - if (WARN_ON(!dig_port)) + if (drm_WARN_ON(&dev_priv->drm, !dig_port)) return; - WARN_ON(!intel_tc_port_ref_held(dig_port)); + drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); } #else @@ -552,11 +562,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, icl_tc_port_assert_ref_held(dev_priv, power_well); - val = I915_READ(DP_AUX_CH_CTL(aux_ch)); + val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; if (power_well->desc->hsw.is_tc_tbt) val |= DP_AUX_CH_CTL_TBT_IO; - I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); + intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); hsw_power_well_enable(dev_priv, power_well); @@ -564,11 +574,13 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, enum tc_port tc_port; tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x2)); if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), DKL_CMN_UC_DW27_UC_HEALTH, 1)) - DRM_WARN("Timeout waiting TC uC health\n"); + drm_warn(&dev_priv->drm, + "Timeout waiting TC uC health\n"); } } @@ -596,7 +608,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, HSW_PWR_WELL_CTL_STATE(pw_idx); u32 val; - val = I915_READ(regs->driver); + val = intel_de_read(dev_priv, regs->driver); /* * On GEN9 big core due to a DMC bug the driver's request bits for PW1 @@ -606,22 +618,26 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, */ if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) - val |= I915_READ(regs->bios); + val |= intel_de_read(dev_priv, regs->bios); return (val & mask) == mask; } static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) { - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), - "DC9 already programmed to be enabled.\n"); - WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled to enable 
DC9.\n"); - WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) & - HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), - "Power well 2 on.\n"); - WARN_ONCE(intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), + "DC9 already programmed to be enabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled to enable DC9.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & + HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), + "Power well 2 on.\n"); + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); /* * TODO: check for the following to verify the conditions to enter DC9 @@ -634,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) { - WARN_ONCE(intel_irqs_enabled(dev_priv), - "Interrupts not disabled yet.\n"); - WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, - "DC5 still not disabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), + "Interrupts not disabled yet.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5, + "DC5 still not disabled.\n"); /* * TODO: check for the following to verify DC9 state was indeed @@ -655,7 +673,7 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, int rereads = 0; u32 v; - I915_WRITE(DC_STATE_EN, state); + intel_de_write(dev_priv, DC_STATE_EN, state); /* It has been observed that disabling the dc6 state sometimes * doesn't stick and dmc keeps returning old value. Make sure @@ -663,10 +681,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, * we are confident that state is exactly what we want. 
*/ do { - v = I915_READ(DC_STATE_EN); + v = intel_de_read(dev_priv, DC_STATE_EN); if (v != state) { - I915_WRITE(DC_STATE_EN, state); + intel_de_write(dev_priv, DC_STATE_EN, state); rewrites++; rereads = 0; } else if (rereads++ > 5) { @@ -676,13 +694,15 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv, } while (rewrites < 100); if (v != state) - DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", - state, v); + drm_err(&dev_priv->drm, + "Writing dc state to 0x%x failed, now 0x%x\n", + state, v); /* Most of the times we need one retry, avoid spam */ if (rewrites > 1) - DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", - state, rewrites); + drm_dbg_kms(&dev_priv->drm, + "Rewrote dc state to 0x%x %d times\n", + state, rewrites); } static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) @@ -708,10 +728,11 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv); + val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); - DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n", - dev_priv->csr.dc_state, val); + drm_dbg_kms(&dev_priv->drm, + "Resetting DC state tracking from %02x to %02x\n", + dev_priv->csr.dc_state, val); dev_priv->csr.dc_state = val; } @@ -743,18 +764,19 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) u32 val; u32 mask; - if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, + state & ~dev_priv->csr.allowed_dc_mask)) state &= dev_priv->csr.allowed_dc_mask; - val = I915_READ(DC_STATE_EN); + val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); - DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", - val & mask, state); + drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", + val & mask, state); /* Check if DMC is ignoring our DC state requests */ if ((val & mask) != dev_priv->csr.dc_state) - DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->csr.dc_state, val & mask); + drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", + dev_priv->csr.dc_state, val & mask); val &= ~mask; val |= state; @@ -791,7 +813,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) { - DRM_DEBUG_KMS("Enabling DC3CO\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); } @@ -799,10 +821,10 @@ static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) { u32 val; - DRM_DEBUG_KMS("Disabling DC3CO\n"); - val = I915_READ(DC_STATE_EN); + drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); + val = intel_de_read(dev_priv, DC_STATE_EN); val &= ~DC_STATE_DC3CO_STATUS; - I915_WRITE(DC_STATE_EN, val); + intel_de_write(dev_priv, DC_STATE_EN, val); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 @@ -814,7 +836,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv) { assert_can_enable_dc9(dev_priv); - DRM_DEBUG_KMS("Enabling DC9\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); /* * Power sequencer reset is not needed on * platforms with South Display Engine on PCH, @@ -829,7 +851,7 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv) { assert_can_disable_dc9(dev_priv); - DRM_DEBUG_KMS("Disabling DC9\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -838,10 +860,13 @@ static void bxt_disable_dc9(struct 
drm_i915_private *dev_priv) static void assert_csr_loaded(struct drm_i915_private *dev_priv) { - WARN_ONCE(!I915_READ(CSR_PROGRAM(0)), - "CSR program storage start is NULL\n"); - WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); - WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); + drm_WARN_ONCE(&dev_priv->drm, + !intel_de_read(dev_priv, CSR_PROGRAM(0)), + "CSR program storage start is NULL\n"); + drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE), + "CSR SSP Base Not fine\n"); + drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL), + "CSR HTP Not fine\n"); } static struct i915_power_well * @@ -861,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv, * the first power well and hope the WARN gets reported so we can fix * our driver. */ - WARN(1, "Power well %d not defined for this platform\n", power_well_id); + drm_WARN(&dev_priv->drm, 1, + "Power well %d not defined for this platform\n", + power_well_id); return &dev_priv->power_domains.power_wells[0]; } @@ -884,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, mutex_lock(&power_domains->lock); power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); - if (WARN_ON(!power_well)) + if (drm_WARN_ON(&dev_priv->drm, !power_well)) goto unlock; state = sanitize_target_dc_state(dev_priv, state); @@ -912,13 +939,22 @@ unlock: static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { - bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, - SKL_DISP_PW_2); + enum i915_power_well_id high_pg; - WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n"); + /* Power wells at this level and above must be disabled for DC5 entry */ + if (INTEL_GEN(dev_priv) >= 12) + high_pg = TGL_DISP_PW_3; + else + high_pg = SKL_DISP_PW_2; - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), - "DC5 already programmed to be enabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_display_power_well_is_enabled(dev_priv, high_pg), + "Power wells above platform's DC5 limit still enabled.\n"); + + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC5), + "DC5 already programmed to be enabled.\n"); assert_rpm_wakelock_held(&dev_priv->runtime_pm); assert_csr_loaded(dev_priv); @@ -928,22 +964,25 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv) { assert_can_enable_dc5(dev_priv); - DRM_DEBUG_KMS("Enabling DC5\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n"); /* Wa Display #1183: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) { - WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, - "Backlight is not disabled.\n"); - WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), - "DC6 already programmed to be enabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Backlight is not disabled.\n"); + drm_WARN_ONCE(&dev_priv->drm, + (intel_de_read(dev_priv, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC6), + "DC6 already programmed to be enabled.\n"); assert_csr_loaded(dev_priv); } @@ -952,12 +991,12 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv) { assert_can_enable_dc6(dev_priv); - 
DRM_DEBUG_KMS("Enabling DC6\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n"); /* Wa Display #1183: skl,kbl,cfl */ if (IS_GEN9_BC(dev_priv)) - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | - SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, + intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } @@ -968,15 +1007,15 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx); - u32 bios_req = I915_READ(regs->bios); + u32 bios_req = intel_de_read(dev_priv, regs->bios); /* Take over the request bit if set by BIOS. */ if (bios_req & mask) { - u32 drv_req = I915_READ(regs->driver); + u32 drv_req = intel_de_read(dev_priv, regs->driver); if (!(drv_req & mask)) - I915_WRITE(regs->driver, drv_req | mask); - I915_WRITE(regs->bios, bios_req & ~mask); + intel_de_write(dev_priv, regs->driver, drv_req | mask); + intel_de_write(dev_priv, regs->bios, bios_req & ~mask); } } @@ -1022,22 +1061,25 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && - (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); + return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); } static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) { - u32 tmp = I915_READ(DBUF_CTL); + u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv); + u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask; - WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != - (DBUF_POWER_STATE | DBUF_POWER_REQUEST), - "Unexpected DBuf power power state (0x%08x)\n", tmp); + drm_WARN(&dev_priv->drm, + hw_enabled_dbuf_slices != enabled_dbuf_slices, + "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n", + hw_enabled_dbuf_slices, + enabled_dbuf_slices); } static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) { - struct intel_cdclk_state cdclk_state = {}; + struct intel_cdclk_config cdclk_config = {}; if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); @@ -1046,9 +1088,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - dev_priv->display.get_cdclk(dev_priv, &cdclk_state); + dev_priv->display.get_cdclk(dev_priv, &cdclk_config); /* Can't read out voltage_level so can't use intel_cdclk_changed() */ - WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state)); + drm_WARN_ON(&dev_priv->drm, + intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, + &cdclk_config)); gen9_assert_dbuf_enabled(dev_priv); @@ -1108,9 +1152,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_A); - if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) 
== 0) i830_enable_pipe(dev_priv, PIPE_B); } @@ -1124,8 +1168,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && + intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1163,9 +1207,10 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl); if (wait_for(COND, 100)) - DRM_ERROR("timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL)); #undef COND @@ -1204,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, * We only ever set the power-on and power-gate states, anything * else is unexpected. */ - WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) && - state != PUNIT_PWRGT_PWR_GATE(pw_idx)); + drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) && + state != PUNIT_PWRGT_PWR_GATE(pw_idx)); if (state == ctrl) enabled = true; @@ -1214,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, * is poking at the power controls too. */ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; - WARN_ON(ctrl != state); + drm_WARN_ON(&dev_priv->drm, ctrl != state); vlv_punit_put(dev_priv); @@ -1231,21 +1276,22 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - val = I915_READ(DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D); val &= DPOUNIT_CLOCK_GATE_DISABLE; val |= VRHUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D, val); /* * Disable trickle feed and enable pnd deadline calculation */ - I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - I915_WRITE(CBR1_VLV, 0); - - WARN_ON(dev_priv->rawclk_freq == 0); + intel_de_write(dev_priv, MI_ARB_VLV, + MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + intel_de_write(dev_priv, CBR1_VLV, 0); - I915_WRITE(RAWCLK_FREQ_VLV, - DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); + drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0); + intel_de_write(dev_priv, RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, + 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) @@ -1262,13 +1308,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) * CHV DPLL B/C have some issues if VGA mode is enabled. */ for_each_pipe(dev_priv, pipe) { - u32 val = I915_READ(DPLL(pipe)); + u32 val = intel_de_read(dev_priv, DPLL(pipe)); val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; - I915_WRITE(DPLL(pipe), val); + intel_de_write(dev_priv, DPLL(pipe), val); } vlv_init_display_clock_gating(dev_priv); @@ -1348,7 +1394,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, * both PLLs disabled, or we risk losing DPIO and PLL * synchronization. 
*/ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST); + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); } static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, @@ -1360,7 +1407,8 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, pipe); /* Assert common reset */ - I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST); + intel_de_write(dev_priv, DPIO_CTL, + intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); vlv_set_power_well(dev_priv, power_well, false); } @@ -1422,7 +1470,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) */ if (BITS_SET(phy_control, PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) && - (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) + (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0) phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1); if (BITS_SET(phy_control, @@ -1467,9 +1515,10 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) */ if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, 10)) - DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", - I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask, - phy_status, dev_priv->chv_phy_control); + drm_err(&dev_priv->drm, + "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", + intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask, + phy_status, dev_priv->chv_phy_control); } #undef BITS_SET @@ -1481,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, enum pipe pipe; u32 tmp; - WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + drm_WARN_ON_ONCE(&dev_priv->drm, + power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { pipe = PIPE_A; @@ -1499,7 +1549,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, /* Poll for phypwrgood signal */ if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), 1)) - DRM_ERROR("Display PHY %d is not power up\n", phy); + drm_err(&dev_priv->drm, "Display PHY %d is not power up\n", + phy); vlv_dpio_get(dev_priv); @@ -1527,10 +1578,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_dpio_put(dev_priv); dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); - DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + drm_dbg_kms(&dev_priv->drm, + "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); assert_chv_phy_status(dev_priv); } @@ -1540,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, { enum dpio_phy phy; - WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && - power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); + drm_WARN_ON_ONCE(&dev_priv->drm, + power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC && + power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D); if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) { phy = DPIO_PHY0; @@ -1553,12 +1607,14 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, } dev_priv->chv_phy_control &= 
~PHY_COM_LANE_RESET_DEASSERT(phy); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); vlv_set_power_well(dev_priv, power_well, false); - DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", - phy, dev_priv->chv_phy_control); + drm_dbg_kms(&dev_priv->drm, + "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", + phy, dev_priv->chv_phy_control); /* PHY is fully reset now, so we can enable the PHY state asserts */ dev_priv->chv_phy_assert[phy] = true; @@ -1621,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; - WARN(actual != expected, - "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", - !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN), - !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN), - reg, val); + drm_WARN(&dev_priv->drm, actual != expected, + "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", + !!(actual & DPIO_ALLDL_POWERDOWN), + !!(actual & DPIO_ANYDL_POWERDOWN), + !!(expected & DPIO_ALLDL_POWERDOWN), + !!(expected & DPIO_ANYDL_POWERDOWN), + reg, val); } bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, @@ -1646,10 +1704,12 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, else dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); - DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", - phy, ch, dev_priv->chv_phy_control); + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", + phy, ch, dev_priv->chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1677,10 +1737,12 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, else dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); - DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", - phy, ch, mask, dev_priv->chv_phy_control); + drm_dbg_kms(&dev_priv->drm, + "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", + phy, ch, mask, dev_priv->chv_phy_control); assert_chv_phy_status(dev_priv); @@ -1703,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, * We only ever set the power-on and power-gate states, anything * else is unexpected. */ - WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe)); + drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && + state != DP_SSS_PWR_GATE(pipe)); enabled = state == DP_SSS_PWR_ON(pipe); /* @@ -1711,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, * is poking at the power controls too. 
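The WARN_ON to drm_WARN_ON rewrites above follow the same motive. A loose standalone approximation of what the drm_WARN family adds over a bare WARN — prefixing the message with the device identity — might look like this (the real macros live in drm_print.h and route through the kernel's WARN machinery; the fprintf body here is purely illustrative):

#include <stdio.h>

struct drm_device { const char *dev_name; };

/* Approximation only: the in-kernel drm_WARN() wraps WARN() and prints
 * the driver and device name before the caller's format string. */
#define drm_WARN(drm, cond, fmt, ...) \
        ((cond) ? (fprintf(stderr, "[drm] %s: " fmt, (drm)->dev_name, \
                           ##__VA_ARGS__), 1) : 0)

#define drm_WARN_ON(drm, cond) drm_WARN(drm, cond, "WARN_ON(%s)\n", #cond)

int main(void)
{
        struct drm_device drm = { .dev_name = "0000:00:02.0" };
        int ctrl = 1, state = 2;

        drm_WARN_ON(&drm, ctrl != state);   /* fires, names the device */
        return 0;
}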
*/ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); - WARN_ON(ctrl << 16 != state); + drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); vlv_punit_put(dev_priv); @@ -1742,9 +1805,10 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); if (wait_for(COND, 100)) - DRM_ERROR("timeout setting power well state %08x (%08x)\n", - state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); + drm_err(&dev_priv->drm, + "timeout setting power well state %08x (%08x)\n", + state, + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); #undef COND @@ -1752,6 +1816,13 @@ out: vlv_punit_put(dev_priv); } +static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, + dev_priv->chv_phy_control); +} + static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -1981,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, power_domains = &dev_priv->power_domains; - WARN(!power_domains->domain_use_count[domain], - "Use count on domain %s is already zero\n", - name); - WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain), - "Async disabling of domain %s is pending\n", - name); + drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], + "Use count on domain %s is already zero\n", + name); + drm_WARN(&dev_priv->drm, + async_put_domains_mask(power_domains) & BIT_ULL(domain), + "Async disabling of domain %s is pending\n", + name); power_domains->domain_use_count[domain]--; @@ -2131,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, goto out_verify; } - WARN_ON(power_domains->domain_use_count[domain] != 1); + drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); /* Let a pending work requeue itself or queue a new one. 
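For readers following __intel_display_power_put_async(): the idea is that the final reference on a domain is not dropped synchronously but parked on a wakeref and handed to delayed work, so tight get/put cycles don't bounce the hardware power wells. A deliberately simplified, hypothetical model of that bookkeeping (no locking, the "work" is run by hand):

#include <stdbool.h>
#include <stdio.h>

struct power_domains {
        int use_count;
        bool async_put_pending;   /* stands in for async_put_wakeref */
};

static void domain_get(struct power_domains *pd)
{
        if (pd->use_count++ == 0 && !pd->async_put_pending)
                puts("enable power well");
        pd->async_put_pending = false;   /* a new get cancels the pending drop */
}

static void domain_put_async(struct power_domains *pd)
{
        if (--pd->use_count == 0)
                pd->async_put_pending = true;   /* defer to the worker */
}

static void async_put_work(struct power_domains *pd)
{
        if (pd->async_put_pending && pd->use_count == 0) {
                pd->async_put_pending = false;
                puts("disable power well");
        }
}

int main(void)
{
        struct power_domains pd = {0};

        domain_get(&pd);
        domain_put_async(&pd);   /* no disable yet: parked on the worker */
        domain_get(&pd);         /* re-get before the worker ran: stays on */
        async_put_work(&pd);     /* nothing to do */
        domain_put_async(&pd);
        async_put_work(&pd);     /* now it powers down */
        return 0;
}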
*/ if (power_domains->async_put_wakeref) { @@ -2206,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915) verify_async_put_domains_state(power_domains); - WARN_ON(power_domains->async_put_wakeref); + drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) @@ -2674,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_INIT)) #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ - TGL_PW_2_POWER_DOMAINS | \ + TGL_PW_3_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ @@ -2734,7 +2806,7 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { }; static const struct i915_power_well_ops chv_pipe_power_well_ops = { - .sync_hw = i9xx_power_well_sync_hw_noop, + .sync_hw = chv_pipe_power_well_sync_hw, .enable = chv_pipe_power_well_enable, .disable = chv_pipe_power_well_disable, .is_enabled = chv_pipe_power_well_enabled, @@ -3870,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .name = "power well 3", .domains = TGL_PW_3_POWER_DOMAINS, .ops = &hsw_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = TGL_DISP_PW_3, { .hsw.regs = &hsw_power_well_regs, .hsw.idx = ICL_PW_CTL_IDX_PW_3, @@ -4068,7 +4140,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX D TBT1", .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4079,7 +4151,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX E TBT2", .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4090,7 +4162,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX F TBT3", .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4101,7 +4173,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX G TBT4", .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4112,7 +4184,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX H TBT5", .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4123,7 +4195,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { { .name = "AUX I TBT6", .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, - .ops = &hsw_power_well_ops, + .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { .hsw.regs = &icl_aux_power_well_regs, @@ -4204,11 +4276,13 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, } else if (enable_dc == -1) { requested_dc = max_dc; } else if (enable_dc > max_dc && enable_dc <= 4) { - DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", - enable_dc, max_dc); + drm_dbg_kms(&dev_priv->drm, + "Adjusting requested max DC state (%d->%d)\n", + enable_dc, max_dc); requested_dc = max_dc; } else { - DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); + drm_err(&dev_priv->drm, + 
"Unexpected value for enable_dc (%d)\n", enable_dc); requested_dc = max_dc; } @@ -4227,7 +4301,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, break; } - DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); + drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); return mask; } @@ -4371,16 +4445,16 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, { u32 val, status; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); udelay(10); - status = I915_READ(reg) & DBUF_POWER_STATE; + status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; if ((enable && !status) || (!enable && status)) { - DRM_ERROR("DBus power %s timeout!\n", - enable ? "enable" : "disable"); + drm_err(&dev_priv->drm, "DBus power %s timeout!\n", + enable ? "enable" : "disable"); return false; } return true; @@ -4388,80 +4462,60 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) { - intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); + icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1)); } static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) { - intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); -} - -static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 11) - return 1; - return 2; + icl_dbuf_slices_update(dev_priv, 0); } void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices) { - const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - bool ret; + int i; + int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; + struct i915_power_domains *power_domains = &dev_priv->power_domains; - if (req_slices > intel_dbuf_max_slices(dev_priv)) { - DRM_ERROR("Invalid number of dbuf slices requested\n"); - return; - } + drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices, + "Invalid number of dbuf slices requested\n"); - if (req_slices == hw_enabled_slices || req_slices == 0) - return; + DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices); - if (req_slices > hw_enabled_slices) - ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); - else - ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); + /* + * Might be running this in parallel to gen9_dc_off_power_well_enable + * being called from intel_dp_detect for instance, + * which causes assertion triggered by race condition, + * as gen9_assert_dbuf_enabled might preempt this when registers + * were already updated, while dev_priv was not. 
+ */ + mutex_lock(&power_domains->lock); + + for (i = 0; i < max_slices; i++) { + intel_dbuf_slice_set(dev_priv, + DBUF_CTL_S(i), + (req_slices & BIT(i)) != 0); + } - if (ret) - dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; + dev_priv->enabled_dbuf_slices_mask = req_slices; + + mutex_unlock(&power_domains->lock); } static void icl_dbuf_enable(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); - I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL_S2); - - udelay(10); - - if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || - !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); - else - /* - * FIXME: for now pretend that we only have 1 slice, see - * intel_enabled_dbuf_slices_num(). - */ - dev_priv->wm.skl_hw.ddb.enabled_slices = 1; + skl_ddb_get_hw_state(dev_priv); + /* + * Just power up at least 1 slice, we will + * figure out later which slices we have and what we need. + */ + icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask | + BIT(DBUF_S1)); } static void icl_dbuf_disable(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); - I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL_S2); - - udelay(10); - - if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || - (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power disable timeout!\n"); - else - /* - * FIXME: for now pretend that the first slice is always - * enabled, see intel_enabled_dbuf_slices_num(). - */ - dev_priv->wm.skl_hw.ddb.enabled_slices = 1; + icl_dbuf_slices_update(dev_priv, 0); } static void icl_mbus_init(struct drm_i915_private *dev_priv) @@ -4472,19 +4526,21 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) MBUS_ABOX_BT_CREDIT_POOL2_MASK | MBUS_ABOX_B_CREDIT_MASK | MBUS_ABOX_BW_CREDIT_MASK; - - val = I915_READ(MBUS_ABOX_CTL); - val &= ~mask; - val |= MBUS_ABOX_BT_CREDIT_POOL1(16) | + val = MBUS_ABOX_BT_CREDIT_POOL1(16) | MBUS_ABOX_BT_CREDIT_POOL2(16) | MBUS_ABOX_B_CREDIT(1) | MBUS_ABOX_BW_CREDIT(1); - I915_WRITE(MBUS_ABOX_CTL, val); + + intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val); + if (INTEL_GEN(dev_priv) >= 12) { + intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val); + intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val); + } } static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) { - u32 val = I915_READ(LCPLL_CTL); + u32 val = intel_de_read(dev_priv, LCPLL_CTL); /* * The LCPLL register should be turned on by the BIOS. 
For now @@ -4493,13 +4549,13 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) */ if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); + drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); + drm_err(&dev_priv->drm, "LCPLL is disabled\n"); if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) - DRM_ERROR("LCPLL not using non-SSC reference\n"); + drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); } static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) @@ -4511,26 +4567,26 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", pipe_name(crtc->pipe)); - I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2), + I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), "Display power well on\n"); - I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); - I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); - I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, + I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, "Panel power on\n"); - I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); if (IS_HASWELL(dev_priv)) - I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, "CPU PWM2 enabled\n"); - I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, "PCH PWM1 enabled\n"); - I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, "Utility pin enabled\n"); - I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, + I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); /* @@ -4545,9 +4601,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) { if (IS_HASWELL(dev_priv)) - return I915_READ(D_COMP_HSW); + return intel_de_read(dev_priv, D_COMP_HSW); else - return I915_READ(D_COMP_BDW); + return intel_de_read(dev_priv, D_COMP_BDW); } static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) @@ -4555,10 +4611,11 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) if (IS_HASWELL(dev_priv)) { if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) - DRM_DEBUG_KMS("Failed to write to D_COMP\n"); + drm_dbg_kms(&dev_priv->drm, + "Failed to write to D_COMP\n"); } else { - I915_WRITE(D_COMP_BDW, val); - POSTING_READ(D_COMP_BDW); + intel_de_write(dev_priv, D_COMP_BDW, val); + intel_de_posting_read(dev_priv, D_COMP_BDW); } } @@ -4577,25 +4634,25 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, assert_can_disable_lcpll(dev_priv); - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); if (switch_to_fclk) { val |= LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, 
LCPLL_CTL, val); - if (wait_for_us(I915_READ(LCPLL_CTL) & + if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE, 1)) - DRM_ERROR("Switching to FCLK failed\n"); + drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); } val |= LCPLL_PLL_DISABLE; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); + intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_posting_read(dev_priv, LCPLL_CTL); if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) - DRM_ERROR("LCPLL still locked\n"); + drm_err(&dev_priv->drm, "LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); val |= D_COMP_COMP_DISABLE; @@ -4604,13 +4661,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) - DRM_ERROR("D_COMP RCOMP still in progress\n"); + drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); if (allow_power_down) { - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val |= LCPLL_POWER_DOWN_ALLOW; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); + intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_posting_read(dev_priv, LCPLL_CTL); } } @@ -4622,7 +4679,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) @@ -4636,8 +4693,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; - I915_WRITE(LCPLL_CTL, val); - POSTING_READ(LCPLL_CTL); + intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_posting_read(dev_priv, LCPLL_CTL); } val = hsw_read_dcomp(dev_priv); @@ -4645,27 +4702,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~D_COMP_COMP_DISABLE; hsw_write_dcomp(dev_priv, val); - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val &= ~LCPLL_PLL_DISABLE; - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, LCPLL_CTL, val); if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) - DRM_ERROR("LCPLL not locked yet\n"); + drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { - val = I915_READ(LCPLL_CTL); + val = intel_de_read(dev_priv, LCPLL_CTL); val &= ~LCPLL_CD_SOURCE_FCLK; - I915_WRITE(LCPLL_CTL, val); + intel_de_write(dev_priv, LCPLL_CTL, val); - if (wait_for_us((I915_READ(LCPLL_CTL) & + if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) - DRM_ERROR("Switching back to LCPLL failed\n"); + drm_err(&dev_priv->drm, + "Switching back to LCPLL failed\n"); } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); } /* @@ -4695,12 +4753,12 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) { u32 val; - DRM_DEBUG_KMS("Enabling package C8+\n"); + drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); if (HAS_PCH_LPT_LP(dev_priv)) { - val = I915_READ(SOUTH_DSPCLK_GATE_D); + val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); } 
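The hsw_disable_lcpll()/hsw_restore_lcpll() hunks just above lean on the wait_for_us()/intel_de_wait_for_* helpers: write a request bit, then poll a status bit with a bounded busy-wait and report a timeout. Outside the kernel that pattern reduces to roughly the following (clock_gettime() stands in for the driver's jiffies/ktime plumbing, and the status register is faked so the loop terminates):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000u + ts.tv_nsec / 1000u;
}

/* Fake status register: pretend the FCLK handshake completes on read 3. */
static uint32_t read_status(void)
{
        static int reads;
        return ++reads >= 3 ? 0x1 : 0x0;   /* bit 0 ~ "FCLK done" */
}

/* Rough analogue of wait_for_us(COND, timeout): poll until the condition
 * holds or the budget is spent; return 0 on success, -1 on timeout. */
static int wait_for_us(uint32_t mask, uint64_t timeout_us)
{
        uint64_t end = now_us() + timeout_us;

        do {
                if (read_status() & mask)
                        return 0;
        } while (now_us() < end);
        return -1;
}

int main(void)
{
        if (wait_for_us(0x1, 1000))
                fprintf(stderr, "Switching to FCLK failed\n");
        else
                puts("FCLK handshake done");
        return 0;
}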
lpt_disable_clkout_dp(dev_priv); @@ -4711,15 +4769,15 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv) { u32 val; - DRM_DEBUG_KMS("Disabling package C8+\n"); + drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); hsw_restore_lcpll(dev_priv); intel_init_pch_refclk(dev_priv); if (HAS_PCH_LPT_LP(dev_priv)) { - val = I915_READ(SOUTH_DSPCLK_GATE_D); + val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); val |= PCH_LP_PARTITION_LEVEL_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); } } @@ -4737,14 +4795,14 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, reset_bits = RESET_PCH_HANDSHAKE_ENABLE; } - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if (enable) val |= reset_bits; else val &= ~reset_bits; - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } static void skl_display_core_init(struct drm_i915_private *dev_priv, @@ -4769,7 +4827,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - intel_cdclk_init(dev_priv); + intel_cdclk_init_hw(dev_priv); gen9_dbuf_enable(dev_priv); @@ -4786,7 +4844,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit(dev_priv); + intel_cdclk_uninit_hw(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ /* disable PG1 and Misc I/O */ @@ -4830,7 +4888,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume mutex_unlock(&power_domains->lock); - intel_cdclk_init(dev_priv); + intel_cdclk_init_hw(dev_priv); gen9_dbuf_enable(dev_priv); @@ -4847,7 +4905,7 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv) gen9_dbuf_disable(dev_priv); - intel_cdclk_uninit(dev_priv); + intel_cdclk_uninit_hw(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -4889,7 +4947,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume mutex_unlock(&power_domains->lock); /* 5. Enable CD clock */ - intel_cdclk_init(dev_priv); + intel_cdclk_init_hw(dev_priv); /* 6. Enable DBUF */ gen9_dbuf_enable(dev_priv); @@ -4911,7 +4969,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_dbuf_disable(dev_priv); /* 3. Disable CD clock */ - intel_cdclk_uninit(dev_priv); + intel_cdclk_uninit_hw(dev_priv); /* * 4. Disable Power Well 1 (PG1). 
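intel_pch_reset_handshake(), a few hunks above, is the canonical conditional set/clear-bits update (val |= bits when enabling, val &= ~bits otherwise). Factored out as a helper, the same shape is just:

#include <stdint.h>
#include <stdio.h>

/* Set @bits in @val when @enable, clear them otherwise - the same shape
 * as the RESET_PCH_HANDSHAKE_ENABLE update in intel_pch_reset_handshake(). */
static uint32_t update_bits(uint32_t val, uint32_t bits, int enable)
{
        return enable ? (val | bits) : (val & ~bits);
}

int main(void)
{
        uint32_t reg = 0x0000f00d;
        uint32_t reset_bits = 1u << 4;     /* illustrative bit position */

        reg = update_bits(reg, reset_bits, 1);
        printf("enabled:  0x%08x\n", reg);
        reg = update_bits(reg, reset_bits, 0);
        printf("disabled: 0x%08x\n", reg);
        return 0;
}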
@@ -4970,25 +5028,23 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) break; if (table[i].page_mask == 0) { - DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n"); - I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE); - I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE); + drm_dbg(&dev_priv->drm, + "Unknown memory configuration; disabling address buddy logic.\n"); + intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE); + intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE); } else { - u32 val; - - I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask); - I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask); + intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK, + table[i].page_mask); + intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK, + table[i].page_mask); /* Wa_22010178259:tgl */ - val = I915_READ(BW_BUDDY1_CTL); - val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK; - val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8); - I915_WRITE(BW_BUDDY1_CTL, val); - - val = I915_READ(BW_BUDDY2_CTL); - val &= ~BW_BUDDY_TLB_REQ_TIMER_MASK; - val |= REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8); - I915_WRITE(BW_BUDDY2_CTL, val); + intel_de_rmw(dev_priv, BW_BUDDY1_CTL, + BW_BUDDY_TLB_REQ_TIMER_MASK, + REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8)); + intel_de_rmw(dev_priv, BW_BUDDY2_CTL, + BW_BUDDY_TLB_REQ_TIMER_MASK, + REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8)); } } @@ -5016,7 +5072,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); /* 4. Enable CDCLK. */ - intel_cdclk_init(dev_priv); + intel_cdclk_init_hw(dev_priv); /* 5. Enable DBUF. */ icl_dbuf_enable(dev_priv); @@ -5045,7 +5101,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) icl_dbuf_disable(dev_priv); /* 3. Disable CD clock */ - intel_cdclk_uninit(dev_priv); + intel_cdclk_uninit_hw(dev_priv); /* * 4. Disable Power Well 1 (PG1). @@ -5090,7 +5146,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) * current lane status. 
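The tgl_bw_buddy_init() hunk above is also a nice showcase for intel_de_rmw(), which collapses the old read/clear/set/write sequence into one call, and REG_FIELD_PREP(), which shifts a value into the register field described by its mask. Assuming the usual FIELD_PREP semantics (contiguous mask, value shifted to the mask's low bit), a standalone equivalent is:

#include <stdint.h>
#include <stdio.h>

/* Shift @val into the field described by @mask (mask must be contiguous);
 * assumed equivalent of the kernel's REG_FIELD_PREP(). Uses a GCC/Clang
 * builtin for the shift amount. */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

static uint32_t fake_reg = 0xffffffff;     /* stand-in for BW_BUDDY1_CTL */

/* rmw: read, clear @clear, OR in @set, write back - the intel_de_rmw() shape. */
static void reg_rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
        *reg = (*reg & ~clear) | set;
}

int main(void)
{
        const uint32_t TLB_REQ_TIMER_MASK = 0x3f << 16;   /* illustrative mask */

        reg_rmw(&fake_reg, TLB_REQ_TIMER_MASK,
                FIELD_PREP(TLB_REQ_TIMER_MASK, 0x8));
        printf("0x%08x\n", fake_reg);
        return 0;
}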
*/ if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { - u32 status = I915_READ(DPLL(PIPE_A)); + u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); unsigned int mask; mask = status & DPLL_PORTB_READY_MASK; @@ -5121,7 +5177,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) } if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { - u32 status = I915_READ(DPIO_PHY_STATUS); + u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); unsigned int mask; mask = status & DPLL_PORTD_READY_MASK; @@ -5142,10 +5198,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv) dev_priv->chv_phy_assert[DPIO_PHY1] = true; } - I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); + drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", + dev_priv->chv_phy_control); - DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n", - dev_priv->chv_phy_control); + /* Defer application of initial phy_control to enabling the powerwell */ } static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) @@ -5158,10 +5214,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) /* If the display might be already active skip this */ if (cmn->desc->ops->is_enabled(dev_priv, cmn) && disp2d->desc->ops->is_enabled(dev_priv, disp2d) && - I915_READ(DPIO_CTL) & DPIO_CMNRST) + intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) return; - DRM_DEBUG_KMS("toggling display PHY side reset\n"); + drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); /* cmnlane needs DPLL registers */ disp2d->desc->ops->enable(dev_priv, disp2d); @@ -5189,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) { - WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), - "VED not power gated\n"); + drm_WARN(&dev_priv->drm, + !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), + "VED not power gated\n"); } static void assert_isp_power_gated(struct drm_i915_private *dev_priv) @@ -5201,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv) {} }; - WARN(!pci_dev_present(isp_ids) && - !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), - "ISP not power gated\n"); + drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && + !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), + "ISP not power gated\n"); } static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); @@ -5230,9 +5287,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) power_domains->initializing = true; - /* Must happen before power domain init on VLV/CHV */ - intel_update_rawclk(i915); - if (INTEL_GEN(i915) >= 11) { icl_display_core_init(i915, resume); } else if (IS_CANNONLAKE(i915)) { @@ -5336,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->power_domains; - WARN_ON(power_domains->wakeref); + drm_WARN_ON(&i915->drm, power_domains->wakeref); power_domains->wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); @@ -5418,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915) intel_power_domains_init_hw(i915, true); power_domains->display_core_suspended = false; } else { - WARN_ON(power_domains->wakeref); + drm_WARN_ON(&i915->drm, power_domains->wakeref); power_domains->wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } @@ -5436,13 +5490,13 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) 
for_each_power_well(i915, power_well) { enum intel_display_power_domain domain; - DRM_DEBUG_DRIVER("%-25s %d\n", - power_well->desc->name, power_well->count); + drm_dbg(&i915->drm, "%-25s %d\n", + power_well->desc->name, power_well->count); for_each_power_domain(domain, power_well->desc->domains) - DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(domain), - power_domains->domain_use_count[domain]); + drm_dbg(&i915->drm, " %-23s %d\n", + intel_display_power_domain_str(domain), + power_domains->domain_use_count[domain]); } } @@ -5475,19 +5529,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915) enabled = power_well->desc->ops->is_enabled(i915, power_well); if ((power_well->count || power_well->desc->always_on) != enabled) - DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)", - power_well->desc->name, - power_well->count, enabled); + drm_err(&i915->drm, + "power well %s state mismatch (refcount %d/enabled %d)", + power_well->desc->name, + power_well->count, enabled); domains_count = 0; for_each_power_domain(domain, power_well->desc->domains) domains_count += power_domains->domain_use_count[domain]; if (power_well->count != domains_count) { - DRM_ERROR("power well %s refcount/domain refcount mismatch " - "(refcount %d/domains refcount %d)\n", - power_well->desc->name, power_well->count, - domains_count); + drm_err(&i915->drm, + "power well %s refcount/domain refcount mismatch " + "(refcount %d/domains refcount %d)\n", + power_well->desc->name, power_well->count, + domains_count); dump_domain_info = true; } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 2608a65af7fa..da64a5edae7a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -100,6 +100,7 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, + TGL_DISP_PW_3, SKL_DISP_DC_OFF, }; @@ -307,6 +308,11 @@ intel_display_power_put_async(struct drm_i915_private *i915, } #endif +enum dbuf_slice { + DBUF_S1, + DBUF_S2, +}; + #define with_intel_display_power(i915, domain, wf) \ for ((wf) = intel_display_power_get((i915), (domain)); (wf); \ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 888ea8a170d1..5e00e611f077 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -39,13 +39,14 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> -#include <drm/i915_drm.h> #include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> #include "i915_drv.h" +#include "intel_de.h" struct drm_printer; +struct __intel_global_objs_state; /* * Display related stuff @@ -139,6 +140,9 @@ struct intel_encoder { int (*compute_config)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); + int (*compute_config_late)(struct intel_encoder *, + struct intel_crtc_state *, + struct drm_connector_state *); void (*update_prepare)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); @@ -214,6 +218,9 @@ struct intel_panel { u8 controller; /* bxt+ only */ struct pwm_device *pwm; + /* DPCD backlight */ + u8 pwmgen_bit_count; + struct backlight_device *device; /* Connector and platform specific backlight functions */ @@ -458,25 +465,8 @@ struct 
intel_atomic_state { intel_wakeref_t wakeref; - struct { - /* - * Logical state of cdclk (used for all scaling, watermark, - * etc. calculations and checks). This is computed as if all - * enabled crtcs were active. - */ - struct intel_cdclk_state logical; - - /* - * Actual state of cdclk, can be different from the logical - * state only when all crtc's are DPMS off. - */ - struct intel_cdclk_state actual; - - int force_min_cdclk; - bool force_min_cdclk_changed; - /* pipe to which cd2x update is synchronized */ - enum pipe pipe; - } cdclk; + struct __intel_global_objs_state *global_objs; + int num_global_objs; bool dpll_set, modeset; @@ -491,10 +481,6 @@ struct intel_atomic_state { u8 active_pipe_changes; u8 active_pipes; - /* minimum acceptable cdclk for each pipe */ - int min_cdclk[I915_MAX_PIPES]; - /* minimum acceptable voltage level for each pipe */ - u8 min_voltage_level[I915_MAX_PIPES]; struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; @@ -508,14 +494,11 @@ struct intel_atomic_state { /* * active_pipes - * min_cdclk[] - * min_voltage_level[] - * cdclk.* */ bool global_state_changed; - /* Gen9+ only */ - struct skl_ddb_values wm_results; + /* Number of enabled DBuf slices */ + u8 enabled_dbuf_slices_mask; struct i915_sw_fence commit_ready; @@ -611,6 +594,7 @@ struct intel_plane_state { struct intel_initial_plane_config { struct intel_framebuffer *fb; + struct i915_vma *vma; unsigned int tiling; int size; u32 base; @@ -657,15 +641,30 @@ struct intel_crtc_scaler_state { /* Flag to use the scanline counter instead of the pixel counter */ #define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2) +struct intel_wm_level { + bool enable; + u32 pri_val; + u32 spr_val; + u32 cur_val; + u32 fbc_val; +}; + struct intel_pipe_wm { struct intel_wm_level wm[5]; - u32 linetime; bool fbc_wm_enabled; bool pipe_enabled; bool sprites_enabled; bool sprites_scaled; }; +struct skl_wm_level { + u16 min_ddb_alloc; + u16 plane_res_b; + u8 plane_res_l; + bool plane_en; + bool ignore_lines; +}; + struct skl_plane_wm { struct skl_wm_level wm[8]; struct skl_wm_level uv_wm[8]; @@ -675,7 +674,6 @@ struct skl_plane_wm { struct skl_pipe_wm { struct skl_plane_wm planes[I915_MAX_PLANES]; - u32 linetime; }; enum vlv_wm_level { @@ -1046,6 +1044,10 @@ struct intel_crtc_state { struct drm_dsc_config config; } dsc; + /* HSW+ linetime watermarks */ + u16 linetime; + u16 ips_linetime; + /* Forward Error correction State */ bool fec_enable; @@ -1059,6 +1061,32 @@ struct intel_crtc_state { enum transcoder mst_master_transcoder; }; +enum intel_pipe_crc_source { + INTEL_PIPE_CRC_SOURCE_NONE, + INTEL_PIPE_CRC_SOURCE_PLANE1, + INTEL_PIPE_CRC_SOURCE_PLANE2, + INTEL_PIPE_CRC_SOURCE_PLANE3, + INTEL_PIPE_CRC_SOURCE_PLANE4, + INTEL_PIPE_CRC_SOURCE_PLANE5, + INTEL_PIPE_CRC_SOURCE_PLANE6, + INTEL_PIPE_CRC_SOURCE_PLANE7, + INTEL_PIPE_CRC_SOURCE_PIPE, + /* TV/DP on pre-gen5/vlv can't use the pipe source. 
*/ + INTEL_PIPE_CRC_SOURCE_TV, + INTEL_PIPE_CRC_SOURCE_DP_B, + INTEL_PIPE_CRC_SOURCE_DP_C, + INTEL_PIPE_CRC_SOURCE_DP_D, + INTEL_PIPE_CRC_SOURCE_AUTO, + INTEL_PIPE_CRC_SOURCE_MAX, +}; + +#define INTEL_PIPE_CRC_ENTRIES_NR 128 +struct intel_pipe_crc { + spinlock_t lock; + int skipped; + enum intel_pipe_crc_source source; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -1102,6 +1130,10 @@ struct intel_crtc { /* per pipe DSB related info */ struct intel_dsb dsb; + +#ifdef CONFIG_DEBUG_FS + struct intel_pipe_crc pipe_crc; +#endif }; struct intel_plane { @@ -1181,8 +1213,6 @@ struct intel_hdmi { }; struct intel_dp_mst_encoder; -#define DP_MAX_DOWNSTREAM_PORTS 0x10 - /* * enum link_m_n_set: * When platform provides two set of M_N registers for dp, we can @@ -1250,6 +1280,7 @@ struct intel_dp { int max_link_rate; /* sink or branch descriptor */ struct drm_dp_desc desc; + u32 edid_quirks; struct drm_dp_aux aux; u32 aux_busy_last_status; u8 train_set[4]; @@ -1422,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe) } static inline struct intel_crtc * +intel_get_first_crtc(struct drm_i915_private *dev_priv) +{ + return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0)); +} + +static inline struct intel_crtc * intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { + /* pipe_to_crtc_mapping may have hole on any of 3 display pipe system */ + drm_WARN_ON(&dev_priv->drm, + !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe))); return dev_priv->pipe_to_crtc_mapping[pipe]; } @@ -1469,7 +1509,7 @@ enc_to_dig_port(struct intel_encoder *encoder) } static inline struct intel_digital_port * -conn_to_dig_port(struct intel_connector *connector) +intel_attached_dig_port(struct intel_connector *connector) { return enc_to_dig_port(intel_attached_encoder(connector)); } @@ -1486,6 +1526,11 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder) return &enc_to_dig_port(encoder)->dp; } +static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector) +{ + return enc_to_intel_dp(intel_attached_encoder(connector)); +} + static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) { switch (encoder->type) { @@ -1608,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) (1 << INTEL_OUTPUT_DP_MST) | (1 << INTEL_OUTPUT_EDP)); } + static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { - drm_wait_one_vblank(&dev_priv->drm, pipe); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + + drm_crtc_wait_one_vblank(&crtc->base); } + static inline void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c7424e2a04a3..a2fafd4499f2 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -40,7 +40,6 @@ #include <drm/drm_edid.h> #include <drm/drm_hdcp.h> #include <drm/drm_probe_helper.h> -#include <drm/i915_drm.h> #include "i915_debugfs.h" #include "i915_drv.h" @@ -49,6 +48,7 @@ #include "intel_audio.h" #include "intel_connector.h" #include "intel_ddi.h" +#include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -146,11 +146,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp) return intel_dig_port->base.type == INTEL_OUTPUT_EDP; } -static struct intel_dp *intel_attached_dp(struct intel_connector *connector) -{ - return 
enc_to_intel_dp(intel_attached_encoder(connector)); -} - static void intel_dp_link_down(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state); static bool edp_panel_vdd_on(struct intel_dp *intel_dp); @@ -272,7 +267,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; - u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; /* Low voltage SKUs are limited to max of 5.4G */ if (voltage == VOLTAGE_INFO_0_85V) @@ -323,14 +318,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) 162000, 270000 }; struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[dig_port->base.port]; const int *source_rates; - int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate; + int size, max_rate = 0, vbt_max_rate; /* This should only be done once */ - WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates); + drm_WARN_ON(&dev_priv->drm, + intel_dp->source_rates || intel_dp->num_source_rates); if (INTEL_GEN(dev_priv) >= 10) { source_rates = cnl_rates; @@ -354,6 +349,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) size = ARRAY_SIZE(g4x_rates); } + vbt_max_rate = intel_bios_dp_max_link_rate(encoder); if (max_rate && vbt_max_rate) max_rate = min(max_rate, vbt_max_rate); else if (vbt_max_rate) @@ -519,12 +515,13 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, */ bits_per_pixel = (link_clock * lane_count * 8) / intel_dp_mode_to_fec_clock(mode_clock); - DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel); + drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel); /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / mode_hdisplay; - DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); + drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", + max_bpp_small_joiner_ram); /* * Greatest allowed DSC BPP = MIN (output BPP from available Link BW @@ -534,8 +531,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, /* Error out if the max bpp is less than smallest allowed valid bpp */ if (bits_per_pixel < valid_dsc_bpp[0]) { - DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n", - bits_per_pixel, valid_dsc_bpp[0]); + drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", + bits_per_pixel, valid_dsc_bpp[0]); return 0; } @@ -760,20 +757,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) enum dpio_channel ch = vlv_pipe_to_channel(pipe); u32 DP; - if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, - "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name)) + if (drm_WARN(&dev_priv->drm, + intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, + "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name)) return; - DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, + "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ - DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; DP |= DP_PORT_WIDTH(1); DP |= DP_LINK_TRAIN_PAT_1; @@ -783,7 +782,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) else DP |= DP_PIPE_SEL(pipe); - pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE; + pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE; /* * The DPLL for the pipe must be enabled for this to work. @@ -795,8 +794,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ? &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) { - DRM_ERROR("Failed to force on pll for pipe %c!\n", - pipe_name(pipe)); + drm_err(&dev_priv->drm, + "Failed to force on pll for pipe %c!\n", + pipe_name(pipe)); return; } } @@ -807,14 +807,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) * to make this power sequencer lock onto the port. * Otherwise even VDD force bit won't work. 
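Back in intel_dp_dsc_get_output_bpp(), the two bounds being logged are worth making concrete: the link can carry link_clock * lane_count * 8 bits per FEC-adjusted pixel clock, and the small-joiner RAM caps bpp at its bit capacity divided by the horizontal width. A worked sketch follows; the FEC overhead divisor and the RAM capacity are named placeholders here, not the driver's exact constants:

#include <stdio.h>

/* Placeholder for intel_dp_mode_to_fec_clock(): FEC steals a few percent
 * of the link symbols, so the effective pixel clock is slightly higher.
 * The 3% figure is illustrative only. */
static unsigned int mode_to_fec_clock(unsigned int mode_clock)
{
        return mode_clock + mode_clock * 3 / 100;
}

int main(void)
{
        unsigned int link_clock = 540000;   /* kHz, HBR2 */
        unsigned int lane_count = 4;
        unsigned int mode_clock = 533250;   /* kHz, e.g. a 4k60 mode */
        unsigned int hdisplay = 3840;
        unsigned int joiner_ram_bits = 7680 * 24;  /* illustrative capacity */

        unsigned int link_bpp =
                link_clock * lane_count * 8 / mode_to_fec_clock(mode_clock);
        unsigned int joiner_bpp = joiner_ram_bits / hdisplay;
        unsigned int max_bpp = link_bpp < joiner_bpp ? link_bpp : joiner_bpp;

        printf("Max link bpp: %u\n", link_bpp);
        printf("Max small joiner bpp: %u\n", joiner_bpp);
        printf("DSC output bpp bound: %u\n", max_bpp);
        return 0;
}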
*/ - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); - I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN); + intel_de_posting_read(dev_priv, intel_dp->output_reg); - I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN); + intel_de_posting_read(dev_priv, intel_dp->output_reg); if (!pll_enabled) { vlv_force_pll_off(dev_priv, pipe); @@ -837,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) struct intel_dp *intel_dp = enc_to_intel_dp(encoder); if (encoder->type == INTEL_OUTPUT_EDP) { - WARN_ON(intel_dp->active_pipe != INVALID_PIPE && - intel_dp->active_pipe != intel_dp->pps_pipe); + drm_WARN_ON(&dev_priv->drm, + intel_dp->active_pipe != INVALID_PIPE && + intel_dp->active_pipe != + intel_dp->pps_pipe); if (intel_dp->pps_pipe != INVALID_PIPE) pipes &= ~(1 << intel_dp->pps_pipe); } else { - WARN_ON(intel_dp->pps_pipe != INVALID_PIPE); + drm_WARN_ON(&dev_priv->drm, + intel_dp->pps_pipe != INVALID_PIPE); if (intel_dp->active_pipe != INVALID_PIPE) pipes &= ~(1 << intel_dp->active_pipe); @@ -866,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ - WARN_ON(!intel_dp_is_edp(intel_dp)); + drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); - WARN_ON(intel_dp->active_pipe != INVALID_PIPE && - intel_dp->active_pipe != intel_dp->pps_pipe); + drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE && + intel_dp->active_pipe != intel_dp->pps_pipe); if (intel_dp->pps_pipe != INVALID_PIPE) return intel_dp->pps_pipe; @@ -880,16 +883,17 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) * Didn't find one. This should not happen since there * are two power sequencers and up to two eDP ports. 
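vlv_find_free_pps(), visible above, is a small bitmask allocator: start with all candidate pipes set, clear the ones already claimed by an encoder, and take the lowest remaining bit. The same logic, self-contained (the ffs()-based pick is an assumption about the part of the function not shown in this hunk):

#include <stdio.h>
#include <strings.h>   /* ffs() */

int main(void)
{
        enum { PIPE_A, PIPE_B, INVALID_PIPE = -1 };
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

        int active_pipe = PIPE_A;          /* pretend one encoder owns pipe A */
        pipes &= ~(1u << active_pipe);     /* it cannot be reused */

        /* ffs() returns the 1-based index of the lowest set bit, 0 if none. */
        int pps = pipes ? ffs(pipes) - 1 : INVALID_PIPE;

        printf("free power sequencer: %d\n", pps);   /* -> 1 (pipe B) */
        return 0;
}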
*/ - if (WARN_ON(pipe == INVALID_PIPE)) + if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE)) pipe = PIPE_A; vlv_steal_power_sequencer(dev_priv, pipe); intel_dp->pps_pipe = pipe; - DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n", - pipe_name(intel_dp->pps_pipe), - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, + "picked pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(intel_dp->pps_pipe), + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ - WARN_ON(!intel_dp_is_edp(intel_dp)); + drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp)); if (!intel_dp->pps_reset) return backlight_controller; @@ -935,13 +939,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(PP_STATUS(pipe)) & PP_ON; + return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON; } static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv, enum pipe pipe) { - return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD; + return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD; } static bool vlv_pipe_any(struct drm_i915_private *dev_priv, @@ -958,7 +962,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv, enum pipe pipe; for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { - u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) & + u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) & PANEL_PORT_SELECT_MASK; if (port_sel != PANEL_PORT_SELECT_VLV(port)) @@ -997,16 +1001,18 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) /* didn't find one? 
just let vlv_power_sequencer_pipe() pick one when needed */ if (intel_dp->pps_pipe == INVALID_PIPE) { - DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, + "no initial power sequencer for [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return; } - DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name, - pipe_name(intel_dp->pps_pipe)); + drm_dbg_kms(&dev_priv->drm, + "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, + pipe_name(intel_dp->pps_pipe)); intel_dp_init_panel_power_sequencer(intel_dp); intel_dp_init_panel_power_sequencer_registers(intel_dp, false); @@ -1016,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) { struct intel_encoder *encoder; - if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && - !IS_GEN9_LP(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, + !(IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_GEN9_LP(dev_priv)))) return; /* @@ -1033,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + drm_WARN_ON(&dev_priv->drm, + intel_dp->active_pipe != INVALID_PIPE); if (encoder->type != INTEL_OUTPUT_EDP) continue; @@ -1119,12 +1128,13 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, pp_ctrl_reg = PP_CONTROL(pipe); pp_div_reg = PP_DIVISOR(pipe); - pp_div = I915_READ(pp_div_reg); + pp_div = intel_de_read(dev_priv, pp_div_reg); pp_div &= PP_REFERENCE_DIVIDER_MASK; /* 0x1F write to PP_DIV_REG sets max cycle delay */ - I915_WRITE(pp_div_reg, pp_div | 0x1F); - I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS); + intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F); + intel_de_write(dev_priv, pp_ctrl_reg, + PANEL_UNLOCK_REGS); msleep(intel_dp->panel_power_cycle_delay); } } @@ -1142,7 +1152,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp) intel_dp->pps_pipe == INVALID_PIPE) return false; - return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; + return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0; } static bool edp_have_panel_vdd(struct intel_dp *intel_dp) @@ -1155,7 +1165,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp) intel_dp->pps_pipe == INVALID_PIPE) return false; - return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; + return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; } static void @@ -1167,10 +1177,11 @@ intel_dp_check_edp(struct intel_dp *intel_dp) return; if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { - WARN(1, "eDP powered off while attempting aux channel communication.\n"); - DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", - I915_READ(_pp_stat_reg(intel_dp)), - I915_READ(_pp_ctrl_reg(intel_dp))); + drm_WARN(&dev_priv->drm, 1, + "eDP powered off while attempting aux channel communication.\n"); + drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n", + intel_de_read(dev_priv, _pp_stat_reg(intel_dp)), + intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp))); } } @@ -1191,8 +1202,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if 
(!done) - DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n", - intel_dp->aux.name, timeout_ms, status); + drm_err(&i915->drm, + "%s: did not complete or timeout within %ums (status 0x%08x)\n", + intel_dp->aux.name, timeout_ms, status); #undef C return status; @@ -1209,13 +1221,14 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * The clock divider is based off the hrawclk, and would like to run at * 2MHz. So, take the hrawclk value and divide by 2000 and use that */ - return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); + return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000); } static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + u32 freq; if (index) return 0; @@ -1226,9 +1239,10 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) * divide by 2000 and use that */ if (dig_port->aux_ch == AUX_CH_A) - return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); + freq = dev_priv->cdclk.hw.cdclk; else - return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); + freq = RUNTIME_INFO(dev_priv)->rawclk_freq; + return DIV_ROUND_CLOSEST(freq, 2000); } static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) @@ -1360,7 +1374,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. */ - pm_qos_update_request(&i915->pm_qos, 0); + cpu_latency_qos_update_request(&i915->pm_qos, 0); intel_dp_check_edp(intel_dp); @@ -1378,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, const u32 status = intel_uncore_read(uncore, ch_ctl); if (status != intel_dp->aux_busy_last_status) { - WARN(1, "dp_aux_ch not started status 0x%08x\n", - status); + drm_WARN(&i915->drm, 1, + "%s: not started (status 0x%08x)\n", + intel_dp->aux.name, status); intel_dp->aux_busy_last_status = status; } @@ -1388,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, } /* Only 5 data registers! */ - if (WARN_ON(send_bytes > 20 || recv_size > 20)) { + if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) { ret = -E2BIG; goto out; } @@ -1440,7 +1455,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, } if ((status & DP_AUX_CH_CTL_DONE) == 0) { - DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); + drm_err(&i915->drm, "%s: not done (status 0x%08x)\n", + intel_dp->aux.name, status); ret = -EBUSY; goto out; } @@ -1450,7 +1466,8 @@ done: * Timeouts occur when the sink is not connected */ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { - DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); + drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n", + intel_dp->aux.name, status); ret = -EIO; goto out; } @@ -1458,7 +1475,8 @@ done: /* Timeouts occur when the device isn't connected, so they're * "normal" -- don't fill the kernel log with these */ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { - DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); + drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n", + intel_dp->aux.name, status); ret = -ETIMEDOUT; goto out; } @@ -1473,8 +1491,9 @@ done: * drm layer takes care for the necessary retries. 
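The aux clock divider hunks earlier in this file (g4x_get_aux_clock_divider()/ilk_get_aux_clock_divider()) are pure arithmetic: the AUX channel wants a ~2 MHz tick, so the divider is the source clock in kHz, rounded to nearest, over 2000. For non-negative operands DIV_ROUND_CLOSEST reduces to:

#include <stdio.h>

/* Round-to-nearest division for non-negative values, matching the kernel
 * macro's behaviour in this use. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned int rawclk_khz = 19200;   /* a common raw clock */
        unsigned int cdclk_khz = 337500;   /* illustrative CD clock */

        printf("AUX divider from rawclk: %u\n",
               DIV_ROUND_CLOSEST(rawclk_khz, 2000));   /* -> 10 */
        printf("AUX divider from cdclk:  %u\n",
               DIV_ROUND_CLOSEST(cdclk_khz, 2000));    /* -> 169 */
        return 0;
}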
*/ if (recv_bytes == 0 || recv_bytes > 20) { - DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n", - recv_bytes); + drm_dbg_kms(&i915->drm, + "%s: Forbidden recv_bytes = %d on aux transaction\n", + intel_dp->aux.name, recv_bytes); ret = -EBUSY; goto out; } @@ -1488,7 +1507,7 @@ done: ret = recv_bytes; out: - pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) edp_panel_vdd_off(intel_dp, false); @@ -1742,7 +1761,8 @@ intel_dp_aux_init(struct intel_dp *intel_dp) drm_dp_aux_init(&intel_dp->aux); /* Failure to allocate our preferred name is not critical */ - intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", + intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c", + aux_ch_name(dig_port->aux_ch), port_name(encoder->port)); intel_dp->aux.transfer = intel_dp_aux_transfer; } @@ -1918,8 +1938,9 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp, /* Get bpp from vbt only for panels that dont have bpp in edid */ if (intel_connector->base.display_info.bpc == 0 && dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { - DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", - dev_priv->vbt.edp.bpp); + drm_dbg_kms(&dev_priv->drm, + "clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp.bpp); bpp = dev_priv->vbt.edp.bpp; } } @@ -2115,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, /* Min Input BPC for ICL+ is 8 */ if (pipe_bpp < 8 * 3) { - DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); + drm_dbg_kms(&dev_priv->drm, + "No DSC support for less than 8bpc\n"); return -EINVAL; } @@ -2150,7 +2172,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay); if (!dsc_max_output_bpp || !dsc_dp_slice_count) { - DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); + drm_dbg_kms(&dev_priv->drm, + "Compressed BPP/Slice Count not supported\n"); return -EINVAL; } pipe_config->dsc.compressed_bpp = min_t(u16, @@ -2167,26 +2190,28 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, if (pipe_config->dsc.slice_count > 1) { pipe_config->dsc.dsc_split = true; } else { - DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); + drm_dbg_kms(&dev_priv->drm, + "Cannot split stream to use 2 VDSC instances\n"); return -EINVAL; } } ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); if (ret < 0) { - DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " - "Compressed BPP = %d\n", - pipe_config->pipe_bpp, - pipe_config->dsc.compressed_bpp); + drm_dbg_kms(&dev_priv->drm, + "Cannot compute valid DSC parameters for Input Bpp = %d " + "Compressed BPP = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc.compressed_bpp); return ret; } pipe_config->dsc.compression_enable = true; - DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " - "Compressed Bpp = %d Slice Count = %d\n", - pipe_config->pipe_bpp, - pipe_config->dsc.compressed_bpp, - pipe_config->dsc.slice_count); + drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " + "Compressed Bpp = %d Slice Count = %d\n", + pipe_config->pipe_bpp, + pipe_config->dsc.compressed_bpp, + pipe_config->dsc.slice_count); return 0; } @@ -2214,7 +2239,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp->max_link_rate); /* No common link rates between source and sink */ - WARN_ON(common_len <= 0); + drm_WARN_ON(encoder->base.dev, common_len <= 0); 
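intel_dp_compute_link_config() relies on a precomputed intersection of the source and sink rate tables (common_rates), clamped by max_link_rate; the drm_WARN_ON just converted above fires if that intersection is somehow empty. Intersecting two ascending-sorted rate arrays is straightforward; this sketch only shapes the role the driver's helper plays in feeding limits.max_clock:

#include <stdio.h>

/* Intersect two ascending-sorted rate tables; returns the match count. */
static int intersect_rates(const int *a, int na, const int *b, int nb,
                           int *out)
{
        int i = 0, j = 0, n = 0;

        while (i < na && j < nb) {
                if (a[i] == b[j]) {
                        out[n++] = a[i];
                        i++, j++;
                } else if (a[i] < b[j]) {
                        i++;
                } else {
                        j++;
                }
        }
        return n;
}

int main(void)
{
        const int source[] = { 162000, 216000, 270000, 324000, 432000, 540000 };
        const int sink[]   = { 162000, 270000, 540000 };
        int common[6];
        int n = intersect_rates(source, 6, sink, 3, common);

        for (int i = 0; i < n; i++)
                printf("common rate: %d\n", common[i]);
        return 0;   /* n == 0 here would be the WARNed-on condition */
}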
limits.min_clock = 0; limits.max_clock = common_len - 1; @@ -2373,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CONSTANT_N); int ret = 0, output_bpp; @@ -2492,9 +2517,6 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)); - intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); - /* * There are four kinds of DP registers: * @@ -2515,7 +2537,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. */ - intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; + intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED; /* Handle DP bits in common between all three register formats */ intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; @@ -2539,12 +2561,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); + trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) trans_dp |= TRANS_DP_ENH_FRAMING; else trans_dp &= ~TRANS_DP_ENH_FRAMING; - I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp); + intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); } else { if (IS_G4X(dev_priv) && pipe_config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; @@ -2590,18 +2612,20 @@ static void wait_panel_status(struct intel_dp *intel_dp, pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", - mask, value, - I915_READ(pp_stat_reg), - I915_READ(pp_ctrl_reg)); + drm_dbg_kms(&dev_priv->drm, + "mask %08x value %08x status %08x control %08x\n", + mask, value, + intel_de_read(dev_priv, pp_stat_reg), + intel_de_read(dev_priv, pp_ctrl_reg)); if (intel_de_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) - DRM_ERROR("Panel status timeout: status %08x control %08x\n", - I915_READ(pp_stat_reg), - I915_READ(pp_ctrl_reg)); + drm_err(&dev_priv->drm, + "Panel status timeout: status %08x control %08x\n", + intel_de_read(dev_priv, pp_stat_reg), + intel_de_read(dev_priv, pp_ctrl_reg)); - DRM_DEBUG_KMS("Wait complete\n"); + drm_dbg_kms(&dev_priv->drm, "Wait complete\n"); } static void wait_panel_on(struct intel_dp *intel_dp) @@ -2660,9 +2684,9 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - control = I915_READ(_pp_ctrl_reg(intel_dp)); - if (WARN_ON(!HAS_DDI(dev_priv) && - (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { + control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)); + if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) && + (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) { control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; } @@ -2696,9 +2720,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) intel_display_power_get(dev_priv, intel_aux_power_domain(intel_dig_port)); - DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] 
VDD on\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); @@ -2709,17 +2733,19 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) pp_stat_reg = _pp_stat_reg(intel_dp); pp_ctrl_reg = _pp_ctrl_reg(intel_dp); - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); - DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", - I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); + drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + intel_de_read(dev_priv, pp_stat_reg), + intel_de_read(dev_priv, pp_ctrl_reg)); /* * If the panel wasn't on, delay before accessing aux channel */ if (!edp_have_panel_power(intel_dp)) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, + "[ENCODER:%d:%s] panel power wasn't enabled\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); msleep(intel_dp->panel_power_up_delay); } @@ -2759,14 +2785,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - WARN_ON(intel_dp->want_panel_vdd); + drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd); if (!edp_have_panel_vdd(intel_dp)) return; - DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; @@ -2774,12 +2800,13 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp_stat_reg = _pp_stat_reg(intel_dp); - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); /* Make sure sequencer is idle before allowing subsequent activity */ - DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", - I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); + drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", + intel_de_read(dev_priv, pp_stat_reg), + intel_de_read(dev_priv, pp_ctrl_reg)); if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); @@ -2851,14 +2878,14 @@ static void edp_panel_on(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name); + drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); - if (WARN(edp_have_panel_power(intel_dp), - "[ENCODER:%d:%s] panel power already on\n", - dp_to_dig_port(intel_dp)->base.base.base.id, - dp_to_dig_port(intel_dp)->base.base.name)) + if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp), + "[ENCODER:%d:%s] panel power already on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name)) return; wait_panel_power_cycle(intel_dp); @@ -2868,24 +2895,24 @@ static void edp_panel_on(struct intel_dp *intel_dp) if (IS_GEN(dev_priv, 5)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; - I915_WRITE(pp_ctrl_reg, pp); - 
POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); } pp |= PANEL_POWER_ON; if (!IS_GEN(dev_priv, 5)) pp |= PANEL_POWER_RESET; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); wait_panel_on(intel_dp); intel_dp->last_power_on = jiffies; if (IS_GEN(dev_priv, 5)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); } } @@ -2913,11 +2940,12 @@ static void edp_panel_off(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n", - dig_port->base.base.base.id, dig_port->base.base.name); + drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n", + dig_port->base.base.base.id, dig_port->base.base.name); - WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", - dig_port->base.base.base.id, dig_port->base.base.name); + drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd, + "Need [ENCODER:%d:%s] VDD to turn off panel\n", + dig_port->base.base.base.id, dig_port->base.base.name); pp = ilk_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some @@ -2929,8 +2957,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) intel_dp->want_panel_vdd = false; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); wait_panel_off(intel_dp); intel_dp->panel_power_off_time = ktime_get_boottime(); @@ -2971,8 +2999,8 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp) pp = ilk_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); } } @@ -3007,8 +3035,8 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; - I915_WRITE(pp_ctrl_reg, pp); - POSTING_READ(pp_ctrl_reg); + intel_de_write(dev_priv, pp_ctrl_reg, pp); + intel_de_posting_read(dev_priv, pp_ctrl_reg); } intel_dp->last_backlight_off = jiffies; @@ -3059,7 +3087,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; + bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN; I915_STATE_WARN(cur_state != state, "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", @@ -3070,7 +3098,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) { - bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE; + bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE; I915_STATE_WARN(cur_state != state, "eDP PLL state assertion failure (expected %s, current %s)\n", @@ -3089,8 +3117,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, assert_dp_port_disabled(intel_dp); assert_edp_pll_disabled(dev_priv); - DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n", - pipe_config->port_clock); + drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n", + 
pipe_config->port_clock); intel_dp->DP &= ~DP_PLL_FREQ_MASK; @@ -3099,8 +3127,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, else intel_dp->DP |= DP_PLL_FREQ_270MHZ; - I915_WRITE(DP_A, intel_dp->DP); - POSTING_READ(DP_A); + intel_de_write(dev_priv, DP_A, intel_dp->DP); + intel_de_posting_read(dev_priv, DP_A); udelay(500); /* @@ -3114,8 +3142,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp, intel_dp->DP |= DP_PLL_ENABLE; - I915_WRITE(DP_A, intel_dp->DP); - POSTING_READ(DP_A); + intel_de_write(dev_priv, DP_A, intel_dp->DP); + intel_de_posting_read(dev_priv, DP_A); udelay(200); } @@ -3129,12 +3157,12 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp, assert_dp_port_disabled(intel_dp); assert_edp_pll_enabled(dev_priv); - DRM_DEBUG_KMS("disabling eDP PLL\n"); + drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n"); intel_dp->DP &= ~DP_PLL_ENABLE; - I915_WRITE(DP_A, intel_dp->DP); - POSTING_READ(DP_A); + intel_de_write(dev_priv, DP_A, intel_dp->DP); + intel_de_posting_read(dev_priv, DP_A); udelay(200); } @@ -3149,7 +3177,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) * FIXME should really check all downstream ports... */ return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && - intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT && + drm_dp_is_branch(intel_dp->dpcd) && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; } @@ -3214,7 +3242,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, enum pipe p; for_each_pipe(dev_priv, p) { - u32 val = I915_READ(TRANS_DP_CTL(p)); + u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p)); if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { *pipe = p; @@ -3222,7 +3250,8 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, } } - DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port)); + drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n", + port_name(port)); /* must initialize pipe to something for the asserts */ *pipe = PIPE_A; @@ -3237,7 +3266,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, bool ret; u32 val; - val = I915_READ(dp_reg); + val = intel_de_read(dev_priv, dp_reg); ret = val & DP_PORT_EN; @@ -3289,12 +3318,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder, else pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); - tmp = I915_READ(intel_dp->output_reg); + tmp = intel_de_read(dev_priv, intel_dp->output_reg); pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { - u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe)); + u32 trans_dp = intel_de_read(dev_priv, + TRANS_DP_CTL(crtc->pipe)); if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; @@ -3328,7 +3358,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, intel_dp_get_m_n(crtc, pipe_config); if (port == PORT_A) { - if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) + if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) pipe_config->port_clock = 162000; else pipe_config->port_clock = 270000; @@ -3353,8 +3383,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder, * up by the BIOS, and thus we can't get the mode at module * load. 
*/ - DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", - pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + drm_dbg_kms(&dev_priv->drm, + "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } } @@ -3447,11 +3478,12 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); if (dp_train_pat & train_pat_mask) - DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", - dp_train_pat & train_pat_mask); + drm_dbg_kms(&dev_priv->drm, + "Using DP training pattern TPS%d\n", + dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { - u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl); + u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -3477,7 +3509,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, temp |= DP_TP_CTL_LINK_TRAIN_PAT4; break; } - I915_WRITE(intel_dp->regs.dp_tp_ctl, temp); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp); } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { @@ -3494,7 +3526,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, *DP |= DP_LINK_TRAIN_PAT_2_CPT; break; case DP_TRAINING_PATTERN_3: - DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); + drm_dbg_kms(&dev_priv->drm, + "TPS3 not supported, using TPS2 instead\n"); *DP |= DP_LINK_TRAIN_PAT_2_CPT; break; } @@ -3513,7 +3546,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, *DP |= DP_LINK_TRAIN_PAT_2; break; case DP_TRAINING_PATTERN_3: - DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n"); + drm_dbg_kms(&dev_priv->drm, + "TPS3 not supported, using TPS2 instead\n"); *DP |= DP_LINK_TRAIN_PAT_2; break; } @@ -3539,8 +3573,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, if (old_crtc_state->has_audio) intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - I915_WRITE(intel_dp->output_reg, intel_dp->DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); } static void intel_enable_dp(struct intel_encoder *encoder, @@ -3550,11 +3584,11 @@ static void intel_enable_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - u32 dp_reg = I915_READ(intel_dp->output_reg); + u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg); enum pipe pipe = crtc->pipe; intel_wakeref_t wakeref; - if (WARN_ON(dp_reg & DP_PORT_EN)) + if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN)) return; with_pps_lock(intel_dp, wakeref) { @@ -3583,8 +3617,8 @@ static void intel_enable_dp(struct intel_encoder *encoder, intel_dp_stop_link_train(intel_dp); if (pipe_config->has_audio) { - DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", - pipe_name(pipe)); + drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n", + pipe_name(pipe)); intel_audio_codec_enable(encoder, pipe_config, conn_state); } } @@ -3625,9 +3659,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) enum pipe pipe = intel_dp->pps_pipe; i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); - WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); - if (WARN_ON(pipe != 
PIPE_A && pipe != PIPE_B)) + if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) return; edp_panel_vdd_off_sync(intel_dp); @@ -3641,11 +3675,12 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) * port select always when logically disconnecting a power sequencer * from a port. */ - DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); - I915_WRITE(pp_on_reg, 0); - POSTING_READ(pp_on_reg); + drm_dbg_kms(&dev_priv->drm, + "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); + intel_de_write(dev_priv, pp_on_reg, 0); + intel_de_posting_read(dev_priv, pp_on_reg); intel_dp->pps_pipe = INVALID_PIPE; } @@ -3660,17 +3695,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - WARN(intel_dp->active_pipe == pipe, - "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", - pipe_name(pipe), encoder->base.base.id, - encoder->base.name); + drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe, + "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); if (intel_dp->pps_pipe != pipe) continue; - DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", - pipe_name(pipe), encoder->base.base.id, - encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); /* make sure vdd is off before we steal it */ vlv_detach_power_sequencer(intel_dp); @@ -3686,7 +3722,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, lockdep_assert_held(&dev_priv->pps_mutex); - WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE); if (intel_dp->pps_pipe != INVALID_PIPE && intel_dp->pps_pipe != crtc->pipe) { @@ -3712,9 +3748,10 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, /* now it's all ours */ intel_dp->pps_pipe = crtc->pipe; - DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", - pipe_name(intel_dp->pps_pipe), encoder->base.base.id, - encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(intel_dp->pps_pipe), encoder->base.base.id, + encoder->base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -4140,18 +4177,22 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) } if (mask) - DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); - - DRM_DEBUG_KMS("Using vswing level %d\n", - train_set & DP_TRAIN_VOLTAGE_SWING_MASK); - DRM_DEBUG_KMS("Using pre-emphasis level %d\n", - (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> - DP_TRAIN_PRE_EMPHASIS_SHIFT); + drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n", + signal_levels); + + drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", + train_set & DP_TRAIN_VOLTAGE_SWING_MASK, + train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); + drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", + (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT, + train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? 
+ " (max)" : ""); intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels; - I915_WRITE(intel_dp->output_reg, intel_dp->DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); } void @@ -4164,8 +4205,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat); - I915_WRITE(intel_dp->output_reg, intel_dp->DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); } void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) @@ -4178,10 +4219,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (!HAS_DDI(dev_priv)) return; - val = I915_READ(intel_dp->regs.dp_tp_ctl); + val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_LINK_TRAIN_MASK; val |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val); /* * Until TGL on PORT_A we can have only eDP in SST mode. There the only @@ -4195,7 +4236,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_IDLE_DONE, 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for DP idle patterns\n"); } static void @@ -4208,10 +4250,12 @@ intel_dp_link_down(struct intel_encoder *encoder, enum port port = encoder->port; u32 DP = intel_dp->DP; - if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) + if (drm_WARN_ON(&dev_priv->drm, + (intel_de_read(dev_priv, intel_dp->output_reg) & + DP_PORT_EN) == 0)) return; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { @@ -4221,12 +4265,12 @@ intel_dp_link_down(struct intel_encoder *encoder, DP &= ~DP_LINK_TRAIN_MASK; DP |= DP_LINK_TRAIN_PAT_IDLE; } - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); /* * HW workaround for IBX, we need to move the port @@ -4245,12 +4289,12 @@ intel_dp_link_down(struct intel_encoder *encoder, DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | DP_LINK_TRAIN_PAT_1; - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); DP &= ~DP_PORT_EN; - I915_WRITE(intel_dp->output_reg, DP); - POSTING_READ(intel_dp->output_reg); + intel_de_write(dev_priv, intel_dp->output_reg, DP); + intel_de_posting_read(dev_priv, intel_dp->output_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); @@ -4370,7 +4414,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) to_i915(dp_to_dig_port(intel_dp)->base.base.dev); /* this function is meant to be called only once */ - WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0); + drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 
if (!intel_dp_read_dpcd(intel_dp)) return false; @@ -4390,8 +4434,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == sizeof(intel_dp->edp_dpcd)) - DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd), - intel_dp->edp_dpcd); + drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", + (int)sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); /* * This has to be called after intel_dp->edp_dpcd is filled, PSR checks @@ -4466,7 +4511,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * it don't care about read it here and in intel_edp_init_dpcd(). */ if (!intel_dp_is_edp(intel_dp) && - !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) { + !drm_dp_has_quirk(&intel_dp->desc, 0, + DP_DPCD_QUIRK_NO_SINK_COUNT)) { u8 count; ssize_t r; @@ -5125,7 +5171,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, crtc_state = to_intel_crtc_state(crtc->base.state); - WARN_ON(!intel_crtc_has_dp_encoder(crtc_state)); + drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state)); if (!crtc_state->hw.active) return 0; @@ -5195,7 +5241,8 @@ intel_dp_hotplug(struct intel_encoder *encoder, drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); - WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + drm_WARN(encoder->base.dev, ret, + "Acquiring modeset locks failed with %i\n", ret); /* * Keeping it consistent with intel_ddi_hotplug() and @@ -5281,7 +5328,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) intel_psr_short_pulse(intel_dp); if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { - DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); + drm_dbg_kms(&dev_priv->drm, + "Link Training Compliance Test requested\n"); /* Send a Hotplug Uevent to userspace to start modeset */ drm_kms_helper_hotplug_event(&dev_priv->drm); } @@ -5370,7 +5418,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder) return false; } - return I915_READ(SDEISR) & bit; + return intel_de_read(dev_priv, SDEISR) & bit; } static bool cpt_digital_port_connected(struct intel_encoder *encoder) @@ -5393,7 +5441,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder) return false; } - return I915_READ(SDEISR) & bit; + return intel_de_read(dev_priv, SDEISR) & bit; } static bool spt_digital_port_connected(struct intel_encoder *encoder) @@ -5412,7 +5460,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder) return cpt_digital_port_connected(encoder); } - return I915_READ(SDEISR) & bit; + return intel_de_read(dev_priv, SDEISR) & bit; } static bool g4x_digital_port_connected(struct intel_encoder *encoder) @@ -5435,7 +5483,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder) return false; } - return I915_READ(PORT_HOTPLUG_STAT) & bit; + return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; } static bool gm45_digital_port_connected(struct intel_encoder *encoder) @@ -5458,7 +5506,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder) return false; } - return I915_READ(PORT_HOTPLUG_STAT) & bit; + return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; } static bool ilk_digital_port_connected(struct intel_encoder *encoder) @@ -5466,7 +5514,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (encoder->hpd_pin == HPD_PORT_A) - return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + return intel_de_read(dev_priv, 
DEISR) & DE_DP_A_HOTPLUG; else return ibx_digital_port_connected(encoder); } @@ -5476,7 +5524,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (encoder->hpd_pin == HPD_PORT_A) - return I915_READ(DEISR) & DE_DP_A_HOTPLUG; + return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG; else return cpt_digital_port_connected(encoder); } @@ -5486,7 +5534,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (encoder->hpd_pin == HPD_PORT_A) - return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB; + return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB; else return cpt_digital_port_connected(encoder); } @@ -5496,7 +5544,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (encoder->hpd_pin == HPD_PORT_A) - return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; + return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG; else return cpt_digital_port_connected(encoder); } @@ -5521,16 +5569,16 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder) return false; } - return I915_READ(GEN8_DE_PORT_ISR) & bit; + return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit; } static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv, enum phy phy) { if (HAS_PCH_MCC(dev_priv) && phy == PHY_C) - return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); + return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); - return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); + return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy); } static bool icp_digital_port_connected(struct intel_encoder *encoder) @@ -5631,6 +5679,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) intel_dp->has_audio = drm_detect_monitor_audio(edid); drm_dp_cec_set_edid(&intel_dp->aux, edid); + intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } static void @@ -5643,6 +5692,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_connector->detect_edid = NULL; intel_dp->has_audio = false; + intel_dp->edid_quirks = 0; } static int @@ -5656,9 +5706,10 @@ intel_dp_detect(struct drm_connector *connector, struct intel_encoder *encoder = &dig_port->base; enum drm_connector_status status; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + drm_WARN_ON(&dev_priv->drm, + !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) @@ -5673,9 +5724,10 @@ intel_dp_detect(struct drm_connector *connector, memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); if (intel_dp->is_mst) { - DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", - intel_dp->is_mst, - intel_dp->mst_mgr.mst_state); + drm_dbg_kms(&dev_priv->drm, + "MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, + intel_dp->mst_mgr.mst_state); intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); @@ -5763,8 +5815,8 @@ intel_dp_force(struct drm_connector *connector) intel_aux_power_domain(dig_port); intel_wakeref_t wakeref; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&dev_priv->drm, 
"[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); intel_dp_unset_edid(intel_dp); if (connector->status != connector_status_connected) @@ -5815,7 +5867,7 @@ intel_dp_connector_register(struct drm_connector *connector) if (ret) return ret; - i915_debugfs_connector_add(connector); + intel_connector_debugfs_add(connector); DRM_DEBUG_KMS("registering %s bus for %s\n", intel_dp->aux.name, connector->kdev->kobj.name); @@ -6396,6 +6448,7 @@ static int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, bool is_repeater, u8 content_type) { + int ret; struct hdcp2_dp_errata_stream_type stream_type_msg; if (is_repeater) @@ -6411,8 +6464,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; stream_type_msg.stream_type = content_type; - return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, sizeof(stream_type_msg)); + + return ret < 0 ? ret : 0; + } static @@ -6492,7 +6548,8 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) * schedule a vdd off, so we don't hold on to the reference * indefinitely. */ - DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); + drm_dbg_kms(&dev_priv->drm, + "VDD left on by BIOS, adjusting state tracking\n"); intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); edp_panel_vdd_schedule_off(intel_dp); @@ -6519,7 +6576,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) intel_wakeref_t wakeref; if (!HAS_DDI(dev_priv)) - intel_dp->DP = I915_READ(intel_dp->output_reg); + intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); if (lspcon->active) lspcon_resume(lspcon); @@ -6545,6 +6602,140 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) } } +static int intel_modeset_tile_group(struct intel_atomic_state *state, + int tile_group_id) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + int ret = 0; + + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + struct drm_connector_state *conn_state; + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + + if (!connector->has_tile || + connector->tile_group->id != tile_group_id) + continue; + + conn_state = drm_atomic_get_connector_state(&state->base, + connector); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + break; + } + + crtc = to_intel_crtc(conn_state->crtc); + + if (!crtc) + continue; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + crtc_state->uapi.mode_changed = true; + + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + if (ret) + break; + } + drm_connector_list_iter_end(&conn_iter); + + return ret; +} + +static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + if (transcoders == 0) + return 0; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + int ret; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (!crtc_state->hw.enable) + continue; + + if (!(transcoders & BIT(crtc_state->cpu_transcoder))) + continue; + + crtc_state->uapi.mode_changed = true; + + ret = 
drm_atomic_add_affected_connectors(&state->base, &crtc->base); + if (ret) + return ret; + + ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); + if (ret) + return ret; + + transcoders &= ~BIT(crtc_state->cpu_transcoder); + } + + drm_WARN_ON(&dev_priv->drm, transcoders != 0); + + return 0; +} + +static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, + struct drm_connector *connector) +{ + const struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(&state->base, connector); + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc *crtc; + u8 transcoders; + + crtc = to_intel_crtc(old_conn_state->crtc); + if (!crtc) + return 0; + + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + + if (!old_crtc_state->hw.active) + return 0; + + transcoders = old_crtc_state->sync_mode_slaves_mask; + if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) + transcoders |= BIT(old_crtc_state->master_transcoder); + + return intel_modeset_affected_transcoders(state, + transcoders); +} + +static int intel_dp_connector_atomic_check(struct drm_connector *conn, + struct drm_atomic_state *_state) +{ + struct drm_i915_private *dev_priv = to_i915(conn->dev); + struct intel_atomic_state *state = to_intel_atomic_state(_state); + int ret; + + ret = intel_digital_connector_atomic_check(conn, &state->base); + if (ret) + return ret; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + if (!intel_connector_needs_modeset(state, conn)) + return 0; + + if (conn->has_tile) { + ret = intel_modeset_tile_group(state, conn->tile_group->id); + if (ret) + return ret; + } + + return intel_modeset_synced_crtcs(state, conn); +} + static const struct drm_connector_funcs intel_dp_connector_funcs = { .force = intel_dp_force, .fill_modes = drm_helper_probe_single_connector_modes, @@ -6561,7 +6752,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = .detect_ctx = intel_dp_detect, .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .atomic_check = intel_digital_connector_atomic_check, + .atomic_check = intel_dp_connector_atomic_check, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -6697,10 +6888,10 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) /* Ensure PPS is unlocked */ if (!HAS_DDI(dev_priv)) - I915_WRITE(regs.pp_ctrl, pp_ctl); + intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); - pp_on = I915_READ(regs.pp_on); - pp_off = I915_READ(regs.pp_off); + pp_on = intel_de_read(dev_priv, regs.pp_on); + pp_off = intel_de_read(dev_priv, regs.pp_off); /* Pull timing values out of registers */ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); @@ -6711,7 +6902,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) if (i915_mmio_reg_valid(regs.pp_div)) { u32 pp_div; - pp_div = I915_READ(regs.pp_div); + pp_div = intel_de_read(dev_priv, regs.pp_div); seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; } else { @@ -6768,8 +6959,9 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) */ if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); - DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", - vbt.t11_t12); + drm_dbg_kms(&dev_priv->drm, + "Increasing T12 panel delay as per the quirk to %d\n", + vbt.t11_t12); } /* T11_T12 delay is special and actually in units of 100ms, but zero * based in the hw (so we need to add 100 ms). 
But the sw vbt @@ -6811,12 +7003,15 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp) intel_dp->panel_power_cycle_delay = get_delay(t11_t12); #undef get_delay - DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", - intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, - intel_dp->panel_power_cycle_delay); + drm_dbg_kms(&dev_priv->drm, + "panel power up delay %d, power down delay %d, power cycle delay %d\n", + intel_dp->panel_power_up_delay, + intel_dp->panel_power_down_delay, + intel_dp->panel_power_cycle_delay); - DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", - intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n", + intel_dp->backlight_on_delay, + intel_dp->backlight_off_delay); /* * We override the HW backlight delays to 1 because we do manual waits @@ -6841,7 +7036,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 pp_on, pp_off, port_sel = 0; - int div = dev_priv->rawclk_freq / 1000; + int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000; struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->base.port; const struct edp_power_seq *seq = &intel_dp->pps_delays; @@ -6865,14 +7060,16 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, if (force_disable_vdd) { u32 pp = ilk_get_pp_control(intel_dp); - WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); + drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON, + "Panel power already on\n"); if (pp & EDP_FORCE_VDD) - DRM_DEBUG_KMS("VDD already on, disabling first\n"); + drm_dbg_kms(&dev_priv->drm, + "VDD already on, disabling first\n"); pp &= ~EDP_FORCE_VDD; - I915_WRITE(regs.pp_ctrl, pp); + intel_de_write(dev_priv, regs.pp_ctrl, pp); } pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | @@ -6903,31 +7100,31 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, pp_on |= port_sel; - I915_WRITE(regs.pp_on, pp_on); - I915_WRITE(regs.pp_off, pp_off); + intel_de_write(dev_priv, regs.pp_on, pp_on); + intel_de_write(dev_priv, regs.pp_off, pp_off); /* * Compute the divisor for the pp clock, simply match the Bspec formula. */ if (i915_mmio_reg_valid(regs.pp_div)) { - I915_WRITE(regs.pp_div, - REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | - REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); + intel_de_write(dev_priv, regs.pp_div, + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); } else { u32 pp_ctl; - pp_ctl = I915_READ(regs.pp_ctrl); + pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); - I915_WRITE(regs.pp_ctrl, pp_ctl); + intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); } - DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", - I915_READ(regs.pp_on), - I915_READ(regs.pp_off), - i915_mmio_reg_valid(regs.pp_div) ? - I915_READ(regs.pp_div) : - (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); + drm_dbg_kms(&dev_priv->drm, + "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", + intel_de_read(dev_priv, regs.pp_on), + intel_de_read(dev_priv, regs.pp_off), + i915_mmio_reg_valid(regs.pp_div) ? 
+ intel_de_read(dev_priv, regs.pp_div) : + (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); } static void intel_dp_pps_init(struct intel_dp *intel_dp) @@ -6964,22 +7161,24 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, enum drrs_refresh_rate_type index = DRRS_HIGH_RR; if (refresh_rate <= 0) { - DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n"); + drm_dbg_kms(&dev_priv->drm, + "Refresh rate should be positive non-zero.\n"); return; } if (intel_dp == NULL) { - DRM_DEBUG_KMS("DRRS not supported.\n"); + drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n"); return; } if (!intel_crtc) { - DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); + drm_dbg_kms(&dev_priv->drm, + "DRRS: intel_crtc not initialized\n"); return; } if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) { - DRM_DEBUG_KMS("Only Seamless DRRS supported.\n"); + drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n"); return; } @@ -6988,13 +7187,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, index = DRRS_LOW_RR; if (index == dev_priv->drrs.refresh_rate_type) { - DRM_DEBUG_KMS( - "DRRS requested for previously set RR...ignoring\n"); + drm_dbg_kms(&dev_priv->drm, + "DRRS requested for previously set RR...ignoring\n"); return; } if (!crtc_state->hw.active) { - DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n"); + drm_dbg_kms(&dev_priv->drm, + "eDP encoder disabled. CRTC not Active\n"); return; } @@ -7008,13 +7208,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, break; case DRRS_MAX_RR: default: - DRM_ERROR("Unsupported refreshrate type\n"); + drm_err(&dev_priv->drm, + "Unsupported refreshrate type\n"); } } else if (INTEL_GEN(dev_priv) > 6) { i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if (index > DRRS_HIGH_RR) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; @@ -7026,12 +7227,13 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, else val &= ~PIPECONF_EDP_RR_MODE_SWITCH; } - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } dev_priv->drrs.refresh_rate_type = index; - DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); + drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n", + refresh_rate); } /** @@ -7047,18 +7249,19 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (!crtc_state->has_drrs) { - DRM_DEBUG_KMS("Panel doesn't support DRRS\n"); + drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n"); return; } if (dev_priv->psr.enabled) { - DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR enabled. 
Not enabling DRRS.\n"); return; } mutex_lock(&dev_priv->drrs.mutex); if (dev_priv->drrs.dp) { - DRM_DEBUG_KMS("DRRS already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n"); goto unlock; } @@ -7284,25 +7487,28 @@ intel_dp_drrs_init(struct intel_connector *connector, mutex_init(&dev_priv->drrs.mutex); if (INTEL_GEN(dev_priv) <= 6) { - DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); + drm_dbg_kms(&dev_priv->drm, + "DRRS supported for Gen7 and above\n"); return NULL; } if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { - DRM_DEBUG_KMS("VBT doesn't support DRRS\n"); + drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); return NULL; } downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); if (!downclock_mode) { - DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); + drm_dbg_kms(&dev_priv->drm, + "Downclock mode is not found. DRRS not supported\n"); return NULL; } dev_priv->drrs.type = dev_priv->vbt.drrs_type; dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; - DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n"); + drm_dbg_kms(&dev_priv->drm, + "seamless DRRS supported for eDP panel.\n"); return downclock_mode; } @@ -7331,8 +7537,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, * with an already powered-on LVDS power sequencer. */ if (intel_get_lvds_encoder(dev_priv)) { - WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); - DRM_INFO("LVDS was detected, not registering eDP\n"); + drm_WARN_ON(dev, + !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); + drm_info(&dev_priv->drm, + "LVDS was detected, not registering eDP\n"); return false; } @@ -7348,7 +7556,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!has_dpcd) { /* if this fails, presume the device is a ghost */ - DRM_INFO("failed to retrieve link info, disabling eDP\n"); + drm_info(&dev_priv->drm, + "failed to retrieve link info, disabling eDP\n"); goto out_vdd_off; } @@ -7356,8 +7565,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_connector_update_edid_property(connector, - edid); + drm_connector_update_edid_property(connector, edid); + intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid); } else { kfree(edid); edid = ERR_PTR(-EINVAL); @@ -7393,17 +7602,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (pipe != PIPE_A && pipe != PIPE_B) pipe = PIPE_A; - DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n", - pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, + "using pipe %c for initial backlight setup\n", + pipe_name(pipe)); } intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); intel_connector->panel.backlight.power = intel_edp_backlight_power; intel_panel_setup_backlight(connector, pipe); - if (fixed_mode) - drm_connector_init_panel_orientation_property( - connector, fixed_mode->hdisplay, fixed_mode->vdisplay); + if (fixed_mode) { + drm_connector_set_panel_orientation_with_quirk(connector, + dev_priv->vbt.orientation, + fixed_mode->hdisplay, fixed_mode->vdisplay); + } return true; @@ -7459,10 +7671,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, INIT_WORK(&intel_connector->modeset_retry_work, intel_dp_modeset_retry_work_fn); - if (WARN(intel_dig_port->max_lanes < 1, - "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", - intel_dig_port->max_lanes, intel_encoder->base.base.id, - intel_encoder->base.name)) + if 
(drm_WARN(dev, intel_dig_port->max_lanes < 1, + "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return false; intel_dp_set_source_rates(intel_dp); @@ -7472,7 +7684,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->active_pipe = INVALID_PIPE; /* Preserve the current hw state. */ - intel_dp->DP = I915_READ(intel_dp->output_reg); + intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); intel_dp->attached_connector = intel_connector; if (intel_dp_is_port_edp(dev_priv, port)) { @@ -7480,7 +7692,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. */ - WARN_ON(intel_phy_is_tc(dev_priv, phy)); + drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; } else { type = DRM_MODE_CONNECTOR_DisplayPort; @@ -7498,14 +7710,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_encoder->type = INTEL_OUTPUT_EDP; /* eDP only on port B and/or C on vlv/chv */ - if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_dp_is_edp(intel_dp) && - port != PORT_B && port != PORT_C)) + if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) && + intel_dp_is_edp(intel_dp) && + port != PORT_B && port != PORT_C)) return false; - DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n", - type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", - intel_encoder->base.base.id, intel_encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "Adding %s connector on [ENCODER:%d:%s]\n", + type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", + intel_encoder->base.base.id, intel_encoder->base.name); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); @@ -7518,6 +7732,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, connector->ycbcr_420_allowed = true; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_dp_aux_init(intel_dp); @@ -7543,7 +7758,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim); if (ret) - DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); + drm_dbg_kms(&dev_priv->drm, + "HDCP init failed, skipping.\n"); } /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written @@ -7551,8 +7767,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, * generated on the port when a cable is not attached. 
*/ if (IS_G45(dev_priv)) { - u32 temp = I915_READ(PEG_BAND_GAP_DATA); - I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); + intel_de_write(dev_priv, PEG_BAND_GAP_DATA, + (temp & ~0xf) | 0xd); } return true; @@ -7616,6 +7833,8 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; + intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_port_to_power_domain(port); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 3da166054788..0c7be8ed1423 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#include <drm/i915_drm.h> - #include "i915_reg.h" enum pipe; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 7c653f8c307f..dbfa6895795b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -57,10 +57,27 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) */ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); u8 read_val[2] = { 0x0 }; + u8 mode_reg; u16 level = 0; + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &mode_reg) != 1) { + DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return 0; + } + + /* + * If we're not in DPCD control mode yet, the programmed brightness + * value is meaningless and we should assume max brightness + */ + if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) != + DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) + return connector->panel.backlight.max; + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, &read_val, sizeof(read_val)) < 0) { DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", @@ -82,7 +99,7 @@ static void intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); u8 vals[2] = { 0x0 }; vals[0] = level; @@ -110,62 +127,29 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; - u8 pn, pn_min, pn_max; + struct intel_dp *intel_dp = intel_attached_dp(connector); + const u8 pn = connector->panel.backlight.pwmgen_bit_count; + int freq, fxp, f, fxp_actual, fxp_min, fxp_max; - /* Find desired value of (F x P) - * Note that, if F x P is out of supported range, the maximum value or - * minimum value will applied automatically. So no need to check that. 
- */ freq = dev_priv->vbt.backlight.pwm_freq_hz; - DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq); if (!freq) { DRM_DEBUG_KMS("Use panel default backlight frequency\n"); return false; } fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); + f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); + fxp_actual = f << pn; - /* Use highest possible value of Pn for more granularity of brightness - * adjustment while satifying the conditions below. - * - Pn is in the range of Pn_min and Pn_max - * - F is in the range of 1 and 255 - * - FxP is within 25% of desired value. - * Note: 25% is arbitrary value and may need some tweak. - */ - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { - DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n"); - return false; - } - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { - DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n"); - return false; - } - pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; - + /* Ensure frequency is within 25% of desired value */ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); - if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { - DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n"); - return false; - } - - for (pn = pn_max; pn >= pn_min; pn--) { - f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); - fxp_actual = f << pn; - if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) - break; - } - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { - DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + if (fxp_min > fxp_actual || fxp_actual > fxp_max) { + DRM_DEBUG_KMS("Actual frequency out of range\n"); return false; } + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) { DRM_DEBUG_KMS("Failed to write aux backlight freq\n"); @@ -178,7 +162,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st const struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_panel *panel = &connector->panel; u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, @@ -197,6 +182,12 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT, + panel->backlight.pwmgen_bit_count) < 0) + DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + break; /* Do nothing when it is already DPCD mode */ @@ -216,8 +207,9 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st } } + intel_dp_aux_set_backlight(conn_state, + connector->panel.backlight.level); set_aux_backlight_enable(intel_dp, true); - intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level); } static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -226,20 +218,91 @@ static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old false); } +static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) +{ + struct drm_i915_private 
*i915 = to_i915(connector->base.dev); + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_panel *panel = &connector->panel; + u32 max_backlight = 0; + int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1; + u8 pn, pn_min, pn_max; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) { + pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + max_backlight = (1 << pn) - 1; + } + + /* Find desired value of (F x P) + * Note that, if F x P is out of supported range, the maximum value or + * minimum value will applied automatically. So no need to check that. + */ + freq = i915->vbt.backlight.pwm_freq_hz; + DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq); + if (!freq) { + DRM_DEBUG_KMS("Use panel default backlight frequency\n"); + return max_backlight; + } + + fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq); + + /* Use highest possible value of Pn for more granularity of brightness + * adjustment while satifying the conditions below. + * - Pn is in the range of Pn_min and Pn_max + * - F is in the range of 1 and 255 + * - FxP is within 25% of desired value. + * Note: 25% is arbitrary value and may need some tweak. + */ + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n"); + return max_backlight; + } + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) { + DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n"); + return max_backlight; + } + pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK; + + fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4); + fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4); + if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) { + DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n"); + return max_backlight; + } + + for (pn = pn_max; pn >= pn_min; pn--) { + f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255); + fxp_actual = f << pn; + if (fxp_min <= fxp_actual && fxp_actual <= fxp_max) + break; + } + + DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn); + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) { + DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n"); + return max_backlight; + } + panel->backlight.pwmgen_bit_count = pn; + + max_backlight = (1 << pn) - 1; + + return max_backlight; +} + static int intel_dp_aux_setup_backlight(struct intel_connector *connector, enum pipe pipe) { - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); struct intel_panel *panel = &connector->panel; - if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) - panel->backlight.max = 0xFFFF; - else - panel->backlight.max = 0xFF; + panel->backlight.max = intel_dp_aux_calc_max_backlight(connector); + if (!panel->backlight.max) + return -ENODEV; panel->backlight.min = 0; panel->backlight.level = intel_dp_aux_get_backlight(connector); - panel->backlight.enabled = panel->backlight.level != 0; return 0; @@ -248,7 +311,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector, static bool intel_dp_aux_display_control_capable(struct intel_connector *connector) { - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); + struct intel_dp *intel_dp = intel_attached_dp(connector); /* Check the eDP Display control capabilities registers to determine if * the panel can support backlight control over the aux channel @@ -265,15 +328,32 @@ intel_dp_aux_display_control_capable(struct intel_connector 
*connector) int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; - struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder); + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); if (i915_modparams.enable_dpcd_backlight == 0 || - (i915_modparams.enable_dpcd_backlight == -1 && - dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)) + !intel_dp_aux_display_control_capable(intel_connector)) return -ENODEV; - if (!intel_dp_aux_display_control_capable(intel_connector)) + /* + * There are a lot of machines that don't advertise the backlight + * control interface to use properly in their VBIOS, :\ + */ + if (dev_priv->vbt.backlight.type != + INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE && + i915_modparams.enable_dpcd_backlight != 1 && + !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks, + DP_QUIRK_FORCE_DPCD_BACKLIGHT)) { + DRM_DEV_INFO(dev->dev, + "Panel advertises DPCD backlight support, but " + "VBT disagrees. If your backlight controls " + "don't work try booting with " + "i915.enable_dpcd_backlight=1. If your machine " + "needs this, please file a _new_ bug report on " + "drm/i915, see " FDO_BUG_URL " for details.\n"); return -ENODEV; + } panel->backlight.setup = intel_dp_aux_setup_backlight; panel->backlight.enable = intel_dp_aux_enable_backlight; diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 2a1130dd1ad0..a7defb37ab00 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp) static bool intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 voltage; int voltage_tries, cr_tries, max_cr_tries; bool max_vswing_reached = false; @@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) &link_bw, &rate_select); if (link_bw) - DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw); + drm_dbg_kms(&i915->drm, + "Using LINK_BW_SET value %02x\n", link_bw); else - DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select); + drm_dbg_kms(&i915->drm, + "Using LINK_RATE_SET value %02x\n", rate_select); /* Write the link configuration data */ link_config[0] = link_bw; @@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) if (!intel_dp_reset_link_train(intel_dp, DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE)) { - DRM_ERROR("failed to enable link training\n"); + drm_err(&i915->drm, "failed to enable link training\n"); return false; } @@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); if (!intel_dp_get_link_status(intel_dp, link_status)) { - DRM_ERROR("failed to get link status\n"); + drm_err(&i915->drm, "failed to get link status\n"); return false; } if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { - DRM_DEBUG_KMS("clock recovery OK\n"); + drm_dbg_kms(&i915->drm, "clock recovery OK\n"); return true; } if (voltage_tries == 5) { - DRM_DEBUG_KMS("Same voltage tried 5 times\n"); + drm_dbg_kms(&i915->drm, + "Same voltage tried 5 times\n"); return false; } if (max_vswing_reached) { - 
DRM_DEBUG_KMS("Max Voltage Swing reached\n"); + drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n"); return false; } @@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) /* Update training set as requested by target */ intel_get_adjust_train(intel_dp, link_status); if (!intel_dp_update_link_train(intel_dp)) { - DRM_ERROR("failed to update link training\n"); + drm_err(&i915->drm, + "failed to update link training\n"); return false; } @@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) max_vswing_reached = true; } - DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries); + drm_err(&i915->drm, + "Failed clock recovery %d times, giving up!\n", max_cr_tries); return false; } @@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) return DP_TRAINING_PATTERN_4; } else if (intel_dp->link_rate == 810000) { if (!source_tps4) - DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n"); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "8.1 Gbps link rate without source HBR3/TPS4 support\n"); if (!sink_tps4) - DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "8.1 Gbps link rate without sink TPS4 support\n"); } /* * Intel platforms that support HBR2 also support TPS3. TPS3 support is @@ -271,9 +279,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) return DP_TRAINING_PATTERN_3; } else if (intel_dp->link_rate >= 540000) { if (!source_tps3) - DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); if (!sink_tps3) - DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); } return DP_TRAINING_PATTERN_2; @@ -282,6 +292,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) static bool intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int tries; u32 training_pattern; u8 link_status[DP_LINK_STATUS_SIZE]; @@ -295,7 +306,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, training_pattern)) { - DRM_ERROR("failed to start channel equalization\n"); + drm_err(&i915->drm, "failed to start channel equalization\n"); return false; } @@ -303,7 +314,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); if (!intel_dp_get_link_status(intel_dp, link_status)) { - DRM_ERROR("failed to get link status\n"); + drm_err(&i915->drm, + "failed to get link status\n"); break; } @@ -311,23 +323,25 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { intel_dp_dump_link_status(link_status); - DRM_DEBUG_KMS("Clock recovery check failed, cannot " - "continue channel equalization\n"); + drm_dbg_kms(&i915->drm, + "Clock recovery check failed, cannot " + "continue channel equalization\n"); break; } if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { channel_eq = true; - DRM_DEBUG_KMS("Channel EQ done. DP Training " - "successful\n"); + drm_dbg_kms(&i915->drm, "Channel EQ done. 
DP Training " + "successful\n"); break; } /* Update training set as requested by target */ intel_get_adjust_train(intel_dp, link_status); if (!intel_dp_update_link_train(intel_dp)) { - DRM_ERROR("failed to update link training\n"); + drm_err(&i915->drm, + "failed to update link training\n"); break; } } @@ -335,7 +349,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) /* Try 5 times, else fail and try at lower BW */ if (tries == 5) { intel_dp_dump_link_status(link_status); - DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + drm_dbg_kms(&i915->drm, + "Channel equalization failed 5 times\n"); } intel_dp_set_idle_link_train(intel_dp); @@ -362,17 +377,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (!intel_dp_link_training_channel_equalization(intel_dp)) goto failure_handling; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); return; failure_handling: - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); if (!intel_dp_get_link_train_fallback_values(intel_dp, intel_dp->link_rate, intel_dp->lane_count)) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index cba68c5a80fa..44f3fd251ca1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; void *port = connector->port; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; @@ -352,8 +352,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, intel_dp->active_mst_links--; last_mst_stream = intel_dp->active_mst_links == 0; - WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream && - !intel_dp_mst_is_master_trans(old_crtc_state)); + drm_WARN_ON(&dev_priv->drm, + INTEL_GEN(dev_priv) >= 12 && last_mst_stream && + !intel_dp_mst_is_master_trans(old_crtc_state)); intel_crtc_vblank_off(old_crtc_state); @@ -361,9 +362,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, drm_dp_update_payload_part2(&intel_dp->mst_mgr); - val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); + val = intel_de_read(dev_priv, + TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder)); val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC; - I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val); + intel_de_write(dev_priv, + TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), + val); if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_ACT_SENT, 1)) @@ -437,8 +441,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, 
connector->encoder = encoder; intel_mst->connector = connector; first_mst_stream = intel_dp->active_mst_links == 0; - WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream && - !intel_dp_mst_is_master_trans(pipe_config)); + drm_WARN_ON(&dev_priv->drm, + INTEL_GEN(dev_priv) >= 12 && first_mst_stream && + !intel_dp_mst_is_master_trans(pipe_config)); DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); @@ -459,8 +464,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, DRM_ERROR("failed to allocate vcpi\n"); intel_dp->active_mst_links++; - temp = I915_READ(intel_dp->regs.dp_tp_status); - I915_WRITE(intel_dp->regs.dp_tp_status, temp); + temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status); + intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); @@ -475,6 +480,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_enable_pipe_clock(pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); + + intel_dp_set_m_n(pipe_config, M1_N1); } static void intel_mst_enable_dp(struct intel_encoder *encoder, @@ -486,6 +493,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); + + intel_enable_pipe(pipe_config); + + intel_crtc_vblank_on(pipe_config); + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, @@ -535,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } +static int +intel_dp_mst_connector_late_register(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + int ret; + + ret = drm_dp_mst_connector_late_register(connector, + intel_connector->port); + if (ret < 0) + return ret; + + ret = intel_connector_register(connector); + if (ret < 0) + drm_dp_mst_connector_early_unregister(connector, + intel_connector->port); + + return ret; +} + +static void +intel_dp_mst_connector_early_unregister(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + + intel_connector_unregister(connector); + drm_dp_mst_connector_early_unregister(connector, + intel_connector->port); +} + static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_connector_register, - .early_unregister = intel_connector_unregister, + .late_register = intel_dp_mst_connector_late_register, + .early_unregister = intel_dp_mst_connector_early_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -632,9 +674,9 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = { static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) { - if (connector->encoder && connector->base.state->crtc) { + if (intel_attached_encoder(connector) && connector->base.state->crtc) { enum pipe pipe; - if (!connector->encoder->get_hw_state(connector->encoder, &pipe)) + if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), 
&pipe)) return false; return true; } @@ -706,36 +748,8 @@ err: return NULL; } -static void intel_dp_register_mst_connector(struct drm_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->dev); - - if (dev_priv->fbdev) - drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, - connector); - - drm_connector_register(connector); -} - -static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, - struct drm_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->dev); - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); - drm_connector_unregister(connector); - - if (dev_priv->fbdev) - drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, - connector); - - drm_connector_put(connector); -} - static const struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, - .register_connector = intel_dp_register_mst_connector, - .destroy_connector = intel_dp_destroy_mst_connector, }; static struct intel_dp_mst_encoder * diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 6fb1f7a7364e..399a7edb4568 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -259,7 +259,8 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port, } } - WARN(1, "PHY not found for PORT %c", port_name(port)); + drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c", + port_name(port)); *phy = DPIO_PHY0; *ch = DPIO_CH0; } @@ -278,33 +279,34 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv, * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. 
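The new late_register/early_unregister pair above is the usual two-step register-with-unwind idiom: register the MST side, then the base connector, and roll back the first step if the second fails; unregister mirrors it in reverse order. A minimal standalone sketch with stub functions and a simulated failure:

	#include <stdio.h>

	static int register_mst(void)    { puts("mst registered");   return 0; }
	static void unregister_mst(void) { puts("mst unregistered"); }
	static int register_base(void)   { return -1; /* simulate failure */ }

	/* do A, then B; if B fails, undo A before returning the error */
	static int late_register(void)
	{
		int ret = register_mst();

		if (ret < 0)
			return ret;

		ret = register_base();
		if (ret < 0)
			unregister_mst();	/* unwind step A */

		return ret;
	}

	int main(void)
	{
		printf("late_register() = %d\n", late_register());
		return 0;
	}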
*/ - val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch)); + val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT); - I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val); + intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); - val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch)); + val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch)); val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE); val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT; - I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val); + intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val); - val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch)); + val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch)); val &= ~SCALE_DCOMP_METHOD; if (enable) val |= SCALE_DCOMP_METHOD; if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD)) - DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set"); + drm_err(&dev_priv->drm, + "Disabled scaling while ouniqetrangenmethod was set"); - I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val); + intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val); - val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch)); + val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch)); val &= ~DE_EMPHASIS; val |= deemphasis << DEEMPH_SHIFT; - I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val); + intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val); - val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch)); + val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch)); val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT; - I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val); + intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val); } bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, @@ -314,20 +316,20 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, phy_info = bxt_get_phy_info(dev_priv, phy); - if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) + if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask)) return false; - if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & + if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) & (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) { - DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n", - phy); + drm_dbg(&dev_priv->drm, + "DDI PHY %d powered, but power hasn't settled\n", phy); return false; } - if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { - DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n", - phy); + if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) { + drm_dbg(&dev_priv->drm, + "DDI PHY %d powered, but still in reset\n", phy); return false; } @@ -337,7 +339,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); + u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy)); return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; } @@ -347,7 +349,8 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, { if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy), GRC_DONE, 10)) - DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); + drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n", + phy); } static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, @@ -364,18 +367,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy); if (bxt_ddi_phy_verify_state(dev_priv, phy)) { 
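Nearly every conversion in this PHY code is the same read-modify-write shape: intel_de_read(), clear a field, set the new bits, intel_de_write(). A toy model of that shape over a fake register file; rmw() is a hypothetical helper for illustration, not an i915 API:

	#include <stdint.h>
	#include <stdio.h>

	/* "regs" stands in for the MMIO BAR; a real driver would use
	 * readl()/writel() on an ioremapped pointer. */
	static uint32_t regs[16];

	static uint32_t rmw(unsigned int reg, uint32_t clear, uint32_t set)
	{
		uint32_t old = regs[reg];

		regs[reg] = (old & ~clear) | set;
		return old;
	}

	int main(void)
	{
		regs[3] = 0xffff0000;
		rmw(3, 0xf, 0xd);	/* clear a 4-bit field, set new value */
		printf("reg3 = 0x%08x\n", regs[3]);
		return 0;
	}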
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " - "won't reprogram it\n", phy); + drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, " + "won't reprogram it\n", phy); return; } - DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " - "force reprogramming it\n", phy); + drm_dbg(&dev_priv->drm, + "DDI PHY %d enabled with invalid state, " + "force reprogramming it\n", phy); } - val = I915_READ(BXT_P_CR_GT_DISP_PWRON); + val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); val |= phy_info->pwron_mask; - I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); + intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); /* * The PHY registers start out inaccessible and respond to reads with @@ -390,29 +394,30 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) - DRM_ERROR("timeout during PHY%d power on\n", phy); + drm_err(&dev_priv->drm, "timeout during PHY%d power on\n", + phy); /* Program PLL Rcomp code offset */ - val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); + val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy)); val &= ~IREF0RC_OFFSET_MASK; val |= 0xE4 << IREF0RC_OFFSET_SHIFT; - I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val); + intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val); - val = I915_READ(BXT_PORT_CL1CM_DW10(phy)); + val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy)); val &= ~IREF1RC_OFFSET_MASK; val |= 0xE4 << IREF1RC_OFFSET_SHIFT; - I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val); + intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val); /* Program power gating */ - val = I915_READ(BXT_PORT_CL1CM_DW28(phy)); + val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy)); val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG; - I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val); + intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val); if (phy_info->dual_channel) { - val = I915_READ(BXT_PORT_CL2CM_DW6(phy)); + val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy)); val |= DW6_OLDO_DYN_PWR_DOWN_EN; - I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val); + intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val); } if (phy_info->rcomp_phy != -1) { @@ -430,19 +435,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, grc_code = val << GRC_CODE_FAST_SHIFT | val << GRC_CODE_SLOW_SHIFT | val; - I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code); + intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); - val = I915_READ(BXT_PORT_REF_DW8(phy)); + val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy)); val |= GRC_DIS | GRC_RDY_OVRD; - I915_WRITE(BXT_PORT_REF_DW8(phy), val); + intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); - val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); + val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); val |= COMMON_RESET_DIS; - I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); + intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); } void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) @@ -452,13 +457,13 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) phy_info = bxt_get_phy_info(dev_priv, phy); - val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); + val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); val &= ~COMMON_RESET_DIS; - I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); + intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); - val = I915_READ(BXT_P_CR_GT_DISP_PWRON); + val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); val &= ~phy_info->pwron_mask; - I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); + 
intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); } void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) @@ -496,7 +501,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, va_list args; u32 val; - val = I915_READ(reg); + val = intel_de_read(dev_priv, reg); if ((val & mask) == expected) return true; @@ -504,7 +509,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, vaf.fmt = reg_fmt; vaf.va = &args; - DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: " + drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: " "current %08x, expected %08x (mask %08x)\n", phy, &vaf, reg.reg, val, (val & ~mask) | expected, mask); @@ -599,7 +604,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); for (lane = 0; lane < 4; lane++) { - u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane)); + u32 val = intel_de_read(dev_priv, + BXT_PORT_TX_DW14_LN(phy, ch, lane)); /* * Note that on CHV this flag is called UPAR, but has @@ -609,7 +615,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder, if (lane_lat_optim_mask & BIT(lane)) val |= LATENCY_OPTIM; - I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val); + intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane), + val); } } @@ -627,7 +634,8 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder) mask = 0; for (lane = 0; lane < 4; lane++) { - u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane)); + u32 val = intel_de_read(dev_priv, + BXT_PORT_TX_DW14_LN(phy, ch, lane)); if (val & LATENCY_OPTIM) mask |= BIT(lane); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index c75e34d87111..2d47f1f756a2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -45,6 +45,22 @@ * commit phase. 
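The struct intel_dpll_mgr introduced in the next hunk is a per-platform ops table: common code picks one table at probe time and dispatches through it instead of branching on platform checks at each call site. A compilable miniature of the pattern, with all names illustrative:

	#include <stdio.h>

	struct pll_mgr {
		const char *name;
		int (*get_plls)(int crtc);
		void (*put_plls)(int crtc);
	};

	static int hsw_get(int crtc)  { printf("hsw get, crtc %d\n", crtc); return 0; }
	static void hsw_put(int crtc) { printf("hsw put, crtc %d\n", crtc); }

	static const struct pll_mgr hsw_mgr = {
		.name = "hsw",
		.get_plls = hsw_get,
		.put_plls = hsw_put,
	};

	int main(void)
	{
		/* chosen once, e.g. at driver probe time */
		const struct pll_mgr *mgr = &hsw_mgr;

		mgr->get_plls(0);
		mgr->put_plls(0);
		return 0;
	}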
*/ +struct intel_dpll_mgr { + const struct dpll_info *dpll_info; + + bool (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*put_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc); + void (*update_active_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*update_ref_clks)(struct drm_i915_private *i915); + void (*dump_hw_state)(struct drm_i915_private *dev_priv, + const struct intel_dpll_hw_state *hw_state); +}; + static void intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll_state *shared_dpll) @@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv, enum intel_dpll_id i; /* Copy shared dpll state */ - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i]; shared_dpll[i] = pll->state; } @@ -88,7 +104,7 @@ struct intel_shared_dpll * intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, enum intel_dpll_id id) { - return &dev_priv->shared_dplls[id]; + return &dev_priv->dpll.shared_dplls[id]; } /** @@ -103,11 +119,14 @@ enum intel_dpll_id intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - if (WARN_ON(pll < dev_priv->shared_dplls|| - pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) + long pll_idx = pll - dev_priv->dpll.shared_dplls; + + if (drm_WARN_ON(&dev_priv->drm, + pll_idx < 0 || + pll_idx >= dev_priv->dpll.num_shared_dpll)) return -1; - return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); + return pll_idx; } /* For ILK+ */ @@ -118,7 +137,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool cur_state; struct intel_dpll_hw_state hw_state; - if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) + if (drm_WARN(&dev_priv->drm, !pll, + "asserting DPLL %s with no DPLL\n", onoff(state))) return; cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state); @@ -140,19 +160,19 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - if (WARN_ON(pll == NULL)) + if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll_lock); - WARN_ON(!pll->state.crtc_mask); + mutex_lock(&dev_priv->dpll.lock); + drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask); if (!pll->active_mask) { - DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name); - WARN_ON(pll->on); + drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name); + drm_WARN_ON(&dev_priv->drm, pll->on); assert_shared_dpll_disabled(dev_priv, pll); pll->info->funcs->prepare(dev_priv, pll); } - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } /** @@ -169,35 +189,36 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) unsigned int crtc_mask = drm_crtc_mask(&crtc->base); unsigned int old_mask; - if (WARN_ON(pll == NULL)) + if (drm_WARN_ON(&dev_priv->drm, pll == NULL)) return; - mutex_lock(&dev_priv->dpll_lock); + mutex_lock(&dev_priv->dpll.lock); old_mask = pll->active_mask; - if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) || - WARN_ON(pll->active_mask & crtc_mask)) + if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) || + 
drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask)) goto out; pll->active_mask |= crtc_mask; - DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n", - pll->info->name, pll->active_mask, pll->on, - crtc->base.base.id); + drm_dbg_kms(&dev_priv->drm, + "enable %s (active %x, on? %d) for crtc %d\n", + pll->info->name, pll->active_mask, pll->on, + crtc->base.base.id); if (old_mask) { - WARN_ON(!pll->on); + drm_WARN_ON(&dev_priv->drm, !pll->on); assert_shared_dpll_enabled(dev_priv, pll); goto out; } - WARN_ON(pll->on); + drm_WARN_ON(&dev_priv->drm, pll->on); - DRM_DEBUG_KMS("enabling %s\n", pll->info->name); + drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name); pll->info->funcs->enable(dev_priv, pll); pll->on = true; out: - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } /** @@ -220,27 +241,28 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) if (pll == NULL) return; - mutex_lock(&dev_priv->dpll_lock); - if (WARN_ON(!(pll->active_mask & crtc_mask))) + mutex_lock(&dev_priv->dpll.lock); + if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask))) goto out; - DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n", - pll->info->name, pll->active_mask, pll->on, - crtc->base.base.id); + drm_dbg_kms(&dev_priv->drm, + "disable %s (active %x, on? %d) for crtc %d\n", + pll->info->name, pll->active_mask, pll->on, + crtc->base.base.id); assert_shared_dpll_enabled(dev_priv, pll); - WARN_ON(!pll->on); + drm_WARN_ON(&dev_priv->drm, !pll->on); pll->active_mask &= ~crtc_mask; if (pll->active_mask) goto out; - DRM_DEBUG_KMS("disabling %s\n", pll->info->name); + drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name); pll->info->funcs->disable(dev_priv, pll); pll->on = false; out: - mutex_unlock(&dev_priv->dpll_lock); + mutex_unlock(&dev_priv->dpll.lock); } static struct intel_shared_dpll * @@ -256,10 +278,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state, shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); + drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { - pll = &dev_priv->shared_dplls[i]; + pll = &dev_priv->dpll.shared_dplls[i]; /* Only want to check enabled timings first */ if (shared_dpll[i].crtc_mask == 0) { @@ -271,20 +293,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state, if (memcmp(pll_state, &shared_dpll[i].hw_state, sizeof(*pll_state)) == 0) { - DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", - crtc->base.base.id, crtc->base.name, - pll->info->name, - shared_dpll[i].crtc_mask, - pll->active_mask); + drm_dbg_kms(&dev_priv->drm, + "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", + crtc->base.base.id, crtc->base.name, + pll->info->name, + shared_dpll[i].crtc_mask, + pll->active_mask); return pll; } } /* Ok no matching timings, maybe there's a free one? 
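Worth noting in the intel_get_shared_dpll_id() rewrite above: the old pointer comparison warned only when pll was strictly greater than &shared_dplls[num_shared_dpll], so the one-past-the-end element slipped through; converting to an index first and checking idx >= num closes that. A standalone illustration:

	#include <stdio.h>

	struct pll { int dummy; };

	#define NUM_PLLS 4
	static struct pll plls[NUM_PLLS];

	static long pll_id(const struct pll *p)
	{
		long idx = p - plls;	/* element index, not byte offset */

		if (idx < 0 || idx >= NUM_PLLS)
			return -1;	/* reject anything out of range */

		return idx;
	}

	int main(void)
	{
		/* &plls[NUM_PLLS] is a legal one-past-end pointer: rejected */
		printf("%ld %ld\n", pll_id(&plls[2]), pll_id(&plls[NUM_PLLS]));
		return 0;
	}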
*/ if (unused_pll) { - DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", - crtc->base.base.id, crtc->base.name, - unused_pll->info->name); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n", + crtc->base.base.id, crtc->base.name, + unused_pll->info->name); return unused_pll; } @@ -297,6 +320,7 @@ intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; @@ -305,8 +329,8 @@ intel_reference_shared_dpll(struct intel_atomic_state *state, if (shared_dpll[id].crtc_mask == 0) shared_dpll[id].hw_state = *pll_state; - DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, - pipe_name(crtc->pipe)); + drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name, + pipe_name(crtc->pipe)); shared_dpll[id].crtc_mask |= 1 << crtc->pipe; } @@ -357,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state) if (!state->dpll_set) return; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { struct intel_shared_dpll *pll = - &dev_priv->shared_dplls[i]; + &dev_priv->dpll.shared_dplls[i]; swap(pll->state, shared_dpll[i]); } @@ -378,10 +402,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(PCH_DPLL(id)); + val = intel_de_read(dev_priv, PCH_DPLL(id)); hw_state->dpll = val; - hw_state->fp0 = I915_READ(PCH_FP0(id)); - hw_state->fp1 = I915_READ(PCH_FP1(id)); + hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id)); + hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id)); intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); @@ -393,8 +417,8 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; - I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0); - I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1); + intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0); + intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1); } static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) @@ -404,7 +428,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); - val = I915_READ(PCH_DREF_CONTROL); + val = intel_de_read(dev_priv, PCH_DREF_CONTROL); enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | DREF_SUPERSPREAD_SOURCE_MASK)); I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); @@ -418,10 +442,10 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, /* PCH refclock must be enabled first */ ibx_assert_pch_refclk_enabled(dev_priv); - I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(id)); + intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(150); /* The pixel multiplier can only be updated once the @@ -429,8 +453,8 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, * * So write it again. 
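The ibx enable path above is a classic MMIO bring-up sequence: write the enable bit, posting-read to flush the write, delay for the clocks to settle, then write the same value again so the pixel multiplier latches. A toy model of the ordering; the mmio_* accessors are stand-ins:

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	static volatile uint32_t fake_reg;

	static void mmio_write(uint32_t val) { fake_reg = val; }
	static uint32_t mmio_read(void)      { return fake_reg; }

	int main(void)
	{
		mmio_write(0x80000000);	/* DPLL enable bit */
		(void)mmio_read();	/* posting read: force the write out */
		usleep(150);		/* wait for the clocks to stabilize */

		mmio_write(0x80000000);	/* write again to latch the multiplier */
		(void)mmio_read();
		usleep(200);

		printf("dpll = 0x%08x\n", mmio_read());
		return 0;
	}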
*/ - I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll); - POSTING_READ(PCH_DPLL(id)); + intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll); + intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(200); } @@ -439,8 +463,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, { const enum intel_dpll_id id = pll->info->id; - I915_WRITE(PCH_DPLL(id), 0); - POSTING_READ(PCH_DPLL(id)); + intel_de_write(dev_priv, PCH_DPLL(id), 0); + intel_de_posting_read(dev_priv, PCH_DPLL(id)); udelay(200); } @@ -457,11 +481,12 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, if (HAS_PCH_IBX(dev_priv)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->shared_dplls[i]; + pll = &dev_priv->dpll.shared_dplls[i]; - DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", - crtc->base.base.id, crtc->base.name, - pll->info->name); + drm_dbg_kms(&dev_priv->drm, + "[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, + pll->info->name); } else { pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, @@ -484,12 +509,13 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " - "fp0: 0x%x, fp1: 0x%x\n", - hw_state->dpll, - hw_state->dpll_md, - hw_state->fp0, - hw_state->fp1); + drm_dbg_kms(&dev_priv->drm, + "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " + "fp0: 0x%x, fp1: 0x%x\n", + hw_state->dpll, + hw_state->dpll_md, + hw_state->fp0, + hw_state->fp1); } static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { @@ -499,21 +525,34 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { .get_hw_state = ibx_pch_dpll_get_hw_state, }; +static const struct dpll_info pch_plls[] = { + { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, + { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, + { }, +}; + +static const struct intel_dpll_mgr pch_pll_mgr = { + .dpll_info = pch_plls, + .get_dplls = ibx_get_dpll, + .put_dplls = intel_put_dpll, + .dump_hw_state = ibx_dump_hw_state, +}; + static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll); - POSTING_READ(WRPLL_CTL(id)); + intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll); + intel_de_posting_read(dev_priv, WRPLL_CTL(id)); udelay(20); } static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - I915_WRITE(SPLL_CTL, pll->state.hw_state.spll); - POSTING_READ(SPLL_CTL); + intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll); + intel_de_posting_read(dev_priv, SPLL_CTL); udelay(20); } @@ -523,9 +562,9 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; u32 val; - val = I915_READ(WRPLL_CTL(id)); - I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL(id)); + val = intel_de_read(dev_priv, WRPLL_CTL(id)); + intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); + intel_de_posting_read(dev_priv, WRPLL_CTL(id)); /* * Try to set up the PCH reference clock once all DPLLs @@ -541,9 +580,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, enum intel_dpll_id id = pll->info->id; u32 val; - val = I915_READ(SPLL_CTL); - 
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); + val = intel_de_read(dev_priv, SPLL_CTL); + intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE); + intel_de_posting_read(dev_priv, SPLL_CTL); /* * Try to set up the PCH reference clock once all DPLLs @@ -566,7 +605,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(WRPLL_CTL(id)); + val = intel_de_read(dev_priv, WRPLL_CTL(id)); hw_state->wrpll = val; intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); @@ -586,7 +625,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(SPLL_CTL); + val = intel_de_read(dev_priv, SPLL_CTL); hw_state->spll = val; intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); @@ -811,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, } static struct intel_shared_dpll * -hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, - struct intel_crtc *crtc) +hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -839,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, return pll; } +static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll) +{ + int refclk; + int n, p, r; + u32 wrpll = pll->state.hw_state.wrpll; + + switch (wrpll & WRPLL_REF_MASK) { + case WRPLL_REF_SPECIAL_HSW: + /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */ + if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) { + refclk = dev_priv->dpll.ref_clks.nssc; + break; + } + /* fall through */ + case WRPLL_REF_PCH_SSC: + /* + * We could calculate spread here, but our checking + * code only cares about 5% accuracy, and spread is a max of + * 0.5% downspread. 
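To make the new hsw_ddi_wrpll_get_freq() concrete, here is its closing formula evaluated standalone. The divider fields below are made up to land on a familiar 148.5 MHz HDMI clock; refclk is in kHz, as in the driver:

	#include <stdio.h>

	int main(void)
	{
		int refclk = 135000;		/* PCH SSC reference, kHz */
		int n = 44, p = 4, r = 2;	/* raw divider fields */

		/* convert to kHz; p and r carry a fixed-point portion */
		int freq = (refclk * n / 10) / (p * r) * 2;

		printf("port clock = %d kHz\n", freq);	/* 148500 */
		return 0;
	}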
+ */ + refclk = dev_priv->dpll.ref_clks.ssc; + break; + case WRPLL_REF_LCPLL: + refclk = 2700000; + break; + default: + MISSING_CASE(wrpll); + return 0; + } + + r = wrpll & WRPLL_DIVIDER_REF_MASK; + p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; + n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; + + /* Convert to KHz, p & r have a fixed point portion */ + return (refclk * n / 10) / (p * r) * 2; +} + static struct intel_shared_dpll * -hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) +hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; @@ -858,7 +936,8 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) pll_id = DPLL_ID_LCPLL_2700; break; default: - DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock); + drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n", + clock); return NULL; } @@ -870,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) return pll; } +static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->info->id) { + case DPLL_ID_LCPLL_810: + link_clock = 81000; + break; + case DPLL_ID_LCPLL_1350: + link_clock = 135000; + break; + case DPLL_ID_LCPLL_2700: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "bad port clock sel\n"); + break; + } + + return link_clock * 2; +} + +static struct intel_shared_dpll * +hsw_ddi_spll_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (WARN_ON(crtc_state->port_clock / 2 != 135000)) + return NULL; + + crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | + SPLL_REF_MUXED_SSC; + + return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, + BIT(DPLL_ID_SPLL)); +} + +static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) { + case SPLL_FREQ_810MHz: + link_clock = 81000; + break; + case SPLL_FREQ_1350MHz: + link_clock = 135000; + break; + case SPLL_FREQ_2700MHz: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "bad spll freq\n"); + break; + } + + return link_clock * 2; +} + static bool hsw_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -881,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = hsw_ddi_hdmi_get_dpll(state, crtc); - } else if (intel_crtc_has_dp_encoder(crtc_state)) { - pll = hsw_ddi_dp_get_dpll(crtc_state); - } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { - if (WARN_ON(crtc_state->port_clock / 2 != 135000)) - return false; - - crtc_state->dpll_hw_state.spll = - SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; - - pll = intel_find_shared_dpll(state, crtc, - &crtc_state->dpll_hw_state, - BIT(DPLL_ID_SPLL)); - } else { + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + pll = hsw_ddi_wrpll_get_dpll(state, crtc); + else if (intel_crtc_has_dp_encoder(crtc_state)) + pll = hsw_ddi_lcpll_get_dpll(crtc_state); + else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + pll = hsw_ddi_spll_get_dpll(state, crtc); + 
else return false; - } if (!pll) return false; @@ -910,23 +1043,35 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, return true; } +static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + i915->dpll.ref_clks.ssc = 135000; + /* Non-SSC is only used on non-ULT HSW. */ + if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT) + i915->dpll.ref_clks.nssc = 24000; + else + i915->dpll.ref_clks.nssc = 135000; +} + static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", - hw_state->wrpll, hw_state->spll); + drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", + hw_state->wrpll, hw_state->spll); } static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { .enable = hsw_ddi_wrpll_enable, .disable = hsw_ddi_wrpll_disable, .get_hw_state = hsw_ddi_wrpll_get_hw_state, + .get_freq = hsw_ddi_wrpll_get_freq, }; static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { .enable = hsw_ddi_spll_enable, .disable = hsw_ddi_spll_disable, .get_hw_state = hsw_ddi_spll_get_hw_state, + .get_freq = hsw_ddi_spll_get_freq, }; static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, @@ -950,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { .enable = hsw_ddi_lcpll_enable, .disable = hsw_ddi_lcpll_disable, .get_hw_state = hsw_ddi_lcpll_get_hw_state, + .get_freq = hsw_ddi_lcpll_get_freq, +}; + +static const struct dpll_info hsw_plls[] = { + { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, + { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, + { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, + { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, + { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, + { }, +}; + +static const struct intel_dpll_mgr hsw_pll_mgr = { + .dpll_info = hsw_plls, + .get_dplls = hsw_get_dpll, + .put_dplls = intel_put_dpll, + .update_ref_clks = hsw_update_dpll_ref_clks, + .dump_hw_state = hsw_dump_hw_state, }; struct skl_dpll_regs { @@ -989,15 +1153,15 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; u32 val; - val = I915_READ(DPLL_CTRL1); + val = intel_de_read(dev_priv, DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id)); val |= pll->state.hw_state.ctrl1 << (id * 6); - I915_WRITE(DPLL_CTRL1, val); - POSTING_READ(DPLL_CTRL1); + intel_de_write(dev_priv, DPLL_CTRL1, val); + intel_de_posting_read(dev_priv, DPLL_CTRL1); } static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, @@ -1008,17 +1172,17 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, skl_ddi_pll_write_ctrl1(dev_priv, pll); - I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1); - I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2); - POSTING_READ(regs[id].cfgcr1); - POSTING_READ(regs[id].cfgcr2); + intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1); + intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2); + intel_de_posting_read(dev_priv, regs[id].cfgcr1); + intel_de_posting_read(dev_priv, regs[id].cfgcr2); /* the enable bit is always bit 31 */ - I915_WRITE(regs[id].ctl, - I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE); + intel_de_write(dev_priv, regs[id].ctl, + intel_de_read(dev_priv, 
regs[id].ctl) | LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) - DRM_ERROR("DPLL %d not locked\n", id); + drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); } static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, @@ -1034,9 +1198,9 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ - I915_WRITE(regs[id].ctl, - I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE); - POSTING_READ(regs[id].ctl); + intel_de_write(dev_priv, regs[id].ctl, + intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE); + intel_de_posting_read(dev_priv, regs[id].ctl); } static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, @@ -1061,17 +1225,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = false; - val = I915_READ(regs[id].ctl); + val = intel_de_read(dev_priv, regs[id].ctl); if (!(val & LCPLL_PLL_ENABLE)) goto out; - val = I915_READ(DPLL_CTRL1); + val = intel_de_read(dev_priv, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; /* avoid reading back stale values if HDMI mode is not enabled */ if (val & DPLL_CTRL1_HDMI_MODE(id)) { - hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1); - hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2); + hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1); + hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2); } ret = true; @@ -1099,11 +1263,11 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, ret = false; /* DPLL0 is always enabled since it drives CDCLK */ - val = I915_READ(regs[id].ctl); - if (WARN_ON(!(val & LCPLL_PLL_ENABLE))) + val = intel_de_read(dev_priv, regs[id].ctl); + if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE))) goto out; - val = I915_READ(DPLL_CTRL1); + val = intel_de_read(dev_priv, DPLL_CTRL1); hw_state->ctrl1 = (val >> (id * 6)) & 0x3f; ret = true; @@ -1222,6 +1386,7 @@ struct skl_wrpll_params { static void skl_wrpll_params_populate(struct skl_wrpll_params *params, u64 afe_clock, + int ref_clock, u64 central_freq, u32 p0, u32 p1, u32 p2) { @@ -1281,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params, * Intermediate values are in Hz. 
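The populate helper's DCO split, now driven by the ref_clock parameter rather than a hard-coded 24 MHz, yields an integer multiple of the reference plus a fraction in 1/0x8000 steps. A standalone check of the arithmetic with a made-up DCO frequency:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dco_freq = 9012000000ULL;	/* Hz, sample value */
		int ref_clock = 24000;			/* kHz */

		uint32_t dco_integer = dco_freq / (ref_clock * 1000ULL);
		/* fractional part in units of ref/0x8000 */
		uint32_t dco_fraction =
			(dco_freq / (ref_clock / 1000) -
			 dco_integer * 1000000ULL) * 0x8000 / 1000000;

		printf("integer=%u fraction=0x%x\n", dco_integer, dco_fraction);
		/* prints integer=375 fraction=0x4000, i.e. DCO = 375.5 x ref */
		return 0;
	}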
* Divide by MHz to match bsepc */ - params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); + params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1)); params->dco_fraction = - div_u64((div_u64(dco_freq, 24) - + div_u64((div_u64(dco_freq, ref_clock / KHz(1)) - params->dco_integer * MHz(1)) * 0x8000, MHz(1)); } static bool skl_ddi_calculate_wrpll(int clock /* in Hz */, + int ref_clock, struct skl_wrpll_params *wrpll_params) { u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ @@ -1354,14 +1520,15 @@ skip_remaining_dividers: */ p0 = p1 = p2 = 0; skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); - skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq, - p0, p1, p2); + skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock, + ctx.central_freq, p0, p1, p2); return true; } static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); u32 ctrl1, cfgcr1, cfgcr2; struct skl_wrpll_params wrpll_params = { 0, }; @@ -1374,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + i915->dpll.ref_clks.nssc, &wrpll_params)) return false; @@ -1396,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + int ref_clock = i915->dpll.ref_clks.nssc; + u32 p0, p1, p2, dco_freq; + + p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; + + if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR2_PDIV_1: + p0 = 1; + break; + case DPLL_CFGCR2_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR2_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR2_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR2_KDIV_5: + p2 = 5; + break; + case DPLL_CFGCR2_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR2_KDIV_3: + p2 = 3; + break; + case DPLL_CFGCR2_KDIV_1: + p2 = 1; + break; + } + + dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) * + ref_clock; + + dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * + ref_clock / 0x8000; + + if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + static bool skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -1436,25 +1662,62 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } +static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch ((pll->state.hw_state.ctrl1 & + DPLL_CTRL1_LINK_RATE_MASK(0)) >> + DPLL_CTRL1_LINK_RATE_SHIFT(0)) { + case DPLL_CTRL1_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CTRL1_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CTRL1_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CTRL1_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CTRL1_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CTRL1_LINK_RATE_2700: + link_clock = 270000; + break; + default: + drm_WARN(&i915->drm, 1, "Unsupported link rate\n"); + break; + } + + return link_clock * 2; +} + static bool skl_get_dpll(struct 
intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; bool bret; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { bret = skl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { - DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); + drm_dbg_kms(&i915->drm, + "Could not get HDMI pll dividers.\n"); return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { - DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); + drm_dbg_kms(&i915->drm, + "Could not set DP dpll HW state.\n"); return false; } } else { @@ -1482,10 +1745,29 @@ static bool skl_get_dpll(struct intel_atomic_state *state, return true; } +static int skl_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + /* + * ctrl1 register is already shifted for each pll, just use 0 to get + * the internal shift for each field + */ + if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) + return skl_ddi_wrpll_get_freq(i915, pll); + else + return skl_ddi_lcpll_get_freq(i915, pll); +} + +static void skl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC ref */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void skl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: " + drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", hw_state->ctrl1, hw_state->cfgcr1, @@ -1496,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { .enable = skl_ddi_pll_enable, .disable = skl_ddi_pll_disable, .get_hw_state = skl_ddi_pll_get_hw_state, + .get_freq = skl_ddi_pll_get_freq, }; static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { .enable = skl_ddi_dpll0_enable, .disable = skl_ddi_dpll0_disable, .get_hw_state = skl_ddi_dpll0_get_hw_state, + .get_freq = skl_ddi_pll_get_freq, +}; + +static const struct dpll_info skl_plls[] = { + { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, + { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, + { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, + { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, + { }, +}; + +static const struct intel_dpll_mgr skl_pll_mgr = { + .dpll_info = skl_plls, + .get_dplls = skl_get_dpll, + .put_dplls = intel_put_dpll, + .update_ref_clks = skl_update_dpll_ref_clks, + .dump_hw_state = skl_dump_hw_state, }; static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, @@ -1515,113 +1815,114 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* Non-SSC reference */ - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); temp |= PORT_PLL_REF_SEL; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); if (IS_GEMINILAKE(dev_priv)) { - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); temp |= PORT_PLL_POWER_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); - if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & + if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) 
& PORT_PLL_POWER_STATE), 200)) - DRM_ERROR("Power state not set for PLL:%d\n", port); + drm_err(&dev_priv->drm, + "Power state not set for PLL:%d\n", port); } /* Disable 10 bit clock */ - temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Write P1 & P2 */ - temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); temp |= pll->state.hw_state.ebb0; - I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp); /* Write M2 integer */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 0)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); temp &= ~PORT_PLL_M2_MASK; temp |= pll->state.hw_state.pll0; - I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp); /* Write N */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 1)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); temp &= ~PORT_PLL_N_MASK; temp |= pll->state.hw_state.pll1; - I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp); /* Write M2 fraction */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 2)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); temp &= ~PORT_PLL_M2_FRAC_MASK; temp |= pll->state.hw_state.pll2; - I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp); /* Write M2 fraction enable */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 3)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); temp &= ~PORT_PLL_M2_FRAC_ENABLE; temp |= pll->state.hw_state.pll3; - I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp); /* Write coeff */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 6)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); temp &= ~PORT_PLL_PROP_COEFF_MASK; temp &= ~PORT_PLL_INT_COEFF_MASK; temp &= ~PORT_PLL_GAIN_CTL_MASK; temp |= pll->state.hw_state.pll6; - I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ - temp = I915_READ(BXT_PORT_PLL(phy, ch, 8)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); temp &= ~PORT_PLL_TARGET_CNT_MASK; temp |= pll->state.hw_state.pll8; - I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp); - temp = I915_READ(BXT_PORT_PLL(phy, ch, 9)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; temp |= pll->state.hw_state.pll9; - I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp); - temp = I915_READ(BXT_PORT_PLL(phy, ch, 10)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; temp &= ~PORT_PLL_DCO_AMP_MASK; temp |= pll->state.hw_state.pll10; - I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp); + intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp); /* Recalibrate with new settings */ - temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); temp |= PORT_PLL_RECALIBRATE; - I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); temp &= 
~PORT_PLL_10BIT_CLK_ENABLE; temp |= pll->state.hw_state.ebb4; - I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); temp |= PORT_PLL_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); - POSTING_READ(BXT_PORT_PLL_ENABLE(port)); + intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), + if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), 200)) - DRM_ERROR("PLL %d not locked\n", port); + drm_err(&dev_priv->drm, "PLL %d not locked\n", port); if (IS_GEMINILAKE(dev_priv)) { - temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch)); + temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch)); temp |= DCC_DELAY_RANGE_2; - I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp); } /* * While we write to the group register to program all lanes at once we * can read only lane registers and we pick lanes 0/1 for that. */ - temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch)); + temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch)); temp &= ~LANE_STAGGER_MASK; temp &= ~LANESTAGGER_STRAP_OVRD; temp |= pll->state.hw_state.pcsdw12; - I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp); + intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp); } static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, @@ -1630,19 +1931,20 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ u32 temp; - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); temp &= ~PORT_PLL_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); - POSTING_READ(BXT_PORT_PLL_ENABLE(port)); + intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (IS_GEMINILAKE(dev_priv)) { - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); temp &= ~PORT_PLL_POWER_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); - if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_POWER_STATE), 200)) - DRM_ERROR("Power state not reset for PLL:%d\n", port); + if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & + PORT_PLL_POWER_STATE), 200)) + drm_err(&dev_priv->drm, + "Power state not reset for PLL:%d\n", port); } } @@ -1666,40 +1968,40 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = false; - val = I915_READ(BXT_PORT_PLL_ENABLE(port)); + val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (!(val & PORT_PLL_ENABLE)) goto out; - hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch)); + hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; - hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch)); + hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; - hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0)); + hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); hw_state->pll0 &= PORT_PLL_M2_MASK; - 
hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1)); + hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); hw_state->pll1 &= PORT_PLL_N_MASK; - hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2)); + hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; - hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3)); + hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; - hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6)); + hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK | PORT_PLL_INT_COEFF_MASK | PORT_PLL_GAIN_CTL_MASK; - hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8)); + hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; - hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9)); + hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; - hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10)); + hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | PORT_PLL_DCO_AMP_MASK; @@ -1708,11 +2010,14 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, * can read only lane registers. We configure all lanes the same way, so * here just read out lanes 0/1 and output a note if lanes 2/3 differ. */ - hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch)); - if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12) - DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", - hw_state->pcsdw12, - I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch))); + hw_state->pcsdw12 = intel_de_read(dev_priv, + BXT_PORT_PCS_DW12_LN01(phy, ch)); + if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12) + drm_dbg(&dev_priv->drm, + "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", + hw_state->pcsdw12, + intel_de_read(dev_priv, + BXT_PORT_PCS_DW12_LN23(phy, ch))); hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; ret = true; @@ -1751,6 +2056,7 @@ static bool bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct bxt_clk_div *clk_div) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct dpll best_clock; @@ -1760,9 +2066,9 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, * i9xx_crtc_compute_clock */ if (!bxt_find_best_dpll(crtc_state, &best_clock)) { - DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n", - crtc_state->port_clock, - pipe_name(crtc->pipe)); + drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n", + crtc_state->port_clock, + pipe_name(crtc->pipe)); return false; } @@ -1799,6 +2105,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, const struct bxt_clk_div *clk_div) { + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; int clock = crtc_state->port_clock; int vco = clk_div->vco; @@ -1824,7 +2131,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, gain_ctl = 1; targ_cnt = 9; } else { - DRM_ERROR("Invalid VCO\n"); + drm_err(&i915->drm, "Invalid VCO\n"); return false; 
} @@ -1885,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } +static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + struct dpll clock; + + clock.m1 = 2; + clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; + clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + + return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock); +} + static bool bxt_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) @@ -1907,8 +2231,8 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, id = (enum intel_dpll_id) encoder->port; pll = intel_get_shared_dpll_by_id(dev_priv, id); - DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", - crtc->base.base.id, crtc->base.name, pll->info->name); + drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->info->name); intel_reference_shared_dpll(state, crtc, pll, &crtc_state->dpll_hw_state); @@ -1918,89 +2242,37 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, return true; } +static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + i915->dpll.ref_clks.ssc = 100000; + i915->dpll.ref_clks.nssc = 100000; + /* DSI non-SSC ref 19.2MHz */ +} + static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," - "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " - "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", - hw_state->ebb0, - hw_state->ebb4, - hw_state->pll0, - hw_state->pll1, - hw_state->pll2, - hw_state->pll3, - hw_state->pll6, - hw_state->pll8, - hw_state->pll9, - hw_state->pll10, - hw_state->pcsdw12); + drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," + "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " + "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", + hw_state->ebb0, + hw_state->ebb4, + hw_state->pll0, + hw_state->pll1, + hw_state->pll2, + hw_state->pll3, + hw_state->pll6, + hw_state->pll8, + hw_state->pll9, + hw_state->pll10, + hw_state->pcsdw12); } static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { .enable = bxt_ddi_pll_enable, .disable = bxt_ddi_pll_disable, .get_hw_state = bxt_ddi_pll_get_hw_state, -}; - -struct intel_dpll_mgr { - const struct dpll_info *dpll_info; - - bool (*get_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); - void (*put_dplls)(struct intel_atomic_state *state, - struct intel_crtc *crtc); - void (*update_active_dpll)(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder); - void (*dump_hw_state)(struct drm_i915_private *dev_priv, - const struct intel_dpll_hw_state *hw_state); -}; - -static const struct dpll_info pch_plls[] = { - { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, - { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, - { }, -}; - -static const struct intel_dpll_mgr pch_pll_mgr = { - .dpll_info = pch_plls, - .get_dplls = ibx_get_dpll, - 
.put_dplls = intel_put_dpll, - .dump_hw_state = ibx_dump_hw_state, -}; - -static const struct dpll_info hsw_plls[] = { - { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, - { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, - { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, - { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, - { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, - { }, -}; - -static const struct intel_dpll_mgr hsw_pll_mgr = { - .dpll_info = hsw_plls, - .get_dplls = hsw_get_dpll, - .put_dplls = intel_put_dpll, - .dump_hw_state = hsw_dump_hw_state, -}; - -static const struct dpll_info skl_plls[] = { - { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, - { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, - { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, - { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, - { }, -}; - -static const struct intel_dpll_mgr skl_pll_mgr = { - .dpll_info = skl_plls, - .get_dplls = skl_get_dpll, - .put_dplls = intel_put_dpll, - .dump_hw_state = skl_dump_hw_state, + .get_freq = bxt_ddi_pll_get_freq, }; static const struct dpll_info bxt_plls[] = { @@ -2014,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = { .dpll_info = bxt_plls, .get_dplls = bxt_get_dpll, .put_dplls = intel_put_dpll, + .update_ref_clks = bxt_update_dpll_ref_clks, .dump_hw_state = bxt_dump_hw_state, }; @@ -2024,32 +2297,32 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, u32 val; /* 1. Enable DPLL power in DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(id)); + val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id)); val |= PLL_POWER_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(id), val); + intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val); /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, 5)) - DRM_ERROR("PLL %d Power not enabled\n", id); + drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id); /* * 3. Configure DPLL_CFGCR0 to set SSC enable/disable, * select DP mode, and set DP link rate. */ val = pll->state.hw_state.cfgcr0; - I915_WRITE(CNL_DPLL_CFGCR0(id), val); + intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val); /* 4. Read back to ensure writes completed */ - POSTING_READ(CNL_DPLL_CFGCR0(id)); + intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id)); /* 3. Configure DPLL_CFGCR0 */ /* Avoid touching CFGCR1 if HDMI mode is not enabled */ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { val = pll->state.hw_state.cfgcr1; - I915_WRITE(CNL_DPLL_CFGCR1(id), val); + intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val); /* 4. Read back to ensure writes completed */ - POSTING_READ(CNL_DPLL_CFGCR1(id)); + intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id)); } /* @@ -2062,13 +2335,13 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, */ /* 6. Enable DPLL in DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(id)); + val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id)); val |= PLL_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(id), val); + intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val); /* 7. Wait for PLL lock status in DPLL_ENABLE. */ if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5)) - DRM_ERROR("PLL %d not locked\n", id); + drm_err(&dev_priv->drm, "PLL %d not locked\n", id); /* * 8.
If the frequency will result in a change to the voltage @@ -2106,13 +2379,13 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, */ /* 3. Disable DPLL through DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(id)); + val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id)); val &= ~PLL_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(id), val); + intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val); /* 4. Wait for PLL not locked status in DPLL_ENABLE. */ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5)) - DRM_ERROR("PLL %d locked\n", id); + drm_err(&dev_priv->drm, "PLL %d locked\n", id); /* * 5. If the frequency will result in a change to the voltage @@ -2124,14 +2397,14 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, */ /* 6. Disable DPLL power in DPLL_ENABLE. */ - val = I915_READ(CNL_DPLL_ENABLE(id)); + val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id)); val &= ~PLL_POWER_ENABLE; - I915_WRITE(CNL_DPLL_ENABLE(id), val); + intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val); /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */ if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, 5)) - DRM_ERROR("PLL %d Power not disabled\n", id); + drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id); } static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -2150,16 +2423,17 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, ret = false; - val = I915_READ(CNL_DPLL_ENABLE(id)); + val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id)); if (!(val & PLL_ENABLE)) goto out; - val = I915_READ(CNL_DPLL_CFGCR0(id)); + val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id)); hw_state->cfgcr0 = val; /* avoid reading back stale values if HDMI mode is not enabled */ if (val & DPLL_CFGCR0_HDMI_MODE) { - hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, + CNL_DPLL_CFGCR1(id)); } ret = true; @@ -2256,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params, params->dco_fraction = dco & 0x7fff; } -int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) -{ - int ref_clock = dev_priv->cdclk.hw.ref; - - /* - * For ICL+, the spec states: if reference frequency is 38.4, - * use 19.2 because the DPLL automatically divides that by 2. 
- */ - if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400) - ref_clock = 19200; - - return ref_clock; -} - static bool -cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, - struct skl_wrpll_params *wrpll_params) +__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params, + int ref_clock) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); u32 afe_clock = crtc_state->port_clock * 5; - u32 ref_clock; u32 dco_min = 7998000; u32 dco_max = 10000000; u32 dco_mid = (dco_min + dco_max) / 2; @@ -2308,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, return false; cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv); - - ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, kdiv); return true; } +static bool +cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params, + i915->dpll.ref_clks.nssc); +} + static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { u32 cfgcr0, cfgcr1; @@ -2344,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) return true; } +static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll, + int ref_clock) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 p0, p1, p2, dco_freq; + + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + + if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + DPLL_CFGCR1_QDIV_RATIO_SHIFT; + else + p1 = 1; + + + switch (p0) { + case DPLL_CFGCR1_PDIV_2: + p0 = 2; + break; + case DPLL_CFGCR1_PDIV_3: + p0 = 3; + break; + case DPLL_CFGCR1_PDIV_5: + p0 = 5; + break; + case DPLL_CFGCR1_PDIV_7: + p0 = 7; + break; + } + + switch (p2) { + case DPLL_CFGCR1_KDIV_1: + p2 = 1; + break; + case DPLL_CFGCR1_KDIV_2: + p2 = 2; + break; + case DPLL_CFGCR1_KDIV_3: + p2 = 3; + break; + } + + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * + ref_clock; + + dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; + + if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0)) + return 0; + + return dco_freq / (p0 * p1 * p2 * 5); +} + +static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc); +} + static bool cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { @@ -2389,30 +2717,72 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } +static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + int link_clock = 0; + + switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) { + case DPLL_CFGCR0_LINK_RATE_810: + link_clock = 81000; + break; + case DPLL_CFGCR0_LINK_RATE_1080: + link_clock = 108000; + break; + case DPLL_CFGCR0_LINK_RATE_1350: + link_clock = 135000; + break; + case DPLL_CFGCR0_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CFGCR0_LINK_RATE_2160: + link_clock = 216000; + break; + case DPLL_CFGCR0_LINK_RATE_2700: + link_clock = 270000; + break; + case 
DPLL_CFGCR0_LINK_RATE_3240: + link_clock = 324000; + break; + case DPLL_CFGCR0_LINK_RATE_4050: + link_clock = 405000; + break; + default: + drm_WARN(&i915->drm, 1, "Unsupported link rate\n"); + break; + } + + return link_clock * 2; +} + static bool cnl_get_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); struct intel_shared_dpll *pll; bool bret; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { bret = cnl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { - DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); + drm_dbg_kms(&i915->drm, + "Could not get HDMI pll dividers.\n"); return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { - DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); + drm_dbg_kms(&i915->drm, + "Could not set DP dpll HW state.\n"); return false; } } else { - DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n", - crtc_state->output_types); + drm_dbg_kms(&i915->drm, + "Skip DPLL setup for output_types 0x%x\n", + crtc_state->output_types); return false; } @@ -2422,7 +2792,7 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, BIT(DPLL_ID_SKL_DPLL1) | BIT(DPLL_ID_SKL_DPLL0)); if (!pll) { - DRM_DEBUG_KMS("No PLL selected\n"); + drm_dbg_kms(&i915->drm, "No PLL selected\n"); return false; } @@ -2434,19 +2804,35 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, return true; } +static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) + return cnl_ddi_wrpll_get_freq(i915, pll); + else + return cnl_ddi_lcpll_get_freq(i915, pll); +} + +static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC reference */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void cnl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: " - "cfgcr0: 0x%x, cfgcr1: 0x%x\n", - hw_state->cfgcr0, - hw_state->cfgcr1); + drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: " + "cfgcr0: 0x%x, cfgcr1: 0x%x\n", + hw_state->cfgcr0, + hw_state->cfgcr1); } static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = { .enable = cnl_ddi_pll_enable, .disable = cnl_ddi_pll_disable, .get_hw_state = cnl_ddi_pll_get_hw_state, + .get_freq = cnl_ddi_pll_get_freq, }; static const struct dpll_info cnl_plls[] = { @@ -2460,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = { .dpll_info = cnl_plls, .get_dplls = cnl_get_dpll, .put_dplls = intel_put_dpll, + .update_ref_clks = cnl_update_dpll_ref_clks, .dump_hw_state = cnl_dump_hw_state, }; @@ -2555,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); const struct icl_combo_pll_params *params = - dev_priv->cdclk.hw.ref == 24000 ? + dev_priv->dpll.ref_clks.nssc == 24000 ? 
icl_dp_combo_pll_24MHz_values : icl_dp_combo_pll_19_2MHz_values; int clock = crtc_state->port_clock; @@ -2578,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); if (INTEL_GEN(dev_priv) >= 12) { - switch (dev_priv->cdclk.hw.ref) { + switch (dev_priv->dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->cdclk.hw.ref); + MISSING_CASE(dev_priv->dpll.ref_clks.nssc); /* fall-through */ case 19200: case 38400: @@ -2591,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, break; } } else { - switch (dev_priv->cdclk.hw.ref) { + switch (dev_priv->dpll.ref_clks.nssc) { default: - MISSING_CASE(dev_priv->cdclk.hw.ref); + MISSING_CASE(dev_priv->dpll.ref_clks.nssc); /* fall-through */ case 19200: case 38400: @@ -2608,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, return true; } +static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + /* + * The PLL outputs multiple frequencies at the same time, selection is + * made at DDI clock mux level. + */ + drm_WARN_ON(&i915->drm, 1); + + return 0; +} + +static int icl_wrpll_ref_clock(struct drm_i915_private *i915) +{ + int ref_clock = i915->dpll.ref_clks.nssc; + + /* + * For ICL+, the spec states: if reference frequency is 38.4, + * use 19.2 because the DPLL automatically divides that by 2. + */ + if (ref_clock == 38400) + ref_clock = 19200; + + return ref_clock; +} + +static bool +icl_calc_wrpll(struct intel_crtc_state *crtc_state, + struct skl_wrpll_params *wrpll_params) +{ + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); + + return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params, + icl_wrpll_ref_clock(i915)); +} + +static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + return __cnl_ddi_wrpll_get_freq(i915, pll, + icl_wrpll_ref_clock(i915)); +} + static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder, struct intel_dpll_hw_state *pll_state) @@ -2622,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params); + ret = icl_calc_wrpll(crtc_state, &pll_params); else ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); @@ -2745,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int refclk_khz = dev_priv->cdclk.hw.ref; + int refclk_khz = dev_priv->dpll.ref_clks.nssc; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; @@ -2761,7 +3191,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, pll_state, is_dkl)) { - DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock); + drm_dbg_kms(&dev_priv->drm, + "Failed to find divisors for clock %d\n", clock); return false; } @@ -2774,8 +3205,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } if (m2div_int > 255) { - DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n", - clock); + drm_dbg_kms(&dev_priv->drm, + 
"Failed to find mdiv for clock %d\n", + clock); return false; } } @@ -2944,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, return true; } +static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv, + const struct intel_shared_dpll *pll) +{ + const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state; + u32 m1, m2_int, m2_frac, div1, div2, ref_clock; + u64 tmp; + + ref_clock = dev_priv->dpll.ref_clks.nssc; + + if (INTEL_GEN(dev_priv) >= 12) { + m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; + m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; + m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { + m2_frac = pll_state->mg_pll_bias & + DKL_PLL_BIAS_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; + } else { + m2_frac = 0; + } + } else { + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { + m2_frac = pll_state->mg_pll_div0 & + MG_PLL_DIV0_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; + } else { + m2_frac = 0; + } + } + + switch (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: + div1 = 2; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3: + div1 = 3; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5: + div1 = 5; + break; + case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7: + div1 = 7; + break; + default: + MISSING_CASE(pll_state->mg_clktop2_hsclkctl); + return 0; + } + + div2 = (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + + /* div2 value of 0 is same as 1 means no div */ + if (div2 == 0) + div2 = 1; + + /* + * Adjust the original formula to delay the division by 2^22 in order to + * minimize possible rounding errors. 
+ */ + tmp = (u64)m1 * m2_int * ref_clock + + (((u64)m1 * m2_frac * ref_clock) >> 22); + tmp = div_u64(tmp, 5 * div1 * div2); + + return tmp; +} + /** * icl_set_active_port_dpll - select the active port DPLL for a given CRTC * @crtc_state: state for the CRTC to select the DPLL for @@ -2996,7 +3500,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, unsigned long dpll_mask; if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { - DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); + drm_dbg_kms(&dev_priv->drm, + "Could not calculate combo PHY PLL state.\n"); return false; } @@ -3013,8 +3518,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, &port_dpll->hw_state, dpll_mask); if (!port_dpll->pll) { - DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n", - encoder->base.base.id, encoder->base.name); + drm_dbg_kms(&dev_priv->drm, + "No combo PHY PLL found for [ENCODER:%d:%s]\n", + encoder->base.base.id, encoder->base.name); return false; } @@ -3038,7 +3544,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { - DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n"); + drm_dbg_kms(&dev_priv->drm, + "Could not calculate TBT PLL state.\n"); return false; } @@ -3046,7 +3553,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, &port_dpll->hw_state, BIT(DPLL_ID_ICL_TBTPLL)); if (!port_dpll->pll) { - DRM_DEBUG_KMS("No TBT-ALT PLL found\n"); + drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n"); return false; } intel_reference_shared_dpll(state, crtc, @@ -3055,7 +3562,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) { - DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n"); + drm_dbg_kms(&dev_priv->drm, + "Could not calculate MG PHY PLL state.\n"); goto err_unreference_tbt_pll; } @@ -3065,7 +3573,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, &port_dpll->hw_state, BIT(dpll_id)); if (!port_dpll->pll) { - DRM_DEBUG_KMS("No MG PHY PLL found\n"); + drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n"); goto err_unreference_tbt_pll; } intel_reference_shared_dpll(state, crtc, @@ -3140,37 +3648,39 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(MG_PLL_ENABLE(tc_port)); + val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port)); if (!(val & PLL_ENABLE)) goto out; - hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, + MG_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_coreclkctl1 = - I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); + intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; hw_state->mg_clktop2_hsclkctl = - I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); + intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; - hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port)); - hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port)); - 
hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port)); - hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port)); - hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port)); + hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port)); + hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port)); + hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port)); + hw_state->mg_pll_frac_lock = intel_de_read(dev_priv, + MG_PLL_FRAC_LOCK(tc_port)); + hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port)); - hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias = - I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - if (dev_priv->cdclk.hw.ref == 38400) { + if (dev_priv->dpll.ref_clks.nssc == 38400) { hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; hw_state->mg_pll_bias_mask = 0; } else { @@ -3202,7 +3712,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(MG_PLL_ENABLE(tc_port)); + val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port)); if (!(val & PLL_ENABLE)) goto out; @@ -3210,13 +3720,15 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, * All registers read here have the same HIP_INDEX_REG even though * they are on different building blocks */ - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x2)); - hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, + DKL_REFCLKIN_CTL(tc_port)); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_hsclkctl = - I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | @@ -3224,32 +3736,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_clktop2_coreclkctl1 = - I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK | DKL_PLL_DIV0_PROP_COEFF_MASK | DKL_PLL_DIV0_FBPREDIV_MASK | DKL_PLL_DIV0_FBDIV_INT_MASK); - hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); - hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); - hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); hw_state->mg_pll_tdc_coldst_bias = - I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_read(dev_priv, 
DKL_PLL_TDC_COLDST_BIAS(tc_port)); hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); @@ -3274,20 +3786,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(enable_reg); + val = intel_de_read(dev_priv, enable_reg); if (!(val & PLL_ENABLE)) goto out; if (INTEL_GEN(dev_priv) >= 12) { - hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id)); + hw_state->cfgcr0 = intel_de_read(dev_priv, + TGL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, + TGL_DPLL_CFGCR1(id)); } else { if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4)); + hw_state->cfgcr0 = intel_de_read(dev_priv, + ICL_DPLL_CFGCR0(4)); + hw_state->cfgcr1 = intel_de_read(dev_priv, + ICL_DPLL_CFGCR1(4)); } else { - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + hw_state->cfgcr0 = intel_de_read(dev_priv, + ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = intel_de_read(dev_priv, + ICL_DPLL_CFGCR1(id)); } } @@ -3338,9 +3856,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, } } - I915_WRITE(cfgcr0_reg, hw_state->cfgcr0); - I915_WRITE(cfgcr1_reg, hw_state->cfgcr1); - POSTING_READ(cfgcr1_reg); + intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0); + intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1); + intel_de_posting_read(dev_priv, cfgcr1_reg); } static void icl_mg_pll_write(struct drm_i915_private *dev_priv, @@ -3356,41 +3874,42 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). 
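* * (For example, the readout in mg_pll_get_hw_state() sets * mg_pll_bias_mask to 0 when the reference clock is 38.4 MHz.)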
*/ - val = I915_READ(MG_REFCLKIN_CTL(tc_port)); + val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port)); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - I915_WRITE(MG_REFCLKIN_CTL(tc_port), val); + intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val); - val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); + val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val); - val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); + val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val); + intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val); - I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); - I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); - I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf); - I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock); - I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); + intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); + intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); + intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf); + intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port), + hw_state->mg_pll_frac_lock); + intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); - val = I915_READ(MG_PLL_BIAS(tc_port)); + val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); val &= ~hw_state->mg_pll_bias_mask; val |= hw_state->mg_pll_bias; - I915_WRITE(MG_PLL_BIAS(tc_port), val); + intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val); - val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); + val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; val |= hw_state->mg_pll_tdc_coldst_bias; - I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val); - POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); } static void dkl_pll_write(struct drm_i915_private *dev_priv, @@ -3404,62 +3923,63 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, * All registers programmed here have the same HIP_INDEX_REG even * though they are on different building blocks */ - I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, 0x2)); /* All the registers are RMW */ - val = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val); + intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); - val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); - val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + val = intel_de_read(dev_priv,
DKL_CLKTOP2_HSCLKCTL(tc_port)); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val); + intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); - val = I915_READ(DKL_PLL_DIV0(tc_port)); + val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK | DKL_PLL_DIV0_PROP_COEFF_MASK | DKL_PLL_DIV0_FBPREDIV_MASK | DKL_PLL_DIV0_FBDIV_INT_MASK); val |= hw_state->mg_pll_div0; - I915_WRITE(DKL_PLL_DIV0(tc_port), val); + intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val); - val = I915_READ(DKL_PLL_DIV1(tc_port)); + val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); val |= hw_state->mg_pll_div1; - I915_WRITE(DKL_PLL_DIV1(tc_port), val); + intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val); - val = I915_READ(DKL_PLL_SSC(tc_port)); + val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); val |= hw_state->mg_pll_ssc; - I915_WRITE(DKL_PLL_SSC(tc_port), val); + intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val); - val = I915_READ(DKL_PLL_BIAS(tc_port)); + val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); val &= ~(DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); val |= hw_state->mg_pll_bias; - I915_WRITE(DKL_PLL_BIAS(tc_port), val); + intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val); - val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); val |= hw_state->mg_pll_tdc_coldst_bias; - I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); - POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); } static void icl_pll_power_enable(struct drm_i915_private *dev_priv, @@ -3468,16 +3988,17 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(enable_reg); + val = intel_de_read(dev_priv, enable_reg); val |= PLL_POWER_ENABLE; - I915_WRITE(enable_reg, val); + intel_de_write(dev_priv, enable_reg, val); /* * The spec says we need to "wait" but it also says it should be * immediate. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1)) - DRM_ERROR("PLL %d Power not enabled\n", pll->info->id); + drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", + pll->info->id); } static void icl_pll_enable(struct drm_i915_private *dev_priv, @@ -3486,13 +4007,13 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(enable_reg); + val = intel_de_read(dev_priv, enable_reg); val |= PLL_ENABLE; - I915_WRITE(enable_reg, val); + intel_de_write(dev_priv, enable_reg, val); /* Timeout is actually 600us. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) - DRM_ERROR("PLL %d not locked\n", pll->info->id); + drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id); } static void combo_pll_enable(struct drm_i915_private *dev_priv, @@ -3584,26 +4105,27 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * nothing here.
*/ - val = I915_READ(enable_reg); + val = intel_de_read(dev_priv, enable_reg); val &= ~PLL_ENABLE; - I915_WRITE(enable_reg, val); + intel_de_write(dev_priv, enable_reg, val); /* Timeout is actually 1us. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) - DRM_ERROR("PLL %d locked\n", pll->info->id); + drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ - val = I915_READ(enable_reg); + val = intel_de_read(dev_priv, enable_reg); val &= ~PLL_POWER_ENABLE; - I915_WRITE(enable_reg, val); + intel_de_write(dev_priv, enable_reg, val); /* * The spec says we need to "wait" but it also says it should be * immediate. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1)) - DRM_ERROR("PLL %d Power not disabled\n", pll->info->id); + drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", + pll->info->id); } static void combo_pll_disable(struct drm_i915_private *dev_priv, @@ -3639,44 +4161,54 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv, icl_pll_disable(dev_priv, pll, enable_reg); } +static void icl_update_dpll_ref_clks(struct drm_i915_private *i915) +{ + /* No SSC ref */ + i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref; +} + static void icl_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, " - "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, " - "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " - "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, " - "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " - "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", - hw_state->cfgcr0, hw_state->cfgcr1, - hw_state->mg_refclkin_ctl, - hw_state->mg_clktop2_coreclkctl1, - hw_state->mg_clktop2_hsclkctl, - hw_state->mg_pll_div0, - hw_state->mg_pll_div1, - hw_state->mg_pll_lf, - hw_state->mg_pll_frac_lock, - hw_state->mg_pll_ssc, - hw_state->mg_pll_bias, - hw_state->mg_pll_tdc_coldst_bias); + drm_dbg_kms(&dev_priv->drm, + "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, " + "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, " + "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, " + "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, " + "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, " + "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n", + hw_state->cfgcr0, hw_state->cfgcr1, + hw_state->mg_refclkin_ctl, + hw_state->mg_clktop2_coreclkctl1, + hw_state->mg_clktop2_hsclkctl, + hw_state->mg_pll_div0, + hw_state->mg_pll_div1, + hw_state->mg_pll_lf, + hw_state->mg_pll_frac_lock, + hw_state->mg_pll_ssc, + hw_state->mg_pll_bias, + hw_state->mg_pll_tdc_coldst_bias); } static const struct intel_shared_dpll_funcs combo_pll_funcs = { .enable = combo_pll_enable, .disable = combo_pll_disable, .get_hw_state = combo_pll_get_hw_state, + .get_freq = icl_ddi_combo_pll_get_freq, }; static const struct intel_shared_dpll_funcs tbt_pll_funcs = { .enable = tbt_pll_enable, .disable = tbt_pll_disable, .get_hw_state = tbt_pll_get_hw_state, + .get_freq = icl_ddi_tbt_pll_get_freq, }; static const struct intel_shared_dpll_funcs mg_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = mg_pll_get_hw_state, + .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info icl_plls[] = { @@ -3695,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state =
icl_dump_hw_state, }; @@ -3709,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; @@ -3716,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = { .enable = mg_pll_enable, .disable = mg_pll_disable, .get_hw_state = dkl_pll_get_hw_state, + .get_freq = icl_ddi_mg_pll_get_freq, }; static const struct dpll_info tgl_plls[] = { @@ -3736,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, .update_active_dpll = icl_update_active_dpll, + .update_ref_clks = icl_update_dpll_ref_clks, .dump_hw_state = icl_dump_hw_state, }; @@ -3770,22 +4306,22 @@ void intel_shared_dpll_init(struct drm_device *dev) dpll_mgr = &pch_pll_mgr; if (!dpll_mgr) { - dev_priv->num_shared_dpll = 0; + dev_priv->dpll.num_shared_dpll = 0; return; } dpll_info = dpll_mgr->dpll_info; for (i = 0; dpll_info[i].name; i++) { - WARN_ON(i != dpll_info[i].id); - dev_priv->shared_dplls[i].info = &dpll_info[i]; + drm_WARN_ON(dev, i != dpll_info[i].id); + dev_priv->dpll.shared_dplls[i].info = &dpll_info[i]; } - dev_priv->dpll_mgr = dpll_mgr; - dev_priv->num_shared_dpll = i; - mutex_init(&dev_priv->dpll_lock); + dev_priv->dpll.mgr = dpll_mgr; + dev_priv->dpll.num_shared_dpll = i; + mutex_init(&dev_priv->dpll.lock); - BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); + BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS); } /** @@ -3812,9 +4348,9 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; - if (WARN_ON(!dpll_mgr)) + if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return false; return dpll_mgr->get_dplls(state, crtc, encoder); @@ -3835,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; /* * FIXME: this function is called for every platform having a @@ -3864,35 +4400,114 @@ void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr; - if (WARN_ON(!dpll_mgr)) + if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr)) return; dpll_mgr->update_active_dpll(state, crtc, encoder); } /** + * intel_dpll_get_freq - calculate the DPLL's output frequency + * @i915: i915 device + * @pll: DPLL for which to calculate the output frequency + * + * Return the output frequency corresponding to @pll's current state. 
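+ * + * Returns 0 (after a drm_WARN_ON) if the underlying PLL type does not + * implement the &intel_shared_dpll_funcs.get_freq hook.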
+ */ +int intel_dpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll) +{ + if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq)) + return 0; + + return pll->info->funcs->get_freq(i915, pll); +} + +static void readout_dpll_hw_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + struct intel_crtc *crtc; + + pll->on = pll->info->funcs->get_hw_state(i915, pll, + &pll->state.hw_state); + + if (IS_ELKHARTLAKE(i915) && pll->on && + pll->info->id == DPLL_ID_EHL_DPLL4) { + pll->wakeref = intel_display_power_get(i915, + POWER_DOMAIN_DPLL_DC_OFF); + } + + pll->state.crtc_mask = 0; + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + if (crtc_state->hw.active && crtc_state->shared_dpll == pll) + pll->state.crtc_mask |= 1 << crtc->pipe; + } + pll->active_mask = pll->state.crtc_mask; + + drm_dbg_kms(&i915->drm, + "%s hw state readout: crtc_mask 0x%08x, on %i\n", + pll->info->name, pll->state.crtc_mask, pll->on); +} + +void intel_dpll_readout_hw_state(struct drm_i915_private *i915) +{ + int i; + + if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks) + i915->dpll.mgr->update_ref_clks(i915); + + for (i = 0; i < i915->dpll.num_shared_dpll; i++) + readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]); +} + +static void sanitize_dpll_state(struct drm_i915_private *i915, + struct intel_shared_dpll *pll) +{ + if (!pll->on || pll->active_mask) + return; + + drm_dbg_kms(&i915->drm, + "%s enabled but not in use, disabling\n", + pll->info->name); + + pll->info->funcs->disable(i915, pll); + pll->on = false; +} + +void intel_dpll_sanitize_state(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < i915->dpll.num_shared_dpll; i++) + sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]); +} + +/** * intel_shared_dpll_dump_hw_state - write hw_state to dmesg * @dev_priv: i915 drm device * @hw_state: hw state to be written to the log * - * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS. + * Write the relevant values in @hw_state to dmesg using drm_dbg_kms. */ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state) { - if (dev_priv->dpll_mgr) { - dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state); + if (dev_priv->dpll.mgr) { + dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state); } else { /* fallback for platforms that don't use the shared dpll * infrastructure */ - DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " - "fp0: 0x%x, fp1: 0x%x\n", - hw_state->dpll, - hw_state->dpll_md, - hw_state->fp0, - hw_state->fp1); + drm_dbg_kms(&dev_priv->drm, + "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " + "fp0: 0x%x, fp1: 0x%x\n", + hw_state->dpll, + hw_state->dpll_md, + hw_state->fp0, + hw_state->fp1); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 2a104c64291d..5d9a2bc371e7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs { bool (*get_hw_state)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state); + + /** + * @get_freq: + * + * Hook for calculating the pll's output frequency based on its + * current state. 
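+ * + * Frequencies are expressed in kHz, like the other clock values in this + * driver; implementations may return 0 when the frequency cannot be + * determined from the current state (see icl_ddi_tbt_pll_get_freq()).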
+ */ + int (*get_freq)(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll); }; /** @@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, void intel_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder); +int intel_dpll_get_freq(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_shared_dpll_swap_state(struct intel_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); +void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv); +void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); -int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index ada006a690df..d7a6bf2277df 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -40,7 +40,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id)); + return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); } static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) @@ -50,16 +50,16 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) enum pipe pipe = crtc->pipe; u32 dsb_ctrl; - dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); if (DSB_STATUS & dsb_ctrl) { - DRM_DEBUG_KMS("DSB engine is busy.\n"); + drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); return false; } dsb_ctrl |= DSB_ENABLE; - I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl); - POSTING_READ(DSB_CTRL(pipe, dsb->id)); + intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id)); return true; } @@ -70,16 +70,16 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) enum pipe pipe = crtc->pipe; u32 dsb_ctrl; - dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id)); if (DSB_STATUS & dsb_ctrl) { - DRM_DEBUG_KMS("DSB engine is busy.\n"); + drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n"); return false; } dsb_ctrl &= ~DSB_ENABLE; - I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl); - POSTING_READ(DSB_CTRL(pipe, dsb->id)); + intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id)); return true; } @@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc) obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); if (IS_ERR(obj)) { - DRM_ERROR("Gem object creation failed\n"); + drm_err(&i915->drm, "Gem object creation failed\n"); goto out; } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); if (IS_ERR(vma)) { - DRM_ERROR("Vma creation failed\n"); + drm_err(&i915->drm, "Vma creation failed\n"); i915_gem_object_put(obj); goto out; } buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); if 
(IS_ERR(buf)) { - DRM_ERROR("Command buffer creation failed\n"); + drm_err(&i915->drm, "Command buffer creation failed\n"); goto out; } @@ -165,7 +165,7 @@ void intel_dsb_put(struct intel_dsb *dsb) if (!HAS_DSB(i915)) return; - if (WARN_ON(dsb->refcount == 0)) + if (drm_WARN_ON(&i915->drm, dsb->refcount == 0)) return; if (--dsb->refcount == 0) { @@ -198,12 +198,12 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 reg_val; if (!buf) { - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); return; } - if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { - DRM_DEBUG_KMS("DSB buffer overflow\n"); + if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) { + drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n"); return; } @@ -272,12 +272,12 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) u32 *buf = dsb->cmd_buf; if (!buf) { - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); return; } - if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { - DRM_DEBUG_KMS("DSB buffer overflow\n"); + if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) { + drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n"); return; } @@ -310,10 +310,12 @@ void intel_dsb_commit(struct intel_dsb *dsb) goto reset; if (is_dsb_busy(dsb)) { - DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n"); + drm_err(&dev_priv->drm, + "HEAD_PTR write failed - dsb engine is busy.\n"); goto reset; } - I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); + intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), + i915_ggtt_offset(dsb->vma)); tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES); if (tail > dsb->free_pos * 4) @@ -321,14 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb) (tail - dsb->free_pos * 4)); if (is_dsb_busy(dsb)) { - DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n"); + drm_err(&dev_priv->drm, + "TAIL_PTR write failed - dsb engine is busy.\n"); goto reset; } - DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n", - i915_ggtt_offset(dsb->vma), tail); - I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); + drm_dbg_kms(&dev_priv->drm, + "DSB execution started - head 0x%x, tail 0x%x\n", + i915_ggtt_offset(dsb->vma), tail); + intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), + i915_ggtt_offset(dsb->vma) + tail); if (wait_for(!is_dsb_busy(dsb), 1)) { - DRM_ERROR("Timed out waiting for DSB workload completion.\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for DSB workload completion.\n"); goto reset; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index c87838843d0b..b53c50372918 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -45,7 +45,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector) { - struct intel_encoder *encoder = connector->encoder; + struct intel_encoder *encoder = intel_attached_encoder(connector); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct mipi_dsi_device *dsi_device; u8 data = 0; @@ -160,13 +160,13 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_encoder *encoder = intel_connector->encoder; + struct intel_encoder *encoder = intel_attached_encoder(intel_connector); struct intel_panel *panel = &intel_connector->panel; if 
(dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) return -ENODEV; - if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI)) + if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI)) return -EINVAL; panel->backlight.setup = dcs_setup_backlight; diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 04f953ba8f00..574dcfec9577 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -36,7 +36,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> -#include <drm/i915_drm.h> #include <video/mipi_display.h> @@ -136,7 +135,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u16 len; enum port port; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); flags = *data++; type = *data++; @@ -158,7 +157,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, dsi_device = intel_dsi->dsi_hosts[port]->device; if (!dsi_device) { - DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port)); + drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n", + port_name(port)); goto out; } @@ -182,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n"); + drm_dbg(&dev_priv->drm, + "Generic Read not yet implemented or used\n"); break; case MIPI_DSI_GENERIC_LONG_WRITE: mipi_dsi_generic_write(dsi_device, data, len); @@ -194,7 +195,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, mipi_dsi_dcs_write_buffer(dsi_device, data, 2); break; case MIPI_DSI_DCS_READ: - DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n"); + drm_dbg(&dev_priv->drm, + "DCS Read not yet implemented or used\n"); break; case MIPI_DSI_DCS_LONG_WRITE: mipi_dsi_dcs_write_buffer(dsi_device, data, len); @@ -212,9 +214,10 @@ out: static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) { + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); u32 delay = *((const u32 *) data); - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&i915->drm, "\n"); usleep_range(delay, delay + 10); data += 4; @@ -231,7 +234,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, u8 port; if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) { - DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index); + drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n", + gpio_index); return; } @@ -244,10 +248,11 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, if (gpio_source == 0) { port = IOSF_PORT_GPIO_NC; } else if (gpio_source == 1) { - DRM_DEBUG_KMS("SC gpio not supported\n"); + drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n"); return; } else { - DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + drm_dbg_kms(&dev_priv->drm, + "unknown gpio source %u\n", gpio_source); return; } } @@ -291,13 +296,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv, } else { /* XXX: The spec is unclear about CHV GPIO on seq v2 */ if (gpio_source != 0) { - DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + drm_dbg_kms(&dev_priv->drm, + "unknown gpio source %u\n", gpio_source); return; } if (gpio_index >= CHV_GPIO_IDX_START_E) { - DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n", - gpio_index); + drm_dbg_kms(&dev_priv->drm, + "invalid gpio index %u for GPIO N\n", + gpio_index); return; } @@ -332,8 +339,9 @@ static void 
bxt_exec_gpio(struct drm_i915_private *dev_priv, GPIOD_OUT_HIGH); if (IS_ERR_OR_NULL(gpio_desc)) { - DRM_ERROR("GPIO index %u request failed (%ld)\n", - gpio_index, PTR_ERR(gpio_desc)); + drm_err(&dev_priv->drm, + "GPIO index %u request failed (%ld)\n", + gpio_index, PTR_ERR(gpio_desc)); return; } @@ -346,7 +354,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv, static void icl_exec_gpio(struct drm_i915_private *dev_priv, u8 gpio_source, u8 gpio_index, bool value) { - DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n"); + drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); } static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) @@ -356,7 +364,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) u8 gpio_source, gpio_index = 0, gpio_number; bool value; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); if (dev_priv->vbt.dsi.seq_version >= 3) gpio_index = *data++; @@ -494,13 +502,16 @@ err_bus: static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) { - DRM_DEBUG_KMS("Skipping SPI element execution\n"); + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + + drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n"); return data + *(data + 5) + 6; } static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) { + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); #ifdef CONFIG_PMIC_OPREGION u32 value, mask, reg_address; u16 i2c_address; @@ -516,9 +527,10 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) reg_address, value, mask); if (ret) - DRM_ERROR("%s failed, error: %d\n", __func__, ret); + drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret); #else - DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); + drm_err(&i915->drm, + "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); #endif return data + 15; @@ -570,17 +582,18 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, const u8 *data; fn_mipi_elem_exec mipi_elem_exec; - if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence))) + if (drm_WARN_ON(&dev_priv->drm, + seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence))) return; data = dev_priv->vbt.dsi.sequence[seq_id]; if (!data) return; - WARN_ON(*data != seq_id); + drm_WARN_ON(&dev_priv->drm, *data != seq_id); - DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n", - seq_id, sequence_name(seq_id)); + drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n", + seq_id, sequence_name(seq_id)); /* Skip Sequence Byte. */ data++; @@ -612,18 +625,21 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, /* Consistency check if we have size. */ if (operation_size && data != next) { - DRM_ERROR("Inconsistent operation size\n"); + drm_err(&dev_priv->drm, + "Inconsistent operation size\n"); return; } } else if (operation_size) { /* We have size, skip. */ - DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n", - operation_byte); + drm_dbg_kms(&dev_priv->drm, + "Unsupported MIPI operation byte %u\n", + operation_byte); data += operation_size; } else { /* No size, can't skip without parsing. 
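 * (only sequence block v3+ encodes an operation size; for older
 * versions an unknown operation byte leaves no way to walk the
 * rest of the sequence)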
*/ - DRM_ERROR("Unsupported MIPI operation byte %u\n", - operation_byte); + drm_err(&dev_priv->drm, + "Unsupported MIPI operation byte %u\n", + operation_byte); return; } } @@ -658,40 +674,54 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) void intel_dsi_log_params(struct intel_dsi *intel_dsi) { - DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); - DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); - DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count); - DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg); - DRM_DEBUG_KMS("Video mode format %s\n", - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ? - "non-burst with sync pulse" : - intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ? - "non-burst with sync events" : - intel_dsi->video_mode_format == VIDEO_MODE_BURST ? - "burst" : "<unknown>"); - DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); - DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val); - DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt)); - DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop)); - DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); + struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + + drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk); + drm_dbg_kms(&i915->drm, "Pixel overlap %d\n", + intel_dsi->pixel_overlap); + drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count); + drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); + drm_dbg_kms(&i915->drm, "Video mode format %s\n", + intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ? + "non-burst with sync pulse" : + intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ? + "non-burst with sync events" : + intel_dsi->video_mode_format == VIDEO_MODE_BURST ? + "burst" : "<unknown>"); + drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n", + intel_dsi->burst_mode_ratio); + drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val); + drm_dbg_kms(&i915->drm, "Eot %s\n", + enableddisabled(intel_dsi->eotp_pkt)); + drm_dbg_kms(&i915->drm, "Clockstop %s\n", + enableddisabled(!intel_dsi->clock_stop)); + drm_dbg_kms(&i915->drm, "Mode %s\n", + intel_dsi->operation_mode ? 
"command" : "video"); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) - DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); + drm_dbg_kms(&i915->drm, + "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) - DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); + drm_dbg_kms(&i915->drm, + "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); else - DRM_DEBUG_KMS("Dual link: NONE\n"); - DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format); - DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div); - DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); - DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); - DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count); - DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); - DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk); - DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer); - DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); - DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); - DRM_DEBUG_KMS("BTA %s\n", - enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); + drm_dbg_kms(&i915->drm, "Dual link: NONE\n"); + drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format); + drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div); + drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n", + intel_dsi->lp_rx_timeout); + drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n", + intel_dsi->turn_arnd_val); + drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count); + drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n", + intel_dsi->hs_to_lp_count); + drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); + drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); + drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n", + intel_dsi->clk_lp_to_hs_count); + drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n", + intel_dsi->clk_hs_to_lp_count); + drm_dbg_kms(&i915->drm, "BTA %s\n", + enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); } bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) @@ -704,7 +734,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) u16 burst_mode_ratio; enum port port; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 
1 : 0; @@ -763,7 +793,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) mipi_config->target_burst_mode_freq = bitrate; if (mipi_config->target_burst_mode_freq < bitrate) { - DRM_ERROR("Burst mode freq is less than computed\n"); + drm_err(&dev_priv->drm, + "Burst mode freq is less than computed\n"); return false; } @@ -773,7 +804,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); } else { - DRM_ERROR("Burst mode target is not set\n"); + drm_err(&dev_priv->drm, + "Burst mode target is not set\n"); return false; } } else @@ -856,17 +888,20 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, ARRAY_SIZE(soc_pwm_pinctrl_map)); if (ret) - DRM_ERROR("Failed to register pwm0 pinmux mapping\n"); + drm_err(&dev_priv->drm, + "Failed to register pwm0 pinmux mapping\n"); pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); if (IS_ERR(pinctrl)) - DRM_ERROR("Failed to set pinmux to PWM\n"); + drm_err(&dev_priv->drm, + "Failed to set pinmux to PWM\n"); } if (want_panel_gpio) { intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags); if (IS_ERR(intel_dsi->gpio_panel)) { - DRM_ERROR("Failed to own gpio for panel control\n"); + drm_err(&dev_priv->drm, + "Failed to own gpio for panel control\n"); intel_dsi->gpio_panel = NULL; } } @@ -875,7 +910,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) intel_dsi->gpio_backlight = gpiod_get(dev->dev, "backlight", flags); if (IS_ERR(intel_dsi->gpio_backlight)) { - DRM_ERROR("Failed to own gpio for backlight control\n"); + drm_err(&dev_priv->drm, + "Failed to own gpio for backlight control\n"); intel_dsi->gpio_backlight = NULL; } } diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 86a337c9d85d..341d5ce8b062 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -30,7 +30,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_connector.h" @@ -44,6 +43,7 @@ #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 +#define INTEL_DVO_CHIP_LVDS_NO_FIXED 5 #define SIL164_ADDR 0x38 #define CH7xxx_ADDR 0x76 @@ -101,13 +101,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = { .dev_ops = &ch7017_ops, }, { - .type = INTEL_DVO_CHIP_TMDS, + .type = INTEL_DVO_CHIP_LVDS_NO_FIXED, .name = "ns2501", .dvo_reg = DVOB, .dvo_srcdim_reg = DVOB_SRCDIM, .slave_addr = NS2501_ADDR, .dev_ops = &ns2501_ops, - } + }, }; struct intel_dvo { @@ -137,7 +137,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) struct intel_dvo *intel_dvo = intel_attached_dvo(connector); u32 tmp; - tmp = I915_READ(intel_dvo->dev.dvo_reg); + tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg); if (!(tmp & DVO_ENABLE)) return false; @@ -152,7 +152,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; - tmp = I915_READ(intel_dvo->dev.dvo_reg); + tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg); *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT; @@ -168,7 +168,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO); - tmp = I915_READ(intel_dvo->dev.dvo_reg); + tmp = intel_de_read(dev_priv, 
intel_dvo->dev.dvo_reg); if (tmp & DVO_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; else @@ -190,11 +190,11 @@ static void intel_disable_dvo(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; - u32 temp = I915_READ(dvo_reg); + u32 temp = intel_de_read(dev_priv, dvo_reg); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); - I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); - I915_READ(dvo_reg); + intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE); + intel_de_read(dev_priv, dvo_reg); } static void intel_enable_dvo(struct intel_encoder *encoder, @@ -204,14 +204,14 @@ static void intel_enable_dvo(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; - u32 temp = I915_READ(dvo_reg); + u32 temp = intel_de_read(dev_priv, dvo_reg); intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, &pipe_config->hw.mode, &pipe_config->hw.adjusted_mode); - I915_WRITE(dvo_reg, temp | DVO_ENABLE); - I915_READ(dvo_reg); + intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE); + intel_de_read(dev_priv, dvo_reg); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); } @@ -286,7 +286,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg; /* Save the data order, since I don't know what it should be set to. */ - dvo_val = I915_READ(dvo_reg) & + dvo_val = intel_de_read(dev_priv, dvo_reg) & (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG); dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | DVO_BLANK_ACTIVE_HIGH; @@ -301,11 +301,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, /*I915_WRITE(DVOB_SRCDIM, (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ - I915_WRITE(dvo_srcdim_reg, - (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | - (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); + intel_de_write(dev_priv, dvo_srcdim_reg, + (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); /*I915_WRITE(DVOB, dvo_val);*/ - I915_WRITE(dvo_reg, dvo_val); + intel_de_write(dev_priv, dvo_reg, dvo_val); } static enum drm_connector_status @@ -481,15 +480,16 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) * initialize the device. 
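 * (the original DPLL values are saved, DPLL_DVO_2X_MODE is set for
 * the duration of dvo->dev_ops->init(), and the saved values are
 * restored afterwards, as the code below does)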
*/ for_each_pipe(dev_priv, pipe) { - dpll[pipe] = I915_READ(DPLL(pipe)); - I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE); + dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe)); + intel_de_write(dev_priv, DPLL(pipe), + dpll[pipe] | DPLL_DVO_2X_MODE); } dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c); /* restore the DVO 2x clock state to original */ for_each_pipe(dev_priv, pipe) { - I915_WRITE(DPLL(pipe), dpll[pipe]); + intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]); } intel_gmbus_force_bit(i2c, false); @@ -507,17 +507,21 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) intel_encoder->port = port; intel_encoder->pipe_mask = ~0; - switch (dvo->type) { - case INTEL_DVO_CHIP_TMDS: + if (dvo->type != INTEL_DVO_CHIP_LVDS) intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) | (1 << INTEL_OUTPUT_DVO); + + switch (dvo->type) { + case INTEL_DVO_CHIP_TMDS: + intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; drm_connector_init(&dev_priv->drm, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_DVII); encoder_type = DRM_MODE_ENCODER_TMDS; break; + case INTEL_DVO_CHIP_LVDS_NO_FIXED: case INTEL_DVO_CHIP_LVDS: - intel_encoder->cloneable = 0; drm_connector_init(&dev_priv->drm, connector, &intel_dvo_connector_funcs, DRM_MODE_CONNECTOR_LVDS); diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index a1048ece541e..c125ca9ab9b3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -41,15 +41,12 @@ #include <drm/drm_fourcc.h> #include "i915_drv.h" +#include "i915_trace.h" +#include "i915_vgpu.h" #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" -static inline bool fbc_supported(struct drm_i915_private *dev_priv) -{ - return HAS_FBC(dev_priv); -} - /* * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -97,12 +94,12 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) u32 fbc_ctl; /* Disable compression */ - fbc_ctl = I915_READ(FBC_CONTROL); + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; - I915_WRITE(FBC_CONTROL, fbc_ctl); + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, @@ -132,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - I915_WRITE(FBC_TAG(i), 0); + intel_de_write(dev_priv, FBC_TAG(i), 0); if (IS_GEN(dev_priv, 4)) { u32 fbc_ctl2; @@ -142,12 +139,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); if (params->fence_id >= 0) fbc_ctl2 |= FBC_CTL_CPU_FENCE; - I915_WRITE(FBC_CONTROL2, fbc_ctl2); - I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); + intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); + intel_de_write(dev_priv, FBC_FENCE_OFF, + params->crtc.fence_y_offset); } /* enable it... 
*/ - fbc_ctl = I915_READ(FBC_CONTROL); + fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; if (IS_I945GM(dev_priv)) @@ -155,12 +153,12 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; if (params->fence_id >= 0) fbc_ctl |= params->fence_id; - I915_WRITE(FBC_CONTROL, fbc_ctl); + intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); } static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) { - return I915_READ(FBC_CONTROL) & FBC_CTL_EN; + return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; } static void g4x_fbc_activate(struct drm_i915_private *dev_priv) @@ -176,13 +174,14 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) if (params->fence_id >= 0) { dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; - I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + intel_de_write(dev_priv, DPFC_FENCE_YOFF, + params->crtc.fence_y_offset); } else { - I915_WRITE(DPFC_FENCE_YOFF, 0); + intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); } /* enable it... */ - I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); } static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) @@ -190,23 +189,27 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = I915_READ(DPFC_CONTROL); + dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(DPFC_CONTROL, dpfc_ctl); + intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); } } static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) { - return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; } /* This function forces a CFB recompression through the nuke operation. */ static void intel_fbc_recompress(struct drm_i915_private *dev_priv) { - I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); - POSTING_READ(MSG_FBC_REND_STATE); + struct intel_fbc *fbc = &dev_priv->fbc; + + trace_intel_fbc_nuke(fbc->crtc); + + intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); } static void ilk_fbc_activate(struct drm_i915_private *dev_priv) @@ -237,22 +240,22 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) if (IS_GEN(dev_priv, 5)) dpfc_ctl |= params->fence_id; if (IS_GEN(dev_priv, 6)) { - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | - params->fence_id); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, - params->crtc.fence_y_offset); + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->crtc.fence_y_offset); } } else { if (IS_GEN(dev_priv, 6)) { - I915_WRITE(SNB_DPFC_CTL_SA, 0); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); } } - I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, + params->crtc.fence_y_offset); /* enable it... 
*/ - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); intel_fbc_recompress(dev_priv); } @@ -262,16 +265,16 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); } } static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) { - return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; } static void gen7_fbc_activate(struct drm_i915_private *dev_priv) @@ -282,14 +285,14 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) /* Display WA #0529: skl, kbl, bxt. */ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) { - u32 val = I915_READ(CHICKEN_MISC_4); + u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4); val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); if (params->gen9_wa_cfb_stride) val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; - I915_WRITE(CHICKEN_MISC_4, val); + intel_de_write(dev_priv, CHICKEN_MISC_4, val); } dpfc_ctl = 0; @@ -314,13 +317,13 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) if (params->fence_id >= 0) { dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; - I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | - params->fence_id); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); - } else { - I915_WRITE(SNB_DPFC_CTL_SA,0); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | params->fence_id); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, + params->crtc.fence_y_offset); + } else if (dev_priv->ggtt.num_fences) { + intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); + intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); } if (dev_priv->fbc.false_color) @@ -328,21 +331,20 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) if (IS_IVYBRIDGE(dev_priv)) { /* WaFbcAsynchFlipDisableFbcQueue:ivb */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | - ILK_FBCQ_DIS); + intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, + intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe), - I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) | - HSW_FBCQ_DIS); + intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), + intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); } if (INTEL_GEN(dev_priv) >= 11) /* Wa_1409120013:icl,ehl,tgl */ - I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + intel_de_write(dev_priv, ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); - I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); intel_fbc_recompress(dev_priv); } @@ -361,6 +363,8 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; + trace_intel_fbc_activate(fbc->crtc); + fbc->active = true; fbc->activated = true; @@ -378,6 +382,8 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; + trace_intel_fbc_deactivate(fbc->crtc); + fbc->active = false; if 
(INTEL_GEN(dev_priv) >= 5) @@ -407,7 +413,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, { struct intel_fbc *fbc = &dev_priv->fbc; - WARN_ON(!mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); if (fbc->active) intel_fbc_hw_deactivate(dev_priv); @@ -471,23 +477,25 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, struct drm_mm_node *uninitialized_var(compressed_llb); int ret; - WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); + drm_WARN_ON(&dev_priv->drm, + drm_mm_node_allocated(&fbc->compressed_fb)); ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, size, fb_cpp); if (!ret) goto err_llb; else if (ret > 1) { - DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); - + DRM_INFO_ONCE("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n"); } fbc->threshold = ret; if (INTEL_GEN(dev_priv) >= 5) - I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); + intel_de_write(dev_priv, ILK_DPFC_CB_BASE, + fbc->compressed_fb.start); else if (IS_GM45(dev_priv)) { - I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); + intel_de_write(dev_priv, DPFC_CB_BASE, + fbc->compressed_fb.start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) @@ -500,16 +508,16 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, fbc->compressed_llb = compressed_llb; - GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start, - fbc->compressed_fb.start, - U32_MAX)); - GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start, - fbc->compressed_llb->start, - U32_MAX)); - I915_WRITE(FBC_CFB_BASE, - dev_priv->dsm.start + fbc->compressed_fb.start); - I915_WRITE(FBC_LL_BASE, - dev_priv->dsm.start + compressed_llb->start); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_fb.start, + U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, + fbc->compressed_llb->start, + U32_MAX)); + intel_de_write(dev_priv, FBC_CFB_BASE, + dev_priv->dsm.start + fbc->compressed_fb.start); + intel_de_write(dev_priv, FBC_LL_BASE, + dev_priv->dsm.start + compressed_llb->start); } DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n", @@ -530,20 +538,22 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - if (drm_mm_node_allocated(&fbc->compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + if (!drm_mm_node_allocated(&fbc->compressed_fb)) + return; if (fbc->compressed_llb) { i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); kfree(fbc->compressed_llb); } + + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); } void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!HAS_FBC(dev_priv)) return; mutex_lock(&fbc->lock); @@ -555,7 +565,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, unsigned int stride) { /* This should have been caught earlier. */ - if (WARN_ON_ONCE((stride & (64 - 1)) != 0)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) return false; /* Below are the additional FBC restrictions. 
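 * (platform-dependent limits on the framebuffer stride, enforced
 * by the checks that follow)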
*/ @@ -663,8 +673,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fb.format = fb->format; cache->fb.stride = fb->pitches[0]; - WARN_ON(plane_state->flags & PLANE_HAS_FENCE && - !plane_state->vma->fence); + drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + !plane_state->vma->fence); if (plane_state->flags & PLANE_HAS_FENCE && plane_state->vma->fence) @@ -681,12 +691,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) fbc->compressed_fb.size * fbc->threshold; } +static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + if (intel_vgpu_active(dev_priv)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (!i915_modparams.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param or by default"; + return false; + } + + if (fbc->underrun_detected) { + fbc->no_fbc_reason = "underrun detected"; + return false; + } + + return true; +} + static bool intel_fbc_can_activate(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; + if (!intel_fbc_can_enable(dev_priv)) + return false; + if (!cache->plane.visible) { fbc->no_fbc_reason = "primary plane not visible"; return false; @@ -785,28 +820,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return true; } -static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) -{ - struct intel_fbc *fbc = &dev_priv->fbc; - - if (intel_vgpu_active(dev_priv)) { - fbc->no_fbc_reason = "VGPU is active"; - return false; - } - - if (!i915_modparams.enable_fbc) { - fbc->no_fbc_reason = "disabled per module param or by default"; - return false; - } - - if (fbc->underrun_detected) { - fbc->no_fbc_reason = "underrun detected"; - return false; - } - - return true; -} - static void intel_fbc_get_reg_params(struct intel_crtc *crtc, struct intel_fbc_reg_params *params) { @@ -867,16 +880,20 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) return true; } -bool intel_fbc_pre_update(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; const char *reason = "update pending"; bool need_vblank_wait = false; - if (!fbc_supported(dev_priv)) + if (!plane->has_fbc || !plane_state) return need_vblank_wait; mutex_lock(&fbc->lock); @@ -926,9 +943,9 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) struct intel_fbc *fbc = &dev_priv->fbc; struct intel_crtc *crtc = fbc->crtc; - WARN_ON(!mutex_is_locked(&fbc->lock)); - WARN_ON(!fbc->crtc); - WARN_ON(fbc->active); + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !fbc->crtc); + drm_WARN_ON(&dev_priv->drm, fbc->active); DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); @@ -942,7 +959,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc 
*fbc = &dev_priv->fbc; - WARN_ON(!mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); if (fbc->crtc != crtc) return; @@ -967,12 +984,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) intel_fbc_deactivate(dev_priv, "frontbuffer write"); } -void intel_fbc_post_update(struct intel_crtc *crtc) +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!plane->has_fbc || !plane_state) return; mutex_lock(&fbc->lock); @@ -994,7 +1015,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, { struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!HAS_FBC(dev_priv)) return; if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) @@ -1015,7 +1036,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, { struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!HAS_FBC(dev_priv)) return; mutex_lock(&fbc->lock); @@ -1099,24 +1120,26 @@ out: /** * intel_fbc_enable: tries to enable FBC on the CRTC * @crtc: the CRTC - * @crtc_state: corresponding &drm_crtc_state for @crtc - * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc + * @state: corresponding &intel_atomic_state for @crtc * * This function checks if the given CRTC was chosen for FBC, then enables it if * possible. Notice that it doesn't activate FBC. It is valid to call * intel_fbc_enable multiple times for the same pipe without an * intel_fbc_disable in the middle, as long as it is deactivated.
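 *
 * A sketch of the assumed call order during an atomic commit, now
 * that these helpers all take the same (state, crtc) pair:
 *
 *	intel_fbc_pre_update(state, crtc);
 *	intel_fbc_enable(state, crtc);
 *	... commit plane updates ...
 *	intel_fbc_post_update(state, crtc);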
*/ -void intel_fbc_enable(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +void intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); struct intel_fbc *fbc = &dev_priv->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; - const struct drm_framebuffer *fb = plane_state->hw.fb; - if (!fbc_supported(dev_priv)) + if (!plane->has_fbc || !plane_state) return; mutex_lock(&fbc->lock); @@ -1129,7 +1152,7 @@ void intel_fbc_enable(struct intel_crtc *crtc, __intel_fbc_disable(dev_priv); } - WARN_ON(fbc->active); + drm_WARN_ON(&dev_priv->drm, fbc->active); intel_fbc_update_state_cache(crtc, crtc_state, plane_state); @@ -1139,14 +1162,14 @@ void intel_fbc_enable(struct intel_crtc *crtc, if (intel_fbc_alloc_cfb(dev_priv, intel_fbc_calculate_cfb_size(dev_priv, cache), - fb->format->cpp[0])) { + plane_state->hw.fb->format->cpp[0])) { cache->plane.visible = false; fbc->no_fbc_reason = "not enough stolen memory"; goto out; } if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && - fb->modifier != I915_FORMAT_MOD_X_TILED) + plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) cache->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; else @@ -1169,9 +1192,10 @@ out: void intel_fbc_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!plane->has_fbc) return; mutex_lock(&fbc->lock); @@ -1190,12 +1214,12 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!HAS_FBC(dev_priv)) return; mutex_lock(&fbc->lock); if (fbc->crtc) { - WARN_ON(fbc->crtc->active); + drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); __intel_fbc_disable(dev_priv); } mutex_unlock(&fbc->lock); @@ -1267,7 +1291,7 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - if (!fbc_supported(dev_priv)) + if (!HAS_FBC(dev_priv)) return; /* There's no guarantee that underrun_detected won't be set to true @@ -1348,7 +1372,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) /* This value was pulled out of someone's hat */ if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv)) - I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); + intel_de_write(dev_priv, FBC_CONTROL, + 500 << FBC_CTL_INTERVAL_SHIFT); /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index c8a5e5098687..6dc1edefe81b 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -19,14 +19,13 @@ struct intel_plane_state; void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -bool intel_fbc_pre_update(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, 
- const struct intel_plane_state *plane_state); -void intel_fbc_post_update(struct intel_crtc *crtc); +bool intel_fbc_pre_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_fbc_post_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); -void intel_fbc_enable(struct intel_crtc *crtc, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); +void intel_fbc_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_fbc_disable(struct intel_crtc *crtc); void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 1e98e432c9fa..3bc804212a99 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -40,7 +40,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_display_types.h" @@ -191,7 +190,7 @@ static int intelfb_create(struct drm_fb_helper *helper, drm_framebuffer_put(&intel_fb->base); intel_fb = ifbdev->fb = NULL; } - if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) { + if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) @@ -410,9 +409,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, if (!crtc->state->active) continue; - WARN(!crtc->primary->state->fb, - "re-used BIOS config but lost an fb on crtc %d\n", - crtc->base.id); + drm_WARN(dev, !crtc->primary->state->fb, + "re-used BIOS config but lost an fb on crtc %d\n", + crtc->base.id); } @@ -439,7 +438,8 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))) + if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) || + !INTEL_DISPLAY_ENABLED(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); @@ -452,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev) if (!intel_fbdev_init_bios(dev, ifbdev)) ifbdev->preferred_bpp = 32; - ret = drm_fb_helper_init(dev, &ifbdev->helper, 4); + ret = drm_fb_helper_init(dev, &ifbdev->helper); if (ret) { kfree(ifbdev); return ret; @@ -461,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev) dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); - drm_fb_helper_single_add_all_connectors(&ifbdev->helper); - return 0; } @@ -569,7 +567,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * to all the printk activity. Try to keep it out of the hot * path of resume if possible. */ - WARN_ON(state != FBINFO_STATE_RUNNING); + drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING); if (!console_trylock()) { /* Don't block our own workqueue as this can * be run in parallel with other i915.ko tasks. 
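 * (on trylock failure the resume is deferred to
 * dev_priv->fbdev_suspend_work rather than blocking)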
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 6c83b350525d..813a4f7033e1 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -95,15 +95,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) lockdep_assert_held(&dev_priv->irq_lock); - if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0) + if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0) return; enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe); - I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); + intel_de_posting_read(dev_priv, reg); trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); - DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); + drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe)); } static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, @@ -118,11 +118,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, if (enable) { u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); - I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, + enable_mask | PIPE_FIFO_UNDERRUN_STATUS); + intel_de_posting_read(dev_priv, reg); } else { - if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); + if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) + drm_err(&dev_priv->drm, "pipe %c underrun\n", + pipe_name(pipe)); } } @@ -143,18 +145,18 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - u32 err_int = I915_READ(GEN7_ERR_INT); + u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT); lockdep_assert_held(&dev_priv->irq_lock); if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0) return; - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); - POSTING_READ(GEN7_ERR_INT); + intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + intel_de_posting_read(dev_priv, GEN7_ERR_INT); trace_intel_cpu_fifo_underrun(dev_priv, pipe); - DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe)); + drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe)); } static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, @@ -163,7 +165,8 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + intel_de_write(dev_priv, GEN7_ERR_INT, + ERR_INT_FIFO_UNDERRUN(pipe)); if (!ivb_can_enable_err_int(dev)) return; @@ -173,9 +176,10 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev, ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB); if (old && - I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { - DRM_ERROR("uncleared fifo underrun on pipe %c\n", - pipe_name(pipe)); + intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { + drm_err(&dev_priv->drm, + "uncleared fifo underrun on pipe %c\n", + pipe_name(pipe)); } } } @@ -209,19 +213,20 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pch_transcoder = crtc->pipe; - u32 serr_int = I915_READ(SERR_INT); + u32 serr_int = intel_de_read(dev_priv, SERR_INT); 
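 /* SERR_INT latches the per-transcoder PCH FIFO underrun status */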
lockdep_assert_held(&dev_priv->irq_lock); if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0) return; - I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); - POSTING_READ(SERR_INT); + intel_de_write(dev_priv, SERR_INT, + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + intel_de_posting_read(dev_priv, SERR_INT); trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); - DRM_ERROR("pch fifo underrun on pch transcoder %c\n", - pipe_name(pch_transcoder)); + drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n", + pipe_name(pch_transcoder)); } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, @@ -231,8 +236,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); if (enable) { - I915_WRITE(SERR_INT, - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + intel_de_write(dev_priv, SERR_INT, + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); if (!cpt_can_enable_serr_int(dev)) return; @@ -241,10 +246,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, } else { ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); - if (old && I915_READ(SERR_INT) & + if (old && intel_de_read(dev_priv, SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", - pipe_name(pch_transcoder)); + drm_err(&dev_priv->drm, + "uncleared pch fifo underrun on pch transcoder %c\n", + pipe_name(pch_transcoder)); } } } @@ -378,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) { trace_intel_cpu_fifo_underrun(dev_priv, pipe); - DRM_ERROR("CPU pipe %c FIFO underrun\n", - pipe_name(pipe)); + drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", + pipe_name(pipe)); } intel_fbc_handle_fifo_underrun_irq(dev_priv); @@ -400,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) { trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder); - DRM_ERROR("PCH transcoder %c FIFO underrun\n", - pipe_name(pch_transcoder)); + drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n", + pipe_name(pch_transcoder)); } } diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c new file mode 100644 index 000000000000..a0cc894c3868 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_global_state.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/string.h> + +#include "i915_drv.h" +#include "intel_atomic.h" +#include "intel_display_types.h" +#include "intel_global_state.h" + +void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv, + struct intel_global_obj *obj, + struct intel_global_state *state, + const struct intel_global_state_funcs *funcs) +{ + memset(obj, 0, sizeof(*obj)); + + obj->state = state; + obj->funcs = funcs; + list_add_tail(&obj->head, &dev_priv->global_obj_list); +} + +void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv) +{ + struct intel_global_obj *obj, *next; + + list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) { + list_del(&obj->head); + obj->funcs->atomic_destroy_state(obj, obj->state); + } +} + +static void assert_global_state_write_locked(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + 
for_each_intel_crtc(&dev_priv->drm, crtc) + drm_modeset_lock_assert_held(&crtc->base.mutex); +} + +static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx, + struct drm_modeset_lock *lock) +{ + struct drm_modeset_lock *l; + + list_for_each_entry(l, &ctx->locked, head) { + if (lock == l) + return true; + } + + return false; +} + +static void assert_global_state_read_locked(struct intel_atomic_state *state) +{ + struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + if (modeset_lock_is_held(ctx, &crtc->base.mutex)) + return; + } + + WARN(1, "Global state not read locked\n"); +} + +struct intel_global_state * +intel_atomic_get_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj) +{ + int index, num_objs, i; + size_t size; + struct __intel_global_objs_state *arr; + struct intel_global_state *obj_state; + + for (i = 0; i < state->num_global_objs; i++) + if (obj == state->global_objs[i].ptr) + return state->global_objs[i].state; + + assert_global_state_read_locked(state); + + num_objs = state->num_global_objs + 1; + size = sizeof(*state->global_objs) * num_objs; + arr = krealloc(state->global_objs, size, GFP_KERNEL); + if (!arr) + return ERR_PTR(-ENOMEM); + + state->global_objs = arr; + index = state->num_global_objs; + memset(&state->global_objs[index], 0, sizeof(*state->global_objs)); + + obj_state = obj->funcs->atomic_duplicate_state(obj); + if (!obj_state) + return ERR_PTR(-ENOMEM); + + obj_state->changed = false; + + state->global_objs[index].state = obj_state; + state->global_objs[index].old_state = obj->state; + state->global_objs[index].new_state = obj_state; + state->global_objs[index].ptr = obj; + obj_state->state = state; + + state->num_global_objs = num_objs; + + DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n", + obj, obj_state, state); + + return obj_state; +} + +struct intel_global_state * +intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj) +{ + int i; + + for (i = 0; i < state->num_global_objs; i++) + if (obj == state->global_objs[i].ptr) + return state->global_objs[i].old_state; + + return NULL; +} + +struct intel_global_state * +intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj) +{ + int i; + + for (i = 0; i < state->num_global_objs; i++) + if (obj == state->global_objs[i].ptr) + return state->global_objs[i].new_state; + + return NULL; +} + +void intel_atomic_swap_global_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_global_state *old_obj_state, *new_obj_state; + struct intel_global_obj *obj; + int i; + + for_each_oldnew_global_obj_in_state(state, obj, old_obj_state, + new_obj_state, i) { + WARN_ON(obj->state != old_obj_state); + + /* + * If the new state wasn't modified (and properly + * locked for write access) we throw it away. 
+ */ + if (!new_obj_state->changed) + continue; + + assert_global_state_write_locked(dev_priv); + + old_obj_state->state = state; + new_obj_state->state = NULL; + + state->global_objs[i].state = old_obj_state; + obj->state = new_obj_state; + } +} + +void intel_atomic_clear_global_state(struct intel_atomic_state *state) +{ + int i; + + for (i = 0; i < state->num_global_objs; i++) { + struct intel_global_obj *obj = state->global_objs[i].ptr; + + obj->funcs->atomic_destroy_state(obj, + state->global_objs[i].state); + state->global_objs[i].ptr = NULL; + state->global_objs[i].state = NULL; + state->global_objs[i].old_state = NULL; + state->global_objs[i].new_state = NULL; + } + state->num_global_objs = 0; +} + +int intel_atomic_lock_global_state(struct intel_global_state *obj_state) +{ + struct intel_atomic_state *state = obj_state->state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + int ret; + + ret = drm_modeset_lock(&crtc->base.mutex, + state->base.acquire_ctx); + if (ret) + return ret; + } + + obj_state->changed = true; + + return 0; +} + +int intel_atomic_serialize_global_state(struct intel_global_state *obj_state) +{ + struct intel_atomic_state *state = obj_state->state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + obj_state->changed = true; + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h new file mode 100644 index 000000000000..e6163a469029 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_global_state.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __INTEL_GLOBAL_STATE_H__ +#define __INTEL_GLOBAL_STATE_H__ + +#include <linux/list.h> + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_global_obj; +struct intel_global_state; + +struct intel_global_state_funcs { + struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj); + void (*atomic_destroy_state)(struct intel_global_obj *obj, + struct intel_global_state *state); +}; + +struct intel_global_obj { + struct list_head head; + struct intel_global_state *state; + const struct intel_global_state_funcs *funcs; +}; + +#define intel_for_each_global_obj(obj, dev_priv) \ + list_for_each_entry(obj, &(dev_priv)->global_obj_list, head) + +#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if(obj) + +#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \ + (__i)++) \ + for_each_if(obj) + +#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (old_obj_state) = (__state)->global_objs[__i].old_state, \ + (new_obj_state) = 
(__state)->global_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if(obj) + +struct intel_global_state { + struct intel_atomic_state *state; + bool changed; +}; + +struct __intel_global_objs_state { + struct intel_global_obj *ptr; + struct intel_global_state *state, *old_state, *new_state; +}; + +void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv, + struct intel_global_obj *obj, + struct intel_global_state *state, + const struct intel_global_state_funcs *funcs); +void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv); + +struct intel_global_state * +intel_atomic_get_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj); +struct intel_global_state * +intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj); +struct intel_global_state * +intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state, + struct intel_global_obj *obj); + +void intel_atomic_swap_global_state(struct intel_atomic_state *state); +void intel_atomic_clear_global_state(struct intel_atomic_state *state); +int intel_atomic_lock_global_state(struct intel_global_state *obj_state); +int intel_atomic_serialize_global_state(struct intel_global_state *obj_state); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 3d4d19ac1d14..1fd3a5a6296b 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -32,7 +32,6 @@ #include <linux/i2c.h> #include <drm/drm_hdcp.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_display_types.h" @@ -143,8 +142,8 @@ to_intel_gmbus(struct i2c_adapter *i2c) void intel_gmbus_reset(struct drm_i915_private *dev_priv) { - I915_WRITE(GMBUS0, 0); - I915_WRITE(GMBUS4, 0); + intel_de_write(dev_priv, GMBUS0, 0); + intel_de_write(dev_priv, GMBUS4, 0); } static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv, @@ -153,12 +152,12 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv, u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ - val = I915_READ(DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D); if (!enable) val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D, val); } static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv, @@ -166,12 +165,12 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(SOUTH_DSPCLK_GATE_D); + val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); if (!enable) val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; else val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); } static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv, @@ -179,12 +178,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(GEN9_CLKGATE_DIS_4); + val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4); if (!enable) val |= BXT_GMBUS_GATING_DIS; else val &= ~BXT_GMBUS_GATING_DIS; - I915_WRITE(GEN9_CLKGATE_DIS_4, val); + intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val); } static u32 get_reserved(struct intel_gmbus *bus) @@ -337,14 +336,16 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) irq_en = 0; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - 
I915_WRITE_FW(GMBUS4, irq_en); + intel_de_write_fw(dev_priv, GMBUS4, irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2); + ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + 2); if (ret) - ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50); + ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status, + 50); - I915_WRITE_FW(GMBUS4, 0); + intel_de_write_fw(dev_priv, GMBUS4, 0); remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); if (gmbus2 & GMBUS_SATOER) @@ -366,13 +367,13 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) irq_enable = GMBUS_IDLE_EN; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); - I915_WRITE_FW(GMBUS4, irq_enable); + intel_de_write_fw(dev_priv, GMBUS4, irq_enable); ret = intel_wait_for_register_fw(&dev_priv->uncore, GMBUS2, GMBUS_ACTIVE, 0, 10); - I915_WRITE_FW(GMBUS4, 0); + intel_de_write_fw(dev_priv, GMBUS4, 0); remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait); return ret; @@ -404,15 +405,12 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, len++; } size = len % 256 + 256; - I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); + intel_de_write_fw(dev_priv, GMBUS0, + gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); } - I915_WRITE_FW(GMBUS1, - gmbus1_index | - GMBUS_CYCLE_WAIT | - (size << GMBUS_BYTE_COUNT_SHIFT) | - (addr << GMBUS_SLAVE_ADDR_SHIFT) | - GMBUS_SLAVE_READ | GMBUS_SW_RDY); + intel_de_write_fw(dev_priv, GMBUS1, + gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { int ret; u32 val, loop = 0; @@ -421,7 +419,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, if (ret) return ret; - val = I915_READ_FW(GMBUS3); + val = intel_de_read_fw(dev_priv, GMBUS3); do { if (extra_byte_added && len == 1) break; @@ -432,7 +430,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, if (burst_read && len == size - 4) /* Reset the override bit */ - I915_WRITE_FW(GMBUS0, gmbus0_reg); + intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg); } return 0; @@ -489,12 +487,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, len -= 1; } - I915_WRITE_FW(GMBUS3, val); - I915_WRITE_FW(GMBUS1, - gmbus1_index | GMBUS_CYCLE_WAIT | - (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | - (addr << GMBUS_SLAVE_ADDR_SHIFT) | - GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); + intel_de_write_fw(dev_priv, GMBUS3, val); + intel_de_write_fw(dev_priv, GMBUS1, + gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); while (len) { int ret; @@ -503,7 +498,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); - I915_WRITE_FW(GMBUS3, val); + intel_de_write_fw(dev_priv, GMBUS3, val); ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN); if (ret) @@ -568,7 +563,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, /* GMBUS5 holds 16-bit index */ if (gmbus5) - I915_WRITE_FW(GMBUS5, gmbus5); + intel_de_write_fw(dev_priv, GMBUS5, gmbus5); if (msgs[1].flags & I2C_M_RD) ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg, @@ -578,7 +573,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, /* Clear GMBUS5 after each index transfer */ if (gmbus5) - I915_WRITE_FW(GMBUS5, 0); + intel_de_write_fw(dev_priv, GMBUS5, 0); return ret; } @@ -601,7 +596,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, 
struct i2c_msg *msgs, int num, pch_gmbus_clock_gating(dev_priv, false); retry: - I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0); + intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0); for (; i < num; i += inc) { inc = 1; @@ -629,18 +624,19 @@ retry: * a STOP on the very first cycle. To simplify the code we * unconditionally generate the STOP condition with an additional gmbus * cycle. */ - I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); + intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); /* Mark the GMBUS interface as disabled after waiting for idle. * We will re-enable it at the start of the next xfer, * till then let it sleep. */ if (gmbus_wait_idle(dev_priv)) { - DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", - adapter->name); + drm_dbg_kms(&dev_priv->drm, + "GMBUS [%s] timed out waiting for idle\n", + adapter->name); ret = -ETIMEDOUT; } - I915_WRITE_FW(GMBUS0, 0); + intel_de_write_fw(dev_priv, GMBUS0, 0); ret = ret ?: i; goto out; @@ -660,8 +656,9 @@ clear_err: */ ret = -ENXIO; if (gmbus_wait_idle(dev_priv)) { - DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", - adapter->name); + drm_dbg_kms(&dev_priv->drm, + "GMBUS [%s] timed out after NAK\n", + adapter->name); ret = -ETIMEDOUT; } @@ -669,13 +666,13 @@ clear_err: * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ - I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT); - I915_WRITE_FW(GMBUS1, 0); - I915_WRITE_FW(GMBUS0, 0); + intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT); + intel_de_write_fw(dev_priv, GMBUS1, 0); + intel_de_write_fw(dev_priv, GMBUS0, 0); - DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", - adapter->name, msgs[i].addr, - (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); + drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n", + adapter->name, msgs[i].addr, + (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len); /* * Passive adapters sometimes NAK the first probe. Retry the first @@ -684,17 +681,19 @@ clear_err: * drm_do_probe_ddc_edid, which bails out on the first -ENXIO. */ if (ret == -ENXIO && i == 0 && try++ == 0) { - DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n", - adapter->name); + drm_dbg_kms(&dev_priv->drm, + "GMBUS [%s] NAK on first message, retry\n", + adapter->name); goto retry; } goto out; timeout: - DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", - bus->adapter.name, bus->reg0 & 0xff); - I915_WRITE_FW(GMBUS0, 0); + drm_dbg_kms(&dev_priv->drm, + "GMBUS [%s] timed out, falling back to bit banging on pin %d\n", + bus->adapter.name, bus->reg0 & 0xff); + intel_de_write_fw(dev_priv, GMBUS0, 0); /* * Hardware may not support GMBUS over these pins? Try GPIO bitbanging @@ -908,7 +907,8 @@ err: struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin) { - if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin))) + if (drm_WARN_ON(&dev_priv->drm, + !intel_gmbus_is_valid_pin(dev_priv, pin))) return NULL; return &dev_priv->gmbus[pin].adapter; @@ -929,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) mutex_lock(&dev_priv->gmbus_mutex); bus->force_bit += force_bit ? 1 : -1; - DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", - force_bit ? "en" : "dis", adapter->name, - bus->force_bit); + drm_dbg_kms(&dev_priv->drm, + "%sabling bit-banging on %s. force bit now %d\n", + force_bit ? 
"en" : "dis", adapter->name, + bus->force_bit); mutex_unlock(&dev_priv->gmbus_mutex); } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 0fdbd39f6641..ee0f27ea2810 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -43,6 +43,7 @@ static int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, const struct intel_hdcp_shim *shim, u8 *bksv) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); int ret, i, tries = 2; /* HDCP spec states that we must retry the bksv if it is invalid */ @@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, break; } if (i == tries) { - DRM_DEBUG_KMS("Bksv is invalid\n"); + drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); return -ENODEV; } @@ -64,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, /* Is HDCP1.4 capable on Platform and Sink */ bool intel_hdcp_capable(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); const struct intel_hdcp_shim *shim = connector->hdcp.shim; bool capable = false; u8 bksv[5]; @@ -85,8 +86,8 @@ bool intel_hdcp_capable(struct intel_connector *connector) /* Is HDCP2.2 capable on Platform and Sink */ bool intel_hdcp2_capable(struct intel_connector *connector) { + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; bool capable = false; @@ -112,7 +113,8 @@ static inline bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, enum port port) { - return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + return intel_de_read(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port)) & HDCP_STATUS_ENC; } @@ -120,7 +122,8 @@ static inline bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder, enum port port) { - return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + return intel_de_read(dev_priv, + HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS; } @@ -184,9 +187,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv) { - I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); - I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | - HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); + intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); + intel_de_write(dev_priv, HDCP_KEY_STATUS, + HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); } static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) @@ -194,7 +197,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) int ret; u32 val; - val = I915_READ(HDCP_KEY_STATUS); + val = intel_de_read(dev_priv, HDCP_KEY_STATUS); if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) return 0; @@ -203,7 +206,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * out of reset. So if Key is not already loaded, its an error state. 
*/ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) + if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) return -ENXIO; /* @@ -217,12 +220,13 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { - DRM_ERROR("Failed to initiate HDCP key load (%d)\n", - ret); + drm_err(&dev_priv->drm, + "Failed to initiate HDCP key load (%d)\n", + ret); return ret; } } else { - I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); + intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); } /* Wait for the keys to load (500us) */ @@ -235,7 +239,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) return -ENXIO; /* Send Aksv over to PCH display for use in authentication */ - I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); + intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); return 0; } @@ -243,9 +247,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) /* Returns updated SHA-1 index */ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) { - I915_WRITE(HDCP_SHA_TEXT, sha_text); + intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text); if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { - DRM_ERROR("Timed out waiting for SHA1 ready\n"); + drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; } return 0; @@ -270,7 +274,8 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, return HDCP_TRANSD_REP_PRESENT | HDCP_TRANSD_SHA1_M0; default: - DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder); + drm_err(&dev_priv->drm, "Unknown transcoder %d\n", + cpu_transcoder); return -EINVAL; } } @@ -287,7 +292,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - DRM_ERROR("Unknown port %d\n", port); + drm_err(&dev_priv->drm, "Unknown port %d\n", port); return -EINVAL; } } @@ -297,21 +302,19 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); - struct drm_i915_private *dev_priv; + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port = intel_dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; int ret, i, j, sha_idx; - dev_priv = intel_dig_port->base.base.dev->dev_private; - /* Process V' values from the receiver */ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); if (ret) return ret; - I915_WRITE(HDCP_SHA_V_PRIME(i), vprime); + intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime); } /* @@ -328,7 +331,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_text = 0; sha_leftovers = 0; rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; @@ -345,7 +348,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, /* 
Programming guide writes this every 64 bytes */ sha_idx += sizeof(sha_text); if (!(sha_idx % 64)) - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_32); /* Store the leftover bytes from the ksv in sha_text */ sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; @@ -377,7 +381,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, */ if (sha_leftovers == 0) { /* Write 16 bits of text, 16 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_16); ret = intel_write_sha_text(dev_priv, bstatus[0] << 8 | bstatus[1]); if (ret < 0) @@ -385,14 +390,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 16 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_16); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; @@ -400,7 +407,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, } else if (sha_leftovers == 1) { /* Write 24 bits of text, 8 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_24); sha_text |= bstatus[0] << 16 | bstatus[1] << 8; /* Only 24-bits of data, must be in the LSB */ sha_text = (sha_text & 0xffffff00) >> 8; @@ -410,14 +418,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 24 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; @@ -425,7 +435,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, } else if (sha_leftovers == 2) { /* Write 32 bits of text */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 24 | bstatus[1] << 16; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) @@ -433,7 +444,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx += sizeof(sha_text); /* Write 64 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_0); for (i = 0; i < 2; i++) { ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) @@ -442,7 +454,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, } } else if (sha_leftovers == 3) { /* Write 32 bits of text */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_32); sha_text |= bstatus[0] << 24; ret = intel_write_sha_text(dev_priv, sha_text); if (ret < 0) @@ -450,32 +463,35 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, sha_idx += sizeof(sha_text); /* Write 8 bits of 
text, 24 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_8); ret = intel_write_sha_text(dev_priv, bstatus[1]); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 32 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_0); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); /* Write 8 bits of M0 */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_TEXT_24); ret = intel_write_sha_text(dev_priv, 0); if (ret < 0) return ret; sha_idx += sizeof(sha_text); } else { - DRM_DEBUG_KMS("Invalid number of leftovers %d\n", - sha_leftovers); + drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n", + sha_leftovers); return -EINVAL; } - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); + intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ while ((sha_idx % 64) < (64 - sizeof(sha_text))) { ret = intel_write_sha_text(dev_priv, 0); @@ -495,14 +511,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, return ret; /* Tell the HW we're done with the hash and wait for it to ACK */ - I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); + intel_de_write(dev_priv, HDCP_REP_CTL, + rep_ctl | HDCP_SHA1_COMPLETE_HASH); if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, 1)) { - DRM_ERROR("Timed out waiting for SHA1 complete\n"); + drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n"); return -ETIMEDOUT; } - if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { - DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n"); + if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { + drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n"); return -ENXIO; } @@ -513,15 +530,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, static int intel_hdcp_auth_downstream(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; - struct drm_device *dev = connector->base.dev; u8 bstatus[2], num_downstream, *ksv_fifo; int ret, i, tries = 3; ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); if (ret) { - DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret); + drm_dbg_kms(&dev_priv->drm, + "KSV list failed to become ready (%d)\n", ret); return ret; } @@ -531,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - DRM_DEBUG_KMS("Max Topology Limit Exceeded\n"); + drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n"); return -EPERM; } @@ -544,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); if (num_downstream == 0) { - DRM_DEBUG_KMS("Repeater with zero downstream devices\n"); + drm_dbg_kms(&dev_priv->drm, + "Repeater with zero downstream devices\n"); return -EINVAL; } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); if (!ksv_fifo) { - DRM_DEBUG_KMS("Out of mem: 
ksv_fifo\n"); + drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n"); return -ENOMEM; } @@ -558,8 +577,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (ret) goto err; - if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { - DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); + if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo, + num_downstream)) { + drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; } @@ -577,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) } if (i == tries) { - DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret); + drm_dbg_kms(&dev_priv->drm, + "V Prime validation failed.(%d)\n", ret); goto err; } - DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", - num_downstream); + drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n", + num_downstream); ret = 0; err: kfree(ksv_fifo); @@ -592,13 +613,12 @@ err: /* Implements Part 1 of the HDCP authorization procedure */ static int intel_hdcp_auth(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_device *dev = connector->base.dev; const struct intel_hdcp_shim *shim = hdcp->shim; - struct drm_i915_private *dev_priv; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; - enum port port; + enum port port = intel_dig_port->base.port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; union { @@ -615,10 +635,6 @@ static int intel_hdcp_auth(struct intel_connector *connector) } ri; bool repeater_present, hdcp_capable; - dev_priv = intel_dig_port->base.base.dev->dev_private; - - port = intel_dig_port->base.port; - /* * Detects whether the display is HDCP capable. 
Although we check for * valid Bksv below, the HDCP over DP spec requires that we check @@ -630,28 +646,32 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret) return ret; if (!hdcp_capable) { - DRM_DEBUG_KMS("Panel is not HDCP capable\n"); + drm_dbg_kms(&dev_priv->drm, + "Panel is not HDCP capable\n"); return -EINVAL; } } /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port), - get_random_u32()); - I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), - HDCP_CONF_CAPTURE_AN); + intel_de_write(dev_priv, + HDCP_ANINIT(dev_priv, cpu_transcoder, port), + get_random_u32()); + intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ if (intel_de_wait_for_set(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { - DRM_ERROR("Timed out waiting for An\n"); + drm_err(&dev_priv->drm, "Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port)); - an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port)); + an.reg[0] = intel_de_read(dev_priv, + HDCP_ANLO(dev_priv, cpu_transcoder, port)); + an.reg[1] = intel_de_read(dev_priv, + HDCP_ANHI(dev_priv, cpu_transcoder, port)); ret = shim->write_an_aksv(intel_dig_port, an.shim); if (ret) return ret; @@ -664,33 +684,34 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret < 0) return ret; - if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) { - DRM_ERROR("BKSV is revoked\n"); + if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) { + drm_err(&dev_priv->drm, "BKSV is revoked\n"); return -EPERM; } - I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]); - I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]); + intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port), + bksv.reg[0]); + intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port), + bksv.reg[1]); ret = shim->repeater_present(intel_dig_port, &repeater_present); if (ret) return ret; if (repeater_present) - I915_WRITE(HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, - port)); + intel_de_write(dev_priv, HDCP_REP_CTL, + intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); ret = shim->toggle_signalling(intel_dig_port, true); if (ret) return ret; - I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), - HDCP_CONF_AUTH_AND_ENC); + intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { - DRM_ERROR("Timed out waiting for R0 ready\n"); + drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } @@ -716,19 +737,21 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = shim->read_ri_prime(intel_dig_port, ri.shim); if (ret) return ret; - I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); + intel_de_write(dev_priv, + HDCP_RPRIME(dev_priv, cpu_transcoder, port), + ri.reg); /* Wait for Ri prime match */ - if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, - port)) & - (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) + if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + 
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { - DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n", - I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, - port))); + drm_dbg_kms(&dev_priv->drm, + "Timed out waiting for Ri prime match (%x)\n", + intel_de_read(dev_priv, HDCP_STATUS(dev_priv, + cpu_transcoder, port))); return -ETIMEDOUT; } @@ -737,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_ENC, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - DRM_ERROR("Timed out waiting for encryption\n"); + drm_err(&dev_priv->drm, "Timed out waiting for encryption\n"); return -ETIMEDOUT; } @@ -749,52 +772,53 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (repeater_present) return intel_hdcp_auth_downstream(connector); - DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n"); + drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n"); return 0; } static int _intel_hdcp_disable(struct intel_connector *connector) { + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *dev_priv = connector->base.dev->dev_private; - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", + connector->base.name, connector->base.base.id); hdcp->hdcp_encrypted = false; - I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0); + intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0); if (intel_de_wait_for_clear(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port), ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { - DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); + drm_err(&dev_priv->drm, + "Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } ret = hdcp->shim->toggle_signalling(intel_dig_port, false); if (ret) { - DRM_ERROR("Failed to disable HDCP signalling\n"); + drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); return ret; } - DRM_DEBUG_KMS("HDCP is disabled\n"); + drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n"); return 0; } static int _intel_hdcp_enable(struct intel_connector *connector) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *dev_priv = connector->base.dev->dev_private; int i, ret, tries = 3; - DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n", + connector->base.name, connector->base.base.id); if (!hdcp_key_loadable(dev_priv)) { - DRM_ERROR("HDCP key Load is not possible\n"); + drm_err(&dev_priv->drm, "HDCP key Load is not possible\n"); return -ENXIO; } @@ -805,7 +829,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector) intel_hdcp_clear_keys(dev_priv); } if (ret) { - DRM_ERROR("Could not load HDCP keys, (%d)\n", ret); + drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n", + ret); return ret; } @@ -817,13 +842,14 @@ static int _intel_hdcp_enable(struct intel_connector *connector) return 0; } - DRM_DEBUG_KMS("HDCP Auth failure 
(%d)\n", ret); + drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret); /* Ensuring HDCP encryption and signalling are stopped. */ _intel_hdcp_disable(connector); } - DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret); + drm_dbg_kms(&dev_priv->drm, + "HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } @@ -836,9 +862,9 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *dev_priv = connector->base.dev->dev_private; - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; @@ -853,11 +879,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { - DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n", - connector->base.name, connector->base.base.id, - I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, - port))); + if (drm_WARN_ON(&dev_priv->drm, + !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { + drm_err(&dev_priv->drm, + "%s:%d HDCP link stopped encryption,%x\n", + connector->base.name, connector->base.base.id, + intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); @@ -872,12 +899,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&dev_priv->drm, + "[%s:%d] HDCP link failed, retrying authentication\n", + connector->base.name, connector->base.base.id); ret = _intel_hdcp_disable(connector); if (ret) { - DRM_ERROR("Failed to disable hdcp (%d)\n", ret); + drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); goto out; @@ -885,7 +913,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector) ret = _intel_hdcp_enable(connector); if (ret) { - DRM_ERROR("Failed to enable hdcp (%d)\n", ret); + drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); goto out; @@ -901,9 +929,9 @@ static void intel_hdcp_prop_work(struct work_struct *work) struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, prop_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); - struct drm_device *dev = connector->base.dev; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL); mutex_lock(&hdcp->mutex); /* @@ -916,13 +944,13 @@ static void intel_hdcp_prop_work(struct work_struct *work) hdcp->value); mutex_unlock(&hdcp->mutex); - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); } bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { - /* PORT E doesn't have HDCP, and PORT F 
is disabled */ - return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E; + return INTEL_INFO(dev_priv)->display.has_hdcp && + (INTEL_GEN(dev_priv) >= 12 || port < PORT_E); } static int @@ -944,7 +972,8 @@ hdcp2_prepare_ake_init(struct intel_connector *connector, ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); if (ret) - DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -974,7 +1003,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, rx_cert, paired, ek_pub_km, msg_sz); if (ret < 0) - DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -998,7 +1028,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector, ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); if (ret < 0) - DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1023,7 +1053,8 @@ hdcp2_store_pairing_info(struct intel_connector *connector, ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); if (ret < 0) - DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1048,7 +1079,8 @@ hdcp2_prepare_lc_init(struct intel_connector *connector, ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); if (ret < 0) - DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1073,7 +1105,8 @@ hdcp2_verify_lprime(struct intel_connector *connector, ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); if (ret < 0) - DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1097,7 +1130,8 @@ static int hdcp2_prepare_skey(struct intel_connector *connector, ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); if (ret < 0) - DRM_DEBUG_KMS("Get session key failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1126,7 +1160,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, rep_topology, rep_send_ack); if (ret < 0) - DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, + "Verify rep topology failed. %d\n", ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1151,7 +1186,7 @@ hdcp2_verify_mprime(struct intel_connector *connector, ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); if (ret < 0) - DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1174,7 +1209,8 @@ static int hdcp2_authenticate_port(struct intel_connector *connector) ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); if (ret < 0) - DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret); + drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. 
%d\n", + ret); mutex_unlock(&dev_priv->hdcp_comp_mutex); return ret; @@ -1209,9 +1245,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector) /* Authentication flow starts from here */ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_device *dev = connector->base.dev; union { struct hdcp2_ake_init ake_init; struct hdcp2_ake_send_cert send_cert; @@ -1242,15 +1278,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) return ret; if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { - DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n"); + drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); - if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id, + if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, + msgs.send_cert.cert_rx.receiver_id, 1)) { - DRM_ERROR("Receiver ID is revoked\n"); + drm_err(&dev_priv->drm, "Receiver ID is revoked\n"); return -EPERM; } @@ -1297,7 +1334,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) static int hdcp2_locality_check(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_lc_init lc_init; @@ -1333,7 +1370,7 @@ static int hdcp2_locality_check(struct intel_connector *connector) static int hdcp2_session_key_exchange(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp2_ske_send_eks send_eks; int ret; @@ -1353,7 +1390,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector) static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_rep_stream_manage stream_manage; @@ -1404,9 +1441,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_device *dev = connector->base.dev; union { struct hdcp2_rep_send_receiverid_list recvid_list; struct hdcp2_rep_send_ack rep_ack; @@ -1425,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { - DRM_DEBUG_KMS("Topology Max Size Exceeded\n"); + drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n"); return -EINVAL; } @@ -1433,17 +1470,24 @@ int 
hdcp2_authenticate_repeater_topology(struct intel_connector *connector) seq_num_v = drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); + if (!hdcp->hdcp2_encrypted && seq_num_v) { + drm_dbg_kms(&dev_priv->drm, + "Non zero Seq_num_v at first RecvId_List msg\n"); + return -EINVAL; + } + if (seq_num_v < hdcp->seq_num_v) { /* Roll over of the seq_num_v from repeater. Reauthenticate. */ - DRM_DEBUG_KMS("Seq_num_v roll over.\n"); + drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n"); return -EINVAL; } device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | HDCP_2_2_DEV_COUNT_LO(rx_info[1])); - if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids, + if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, + msgs.recvid_list.receiver_ids, device_cnt)) { - DRM_ERROR("Revoked receiver ID(s) is in list\n"); + drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } @@ -1475,26 +1519,28 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector) static int hdcp2_authenticate_sink(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; int ret; ret = hdcp2_authentication_key_exchange(connector); if (ret < 0) { - DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret); + drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret); return ret; } ret = hdcp2_locality_check(connector); if (ret < 0) { - DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret); + drm_dbg_kms(&i915->drm, + "Locality Check failed. Err : %d\n", ret); return ret; } ret = hdcp2_session_key_exchange(connector); if (ret < 0) { - DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret); + drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); return ret; } @@ -1509,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) if (hdcp->is_repeater) { ret = hdcp2_authenticate_repeater(connector); if (ret < 0) { - DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret); + drm_dbg_kms(&i915->drm, + "Repeater Auth Failed. Err: %d\n", ret); return ret; } } @@ -1524,31 +1571,32 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) static int hdcp2_enable_encryption(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = connector->encoder->port; + enum port port = intel_dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & - LINK_ENCRYPTION_STATUS); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(intel_dig_port, true); if (ret) { - DRM_ERROR("Failed to enable HDCP signalling. %d\n", - ret); + drm_err(&dev_priv->drm, + "Failed to enable HDCP signalling. %d\n", + ret); return ret; } } - if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_AUTH_STATUS) { /* Link is Authenticated. 
Now set for Encryption */ - I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), - I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, - port)) | - CTL_LINK_ENCRYPTION_REQ); + intel_de_write(dev_priv, + HDCP2_CTL(dev_priv, cpu_transcoder, port), + intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ); } ret = intel_de_wait_for_set(dev_priv, @@ -1562,19 +1610,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) static int hdcp2_disable_encryption(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = connector->encoder->port; + enum port port = intel_dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & - LINK_ENCRYPTION_STATUS)); + drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS)); - I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), - I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) & - ~CTL_LINK_ENCRYPTION_REQ); + intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ); ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, @@ -1582,13 +1629,14 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) LINK_ENCRYPTION_STATUS, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) - DRM_DEBUG_KMS("Disable Encryption Timedout"); + drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(intel_dig_port, false); if (ret) { - DRM_ERROR("Failed to disable HDCP signalling. %d\n", - ret); + drm_err(&dev_priv->drm, + "Failed to disable HDCP signalling. 
%d\n", + ret); return ret; } } @@ -1598,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret, i, tries = 3; for (i = 0; i < tries; i++) { @@ -1606,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) break; /* Clearing the mei hdcp session */ - DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n", - i + 1, tries, ret); + drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", + i + 1, tries, ret); if (hdcp2_deauthenticate_port(connector) < 0) - DRM_DEBUG_KMS("Port deauth failed.\n"); + drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); } if (i != tries) { @@ -1620,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); ret = hdcp2_enable_encryption(connector); if (ret < 0) { - DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret); + drm_dbg_kms(&i915->drm, + "Encryption Enable Failed.(%d)\n", ret); if (hdcp2_deauthenticate_port(connector) < 0) - DRM_DEBUG_KMS("Port deauth failed.\n"); + drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); } } @@ -1631,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) static int _intel_hdcp2_enable(struct intel_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n", - connector->base.name, connector->base.base.id, - hdcp->content_type); + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); ret = hdcp2_authenticate_and_encrypt(connector); if (ret) { - DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n", - hdcp->content_type, ret); + drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", + hdcp->content_type, ret); return ret; } - DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n", - connector->base.name, connector->base.base.id, - hdcp->content_type); + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. 
Type %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); hdcp->hdcp2_encrypted = true; return 0; @@ -1655,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector) static int _intel_hdcp2_disable(struct intel_connector *connector) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); int ret; - DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n", + connector->base.name, connector->base.base.id); ret = hdcp2_disable_encryption(connector); if (hdcp2_deauthenticate_port(connector) < 0) - DRM_DEBUG_KMS("Port deauth failed.\n"); + drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); connector->hdcp.hdcp2_encrypted = false; @@ -1673,10 +1725,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector) /* Implements the Link Integrity Check for HDCP2.2 */ static int intel_hdcp2_check_link(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = connector->encoder->port; + enum port port = intel_dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; @@ -1690,10 +1742,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { - DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", - I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, - port))); + if (drm_WARN_ON(&dev_priv->drm, + !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { + drm_err(&dev_priv->drm, + "HDCP2.2 link stopped the encryption, %x\n", + intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); @@ -1713,25 +1766,29 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) goto out; - DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n"); + drm_dbg_kms(&dev_priv->drm, + "HDCP2.2 Downstream topology change\n"); ret = hdcp2_authenticate_repeater_topology(connector); if (!ret) { hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; schedule_work(&hdcp->prop_work); goto out; } - DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n", - connector->base.name, connector->base.base.id, - ret); + drm_dbg_kms(&dev_priv->drm, + "[%s:%d] Repeater topology auth failed.(%d)\n", + connector->base.name, connector->base.base.id, + ret); } else { - DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n", - connector->base.name, connector->base.base.id); + drm_dbg_kms(&dev_priv->drm, + "[%s:%d] HDCP2.2 link failed, retrying auth\n", + connector->base.name, connector->base.base.id); } ret = _intel_hdcp2_disable(connector); if (ret) { - DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n", - connector->base.name, connector->base.base.id, ret); + drm_err(&dev_priv->drm, + "[%s:%d] Failed to disable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, ret); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); goto out; @@ -1739,9 +1796,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) ret = _intel_hdcp2_enable(connector); if (ret) { - 
DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n", - connector->base.name, connector->base.base.id, - ret); + drm_dbg_kms(&dev_priv->drm, + "[%s:%d] Failed to enable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, + ret); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); goto out; @@ -1772,7 +1830,7 @@ static int i915_hdcp_component_bind(struct device *i915_kdev, { struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); - DRM_DEBUG("I915 HDCP comp bind\n"); + drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); mutex_lock(&dev_priv->hdcp_comp_mutex); dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; dev_priv->hdcp_master->mei_dev = mei_kdev; @@ -1786,7 +1844,7 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev, { struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); - DRM_DEBUG("I915 HDCP comp unbind\n"); + drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n"); mutex_lock(&dev_priv->hdcp_comp_mutex); dev_priv->hdcp_master = NULL; mutex_unlock(&dev_priv->hdcp_comp_mutex); @@ -1830,7 +1888,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector, if (INTEL_GEN(dev_priv) < 12) data->fw_ddi = - intel_get_mei_fw_ddi_index(connector->encoder->port); + intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port); else /* * As per ME FW API expectation, for GEN 12+, fw_ddi is filled @@ -1854,7 +1912,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector, sizeof(struct hdcp2_streamid_type), GFP_KERNEL); if (!data->streams) { - DRM_ERROR("Out of Memory\n"); + drm_err(&dev_priv->drm, "Out of Memory\n"); return -ENOMEM; } @@ -1881,14 +1939,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) return; mutex_lock(&dev_priv->hdcp_comp_mutex); - WARN_ON(dev_priv->hdcp_comp_added); + drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added); dev_priv->hdcp_comp_added = true; mutex_unlock(&dev_priv->hdcp_comp_mutex); ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, I915_COMPONENT_HDCP); if (ret < 0) { - DRM_DEBUG_KMS("Failed at component add(%d)\n", ret); + drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n", + ret); mutex_lock(&dev_priv->hdcp_comp_mutex); dev_priv->hdcp_comp_added = false; mutex_unlock(&dev_priv->hdcp_comp_mutex); @@ -1899,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv) static void intel_hdcp2_init(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; ret = initialize_hdcp_port_data(connector, shim); if (ret) { - DRM_DEBUG_KMS("Mei hdcp data init failed\n"); + drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); return; } @@ -1954,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector, return -ENOENT; mutex_lock(&hdcp->mutex); - WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); + drm_WARN_ON(&dev_priv->drm, + hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); hdcp->content_type = content_type; if (INTEL_GEN(dev_priv) >= 12) { @@ -2014,6 +2075,46 @@ int intel_hdcp_disable(struct intel_connector *connector) return ret; } +void intel_hdcp_update_pipe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + struct intel_hdcp *hdcp = &connector->hdcp; + 
bool content_protection_type_changed = + (conn_state->hdcp_content_type != hdcp->content_type && + conn_state->content_protection != + DRM_MODE_CONTENT_PROTECTION_UNDESIRED); + + /* + * During the HDCP encryption session if Type change is requested, + * disable the HDCP and reenable it with new TYPE value. + */ + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED || + content_protection_type_changed) + intel_hdcp_disable(connector); + + /* + * Mark the hdcp state as DESIRED after the hdcp disable of type + * change procedure. + */ + if (content_protection_type_changed) { + mutex_lock(&hdcp->mutex); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + mutex_unlock(&hdcp->mutex); + } + + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED || + content_protection_type_changed) + intel_hdcp_enable(connector, + crtc_state->cpu_transcoder, + (u8)conn_state->hdcp_content_type); +} + void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->hdcp_comp_mutex); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index f3c3272e712a..7c12ad609b1f 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -8,12 +8,12 @@ #include <linux/types.h> -#include <drm/i915_drm.h> - struct drm_connector; struct drm_connector_state; struct drm_i915_private; struct intel_connector; +struct intel_crtc_state; +struct intel_encoder; struct intel_hdcp_shim; enum port; enum transcoder; @@ -26,6 +26,9 @@ int intel_hdcp_init(struct intel_connector *connector, int intel_hdcp_enable(struct intel_connector *connector, enum transcoder cpu_transcoder, u8 content_type); int intel_hdcp_disable(struct intel_connector *connector); +void intel_hdcp_update_pipe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); bool intel_hdcp2_capable(struct intel_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 93ac0f296852..821411b93dac 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -36,7 +36,6 @@ #include <drm/drm_edid.h> #include <drm/drm_hdcp.h> #include <drm/drm_scdc_helper.h> -#include <drm/i915_drm.h> #include <drm/intel_lpe_audio.h> #include "i915_debugfs.h" @@ -45,6 +44,7 @@ #include "intel_audio.h" #include "intel_connector.h" #include "intel_ddi.h" +#include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dpio_phy.h" @@ -72,17 +72,19 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) enabled_bits = HAS_DDI(dev_priv) ? 
DDI_BUF_CTL_ENABLE : SDVO_ENABLE; - WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, - "HDMI port enabled, expecting disabled\n"); + drm_WARN(dev, + intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits, + "HDMI port enabled, expecting disabled\n"); } static void assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { - WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) & - TRANS_DDI_FUNC_ENABLE, - "HDMI transcoder function enabled, expecting disabled\n"); + drm_WARN(&dev_priv->drm, + intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & + TRANS_DDI_FUNC_ENABLE, + "HDMI transcoder function enabled, expecting disabled\n"); } struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder) @@ -215,32 +217,33 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = I915_READ(VIDEO_DIP_CTL); + u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); int i; - WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= ~g4x_infoframe_enable(type); - I915_WRITE(VIDEO_DIP_CTL, val); + intel_de_write(dev_priv, VIDEO_DIP_CTL, val); for (i = 0; i < len; i += 4) { - I915_WRITE(VIDEO_DIP_DATA, *data); + intel_de_write(dev_priv, VIDEO_DIP_DATA, *data); data++; } /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(VIDEO_DIP_DATA, 0); + intel_de_write(dev_priv, VIDEO_DIP_DATA, 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(VIDEO_DIP_CTL, val); - POSTING_READ(VIDEO_DIP_CTL); + intel_de_write(dev_priv, VIDEO_DIP_CTL, val); + intel_de_posting_read(dev_priv, VIDEO_DIP_CTL); } static void g4x_read_infoframe(struct intel_encoder *encoder, @@ -252,22 +255,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder, u32 val, *data = frame; int i; - val = I915_READ(VIDEO_DIP_CTL); + val = intel_de_read(dev_priv, VIDEO_DIP_CTL); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); - I915_WRITE(VIDEO_DIP_CTL, val); + intel_de_write(dev_priv, VIDEO_DIP_CTL, val); for (i = 0; i < len; i += 4) - *data++ = I915_READ(VIDEO_DIP_DATA); + *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); } static u32 g4x_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = I915_READ(VIDEO_DIP_CTL); + u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -288,32 +291,34 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); int i; - WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= ~g4x_infoframe_enable(type); - 
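/*
 * Editorial sketch, not part of the patch: the common shape of the
 * g4x-family DIP (Data Island Packet) write sequence that the hunks
 * around here convert from I915_READ()/I915_WRITE() to the intel_de_*
 * accessors. "ctl" and "data" stand in for the per-platform control and
 * data registers (VIDEO_DIP_CTL/VIDEO_DIP_DATA, TVIDEO_DIP_*,
 * VLV_TVIDEO_DIP_*); everything else mirrors the driver code above.
 */
static void dip_write_sketch(struct drm_i915_private *dev_priv,
			     i915_reg_t ctl, i915_reg_t data,
			     unsigned int type, const u32 *payload, int len)
{
	u32 val = intel_de_read(dev_priv, ctl);
	int i;

	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf);	/* clear DIP data offset */
	val |= g4x_infoframe_index(type);	/* point at this type's buffer */
	val &= ~g4x_infoframe_enable(type);	/* quiesce while rewriting */
	intel_de_write(dev_priv, ctl, val);

	for (i = 0; i < len; i += 4)		/* payload, one dword at a time */
		intel_de_write(dev_priv, data, *payload++);
	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)	/* zero the rest for stable ECC */
		intel_de_write(dev_priv, data, 0);

	val |= g4x_infoframe_enable(type);	/* re-arm the infoframe */
	val &= ~VIDEO_DIP_FREQ_MASK;
	val |= VIDEO_DIP_FREQ_VSYNC;		/* emit once per vsync */
	intel_de_write(dev_priv, ctl, val);
	intel_de_posting_read(dev_priv, ctl);	/* flush the final write */
}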
I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), + *data); data++; } /* Write every possible data byte to force correct ECC calculation. */ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); } static void ibx_read_infoframe(struct intel_encoder *encoder, @@ -326,15 +331,15 @@ static void ibx_read_infoframe(struct intel_encoder *encoder, u32 val, *data = frame; int i; - val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); - I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); for (i = 0; i < len; i += 4) - *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); + *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, @@ -343,7 +348,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; i915_reg_t reg = TVIDEO_DIP_CTL(pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -365,10 +370,11 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); int i; - WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); @@ -378,22 +384,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, if (type != HDMI_INFOFRAME_TYPE_AVI) val &= ~g4x_infoframe_enable(type); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), + *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); } static void cpt_read_infoframe(struct intel_encoder *encoder, @@ -406,15 +413,15 @@ static void cpt_read_infoframe(struct intel_encoder *encoder, u32 val, *data = frame; int i; - val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); - I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); for (i = 0; i < len; i += 4) - *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); + *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); } static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, @@ -422,7 +429,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; - u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); + u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -441,32 +448,35 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); int i; - WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); + drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE), + "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); val &= ~g4x_infoframe_enable(type); - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); for (i = 0; i < len; i += 4) { - I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + intel_de_write(dev_priv, + VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < VIDEO_DIP_DATA_SIZE; i += 4) - I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); + intel_de_write(dev_priv, + VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); } static void vlv_read_infoframe(struct intel_encoder *encoder, @@ -479,15 +489,16 @@ static void vlv_read_infoframe(struct intel_encoder *encoder, u32 val, *data = frame; int i; - val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe)); + val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe)); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ val |= g4x_infoframe_index(type); - I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val); for (i = 0; i < len; i += 4) - *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe)); + *data++ = intel_de_read(dev_priv, + VLV_TVIDEO_DIP_DATA(crtc->pipe)); } static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, @@ -495,7 +506,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe; - u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); + u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) return 0; @@ -519,28 +530,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); int data_size; int i; - u32 val = I915_READ(ctl_reg); + u32 val = intel_de_read(dev_priv, ctl_reg); data_size = hsw_dip_data_size(dev_priv, type); - WARN_ON(len > data_size); + drm_WARN_ON(&dev_priv->drm, len > data_size); val &= ~hsw_infoframe_enable(type); - I915_WRITE(ctl_reg, val); + intel_de_write(dev_priv, ctl_reg, val); for (i = 0; i < len; i += 4) { - I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, - type, i >> 2), *data); + intel_de_write(dev_priv, + hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), + *data); data++; } /* Write every possible data byte to force correct ECC calculation. 
*/ for (; i < data_size; i += 4) - I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, - type, i >> 2), 0); + intel_de_write(dev_priv, + hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2), + 0); val |= hsw_infoframe_enable(type); - I915_WRITE(ctl_reg, val); - POSTING_READ(ctl_reg); + intel_de_write(dev_priv, ctl_reg, val); + intel_de_posting_read(dev_priv, ctl_reg); } static void hsw_read_infoframe(struct intel_encoder *encoder, @@ -553,18 +566,19 @@ static void hsw_read_infoframe(struct intel_encoder *encoder, u32 val, *data = frame; int i; - val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder)); + val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder)); for (i = 0; i < len; i += 4) - *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder, - type, i >> 2)); + *data++ = intel_de_read(dev_priv, + hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2)); } static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); + u32 val = intel_de_read(dev_priv, + HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); u32 mask; mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | @@ -655,12 +669,12 @@ static void intel_write_infoframe(struct intel_encoder *encoder, intel_hdmi_infoframe_enable(type)) == 0) return; - if (WARN_ON(frame->any.type != type)) + if (drm_WARN_ON(encoder->base.dev, frame->any.type != type)) return; /* see comment above for the reason for this offset */ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1); - if (WARN_ON(len < 0)) + if (drm_WARN_ON(encoder->base.dev, len < 0)) return; /* Insert the 'hole' (see big comment above) at position 3 */ @@ -734,8 +748,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, drm_hdmi_avi_infoframe_colorspace(frame, conn_state); /* nonsense combination */ - WARN_ON(crtc_state->limited_color_range && - crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { drm_hdmi_avi_infoframe_quant_range(frame, connector, @@ -753,7 +767,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, /* TODO: handle pixel repetition for YCBCR420 outputs */ ret = hdmi_avi_infoframe_check(frame); - if (WARN_ON(ret)) + if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; @@ -774,13 +788,13 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder, intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD); ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx"); - if (WARN_ON(ret)) + if (drm_WARN_ON(encoder->base.dev, ret)) return false; frame->sdi = HDMI_SPD_SDI_PC; ret = hdmi_spd_infoframe_check(frame); - if (WARN_ON(ret)) + if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; @@ -806,11 +820,11 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, conn_state->connector, &crtc_state->hw.adjusted_mode); - if (WARN_ON(ret)) + if (drm_WARN_ON(encoder->base.dev, ret)) return false; ret = hdmi_vendor_infoframe_check(frame); - if (WARN_ON(ret)) + if (drm_WARN_ON(encoder->base.dev, ret)) return false; return true; @@ -844,7 +858,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder, } ret = 
hdmi_drm_infoframe_check(frame); - if (WARN_ON(ret)) + if (drm_WARN_ON(&dev_priv->drm, ret)) return false; return true; @@ -859,7 +873,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -885,8 +899,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, } val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); return; } @@ -904,8 +918,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -982,7 +996,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, else return false; - I915_WRITE(reg, crtc_state->infoframes.gcp); + intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp); return true; } @@ -1007,7 +1021,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, else return; - crtc_state->infoframes.gcp = I915_READ(reg); + crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg); } static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, @@ -1042,7 +1056,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -1056,15 +1070,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { - WARN(val & VIDEO_DIP_ENABLE, - "DIP already enabled on port %c\n", - (val & VIDEO_DIP_PORT_MASK) >> 29); + drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, + "DIP already enabled on port %c\n", + (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; val |= port; } @@ -1077,8 +1091,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1100,7 +1114,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); assert_hdmi_port_disabled(intel_hdmi); @@ 
-1113,8 +1127,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); return; } @@ -1126,8 +1140,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1149,7 +1163,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); assert_hdmi_port_disabled(intel_hdmi); @@ -1163,15 +1177,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); return; } if (port != (val & VIDEO_DIP_PORT_MASK)) { - WARN(val & VIDEO_DIP_ENABLE, - "DIP already enabled on port %c\n", - (val & VIDEO_DIP_PORT_MASK) >> 29); + drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE, + "DIP already enabled on port %c\n", + (val & VIDEO_DIP_PORT_MASK) >> 29); val &= ~VIDEO_DIP_PORT_MASK; val |= port; } @@ -1184,8 +1198,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1205,7 +1219,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); assert_hdmi_transcoder_func_disabled(dev_priv, crtc_state->cpu_transcoder); @@ -1216,16 +1230,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_DRM_GLK); if (!enable) { - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); return; } if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state)) val |= VIDEO_DIP_ENABLE_GCP_HSW; - I915_WRITE(reg, val); - POSTING_READ(reg); + intel_de_write(dev_priv, reg, val); + intel_de_posting_read(dev_priv, reg); intel_write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, @@ -1260,10 +1274,9 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, unsigned int offset, void *buffer, size_t size) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_hdmi *hdmi = &intel_dig_port->hdmi; - struct drm_i915_private *dev_priv = - 
intel_dig_port->base.base.dev->dev_private; - struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; u8 start = offset & 0xff; @@ -1290,10 +1303,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, unsigned int offset, void *buffer, size_t size) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_hdmi *hdmi = &intel_dig_port->hdmi; - struct drm_i915_private *dev_priv = - intel_dig_port->base.base.dev->dev_private; - struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; u8 *write_buf; @@ -1325,10 +1337,9 @@ static int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, u8 *an) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_hdmi *hdmi = &intel_dig_port->hdmi; - struct drm_i915_private *dev_priv = - intel_dig_port->base.base.dev->dev_private; - struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, + struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; @@ -1447,7 +1458,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); struct drm_crtc *crtc = connector->base.state->crtc; struct intel_crtc *intel_crtc = container_of(crtc, struct intel_crtc, base); @@ -1455,7 +1466,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) int ret; for (;;) { - scanline = I915_READ(PIPEDSL(intel_crtc->pipe)); + scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe)); if (scanline > 100 && scanline < 200) break; usleep_range(25, 50); @@ -1507,8 +1518,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, static bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) { - struct drm_i915_private *dev_priv = - intel_dig_port->base.base.dev->dev_private; + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_connector *connector = intel_dig_port->hdmi.attached_connector; enum port port = intel_dig_port->base.port; @@ -1523,14 +1533,14 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) if (ret) return false; - I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); + intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & + (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", - I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, - port))); + intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); return false; } return true; @@ -1767,8 +1777,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, else hdmi_val |= SDVO_PIPE_SEL(crtc->pipe); - I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); - 
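/*
 * Editorial note: each POSTING_READ() below is a read-back of the
 * register just written, used to flush the posted MMIO write before
 * execution continues; the conversion preserves every write/read-back
 * pair intact, swapping only the accessors to intel_de_write() and
 * intel_de_posting_read().
 */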
POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, @@ -1802,7 +1812,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI); - tmp = I915_READ(intel_hdmi->hdmi_reg); + tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); if (tmp & SDVO_HSYNC_ACTIVE_HIGH) flags |= DRM_MODE_FLAG_PHSYNC; @@ -1863,7 +1873,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, { struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - WARN_ON(!pipe_config->has_hdmi_sink); + drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink); DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", pipe_name(crtc->pipe)); intel_audio_codec_enable(encoder, pipe_config, conn_state); @@ -1878,14 +1888,14 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; - temp = I915_READ(intel_hdmi->hdmi_reg); + temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) temp |= HDMI_AUDIO_ENABLE; - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); if (pipe_config->has_audio) intel_enable_hdmi_audio(encoder, pipe_config, conn_state); @@ -1900,7 +1910,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); u32 temp; - temp = I915_READ(intel_hdmi->hdmi_reg); + temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) @@ -1910,10 +1920,10 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, * HW workaround, need to write this twice for issue * that may result in first write getting masked. */ - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround, need to toggle enable bit off and on @@ -1924,17 +1934,18 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, */ if (pipe_config->pipe_bpp > 24 && pipe_config->pixel_multiplier > 1) { - I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, + temp & ~SDVO_ENABLE); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. 
*/ - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); } if (pipe_config->has_audio) @@ -1952,7 +1963,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, enum pipe pipe = crtc->pipe; u32 temp; - temp = I915_READ(intel_hdmi->hdmi_reg); + temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp |= SDVO_ENABLE; if (pipe_config->has_audio) @@ -1969,27 +1980,25 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, */ if (pipe_config->pipe_bpp > 24) { - I915_WRITE(TRANS_CHICKEN1(pipe), - I915_READ(TRANS_CHICKEN1(pipe)) | - TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), + intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= SDVO_COLOR_FORMAT_8bpc; } - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); if (pipe_config->pipe_bpp > 24) { temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= HDMI_COLOR_FORMAT_12bpc; - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); - I915_WRITE(TRANS_CHICKEN1(pipe), - I915_READ(TRANS_CHICKEN1(pipe)) & - ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), + intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); } if (pipe_config->has_audio) @@ -2014,11 +2023,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; - temp = I915_READ(intel_hdmi->hdmi_reg); + temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE); - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); /* * HW workaround for IBX, we need to move the port @@ -2039,14 +2048,14 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, * HW workaround, need to write this twice for issue * that may result in first write getting masked. 
*/ - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); temp &= ~SDVO_ENABLE; - I915_WRITE(intel_hdmi->hdmi_reg, temp); - POSTING_READ(intel_hdmi->hdmi_reg); + intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); + intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); intel_wait_for_vblank_if_active(dev_priv, PIPE_A); intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); @@ -2090,9 +2099,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder, static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[encoder->port]; - int max_tmds_clock; + int max_tmds_clock, vbt_max_tmds_clock; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) max_tmds_clock = 594000; @@ -2103,15 +2110,23 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) else max_tmds_clock = 165000; - if (info->max_tmds_clock) - max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); + vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder); + if (vbt_max_tmds_clock) + max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock); return max_tmds_clock; } +static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, + const struct drm_connector_state *conn_state) +{ + return hdmi->has_hdmi_sink && + READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; +} + static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_downstream_limits, - bool force_dvi) + bool has_hdmi_sink) { struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base; int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder); @@ -2127,7 +2142,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, if (info->max_tmds_clock) max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock); - else if (!hdmi->has_hdmi_sink || force_dvi) + else if (!has_hdmi_sink) max_tmds_clock = min(max_tmds_clock, 165000); } @@ -2137,13 +2152,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, static enum drm_mode_status hdmi_port_clock_valid(struct intel_hdmi *hdmi, int clock, bool respect_downstream_limits, - bool force_dvi) + bool has_hdmi_sink) { struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); if (clock < 25000) return MODE_CLOCK_LOW; - if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi)) + if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, + has_hdmi_sink)) return MODE_CLOCK_HIGH; /* BXT DPLL can't generate 223-240 MHz */ @@ -2165,16 +2181,13 @@ intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_device *dev = intel_hdmi_to_dev(hdmi); struct drm_i915_private *dev_priv = to_i915(dev); enum drm_mode_status status; - int clock; + int clock = mode->clock; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - bool force_dvi = - READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; + bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state); if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - clock = mode->clock; - if ((mode->flags & 
DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) clock *= 2; @@ -2188,18 +2201,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector, clock /= 2; /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi); + status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink); - if (hdmi->has_hdmi_sink && !force_dvi) { + if (has_hdmi_sink) { /* if we can't do 8bpc we may still be able to do 12bpc */ if (status != MODE_OK && !HAS_GMCH(dev_priv)) status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, - true, force_dvi); + true, has_hdmi_sink); /* if we can't do 8,12bpc we may still be able to do 10bpc */ if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11) status = hdmi_port_clock_valid(hdmi, clock * 5 / 4, - true, force_dvi); + true, has_hdmi_sink); } if (status != MODE_OK) return status; @@ -2263,14 +2276,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, } } - /* Display WA #1139: glk */ - if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && - adjusted_mode->htotal > 5460) - return false; - - /* Display Wa_1405510057:icl */ + /* Display Wa_1405510057:icl,ehl */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && - bpc == 10 && INTEL_GEN(dev_priv) >= 11 && + bpc == 10 && IS_GEN(dev_priv, 11) && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) return false; @@ -2315,7 +2323,7 @@ static int intel_hdmi_port_clock(int clock, int bpc) static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, - int clock, bool force_dvi) + int clock) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); int bpc; @@ -2324,7 +2332,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, if (hdmi_deep_color_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, intel_hdmi_port_clock(clock, bpc), - true, force_dvi) == MODE_OK) + true, crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } @@ -2332,8 +2340,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, } static int intel_hdmi_compute_clock(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state, - bool force_dvi) + struct intel_crtc_state *crtc_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = @@ -2347,8 +2354,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) clock /= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, - clock, force_dvi); + bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); @@ -2364,7 +2370,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, bpc, crtc_state->pipe_bpp); if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, - false, force_dvi) != MODE_OK) { + false, crtc_state->has_hdmi_sink) != MODE_OK) { DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n", crtc_state->port_clock); return -EINVAL; @@ -2412,14 +2418,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); - bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - pipe_config->has_hdmi_sink 
= !force_dvi && intel_hdmi->has_hdmi_sink; + pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi, + conn_state); if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; @@ -2448,7 +2454,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, intel_conn_state->force_audio == HDMI_AUDIO_ON; } - ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi); + ret = intel_hdmi_compute_clock(encoder, pipe_config); if (ret) return ret; @@ -2808,7 +2814,7 @@ intel_hdmi_connector_register(struct drm_connector *connector) if (ret) return ret; - i915_debugfs_connector_add(connector); + intel_connector_debugfs_add(connector); intel_hdmi_create_i2c_symlink(connector); @@ -3002,7 +3008,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) else if (intel_phy_is_tc(dev_priv, phy)) return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port); - WARN(1, "Unknown port:%c\n", port_name(port)); + drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port)); return GMBUS_PIN_2_BXT; } @@ -3052,17 +3058,17 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv, return ddc_pin; } -static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, - enum port port) +static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) { - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[port]; + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; u8 ddc_pin; - if (info->alternate_ddc_pin) { + ddc_pin = intel_bios_alternate_ddc_pin(encoder); + if (ddc_pin) { DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", - info->alternate_ddc_pin, port_name(port)); - return info->alternate_ddc_pin; + ddc_pin, port_name(port)); + return ddc_pin; } if (HAS_PCH_MCC(dev_priv)) @@ -3139,16 +3145,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n", intel_encoder->base.base.id, intel_encoder->base.name); - if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A)) + if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A)) return; - if (WARN(intel_dig_port->max_lanes < 4, - "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", - intel_dig_port->max_lanes, intel_encoder->base.base.id, - intel_encoder->base.name)) + if (drm_WARN(dev, intel_dig_port->max_lanes < 4, + "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return; - intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder); ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); drm_connector_init_with_ddc(dev, connector, @@ -3165,6 +3171,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, connector->ycbcr_420_allowed = true; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; if (HAS_DDI(dev_priv)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; @@ -3188,8 +3195,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, * generated on the port when a cable is not attached. 
*/ if (IS_G45(dev_priv)) { - u32 temp = I915_READ(PEG_BAND_GAP_DATA); - I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); + u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); + intel_de_write(dev_priv, PEG_BAND_GAP_DATA, + (temp & ~0xf) | 0xd); } cec_fill_conn_info_from_drm(&conn_info, connector); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index d3659d0b408b..8ff1f76a63df 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -9,8 +9,6 @@ #include <linux/hdmi.h> #include <linux/types.h> -#include <drm/i915_drm.h> - #include "i915_reg.h" struct drm_connector; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 99d3a3c7989e..a091442efba4 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -23,8 +23,6 @@ #include <linux/kernel.h> -#include <drm/i915_drm.h> - #include "i915_drv.h" #include "intel_display_types.h" #include "intel_hotplug.h" @@ -89,29 +87,16 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port) { - switch (port) { - case PORT_A: - return HPD_PORT_A; - case PORT_B: - return HPD_PORT_B; - case PORT_C: - return HPD_PORT_C; - case PORT_D: - return HPD_PORT_D; - case PORT_E: - return HPD_PORT_E; - case PORT_F: - if (IS_CNL_WITH_PORT_F(dev_priv)) - return HPD_PORT_E; - return HPD_PORT_F; - case PORT_G: - return HPD_PORT_G; - case PORT_H: - return HPD_PORT_H; - case PORT_I: - return HPD_PORT_I; + enum phy phy = intel_port_to_phy(dev_priv, port); + + switch (phy) { + case PHY_F: + return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F; + case PHY_A ... PHY_E: + case PHY_G ... PHY_I: + return HPD_PORT_A + phy - PHY_A; default: - MISSING_CASE(port); + MISSING_CASE(phy); return HPD_NONE; } } @@ -120,6 +105,20 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) #define HPD_RETRY_DELAY 1000 +static enum hpd_pin +intel_connector_hpd_pin(struct intel_connector *connector) +{ + struct intel_encoder *encoder = intel_attached_encoder(connector); + + /* + * MST connectors get their encoder attached dynamically + * so need to make sure we have an encoder here. But since + * MST encoders have their hpd_pin set to HPD_NONE we don't + * have to special case them beyond that. + */ + return encoder ? 
encoder->hpd_pin : HPD_NONE; +} + /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin * @dev_priv: private driver data pointer @@ -171,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, hpd->stats[pin].count += increment; if (hpd->stats[pin].count > threshold) { hpd->stats[pin].state = HPD_MARK_DISABLED; - DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); + drm_dbg_kms(&dev_priv->drm, + "HPD interrupt storm detected on PIN %d\n", pin); storm = true; } else { - DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, + drm_dbg_kms(&dev_priv->drm, + "Received HPD interrupt on PIN %d - cnt: %d\n", + pin, hpd->stats[pin].count); } @@ -185,37 +187,32 @@ static void intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - struct intel_connector *intel_connector; - struct intel_encoder *intel_encoder; - struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - enum hpd_pin pin; + struct intel_connector *connector; bool hpd_disabled = false; lockdep_assert_held(&dev_priv->irq_lock); drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->polled != DRM_CONNECTOR_POLL_HPD) - continue; + for_each_intel_connector_iter(connector, &conn_iter) { + enum hpd_pin pin; - intel_connector = to_intel_connector(connector); - intel_encoder = intel_connector->encoder; - if (!intel_encoder) + if (connector->base.polled != DRM_CONNECTOR_POLL_HPD) continue; - pin = intel_encoder->hpd_pin; + pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; - DRM_INFO("HPD interrupt storm detected on connector %s: " + drm_info(&dev_priv->drm, + "HPD interrupt storm detected on connector %s: " "switching from hotplug detection to polling\n", - connector->name); + connector->base.name); dev_priv->hotplug.stats[pin].state = HPD_DISABLED; - connector->polled = DRM_CONNECTOR_POLL_CONNECT - | DRM_CONNECTOR_POLL_DISCONNECT; + connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; } drm_connector_list_iter_end(&conn_iter); @@ -234,40 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; + struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; intel_wakeref_t wakeref; enum hpd_pin pin; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); - for_each_hpd_pin(pin) { - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED) + drm_connector_list_iter_begin(dev, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + pin = intel_connector_hpd_pin(connector); + if (pin == HPD_NONE || + dev_priv->hotplug.stats[pin].state != HPD_DISABLED) continue; - dev_priv->hotplug.stats[pin].state = HPD_ENABLED; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_connector *intel_connector = to_intel_connector(connector); - - /* Don't check MST ports, they don't have pins */ - if (!intel_connector->mst_port && - intel_connector->encoder->hpd_pin == pin) { - if (connector->polled != intel_connector->polled) - DRM_DEBUG_DRIVER("Reenabling HPD on 
connector %s\n", - connector->name); - connector->polled = intel_connector->polled; - if (!connector->polled) - connector->polled = DRM_CONNECTOR_POLL_HPD; - } - } - drm_connector_list_iter_end(&conn_iter); + if (connector->base.polled != connector->polled) + drm_dbg(&dev_priv->drm, + "Reenabling HPD on connector %s\n", + connector->base.name); + connector->base.polled = connector->polled; } + drm_connector_list_iter_end(&conn_iter); + + for_each_hpd_pin(pin) { + if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) + dev_priv->hotplug.stats[pin].state = HPD_ENABLED; + } + if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) dev_priv->display.hpd_irq_setup(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); @@ -281,7 +276,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder, struct drm_device *dev = connector->base.dev; enum drm_connector_status old_status; - WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); + drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); old_status = connector->base.status; connector->base.status = @@ -290,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder, if (old_status == connector->base.status) return INTEL_HOTPLUG_UNCHANGED; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", - connector->base.base.id, - connector->base.name, - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->base.status)); + drm_dbg_kms(&to_i915(dev)->drm, + "[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.base.id, + connector->base.name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->base.status)); return INTEL_HOTPLUG_CHANGED; } @@ -361,16 +357,14 @@ static void i915_hotplug_work_func(struct work_struct *work) container_of(work, struct drm_i915_private, hotplug.hotplug_work.work); struct drm_device *dev = &dev_priv->drm; - struct intel_connector *intel_connector; - struct intel_encoder *intel_encoder; - struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; u32 changed = 0, retry = 0; u32 hpd_event_bits; u32 hpd_retry_bits; mutex_lock(&dev->mode_config.mutex); - DRM_DEBUG_KMS("running encoder hotplug functions\n"); + drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); spin_lock_irq(&dev_priv->irq_lock); @@ -385,21 +379,25 @@ static void i915_hotplug_work_func(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { + for_each_intel_connector_iter(connector, &conn_iter) { + enum hpd_pin pin; u32 hpd_bit; - intel_connector = to_intel_connector(connector); - if (!intel_connector->encoder) + pin = intel_connector_hpd_pin(connector); + if (pin == HPD_NONE) continue; - intel_encoder = intel_connector->encoder; - hpd_bit = BIT(intel_encoder->hpd_pin); + + hpd_bit = BIT(pin); if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { - DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", - connector->name, intel_encoder->hpd_pin); + struct intel_encoder *encoder = + intel_attached_encoder(connector); + + drm_dbg_kms(&dev_priv->drm, + "Connector %s (pin %i) received hotplug event.\n", + connector->base.name, pin); - switch (intel_encoder->hotplug(intel_encoder, - intel_connector, - hpd_event_bits & hpd_bit)) { + switch (encoder->hotplug(encoder, connector, + hpd_event_bits & hpd_bit)) { case 
INTEL_HOTPLUG_UNCHANGED: break; case INTEL_HOTPLUG_CHANGED: @@ -481,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, long_hpd = long_mask & BIT(pin); - DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n", - encoder->base.base.id, encoder->base.name, - long_hpd ? "long" : "short"); + drm_dbg(&dev_priv->drm, + "digital hpd on [ENCODER:%d:%s] - %s\n", + encoder->base.base.id, encoder->base.name, + long_hpd ? "long" : "short"); queue_dig = true; if (long_hpd) { @@ -509,8 +508,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ - WARN_ONCE(!HAS_GMCH(dev_priv), - "Received HPD interrupt on pin %d although disabled\n", pin); + drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), + "Received HPD interrupt on pin %d although disabled\n", + pin); continue; } @@ -601,8 +601,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work) container_of(work, struct drm_i915_private, hotplug.poll_init_work); struct drm_device *dev = &dev_priv->drm; - struct drm_connector *connector; struct drm_connector_list_iter conn_iter; + struct intel_connector *connector; bool enabled; mutex_lock(&dev->mode_config.mutex); @@ -610,23 +610,18 @@ static void i915_hpd_poll_init_work(struct work_struct *work) enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_connector *intel_connector = - to_intel_connector(connector); - connector->polled = intel_connector->polled; - - /* MST has a dynamic intel_connector->encoder and it's reprobing - * is all handled by the MST helpers. */ - if (intel_connector->mst_port) + for_each_intel_connector_iter(connector, &conn_iter) { + enum hpd_pin pin; + + pin = intel_connector_hpd_pin(connector); + if (pin == HPD_NONE) continue; - if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) && - intel_connector->encoder->hpd_pin > HPD_NONE) { - connector->polled = enabled ? 
- DRM_CONNECTOR_POLL_CONNECT | - DRM_CONNECTOR_POLL_DISCONNECT : - DRM_CONNECTOR_POLL_HPD; - } + connector->base.polled = connector->polled; + + if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD) + connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; } drm_connector_list_iter_end(&conn_iter); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 087b5f57b321..1e6b4fda2900 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#include <drm/i915_drm.h> - struct drm_i915_private; struct intel_connector; struct intel_encoder; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 0b67f7887cd0..ad5cc13037ae 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -71,6 +71,7 @@ #include <drm/intel_lpe_audio.h> #include "i915_drv.h" +#include "intel_de.h" #include "intel_lpe_audio.h" #define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL) @@ -126,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) kfree(pdata); if (IS_ERR(platdev)) { - DRM_ERROR("Failed to allocate LPE audio platform device\n"); + drm_err(&dev_priv->drm, + "Failed to allocate LPE audio platform device\n"); return platdev; } @@ -166,7 +168,7 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { int irq = dev_priv->lpe_audio.irq; - WARN_ON(!intel_irqs_enabled(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip, handle_simple_irq, @@ -189,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv) }; if (!pci_dev_present(atom_hdaudio_ids)) { - DRM_INFO("HDaudio controller not detected, using LPE audio instead\n"); + drm_info(&dev_priv->drm, + "HDaudio controller not detected, using LPE audio instead\n"); lpe_present = true; } } @@ -202,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) dev_priv->lpe_audio.irq = irq_alloc_desc(0); if (dev_priv->lpe_audio.irq < 0) { - DRM_ERROR("Failed to allocate IRQ desc: %d\n", + drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", dev_priv->lpe_audio.irq); ret = dev_priv->lpe_audio.irq; goto err; } - DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq); ret = lpe_audio_irq_init(dev_priv); if (ret) { - DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n", + drm_err(&dev_priv->drm, + "Failed to initialize irqchip for lpe audio: %d\n", ret); goto err_free_irq; } @@ -222,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) if (IS_ERR(dev_priv->lpe_audio.platdev)) { ret = PTR_ERR(dev_priv->lpe_audio.platdev); - DRM_ERROR("Failed to create lpe audio platform device: %d\n", + drm_err(&dev_priv->drm, + "Failed to create lpe audio platform device: %d\n", ret); goto err_free_irq; } @@ -230,7 +235,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) /* enable chicken bit; at least this is required for Dell Wyse 3040 * with DP outputs (but only sometimes by some reason!) 
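* The intel_de_write() below therefore sets VLV_CHICKEN_BIT_DBG_ENABLE
* unconditionally; as far as anyone can tell this is harmless on machines
* that do not need it, which is why no DMI quirk gates the write.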
*/ - I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE); + intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG, + VLV_CHICKEN_BIT_DBG_ENABLE); return 0; err_free_irq: @@ -257,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) ret = generic_handle_irq(dev_priv->lpe_audio.irq); if (ret) - DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n", - ret); + drm_err_ratelimited(&dev_priv->drm, + "error handling LPE audio irq: %d\n", ret); } /** @@ -276,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv) if (lpe_audio_detect(dev_priv)) { ret = lpe_audio_setup(dev_priv); if (ret < 0) - DRM_ERROR("failed to setup LPE Audio bridge\n"); + drm_err(&dev_priv->drm, + "failed to setup LPE Audio bridge\n"); } return ret; } @@ -334,7 +341,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); - audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port)); + audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port)); if (eld != NULL) { memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES); @@ -343,8 +350,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, ppdata->dp_output = dp_output; /* Unmute the amp for both DP and HDMI */ - I915_WRITE(VLV_AUD_PORT_EN_DBG(port), - audio_enable & ~VLV_AMP_MUTE); + intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port), + audio_enable & ~VLV_AMP_MUTE); } else { memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES); ppdata->pipe = -1; @@ -352,8 +359,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, ppdata->dp_output = false; /* Mute the amp for both DP and HDMI */ - I915_WRITE(VLV_AUD_PORT_EN_DBG(port), - audio_enable | VLV_AMP_MUTE); + intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port), + audio_enable | VLV_AMP_MUTE); } if (pdata->notify_audio_lpe) diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 10696bb99dcf..9a067effcfa0 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -37,7 +37,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_atomic.h" @@ -85,7 +84,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(lvds_reg); + val = intel_de_read(dev_priv, lvds_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) @@ -125,7 +124,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS); - tmp = I915_READ(lvds_encoder->reg); + tmp = intel_de_read(dev_priv, lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) flags |= DRM_MODE_FLAG_NHSYNC; else @@ -143,7 +142,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, /* gen2/3 store dither state in pfit control, needs to match */ if (INTEL_GEN(dev_priv) < 4) { - tmp = I915_READ(PFIT_CONTROL); + tmp = intel_de_read(dev_priv, PFIT_CONTROL); pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; } @@ -156,18 +155,18 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, { u32 val; - pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET; + pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET; - val = I915_READ(PP_ON_DELAYS(0)); + val = intel_de_read(dev_priv, PP_ON_DELAYS(0)); pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val); pps->t1_t2 = 
REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val); pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val); - val = I915_READ(PP_OFF_DELAYS(0)); + val = intel_de_read(dev_priv, PP_OFF_DELAYS(0)); pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val); pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val); - val = I915_READ(PP_DIVISOR(0)); + val = intel_de_read(dev_priv, PP_DIVISOR(0)); pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val); val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val); /* @@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) <= 4 && pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { - DRM_DEBUG_KMS("Panel power timings uninitialized, " - "setting defaults\n"); + drm_dbg_kms(&dev_priv->drm, + "Panel power timings uninitialized, " + "setting defaults\n"); /* Set T2 to 40ms and T5 to 200ms in 100 usec units */ pps->t1_t2 = 40 * 10; pps->t5 = 200 * 10; @@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, pps->tx = 200 * 10; } - DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d " - "divider %d port %d powerdown_on_reset %d\n", - pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx, - pps->divider, pps->port, pps->powerdown_on_reset); + drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d " + "divider %d port %d powerdown_on_reset %d\n", + pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx, + pps->divider, pps->port, pps->powerdown_on_reset); } static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, @@ -203,25 +203,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(PP_CONTROL(0)); - WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); + val = intel_de_read(dev_priv, PP_CONTROL(0)); + drm_WARN_ON(&dev_priv->drm, + (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS); if (pps->powerdown_on_reset) val |= PANEL_POWER_RESET; - I915_WRITE(PP_CONTROL(0), val); + intel_de_write(dev_priv, PP_CONTROL(0), val); - I915_WRITE(PP_ON_DELAYS(0), - REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | - REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | - REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); + intel_de_write(dev_priv, PP_ON_DELAYS(0), + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); - I915_WRITE(PP_OFF_DELAYS(0), - REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | - REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); + intel_de_write(dev_priv, PP_OFF_DELAYS(0), + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); - I915_WRITE(PP_DIVISOR(0), - REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | - REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, - DIV_ROUND_UP(pps->t4, 1000) + 1)); + intel_de_write(dev_priv, PP_DIVISOR(0), + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_encoder *encoder, @@ -299,7 +295,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; - I915_WRITE(lvds_encoder->reg, temp); + intel_de_write(dev_priv, lvds_encoder->reg, temp); } /* @@ -313,13 +309,16 @@ static void intel_enable_lvds(struct intel_encoder *encoder, struct 
intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); + intel_de_write(dev_priv, lvds_encoder->reg, + intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN); - I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); - POSTING_READ(lvds_encoder->reg); + intel_de_write(dev_priv, PP_CONTROL(0), + intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON); + intel_de_posting_read(dev_priv, lvds_encoder->reg); if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000)) - DRM_ERROR("timed out waiting for panel to power on\n"); + drm_err(&dev_priv->drm, + "timed out waiting for panel to power on\n"); intel_panel_enable_backlight(pipe_config, conn_state); } @@ -331,12 +330,15 @@ static void intel_disable_lvds(struct intel_encoder *encoder, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON); + intel_de_write(dev_priv, PP_CONTROL(0), + intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON); if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); + drm_err(&dev_priv->drm, + "timed out waiting for panel to power off\n"); - I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); - POSTING_READ(lvds_encoder->reg); + intel_de_write(dev_priv, lvds_encoder->reg, + intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN); + intel_de_posting_read(dev_priv, lvds_encoder->reg); } static void gmch_disable_lvds(struct intel_encoder *encoder, @@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, /* Should never happen!! */ if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) { - DRM_ERROR("Can't support LVDS on pipe A\n"); + drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } @@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, lvds_bpp = 6*3; if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) { - DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n", - pipe_config->pipe_bpp, lvds_bpp); + drm_dbg_kms(&dev_priv->drm, + "forcing display bpp (was %d) to LVDS (%d)\n", + pipe_config->pipe_bpp, lvds_bpp); pipe_config->pipe_bpp = lvds_bpp; } @@ -791,7 +794,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) * we need to check "the value to be set" in VBT when LVDS * register is uninitialized. */ - val = I915_READ(lvds_encoder->reg); + val = intel_de_read(dev_priv, lvds_encoder->reg); if (HAS_PCH_CPT(dev_priv)) val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); else @@ -827,13 +830,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - WARN(!dev_priv->vbt.int_lvds_support, - "Useless DMI match. Internal LVDS support disabled by VBT\n"); + drm_WARN(dev, !dev_priv->vbt.int_lvds_support, + "Useless DMI match. 
Internal LVDS support disabled by VBT\n"); return; } if (!dev_priv->vbt.int_lvds_support) { - DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "Internal LVDS support disabled by VBT\n"); return; } @@ -842,7 +846,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) else lvds_reg = LVDS; - lvds = I915_READ(lvds_reg); + lvds = intel_de_read(dev_priv, lvds_reg); if (HAS_PCH_SPLIT(dev_priv)) { if ((lvds & LVDS_DETECTED) == 0) @@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) pin = GMBUS_PIN_PANEL; if (!intel_bios_is_lvds_present(dev_priv, &pin)) { if ((lvds & LVDS_PORT_EN) == 0) { - DRM_DEBUG_KMS("LVDS is not present in VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "LVDS is not present in VBT\n"); return; } - DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n"); + drm_dbg_kms(&dev_priv->drm, + "LVDS is not present in VBT, but enabled anyway\n"); } lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL); @@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) */ fixed_mode = intel_encoder_current_mode(intel_encoder); if (fixed_mode) { - DRM_DEBUG_KMS("using current (BIOS) mode: "); + drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: "); drm_mode_debug_printmodeline(fixed_mode); fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; } @@ -985,8 +991,8 @@ out: intel_panel_setup_backlight(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); - DRM_DEBUG_KMS("detected %s-link lvds configuration\n", - lvds_encoder->is_dual_link ? "dual" : "single"); + drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n", + lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; @@ -995,7 +1001,7 @@ out: failed: mutex_unlock(&dev->mode_config.mutex); - DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); + drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n"); drm_connector_cleanup(connector); drm_encoder_cleanup(encoder); kfree(lvds_encoder); diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index e59b4992ba1b..cc6b00959586 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -30,11 +30,10 @@ #include <linux/firmware.h> #include <acpi/video.h> -#include <drm/i915_drm.h> - #include "display/intel_panel.h" #include "i915_drv.h" +#include "intel_acpi.h" #include "intel_display_types.h" #include "intel_opregion.h" @@ -242,29 +241,6 @@ struct opregion_asle_ext { #define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19) #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21) -/* - * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices - * Attached to the Display Adapter). 
- */ -#define ACPI_DISPLAY_INDEX_SHIFT 0 -#define ACPI_DISPLAY_INDEX_MASK (0xf << 0) -#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4 -#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4) -#define ACPI_DISPLAY_TYPE_SHIFT 8 -#define ACPI_DISPLAY_TYPE_MASK (0xf << 8) -#define ACPI_DISPLAY_TYPE_OTHER (0 << 8) -#define ACPI_DISPLAY_TYPE_VGA (1 << 8) -#define ACPI_DISPLAY_TYPE_TV (2 << 8) -#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8) -#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8) -#define ACPI_VENDOR_SPECIFIC_SHIFT 12 -#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12) -#define ACPI_BIOS_CAN_DETECT (1 << 16) -#define ACPI_DEPENDS_ON_VGA (1 << 17) -#define ACPI_PIPE_ID_SHIFT 18 -#define ACPI_PIPE_ID_MASK (7 << 18) -#define ACPI_DEVICE_ID_SCHEME (1 << 31) - #define MAX_DSLP 1500 static int swsci(struct drm_i915_private *dev_priv, @@ -311,7 +287,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* The spec tells us to do this, but we are the only user... */ scic = swsci->scic; if (scic & SWSCI_SCIC_INDICATOR) { - DRM_DEBUG_DRIVER("SWSCI request already in progress\n"); + drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n"); return -EBUSY; } @@ -335,7 +311,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* Poll for the result. */ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) if (wait_for(C, dslp)) { - DRM_DEBUG_DRIVER("SWSCI request timed out\n"); + drm_dbg(&dev_priv->drm, "SWSCI request timed out\n"); return -ETIMEDOUT; } @@ -344,7 +320,7 @@ static int swsci(struct drm_i915_private *dev_priv, /* Note: scic == 0 is an error! */ if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) { - DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic); + drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic); return -EIO; } @@ -403,8 +379,9 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL; break; default: - WARN_ONCE(1, "unsupported intel_encoder type %d\n", - intel_encoder->type); + drm_WARN_ONCE(&dev_priv->drm, 1, + "unsupported intel_encoder type %d\n", + intel_encoder->type); return -EINVAL; } @@ -448,10 +425,11 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) struct opregion_asle *asle = dev_priv->opregion.asle; struct drm_device *dev = &dev_priv->drm; - DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); + drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp); if (acpi_video_get_backlight_type() == acpi_backlight_native) { - DRM_DEBUG_KMS("opregion backlight request ignored\n"); + drm_dbg_kms(&dev_priv->drm, + "opregion backlight request ignored\n"); return 0; } @@ -468,7 +446,8 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) * Update backlight on all connectors that support backlight (usually * only one). */ - DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); + drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n", + bclp); drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) intel_panel_set_backlight_acpi(connector->base.state, bclp, 255); @@ -485,13 +464,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
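The reading is not used by i915 at the moment, hence the handler below only logs it and returns ASLC_ALS_ILLUM_FAILED to firmware. Readings of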
1-0xfffe are valid */ - DRM_DEBUG_DRIVER("Illum is not supported\n"); + drm_dbg(&dev_priv->drm, "Illum is not supported\n"); return ASLC_ALS_ILLUM_FAILED; } static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) { - DRM_DEBUG_DRIVER("PWM freq is not supported\n"); + drm_dbg(&dev_priv->drm, "PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } @@ -499,30 +478,36 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ - DRM_DEBUG_DRIVER("Pfit is not supported\n"); + drm_dbg(&dev_priv->drm, "Pfit is not supported\n"); return ASLC_PFIT_FAILED; } static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) { - DRM_DEBUG_DRIVER("SROT is not supported\n"); + drm_dbg(&dev_priv->drm, "SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) { if (!iuer) - DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (nothing)\n"); if (iuer & ASLE_IUER_ROTATION_LOCK_BTN) - DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (rotation lock)\n"); if (iuer & ASLE_IUER_VOLUME_DOWN_BTN) - DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (volume down)\n"); if (iuer & ASLE_IUER_VOLUME_UP_BTN) - DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (volume up)\n"); if (iuer & ASLE_IUER_WINDOWS_BTN) - DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (windows)\n"); if (iuer & ASLE_IUER_POWER_BTN) - DRM_DEBUG_DRIVER("Button array event is not supported (power)\n"); + drm_dbg(&dev_priv->drm, + "Button array event is not supported (power)\n"); return ASLC_BUTTON_ARRAY_FAILED; } @@ -530,9 +515,11 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) - DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); + drm_dbg(&dev_priv->drm, + "Convertible is not supported (clamshell)\n"); else - DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n"); + drm_dbg(&dev_priv->drm, + "Convertible is not supported (slate)\n"); return ASLC_CONVERTIBLE_FAILED; } @@ -540,16 +527,17 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) - DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); + drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n"); else - DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n"); + drm_dbg(&dev_priv->drm, + "Docking is not supported (undocked)\n"); return ASLC_DOCKING_FAILED; } static u32 asle_isct_state(struct drm_i915_private *dev_priv) { - DRM_DEBUG_DRIVER("ISCT is not supported\n"); + drm_dbg(&dev_priv->drm, "ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; } @@ -569,8 +557,8 @@ static void asle_work(struct work_struct *work) aslc_req = asle->aslc; if (!(aslc_req & ASLC_REQ_MSK)) { - DRM_DEBUG_DRIVER("No request on 
ASLC interrupt 0x%08x\n", - aslc_req); + drm_dbg(&dev_priv->drm, + "No request on ASLC interrupt 0x%08x\n", aslc_req); return; } @@ -662,54 +650,12 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static u32 acpi_display_type(struct intel_connector *connector) -{ - u32 display_type; - - switch (connector->base.connector_type) { - case DRM_MODE_CONNECTOR_VGA: - case DRM_MODE_CONNECTOR_DVIA: - display_type = ACPI_DISPLAY_TYPE_VGA; - break; - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_9PinDIN: - case DRM_MODE_CONNECTOR_TV: - display_type = ACPI_DISPLAY_TYPE_TV; - break; - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_DVID: - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_HDMIB: - display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL; - break; - case DRM_MODE_CONNECTOR_LVDS: - case DRM_MODE_CONNECTOR_eDP: - case DRM_MODE_CONNECTOR_DSI: - display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL; - break; - case DRM_MODE_CONNECTOR_Unknown: - case DRM_MODE_CONNECTOR_VIRTUAL: - display_type = ACPI_DISPLAY_TYPE_OTHER; - break; - default: - MISSING_CASE(connector->base.connector_type); - display_type = ACPI_DISPLAY_TYPE_OTHER; - break; - } - - return display_type; -} - static void intel_didl_outputs(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; struct intel_connector *connector; struct drm_connector_list_iter conn_iter; int i = 0, max_outputs; - int display_index[16] = {}; /* * In theory, did2, the extended didl, gets added at opregion version @@ -721,29 +667,22 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv) max_outputs = ARRAY_SIZE(opregion->acpi->didl) + ARRAY_SIZE(opregion->acpi->did2); + intel_acpi_device_id_update(dev_priv); + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { - u32 device_id, type; - - device_id = acpi_display_type(connector); - - /* Use display type specific display index. 
*/ - type = (device_id & ACPI_DISPLAY_TYPE_MASK) - >> ACPI_DISPLAY_TYPE_SHIFT; - device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT; - - connector->acpi_device_id = device_id; if (i < max_outputs) - set_did(opregion, i, device_id); + set_did(opregion, i, connector->acpi_device_id); i++; } drm_connector_list_iter_end(&conn_iter); - DRM_DEBUG_KMS("%d outputs detected\n", i); + drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i); if (i > max_outputs) - DRM_ERROR("More than %d outputs in connector list\n", - max_outputs); + drm_err(&dev_priv->drm, + "More than %d outputs in connector list\n", + max_outputs); /* If fewer than max outputs, the list must be null terminated */ if (i < max_outputs) @@ -823,7 +762,9 @@ static void swsci_setup(struct drm_i915_private *dev_priv) if (requested_callbacks) { u32 req = opregion->swsci_sbcb_sub_functions; if ((req & tmp) != req) - DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp); + drm_dbg(&dev_priv->drm, + "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", + req, tmp); /* XXX: for now, trust the requested callbacks */ /* opregion->swsci_sbcb_sub_functions &= tmp; */ } else { @@ -831,9 +772,10 @@ static void swsci_setup(struct drm_i915_private *dev_priv) } } - DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n", - opregion->swsci_gbda_sub_functions, - opregion->swsci_sbcb_sub_functions); + drm_dbg(&dev_priv->drm, + "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n", + opregion->swsci_gbda_sub_functions, + opregion->swsci_sbcb_sub_functions); } static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) @@ -867,15 +809,17 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev); if (ret) { - DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n", - name, ret); + drm_err(&dev_priv->drm, + "Requesting VBT firmware \"%s\" failed (%d)\n", + name, ret); return ret; } if (intel_bios_is_valid_vbt(fw->data, fw->size)) { opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL); if (opregion->vbt_firmware) { - DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name); + drm_dbg_kms(&dev_priv->drm, + "Found valid VBT firmware \"%s\"\n", name); opregion->vbt = opregion->vbt_firmware; opregion->vbt_size = fw->size; ret = 0; @@ -883,7 +827,8 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) ret = -ENOMEM; } } else { - DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name); + drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n", + name); ret = -EINVAL; } @@ -910,9 +855,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); pci_read_config_dword(pdev, ASLS, &asls); - DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); + drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n", + asls); if (asls == 0) { - DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); + drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n"); return -ENOTSUPP; } @@ -925,21 +871,21 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) memcpy(buf, base, sizeof(buf)); if (memcmp(buf, OPREGION_SIGNATURE, 16)) { - DRM_DEBUG_DRIVER("opregion signature mismatch\n"); + drm_dbg(&dev_priv->drm, "opregion signature mismatch\n"); err = -EINVAL; goto err_out; } opregion->header = base; opregion->lid_state = base + ACPI_CLID; - DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", 
- opregion->header->over.major, - opregion->header->over.minor, - opregion->header->over.revision); + drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n", + opregion->header->over.major, + opregion->header->over.minor, + opregion->header->over.revision); mboxes = opregion->header->mboxes; if (mboxes & MBOX_ACPI) { - DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); + drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; /* * Indicate we handle monitor hotplug events ourselves so we do @@ -951,20 +897,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) } if (mboxes & MBOX_SWSCI) { - DRM_DEBUG_DRIVER("SWSCI supported\n"); + drm_dbg(&dev_priv->drm, "SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; swsci_setup(dev_priv); } if (mboxes & MBOX_ASLE) { - DRM_DEBUG_DRIVER("ASLE supported\n"); + drm_dbg(&dev_priv->drm, "ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; opregion->asle->ardy = ASLE_ARDY_NOT_READY; } if (mboxes & MBOX_ASLE_EXT) - DRM_DEBUG_DRIVER("ASLE extension supported\n"); + drm_dbg(&dev_priv->drm, "ASLE extension supported\n"); if (intel_load_vbt_firmware(dev_priv) == 0) goto out; @@ -984,7 +930,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) */ if (opregion->header->over.major > 2 || opregion->header->over.minor >= 1) { - WARN_ON(rvda < OPREGION_SIZE); + drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE); rvda += asls; } @@ -995,12 +941,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) vbt = opregion->rvda; vbt_size = opregion->asle->rvds; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { - DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n"); + drm_dbg_kms(&dev_priv->drm, + "Found valid VBT in ACPI OpRegion (RVDA)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; goto out; } else { - DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); + drm_dbg_kms(&dev_priv->drm, + "Invalid VBT in ACPI OpRegion (RVDA)\n"); memunmap(opregion->rvda); opregion->rvda = NULL; } @@ -1018,11 +966,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; vbt_size -= OPREGION_VBT_OFFSET; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { - DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); + drm_dbg_kms(&dev_priv->drm, + "Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); opregion->vbt = vbt; opregion->vbt_size = vbt_size; } else { - DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n"); + drm_dbg_kms(&dev_priv->drm, + "Invalid VBT in ACPI OpRegion (Mailbox #4)\n"); } out: @@ -1058,20 +1008,22 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) { - DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", - ret); + drm_dbg_kms(&dev_priv->drm, + "Failed to get panel details from OpRegion (%d)\n", + ret); return ret; } ret = (panel_details >> 8) & 0xff; if (ret > 0x10) { - DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret); + drm_dbg_kms(&dev_priv->drm, + "Invalid OpRegion panel type 0x%x\n", ret); return -EINVAL; } /* fall back to VBT panel type? 
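A value of 0x0 presumably means the OpRegion leaves the choice to us, hence the -ENODEV below, which callers are expected to treat as a cue to use the VBT panel type instead.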
*/ if (ret == 0x0) { - DRM_DEBUG_KMS("No panel type in OpRegion\n"); + drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n"); return -ENODEV; } @@ -1081,7 +1033,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) * via a quirk list :( */ if (!dmi_check_system(intel_use_opregion_panel_type)) { - DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1); + drm_dbg_kms(&dev_priv->drm, + "Ignoring OpRegion panel type (%d)\n", ret - 1); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index e40c3a0e2cd7..481187223101 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -27,7 +27,6 @@ */ #include <drm/drm_fourcc.h> -#include <drm/i915_drm.h> #include "gem/i915_gem_pm.h" #include "gt/intel_ring.h" @@ -204,9 +203,10 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv, /* WA_OVERLAY_CLKGATE:alm */ if (enable) - I915_WRITE(DSPCLK_GATE_D, 0); + intel_de_write(dev_priv, DSPCLK_GATE_D, 0); else - I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); + intel_de_write(dev_priv, DSPCLK_GATE_D, + OVRUNIT_CLOCK_GATE_DISABLE); /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ pci_bus_read_config_byte(pdev->bus, @@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) struct i915_request *rq; u32 *cs; - WARN_ON(overlay->active); + drm_WARN_ON(&dev_priv->drm, overlay->active); rq = alloc_request(overlay, NULL); if (IS_ERR(rq)) @@ -315,15 +315,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay, u32 flip_addr = overlay->flip_addr; u32 tmp, *cs; - WARN_ON(!overlay->active); + drm_WARN_ON(&dev_priv->drm, !overlay->active); if (load_polyphase_filter) flip_addr |= OFC_UPDATE; /* check for underruns */ - tmp = I915_READ(DOVSTA); + tmp = intel_de_read(dev_priv, DOVSTA); if (tmp & (1 << 17)) - DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); + drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp); rq = alloc_request(overlay, NULL); if (IS_ERR(rq)) @@ -456,7 +456,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (!overlay->old_vma) return 0; - if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) { + if (!(intel_de_read(dev_priv, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) { intel_overlay_release_old_vid_tail(overlay); return 0; } @@ -759,7 +759,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct i915_vma *vma; int ret, tmp_width; - WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + drm_WARN_ON(&dev_priv->drm, + !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -857,7 +858,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = overlay->i915; int ret; - WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); + drm_WARN_ON(&dev_priv->drm, + !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -891,7 +893,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - u32 pfit_control = I915_READ(PFIT_CONTROL); + u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic 
as in the xorg driver, but more in @@ -899,12 +901,12 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay) */ if (INTEL_GEN(dev_priv) >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ - ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; + ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { if (pfit_control & VERT_AUTO_SCALE) - ratio = I915_READ(PFIT_AUTO_RATIOS); + ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS); else - ratio = I915_READ(PFIT_PGM_RATIOS); + ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS); ratio >>= PFIT_VERT_SCALE_SHIFT; } @@ -1066,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, overlay = dev_priv->overlay; if (!overlay) { - DRM_DEBUG("userspace bug: no overlay\n"); + drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; } @@ -1090,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); if (i915_gem_object_is_tiled(new_bo)) { - DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n"); + drm_dbg_kms(&dev_priv->drm, + "buffer used for overlay image can not be tiled\n"); ret = -EINVAL; goto out_unlock; } @@ -1225,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, overlay = dev_priv->overlay; if (!overlay) { - DRM_DEBUG("userspace bug: no overlay\n"); + drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n"); return -ENODEV; } @@ -1239,12 +1242,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, attrs->saturation = overlay->saturation; if (!IS_GEN(dev_priv, 2)) { - attrs->gamma0 = I915_READ(OGAMC0); - attrs->gamma1 = I915_READ(OGAMC1); - attrs->gamma2 = I915_READ(OGAMC2); - attrs->gamma3 = I915_READ(OGAMC3); - attrs->gamma4 = I915_READ(OGAMC4); - attrs->gamma5 = I915_READ(OGAMC5); + attrs->gamma0 = intel_de_read(dev_priv, OGAMC0); + attrs->gamma1 = intel_de_read(dev_priv, OGAMC1); + attrs->gamma2 = intel_de_read(dev_priv, OGAMC2); + attrs->gamma3 = intel_de_read(dev_priv, OGAMC3); + attrs->gamma4 = intel_de_read(dev_priv, OGAMC4); + attrs->gamma5 = intel_de_read(dev_priv, OGAMC5); } } else { if (attrs->brightness < -128 || attrs->brightness > 127) @@ -1274,12 +1277,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, if (ret) goto out_unlock; - I915_WRITE(OGAMC0, attrs->gamma0); - I915_WRITE(OGAMC1, attrs->gamma1); - I915_WRITE(OGAMC2, attrs->gamma2); - I915_WRITE(OGAMC3, attrs->gamma3); - I915_WRITE(OGAMC4, attrs->gamma4); - I915_WRITE(OGAMC5, attrs->gamma5); + intel_de_write(dev_priv, OGAMC0, attrs->gamma0); + intel_de_write(dev_priv, OGAMC1, attrs->gamma1); + intel_de_write(dev_priv, OGAMC2, attrs->gamma2); + intel_de_write(dev_priv, OGAMC3, attrs->gamma3); + intel_de_write(dev_priv, OGAMC4, attrs->gamma4); + intel_de_write(dev_priv, OGAMC5, attrs->gamma5); } } overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0; @@ -1369,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) update_reg_attrs(overlay, overlay->regs); dev_priv->overlay = overlay; - DRM_INFO("Initialized overlay support.\n"); + drm_info(&dev_priv->drm, "Initialized overlay support.\n"); return; out_free: @@ -1389,7 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv) * Furthermore modesetting teardown happens beforehand so the * hardware should be off already. 
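* The drm_WARN_ON(overlay->active) just below is thus only a sanity
* check that this teardown ordering actually held.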
*/ - WARN_ON(overlay->active); + drm_WARN_ON(&dev_priv->drm, overlay->active); i915_gem_object_put(overlay->reg_bo); i915_active_fini(&overlay->last_flip); @@ -1419,8 +1422,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) if (error == NULL) return NULL; - error->dovsta = I915_READ(DOVSTA); - error->isr = I915_READ(GEN2_ISR); + error->dovsta = intel_de_read(dev_priv, DOVSTA); + error->isr = intel_de_read(dev_priv, GEN2_ISR); error->base = overlay->flip_addr; memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs)); diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 7b3ec6eb3382..276f43870802 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -96,8 +96,9 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector, if (!downclock_mode) return NULL; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ", - connector->base.base.id, connector->base.name); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] using downclock mode from EDID: ", + connector->base.base.id, connector->base.name); drm_mode_debug_printmodeline(downclock_mode); return downclock_mode; @@ -122,8 +123,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector) if (!fixed_mode) return NULL; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ", - connector->base.base.id, connector->base.name); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] using preferred mode from EDID: ", + connector->base.base.id, connector->base.name); drm_mode_debug_printmodeline(fixed_mode); return fixed_mode; @@ -138,8 +140,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector) fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ", - connector->base.base.id, connector->base.name); + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] using first mode from EDID: ", + connector->base.base.id, connector->base.name); drm_mode_debug_printmodeline(fixed_mode); return fixed_mode; @@ -162,8 +165,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector) fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ", - connector->base.base.id, connector->base.name); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ", + connector->base.base.id, connector->base.name); drm_mode_debug_printmodeline(fixed_mode); info->width_mm = fixed_mode->width_mm; @@ -423,15 +426,15 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, } break; default: - WARN(1, "bad panel fit mode: %d\n", fitting_mode); + drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n", + fitting_mode); return; } /* 965+ wants fuzzy fitting */ /* FIXME: handle multiple panels by failing gracefully */ if (INTEL_GEN(dev_priv) >= 4) - pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | - PFIT_FILTER_FUZZY); + pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY; out: if ((pfit_control & PFIT_ENABLE) == 0) { @@ -520,7 +523,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - WARN_ON(panel->backlight.max == 0); + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); if (i915_modparams.invert_brightness < 0) return val; @@ -537,14 +540,14 @@ static u32 lpt_get_backlight(struct intel_connector *connector) { struct 
drm_i915_private *dev_priv = to_i915(connector->base.dev); - return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; + return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 pch_get_backlight(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 i9xx_get_backlight(struct intel_connector *connector) @@ -553,7 +556,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector) struct intel_panel *panel = &connector->panel; u32 val; - val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; + val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (INTEL_GEN(dev_priv) < 4) val >>= 1; @@ -569,10 +572,10 @@ static u32 i9xx_get_backlight(struct intel_connector *connector) static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe) { - if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) return 0; - return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; + return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; } static u32 vlv_get_backlight(struct intel_connector *connector) @@ -588,7 +591,8 @@ static u32 bxt_get_backlight(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); + return intel_de_read(dev_priv, + BXT_BLC_PWM_DUTY(panel->backlight.controller)); } static u32 pwm_get_backlight(struct intel_connector *connector) @@ -605,8 +609,8 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(BLC_PWM_PCH_CTL2, val | level); + u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level); } static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -615,8 +619,8 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; - tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(BLC_PWM_CPU_CTL, tmp | level); + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level); } static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -626,7 +630,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 struct intel_panel *panel = &connector->panel; u32 tmp, mask; - WARN_ON(panel->backlight.max == 0); + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); if (panel->backlight.combination_mode) { u8 lbpc; @@ -643,8 +647,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV; } - tmp = I915_READ(BLC_PWM_CTL) & ~mask; - I915_WRITE(BLC_PWM_CTL, tmp | level); + tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask; + intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level); } static void 
vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -654,8 +658,8 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; u32 tmp; - tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; - I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level); + tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; + intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level); } static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -664,7 +668,8 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); + intel_de_write(dev_priv, + BXT_BLC_PWM_DUTY(panel->backlight.controller), level); } static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) @@ -709,7 +714,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state mutex_lock(&dev_priv->backlight_lock); - WARN_ON(panel->backlight.max == 0); + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); hw_level = clamp_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; @@ -742,14 +747,16 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta * This needs rework if we need to add support for CPU PWM on PCH split * platforms. */ - tmp = I915_READ(BLC_PWM_CPU_CTL2); + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); if (tmp & BLM_PWM_ENABLE) { - DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n"); - I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); + drm_dbg_kms(&dev_priv->drm, + "cpu backlight was enabled, disabling\n"); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, + tmp & ~BLM_PWM_ENABLE); } - tmp = I915_READ(BLC_PWM_PCH_CTL1); - I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); } static void pch_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -760,11 +767,11 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta intel_panel_actually_set_backlight(old_conn_state, 0); - tmp = I915_READ(BLC_PWM_CPU_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); + tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); - tmp = I915_READ(BLC_PWM_PCH_CTL1); - I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); } static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -779,8 +786,8 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st intel_panel_actually_set_backlight(old_conn_state, 0); - tmp = I915_READ(BLC_PWM_CTL2); - I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); + tmp = intel_de_read(dev_priv, BLC_PWM_CTL2); + intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); } static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -792,8 +799,9 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta intel_panel_actually_set_backlight(old_conn_state, 0); - tmp = 
I915_READ(VLV_BLC_PWM_CTL2(pipe)); - I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); + tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), + tmp & ~BLM_PWM_ENABLE); } static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -805,14 +813,15 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta intel_panel_actually_set_backlight(old_conn_state, 0); - tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + tmp = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + tmp & ~BXT_BLC_PWM_ENABLE); if (panel->backlight.controller == 1) { - val = I915_READ(UTIL_PIN_CTL); + val = intel_de_read(dev_priv, UTIL_PIN_CTL); val &= ~UTIL_PIN_ENABLE; - I915_WRITE(UTIL_PIN_CTL, val); + intel_de_write(dev_priv, UTIL_PIN_CTL, val); } } @@ -825,9 +834,10 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta intel_panel_actually_set_backlight(old_conn_state, 0); - tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + tmp = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + tmp & ~BXT_BLC_PWM_ENABLE); } static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state) @@ -857,7 +867,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st * another client is not activated. */ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { - DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); + drm_dbg(&dev_priv->drm, + "Skipping backlight disable on vga switch\n"); return; } @@ -879,31 +890,31 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; u32 pch_ctl1, pch_ctl2, schicken; - pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - DRM_DEBUG_KMS("pch backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); } if (HAS_PCH_LPT(dev_priv)) { - schicken = I915_READ(SOUTH_CHICKEN2); + schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2); if (panel->backlight.alternate_pwm_increment) schicken |= LPT_PWM_GRANULARITY; else schicken &= ~LPT_PWM_GRANULARITY; - I915_WRITE(SOUTH_CHICKEN2, schicken); + intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken); } else { - schicken = I915_READ(SOUTH_CHICKEN1); + schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1); if (panel->backlight.alternate_pwm_increment) schicken |= SPT_PWM_GRANULARITY; else schicken &= ~SPT_PWM_GRANULARITY; - I915_WRITE(SOUTH_CHICKEN1, schicken); + intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken); } pch_ctl2 = panel->backlight.max << 16; - I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); pch_ctl1 = 0; if (panel->backlight.active_low_pwm) @@ -913,9 +924,10 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, if (HAS_PCH_LPT(dev_priv)) pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; - 
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); - POSTING_READ(BLC_PWM_PCH_CTL1); - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_PWM_ENABLE); /* This won't stick until the above enable. */ intel_panel_actually_set_backlight(conn_state, panel->backlight.level); @@ -930,41 +942,42 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 cpu_ctl2, pch_ctl1, pch_ctl2; - cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { - DRM_DEBUG_KMS("cpu backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n"); cpu_ctl2 &= ~BLM_PWM_ENABLE; - I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); } - pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - DRM_DEBUG_KMS("pch backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n"); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); } if (cpu_transcoder == TRANSCODER_EDP) cpu_ctl2 = BLM_TRANSCODER_EDP; else cpu_ctl2 = BLM_PIPE(cpu_transcoder); - I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); - POSTING_READ(BLC_PWM_CPU_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2); + intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); /* This won't stick until the above enable. */ intel_panel_actually_set_backlight(conn_state, panel->backlight.level); pch_ctl2 = panel->backlight.max << 16; - I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); pch_ctl1 = 0; if (panel->backlight.active_low_pwm) pch_ctl1 |= BLM_PCH_POLARITY; - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); - POSTING_READ(BLC_PWM_PCH_CTL1); - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1); + intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_PWM_ENABLE); } static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -975,10 +988,10 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; u32 ctl, freq; - ctl = I915_READ(BLC_PWM_CTL); + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - DRM_DEBUG_KMS("backlight already enabled\n"); - I915_WRITE(BLC_PWM_CTL, 0); + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); + intel_de_write(dev_priv, BLC_PWM_CTL, 0); } freq = panel->backlight.max; @@ -991,8 +1004,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; - I915_WRITE(BLC_PWM_CTL, ctl); - POSTING_READ(BLC_PWM_CTL); + intel_de_write(dev_priv, BLC_PWM_CTL, ctl); + intel_de_posting_read(dev_priv, BLC_PWM_CTL); /* XXX: combine this into above write? 
*/ intel_panel_actually_set_backlight(conn_state, panel->backlight.level); @@ -1003,7 +1016,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, * that has backlight. */ if (IS_GEN(dev_priv, 2)) - I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); + intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); } static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1015,11 +1028,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe; u32 ctl, ctl2, freq; - ctl2 = I915_READ(BLC_PWM_CTL2); + ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { - DRM_DEBUG_KMS("backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; - I915_WRITE(BLC_PWM_CTL2, ctl2); + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); } freq = panel->backlight.max; @@ -1027,16 +1040,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, freq /= 0xff; ctl = freq << 16; - I915_WRITE(BLC_PWM_CTL, ctl); + intel_de_write(dev_priv, BLC_PWM_CTL, ctl); ctl2 = BLM_PIPE(pipe); if (panel->backlight.combination_mode) ctl2 |= BLM_COMBINATION_MODE; if (panel->backlight.active_low_pwm) ctl2 |= BLM_POLARITY_I965; - I915_WRITE(BLC_PWM_CTL2, ctl2); - POSTING_READ(BLC_PWM_CTL2); - I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2); + intel_de_posting_read(dev_priv, BLC_PWM_CTL2); + intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); intel_panel_actually_set_backlight(conn_state, panel->backlight.level); } @@ -1050,15 +1063,15 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; u32 ctl, ctl2; - ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); + ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { - DRM_DEBUG_KMS("backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; - I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); } ctl = panel->backlight.max << 16; - I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl); /* XXX: combine this into above write? */ intel_panel_actually_set_backlight(conn_state, panel->backlight.level); @@ -1066,9 +1079,10 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, ctl2 = 0; if (panel->backlight.active_low_pwm) ctl2 |= BLM_POLARITY_I965; - I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); - POSTING_READ(VLV_BLC_PWM_CTL2(pipe)); - I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2); + intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); + intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), + ctl2 | BLM_PWM_ENABLE); } static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1082,30 +1096,34 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, /* Controller 1 uses the utility pin. 
*/ if (panel->backlight.controller == 1) { - val = I915_READ(UTIL_PIN_CTL); + val = intel_de_read(dev_priv, UTIL_PIN_CTL); if (val & UTIL_PIN_ENABLE) { - DRM_DEBUG_KMS("util pin already enabled\n"); + drm_dbg_kms(&dev_priv->drm, + "util pin already enabled\n"); val &= ~UTIL_PIN_ENABLE; - I915_WRITE(UTIL_PIN_CTL, val); + intel_de_write(dev_priv, UTIL_PIN_CTL, val); } val = 0; if (panel->backlight.util_pin_active_low) val |= UTIL_PIN_POLARITY; - I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) | - UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); + intel_de_write(dev_priv, UTIL_PIN_CTL, + val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); } - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - DRM_DEBUG_KMS("backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); + intel_de_write(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); } - I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller), - panel->backlight.max); + intel_de_write(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller), + panel->backlight.max); intel_panel_actually_set_backlight(conn_state, panel->backlight.level); @@ -1113,10 +1131,12 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, if (panel->backlight.active_low_pwm) pwm_ctl |= BXT_BLC_PWM_POLARITY; - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); - POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl | BXT_BLC_PWM_ENABLE); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + intel_de_posting_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl | BXT_BLC_PWM_ENABLE); } static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1127,16 +1147,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_panel *panel = &connector->panel; u32 pwm_ctl; - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - DRM_DEBUG_KMS("backlight already enabled\n"); + drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n"); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl); + intel_de_write(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); } - I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller), - panel->backlight.max); + intel_de_write(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller), + panel->backlight.max); intel_panel_actually_set_backlight(conn_state, panel->backlight.level); @@ -1144,10 +1167,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, if (panel->backlight.active_low_pwm) pwm_ctl |= BXT_BLC_PWM_POLARITY; - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); - POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); - I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), - pwm_ctl | BXT_BLC_PWM_ENABLE); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl); + 
intel_de_posting_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); + intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller), + pwm_ctl | BXT_BLC_PWM_ENABLE); } static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, @@ -1194,7 +1219,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, if (!panel->backlight.present) return; - DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); + drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe)); mutex_lock(&dev_priv->backlight_lock); @@ -1219,7 +1244,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) mutex_unlock(&dev_priv->backlight_lock); - DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); + drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val); return val; } @@ -1237,7 +1262,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta mutex_lock(&dev_priv->backlight_lock); - WARN_ON(panel->backlight.max == 0); + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); hw_level = scale_user_to_hw(connector, user_level, user_max); panel->backlight.level = hw_level; @@ -1380,7 +1405,8 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz); + return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), + pwm_freq_hz); } /* @@ -1441,7 +1467,8 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128); + return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq), + pwm_freq_hz * 128); } /* @@ -1458,7 +1485,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_PINEVIEW(dev_priv)) - clock = KHz(dev_priv->rawclk_freq); + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else clock = KHz(dev_priv->cdclk.hw.cdclk); @@ -1476,7 +1503,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_G4X(dev_priv)) - clock = KHz(dev_priv->rawclk_freq); + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); else clock = KHz(dev_priv->cdclk.hw.cdclk); @@ -1493,14 +1520,14 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int mul, clock; - if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { + if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { if (IS_CHERRYVIEW(dev_priv)) clock = KHz(19200); else clock = MHz(25); mul = 16; } else { - clock = KHz(dev_priv->rawclk_freq); + clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq); mul = 128; } @@ -1515,22 +1542,26 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) u32 pwm; if (!panel->backlight.hz_to_pwm) { - DRM_DEBUG_KMS("backlight frequency conversion not supported\n"); + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion not supported\n"); return 0; } if (pwm_freq_hz) { - DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", - pwm_freq_hz); + drm_dbg_kms(&dev_priv->drm, + "VBT defined backlight frequency %u Hz\n", + pwm_freq_hz); } else { pwm_freq_hz = 200; - DRM_DEBUG_KMS("default backlight frequency %u Hz\n", - pwm_freq_hz); + drm_dbg_kms(&dev_priv->drm, + "default backlight frequency %u Hz\n", + pwm_freq_hz); } pwm = 
panel->backlight.hz_to_pwm(connector, pwm_freq_hz); if (!pwm) { - DRM_DEBUG_KMS("backlight frequency conversion failed\n"); + drm_dbg_kms(&dev_priv->drm, + "backlight frequency conversion failed\n"); return 0; } @@ -1546,7 +1577,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) struct intel_panel *panel = &connector->panel; int min; - WARN_ON(panel->backlight.max == 0); + drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0); /* * XXX: If the vbt value is 255, it makes min equal to max, which leads @@ -1557,8 +1588,9 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector) */ min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64); if (min != dev_priv->vbt.backlight.min_brightness) { - DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n", - dev_priv->vbt.backlight.min_brightness, min); + drm_dbg_kms(&dev_priv->drm, + "clamping VBT min backlight %d/255 to %d/255\n", + dev_priv->vbt.backlight.min_brightness, min); } /* vbt value is a coefficient in range [0..255] */ @@ -1573,18 +1605,18 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus bool alt, cpu_mode; if (HAS_PCH_LPT(dev_priv)) - alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; + alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY; else - alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; + alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY; panel->backlight.alternate_pwm_increment = alt; - pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; - pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); + pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); panel->backlight.max = pch_ctl2 >> 16; - cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); if (!panel->backlight.max) panel->backlight.max = get_backlight_max_vbt(connector); @@ -1608,13 +1640,16 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus panel->backlight.max); if (cpu_mode) { - DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n"); + drm_dbg_kms(&dev_priv->drm, + "CPU backlight register was enabled, switching to PCH override\n"); /* Write converted CPU PWM value to PCH override register */ lpt_set_backlight(connector->base.state, panel->backlight.level); - I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); + intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, + pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); - I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE); + intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, + cpu_ctl2 & ~BLM_PWM_ENABLE); } return 0; @@ -1626,10 +1661,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus struct intel_panel *panel = &connector->panel; u32 cpu_ctl2, pch_ctl1, pch_ctl2, val; - pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1); + pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1); panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY; - pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2); + pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2); panel->backlight.max = pch_ctl2 >> 16; if (!panel->backlight.max) @@ -1645,7 +1680,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus panel->backlight.level = clamp(val, panel->backlight.min, panel->backlight.max); - cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); + cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); panel->backlight.enabled 
= (cpu_ctl2 & BLM_PWM_ENABLE) && (pch_ctl1 & BLM_PCH_PWM_ENABLE); @@ -1658,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu struct intel_panel *panel = &connector->panel; u32 ctl, val; - ctl = I915_READ(BLC_PWM_CTL); + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; @@ -1697,11 +1732,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu struct intel_panel *panel = &connector->panel; u32 ctl, ctl2, val; - ctl2 = I915_READ(BLC_PWM_CTL2); + ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2); panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE; panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; - ctl = I915_READ(BLC_PWM_CTL); + ctl = intel_de_read(dev_priv, BLC_PWM_CTL); panel->backlight.max = ctl >> 16; if (!panel->backlight.max) @@ -1731,13 +1766,13 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe struct intel_panel *panel = &connector->panel; u32 ctl, ctl2, val; - if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B)) return -ENODEV; - ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); + ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; - ctl = I915_READ(VLV_BLC_PWM_CTL(pipe)); + ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)); panel->backlight.max = ctl >> 16; if (!panel->backlight.max) @@ -1767,18 +1802,20 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) panel->backlight.controller = dev_priv->vbt.backlight.controller; - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); /* Controller 1 uses the utility pin. 
*/ if (panel->backlight.controller == 1) { - val = I915_READ(UTIL_PIN_CTL); + val = intel_de_read(dev_priv, UTIL_PIN_CTL); panel->backlight.util_pin_active_low = val & UTIL_PIN_POLARITY; } panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; panel->backlight.max = - I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller)); + intel_de_read(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller)); if (!panel->backlight.max) panel->backlight.max = get_backlight_max_vbt(connector); @@ -1812,11 +1849,13 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) */ panel->backlight.controller = 0; - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); + pwm_ctl = intel_de_read(dev_priv, + BXT_BLC_PWM_CTL(panel->backlight.controller)); panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; panel->backlight.max = - I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller)); + intel_de_read(dev_priv, + BXT_BLC_PWM_FREQ(panel->backlight.controller)); if (!panel->backlight.max) panel->backlight.max = get_backlight_max_vbt(connector); @@ -1843,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_panel *panel = &connector->panel; const char *desc; + u32 level, ns; int retval; /* Get the right PWM chip for DSI backlight according to VBT */ @@ -1855,7 +1895,8 @@ static int pwm_setup_backlight(struct intel_connector *connector, } if (IS_ERR(panel->backlight.pwm)) { - DRM_ERROR("Failed to get the %s PWM chip\n", desc); + drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n", + desc); panel->backlight.pwm = NULL; return -ENODEV; } @@ -1866,23 +1907,27 @@ static int pwm_setup_backlight(struct intel_connector *connector, */ pwm_apply_args(panel->backlight.pwm); - retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS, - CRC_PMIC_PWM_PERIOD_NS); + panel->backlight.min = 0; /* 0% */ + panel->backlight.max = 100; /* 100% */ + level = intel_panel_compute_brightness(connector, 100); + ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100); + + retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS); if (retval < 0) { - DRM_ERROR("Failed to configure the pwm chip\n"); + drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n"); pwm_put(panel->backlight.pwm); panel->backlight.pwm = NULL; return retval; } - panel->backlight.min = 0; /* 0% */ - panel->backlight.max = 100; /* 100% */ - panel->backlight.level = DIV_ROUND_UP( - pwm_get_duty_cycle(panel->backlight.pwm) * 100, - CRC_PMIC_PWM_PERIOD_NS); + level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100, + CRC_PMIC_PWM_PERIOD_NS); + panel->backlight.level = + intel_panel_compute_brightness(connector, level); panel->backlight.enabled = panel->backlight.level != 0; - DRM_INFO("Using %s PWM for LCD backlight control\n", desc); + drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n", + desc); return 0; } @@ -1913,15 +1958,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) if (!dev_priv->vbt.backlight.present) { if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) { - DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n"); + drm_dbg_kms(&dev_priv->drm, + "no backlight present per VBT, but present per quirk\n"); } else { - DRM_DEBUG_KMS("no backlight present per VBT\n"); + drm_dbg_kms(&dev_priv->drm, + "no backlight present per VBT\n"); return 0; } } /* ensure intel_panel has been initialized first */ - if 
(WARN_ON(!panel->backlight.setup)) + if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup)) return -ENODEV; /* set level and max in panel struct */ @@ -1930,17 +1977,19 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) mutex_unlock(&dev_priv->backlight_lock); if (ret) { - DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", - connector->name); + drm_dbg_kms(&dev_priv->drm, + "failed to setup backlight for connector %s\n", + connector->name); return ret; } panel->backlight.present = true; - DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n", - connector->name, - enableddisabled(panel->backlight.enabled), - panel->backlight.level, panel->backlight.max); + drm_dbg_kms(&dev_priv->drm, + "Connector %s backlight initialized, %s, brightness %u/%u\n", + connector->name, + enableddisabled(panel->backlight.enabled), + panel->backlight.level, panel->backlight.max); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 520408e83681..a9a5df2fee4d 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -110,8 +110,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv, *source = INTEL_PIPE_CRC_SOURCE_DP_D; break; default: - WARN(1, "nonexisting DP port %c\n", - port_name(dig_port->base.port)); + drm_WARN(dev, 1, "nonexisting DP port %c\n", + port_name(dig_port->base.port)); break; } break; @@ -172,7 +172,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, * - DisplayPort scrambling: used for EMI reduction */ if (need_stable_symbols) { - u32 tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X); tmp |= DC_BALANCE_RESET_VLV; switch (pipe) { @@ -188,7 +188,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, default: return -EINVAL; } - I915_WRITE(PORT_DFT2_G4X, tmp); + intel_de_write(dev_priv, PORT_DFT2_G4X, tmp); } return 0; @@ -237,7 +237,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, enum pipe pipe) { - u32 tmp = I915_READ(PORT_DFT2_G4X); + u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X); switch (pipe) { case PIPE_A: @@ -254,7 +254,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, } if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) tmp &= ~DC_BALANCE_RESET_VLV; - I915_WRITE(PORT_DFT2_G4X, tmp); + intel_de_write(dev_priv, PORT_DFT2_G4X, tmp); } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, @@ -328,7 +328,8 @@ put_state: drm_atomic_state_put(state); unlock: - WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret); + drm_WARN(&dev_priv->drm, ret, + "Toggling workaround to %i returns %i\n", enable, ret); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } @@ -440,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) return 0; } -void intel_display_crc_init(struct drm_i915_private *dev_priv) +void intel_crtc_crc_init(struct intel_crtc *crtc) { - enum pipe pipe; + struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; - for_each_pipe(dev_priv, pipe) { - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - - spin_lock_init(&pipe_crc->lock); - } + spin_lock_init(&pipe_crc->lock); } static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv, @@ -570,7 +567,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc 
*crtc, const char *source_name, enum intel_pipe_crc_source source; if (display_crc_ctl_parse_source(source_name, &source) < 0) { - DRM_DEBUG_DRIVER("unknown source %s\n", source_name); + drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); return -EINVAL; } @@ -586,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc; enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; intel_wakeref_t wakeref; @@ -595,14 +593,15 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) bool enable; if (display_crc_ctl_parse_source(source_name, &source) < 0) { - DRM_DEBUG_DRIVER("unknown source %s\n", source_name); + drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name); return -EINVAL; } power_domain = POWER_DOMAIN_PIPE(crtc->index); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wakeref) { - DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); + drm_dbg_kms(&dev_priv->drm, + "Trying to capture CRC while pipe is off\n"); return -EIO; } @@ -615,8 +614,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) goto out; pipe_crc->source = source; - I915_WRITE(PIPE_CRC_CTL(crtc->index), val); - POSTING_READ(PIPE_CRC_CTL(crtc->index)); + intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val); + intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index)); if (!source) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -638,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; + struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc; u32 val = 0; if (!crtc->crc.opened) @@ -650,22 +649,22 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc) /* Don't need pipe_crc->lock here, IRQs are not generated. */ pipe_crc->skipped = 0; - I915_WRITE(PIPE_CRC_CTL(crtc->index), val); - POSTING_READ(PIPE_CRC_CTL(crtc->index)); + intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val); + intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index)); } void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; + struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc; /* Swallow crc's until we stop generating them. 
*/ spin_lock_irq(&pipe_crc->lock); pipe_crc->skipped = INT_MIN; spin_unlock_irq(&pipe_crc->lock); - I915_WRITE(PIPE_CRC_CTL(crtc->index), 0); - POSTING_READ(PIPE_CRC_CTL(crtc->index)); + intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0); + intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index)); intel_synchronize_irq(dev_priv); } diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h index db258a756fc6..43012b189415 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h @@ -13,7 +13,7 @@ struct drm_i915_private; struct intel_crtc; #ifdef CONFIG_DEBUG_FS -void intel_display_crc_init(struct drm_i915_private *dev_priv); +void intel_crtc_crc_init(struct intel_crtc *crtc); int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name); int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt); @@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc); void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc); #else -static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {} +static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {} #define intel_crtc_set_crc_source NULL #define intel_crtc_verify_crc_source NULL #define intel_crtc_get_crc_sources NULL diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 83025052c965..fd9b146e3aba 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -59,11 +59,28 @@ * get called by the frontbuffer tracking code. Note that because of locking * issues the self-refresh re-enable code is done from a work queue, which * must be correctly synchronized/cancelled when shutting down the pipe." + * + * DC3CO (DC3 clock off) + * + * On top of PSR2, GEN12 adds an intermediate power savings state that turns + * the clock off automatically during PSR2 idle state. + * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep + * entry/exit allows the HW to enter a low-power state even when page flipping + * periodically (for instance a 30fps video playback scenario). + * + * Every time a flip occurs PSR2 will get out of deep sleep state (if it was), + * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6 + * frames; if no other flip occurs and that function is executed, DC3CO is + * disabled and PSR2 is configured to enter deep sleep, resetting again in case + * of another flip. + * Front buffer modifications do not trigger DC3CO activation on purpose as it + * would bring a lot of complexity and most modern systems will only + * use page flips.
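[Editor's note: the DC3CO comment above describes a flip-driven flow; the following minimal C sketch, illustrative only and not part of the patch, shows the shape of that flow using names this diff introduces (tgl_psr2_enable_dc3co(), psr.dc3co_work, psr.dc3co_exit_delay). The helper name dc3co_flip_hint and the elided locking/dc3co_enabled checks are hypothetical.]

	/* Called on a page flip while PSR2 is active (sketch only): */
	static void dc3co_flip_hint(struct drm_i915_private *dev_priv)
	{
		/* leave deep sleep cheaply via DC3CO instead of a full PSR2 exit */
		tgl_psr2_enable_dc3co(dev_priv);
		/* (re)arm the disable worker; it only fires if no further flip
		 * arrives within ~6 frames (psr.dc3co_exit_delay) */
		mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
				 dev_priv->psr.dc3co_exit_delay);
	}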
*/ -static bool psr_global_enabled(u32 debug) +static bool psr_global_enabled(struct drm_i915_private *i915) { - switch (debug & I915_PSR_DEBUG_MODE_MASK) { + switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: return i915_modparams.enable_psr; case I915_PSR_DEBUG_DISABLE: @@ -77,8 +94,8 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { /* Cannot enable DSC and PSR2 simultaneously */ - WARN_ON(crtc_state->dsc.compression_enable && - crtc_state->has_psr2); + drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable && + crtc_state->has_psr2); switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: @@ -114,10 +131,10 @@ static void psr_irq_control(struct drm_i915_private *dev_priv) EDP_PSR_PRE_ENTRY(trans_shift); /* Warning: it is masking/setting reserved bits too */ - val = I915_READ(imr_reg); + val = intel_de_read(dev_priv, imr_reg); val &= ~EDP_PSR_TRANS_MASK(trans_shift); val |= ~mask; - I915_WRITE(imr_reg, val); + intel_de_write(dev_priv, imr_reg, val); } static void psr_event_print(u32 val, bool psr2_enabled) @@ -174,20 +191,24 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { dev_priv->psr.last_entry_attempt = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", - transcoder_name(cpu_transcoder)); + drm_dbg_kms(&dev_priv->drm, + "[transcoder %s] PSR entry attempt in 2 vblanks\n", + transcoder_name(cpu_transcoder)); } if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { dev_priv->psr.last_exit = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", - transcoder_name(cpu_transcoder)); + drm_dbg_kms(&dev_priv->drm, + "[transcoder %s] PSR exit completed\n", + transcoder_name(cpu_transcoder)); if (INTEL_GEN(dev_priv) >= 9) { - u32 val = I915_READ(PSR_EVENT(cpu_transcoder)); + u32 val = intel_de_read(dev_priv, + PSR_EVENT(cpu_transcoder)); bool psr2_enabled = dev_priv->psr.psr2_enabled; - I915_WRITE(PSR_EVENT(cpu_transcoder), val); + intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder), + val); psr_event_print(val, psr2_enabled); } } @@ -195,7 +216,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) if (psr_iir & EDP_PSR_ERROR(trans_shift)) { u32 val; - DRM_WARN("[transcoder %s] PSR aux error\n", + drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); dev_priv->psr.irq_aux_error = true; @@ -208,9 +229,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) * again so we don't care about unmask the interruption * or unset irq_aux_error. 
*/ - val = I915_READ(imr_reg); + val = intel_de_read(dev_priv, imr_reg); val |= EDP_PSR_ERROR(trans_shift); - I915_WRITE(imr_reg, val); + intel_de_write(dev_priv, imr_reg, val); schedule_work(&dev_priv->psr.work); } @@ -270,7 +291,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) to_i915(dp_to_dig_port(intel_dp)->base.base.dev); if (dev_priv->psr.dp) { - DRM_WARN("More than one eDP panel found, PSR support should be extended\n"); + drm_warn(&dev_priv->drm, + "More than one eDP panel found, PSR support should be extended\n"); return; } @@ -279,16 +301,18 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) if (!intel_dp->psr_dpcd[0]) return; - DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", - intel_dp->psr_dpcd[0]); + drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", + intel_dp->psr_dpcd[0]); - if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { - DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); + if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) { + drm_dbg_kms(&dev_priv->drm, + "PSR support not currently available for this panel\n"); return; } if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { - DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); + drm_dbg_kms(&dev_priv->drm, + "Panel lacks power state control, PSR cannot be enabled\n"); return; } @@ -316,8 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) * GTC first. */ dev_priv->psr.sink_psr2_support = y_req && alpm; - DRM_DEBUG_KMS("PSR2 %ssupported\n", - dev_priv->psr.sink_psr2_support ? "" : "not "); + drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", + dev_priv->psr.sink_psr2_support ? "" : "not "); if (dev_priv->psr.sink_psr2_support) { dev_priv->psr.colorimetry_support = @@ -380,8 +404,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), - intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); + intel_de_write(dev_priv, + EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), + intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -391,7 +416,8 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) /* Select only valid bits for SRD_AUX_CTL */ aux_ctl &= psr_aux_mask; - I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl); + intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), + aux_ctl); } static void intel_psr_enable_sink(struct intel_dp *intel_dp) @@ -454,22 +480,30 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) return val; } -static void hsw_activate_psr1(struct intel_dp *intel_dp) +static u8 psr_compute_idle_frames(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 max_sleep_time = 0x1f; - u32 val = EDP_PSR_ENABLE; + int idle_frames; /* Let's use 6 as the minimum to cover all known cases including the * off-by-one issue that HW has in some cases. 
*/ - int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - - /* sink_sync_latency of 8 means source has to wait for more than 8 - * frames, we'll go with 9 frames for now - */ + idle_frames = max(6, dev_priv->vbt.psr.idle_frames); idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); - val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + + if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf)) + idle_frames = 0xf; + + return idle_frames; +} + +static void hsw_activate_psr1(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 max_sleep_time = 0x1f; + u32 val = EDP_PSR_ENABLE; + + val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT; val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; if (IS_HASWELL(dev_priv)) @@ -483,9 +517,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & + val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); - I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val); } static void hsw_activate_psr2(struct intel_dp *intel_dp) @@ -493,13 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); u32 val; - /* Let's use 6 as the minimum to cover all known cases including the - * off-by-one issue that HW has in some cases. - */ - int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); - val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; + val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT; val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -521,9 +549,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0); + intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0); - I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } static bool @@ -552,10 +580,10 @@ static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, u32 val; idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; - val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)); val &= ~EDP_PSR2_IDLE_FRAME_MASK; val |= idle_frames; - I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) @@ -566,29 +594,22 @@ static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) { - int idle_frames; + struct intel_dp *intel_dp = dev_priv->psr.dp; intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - /* - * Restore PSR2 idle frame let's use 6 as the minimum to cover all known - * cases including the off-by-one issue that HW has in some cases. 
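[Editor's note: a worked example of psr_compute_idle_frames() above, illustrative only; the VBT value of 4 is hypothetical, and the sink_sync_latency of 8 matches the case the deleted comment mentioned.]

	/* vbt.psr.idle_frames == 4, sink_sync_latency == 8: */
	idle_frames = max(6, 4);               /* -> 6 */
	idle_frames = max(idle_frames, 8 + 1); /* -> 9 */
	/* 9 <= 0xf, so the drm_WARN_ON() clamp is not hit and 9 idle
	 * frames are programmed into EDP_PSR_CTL / EDP_PSR2_CTL */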
- */ - idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); - psr2_program_idle_frames(dev_priv, idle_frames); + psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp)); } -static void tgl_dc5_idle_thread(struct work_struct *work) +static void tgl_dc3co_disable_work(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), psr.idle_work.work); + container_of(work, typeof(*dev_priv), psr.dc3co_work.work); mutex_lock(&dev_priv->psr.lock); /* If delayed work is pending, it is not idle */ - if (delayed_work_pending(&dev_priv->psr.idle_work)) + if (delayed_work_pending(&dev_priv->psr.dc3co_work)) goto unlock; - DRM_DEBUG_KMS("DC5/6 idle thread\n"); tgl_psr2_disable_dc3co(dev_priv); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -599,11 +620,41 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) if (!dev_priv->psr.dc3co_enabled) return; - cancel_delayed_work(&dev_priv->psr.idle_work); + cancel_delayed_work(&dev_priv->psr.dc3co_work); /* Before PSR2 exit disallow dc3co*/ tgl_psr2_disable_dc3co(dev_priv); } +static void +tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 exit_scanlines; + + if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) + return; + + /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ + if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A || + dig_port->base.port != PORT_A) + return; + + /* + * DC3CO Exit time 200us B.Spec 49196 + * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 + */ + exit_scanlines = + intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1; + + if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay)) + return; + + crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -616,8 +667,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { - DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n", - transcoder_name(crtc_state->cpu_transcoder)); + drm_dbg_kms(&dev_priv->drm, + "PSR2 not supported in transcoder %s\n", + transcoder_name(crtc_state->cpu_transcoder)); return false; } @@ -627,7 +679,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * over PSR2. 
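[Editor's note: worked numbers for tgl_dc3co_exitline_compute_config() above, illustrative only; the 1920x1080@60 mode with a 1125-line vtotal is hypothetical.]

	/* line time = 1 s / (60 * 1125) ~= 14.8 us
	 * exit_scanlines = ROUNDUP(200 us / 14.8 us) + 1 = 14 + 1 = 15
	 * crtc_state->dc3co_exitline = 1080 - 15 = 1065 */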
*/ if (crtc_state->dsc.compression_enable) { - DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR2 cannot be enabled since DSC is enabled\n"); return false; } @@ -646,15 +699,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, } if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) { - DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", - crtc_hdisplay, crtc_vdisplay, - psr_max_h, psr_max_v); + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n", + crtc_hdisplay, crtc_vdisplay, + psr_max_h, psr_max_v); return false; } if (crtc_state->pipe_bpp > max_bpp) { - DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n", - crtc_state->pipe_bpp, max_bpp); + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, pipe bpp %d > max supported %d\n", + crtc_state->pipe_bpp, max_bpp); return false; } @@ -665,16 +720,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, * x granularity. */ if (crtc_hdisplay % dev_priv->psr.su_x_granularity) { - DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n", - crtc_hdisplay, dev_priv->psr.su_x_granularity); + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, hdisplay(%d) not multiple of %d\n", + crtc_hdisplay, dev_priv->psr.su_x_granularity); return false; } if (crtc_state->crc_enabled) { - DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled because it would inhibit pipe CRC calculation\n"); return false; } + tgl_dc3co_exitline_compute_config(intel_dp, crtc_state); return true; } @@ -700,31 +758,36 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, * hardcoded to PORT_A */ if (dig_port->base.port != PORT_A) { - DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: Port not supported\n"); return; } if (dev_priv->psr.sink_not_reliable) { - DRM_DEBUG_KMS("PSR sink implementation is not reliable\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR sink implementation is not reliable\n"); return; } if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { - DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: Interlaced mode enabled\n"); return; } psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); if (psr_setup_time < 0) { - DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n", - intel_dp->psr_dpcd[1]); + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: Invalid PSR setup time (0x%02x)\n", + intel_dp->psr_dpcd[1]); return; } if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { - DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n", - psr_setup_time); + drm_dbg_kms(&dev_priv->drm, + "PSR condition failed: PSR setup time (%d us) too long\n", + psr_setup_time); return; } @@ -737,10 +800,12 @@ static void intel_psr_activate(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) - WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); + drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); - WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); - WARN_ON(dev_priv->psr.active); + 
drm_WARN_ON(&dev_priv->drm, + intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); + drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); /* psr1 and psr2 are mutually exclusive.*/ @@ -768,11 +833,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))) { i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); - u32 chicken = I915_READ(reg); + u32 chicken = intel_de_read(dev_priv, reg); chicken |= PSR2_VSC_ENABLE_PROG_HEADER | PSR2_ADD_VERTICAL_LINE_COUNT; - I915_WRITE(reg, chicken); + intel_de_write(dev_priv, reg, chicken); } /* @@ -789,9 +854,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) < 11) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); + intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder), + mask); psr_irq_control(dev_priv); + + if (crtc_state->dc3co_exitline) { + u32 val; + + /* + * TODO: if future platforms supports DC3CO in more than one + * transcoder, EXITLINE will need to be unset when disabling PSR + */ + val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); + val &= ~EXITLINE_MASK; + val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT; + val |= EXITLINE_ENABLE; + intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); + } } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, @@ -800,14 +880,16 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, struct intel_dp *intel_dp = dev_priv->psr.dp; u32 val; - WARN_ON(dev_priv->psr.enabled); + drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; - dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state); dev_priv->psr.transcoder = crtc_state->cpu_transcoder; + /* DC5/DC6 requires at least 6 idle frames */ + val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6); + dev_priv->psr.dc3co_exit_delay = val; /* * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR @@ -818,20 +900,22 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, * to avoid any rendering problems. */ if (INTEL_GEN(dev_priv) >= 12) { - val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder)); + val = intel_de_read(dev_priv, + TRANS_PSR_IIR(dev_priv->psr.transcoder)); val &= EDP_PSR_ERROR(0); } else { - val = I915_READ(EDP_PSR_IIR); + val = intel_de_read(dev_priv, EDP_PSR_IIR); val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); } if (val) { dev_priv->psr.sink_not_reliable = true; - DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR interruption error set, not enabling PSR\n"); return; } - DRM_DEBUG_KMS("Enabling PSR%s\n", - dev_priv->psr.psr2_enabled ? "2" : "1"); + drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", + dev_priv->psr.psr2_enabled ? 
"2" : "1"); intel_psr_setup_vsc(intel_dp, crtc_state); intel_psr_enable_sink(intel_dp); intel_psr_enable_source(intel_dp, crtc_state); @@ -860,12 +944,12 @@ void intel_psr_enable(struct intel_dp *intel_dp, if (!crtc_state->has_psr) return; - WARN_ON(dev_priv->drrs.dp); + drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp); mutex_lock(&dev_priv->psr.lock); - if (!psr_global_enabled(dev_priv->psr.debug)) { - DRM_DEBUG_KMS("PSR disabled by flag\n"); + if (!psr_global_enabled(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); goto unlock; } @@ -881,27 +965,33 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) if (!dev_priv->psr.active) { if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { - val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); - WARN_ON(val & EDP_PSR2_ENABLE); + val = intel_de_read(dev_priv, + EDP_PSR2_CTL(dev_priv->psr.transcoder)); + drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE); } - val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); - WARN_ON(val & EDP_PSR_ENABLE); + val = intel_de_read(dev_priv, + EDP_PSR_CTL(dev_priv->psr.transcoder)); + drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE); return; } if (dev_priv->psr.psr2_enabled) { tgl_disallow_dc3co_on_psr2_exit(dev_priv); - val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); - WARN_ON(!(val & EDP_PSR2_ENABLE)); + val = intel_de_read(dev_priv, + EDP_PSR2_CTL(dev_priv->psr.transcoder)); + drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE)); val &= ~EDP_PSR2_ENABLE; - I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, + EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } else { - val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); - WARN_ON(!(val & EDP_PSR_ENABLE)); + val = intel_de_read(dev_priv, + EDP_PSR_CTL(dev_priv->psr.transcoder)); + drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE)); val &= ~EDP_PSR_ENABLE; - I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); + intel_de_write(dev_priv, + EDP_PSR_CTL(dev_priv->psr.transcoder), val); } dev_priv->psr.active = false; } @@ -917,8 +1007,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) if (!dev_priv->psr.enabled) return; - DRM_DEBUG_KMS("Disabling PSR%s\n", - dev_priv->psr.psr2_enabled ? "2" : "1"); + drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", + dev_priv->psr.psr2_enabled ? "2" : "1"); intel_psr_exit(dev_priv); @@ -933,7 +1023,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) /* Wait till PSR is idle */ if (intel_de_wait_for_clear(dev_priv, psr_status, psr_status_mask, 2000)) - DRM_ERROR("Timed out waiting PSR idle state\n"); + drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n"); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); @@ -959,7 +1049,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, if (!old_crtc_state->has_psr) return; - if (WARN_ON(!CAN_PSR(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv))) return; mutex_lock(&dev_priv->psr.lock); @@ -968,7 +1058,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); - cancel_delayed_work_sync(&dev_priv->psr.idle_work); + cancel_delayed_work_sync(&dev_priv->psr.dc3co_work); } static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) @@ -983,7 +1073,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) * but it makes more sense write to the current active * pipe. 
*/ - I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); + intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0); else /* * A write to CURSURFLIVE do not cause HW tracking to exit PSR @@ -1015,7 +1105,7 @@ void intel_psr_update(struct intel_dp *intel_dp, mutex_lock(&dev_priv->psr.lock); - enable = crtc_state->has_psr && psr_global_enabled(psr->debug); + enable = crtc_state->has_psr && psr_global_enabled(dev_priv); psr2_enable = intel_psr2_enabled(dev_priv, crtc_state); if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { @@ -1103,7 +1193,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) err = intel_de_wait_for_clear(dev_priv, reg, mask, 50); if (err) - DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for PSR Idle for re-enable\n"); /* After the unlocked wait, verify that PSR is still wanted! */ mutex_lock(&dev_priv->psr.lock); @@ -1167,7 +1258,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || mode > I915_PSR_DEBUG_FORCE_PSR1) { - DRM_DEBUG_KMS("Invalid debug mask %llx\n", val); + drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val); return -EINVAL; } @@ -1279,14 +1370,12 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, * When we will be completely rely on PSR2 S/W tracking in future, * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP * event also therefore tgl_dc3co_flush() require to be changed - * accrodingly in future. + * accordingly in future. */ static void tgl_dc3co_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - u32 delay; - mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.dc3co_enabled) @@ -1304,10 +1393,8 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv, goto unlock; tgl_psr2_enable_dc3co(dev_priv); - /* DC5/DC6 required idle frames = 6 */ - delay = 6 * dev_priv->psr.dc3co_exit_delay; - mod_delayed_work(system_wq, &dev_priv->psr.idle_work, - usecs_to_jiffies(delay)); + mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work, + dev_priv->psr.dc3co_exit_delay); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -1391,7 +1478,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; INIT_WORK(&dev_priv->psr.work, intel_psr_work); - INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread); + INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work); mutex_init(&dev_priv->psr.lock); } @@ -1427,14 +1514,15 @@ static void psr_alpm_check(struct intel_dp *intel_dp) r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val); if (r != 1) { - DRM_ERROR("Error reading ALPM status\n"); + drm_err(&dev_priv->drm, "Error reading ALPM status\n"); return; } if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; - DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "ALPM lock timeout error, disabling PSR\n"); /* Clearing error */ drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val); @@ -1450,14 +1538,15 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp) r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val); if (r != 1) { - DRM_ERROR("Error reading DP_PSR_ESI\n"); + drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n"); return; } if (val & DP_PSR_CAPS_CHANGE) { intel_psr_disable_locked(intel_dp); psr->sink_not_reliable = true; - 
DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "Sink PSR capability changed, disabling PSR\n"); /* Clearing it */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val); @@ -1482,7 +1571,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) goto exit; if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) { - DRM_ERROR("Error reading PSR status or error status\n"); + drm_err(&dev_priv->drm, + "Error reading PSR status or error status\n"); goto exit; } @@ -1492,17 +1582,22 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) } if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status) - DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR sink internal error, disabling PSR\n"); if (error_status & DP_PSR_RFB_STORAGE_ERROR) - DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR RFB storage error, disabling PSR\n"); if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) - DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR VSC SDP uncorrectable error, disabling PSR\n"); if (error_status & DP_PSR_LINK_CRC_ERROR) - DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n"); + drm_dbg_kms(&dev_priv->drm, + "PSR Link CRC error, disabling PSR\n"); if (error_status & ~errors) - DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", - error_status & ~errors); + drm_err(&dev_priv->drm, + "PSR_ERROR_STATUS unhandled errors %x\n", + error_status & ~errors); /* clear status register */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status); @@ -1542,7 +1637,7 @@ void intel_psr_atomic_check(struct drm_connector *connector, return; intel_connector = to_intel_connector(connector); - dig_port = enc_to_dig_port(intel_connector->encoder); + dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector)); if (dev_priv->psr.dp != &dig_port->dp) return; diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index 399b1542509f..46beb155d835 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -14,7 +14,7 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915) { i915->quirks |= QUIRK_LVDS_SSC_DISABLE; - DRM_INFO("applying lvds SSC disable quirk\n"); + drm_info(&i915->drm, "applying lvds SSC disable quirk\n"); } /* @@ -24,14 +24,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915) static void quirk_invert_brightness(struct drm_i915_private *i915) { i915->quirks |= QUIRK_INVERT_BRIGHTNESS; - DRM_INFO("applying inverted panel brightness quirk\n"); + drm_info(&i915->drm, "applying inverted panel brightness quirk\n"); } /* Some VBT's incorrectly indicate no backlight is present */ static void quirk_backlight_present(struct drm_i915_private *i915) { i915->quirks |= QUIRK_BACKLIGHT_PRESENT; - DRM_INFO("applying backlight present quirk\n"); + drm_info(&i915->drm, "applying backlight present quirk\n"); } /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms @@ -40,7 +40,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915) static void quirk_increase_t12_delay(struct drm_i915_private *i915) { i915->quirks |= QUIRK_INCREASE_T12_DELAY; - DRM_INFO("Applying T12 delay quirk\n"); + drm_info(&i915->drm, "Applying T12 delay quirk\n"); } /* @@ -50,7 +50,7 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915) static void 
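[Editor's note] intel_psr_short_pulse() above walks a sink error-status byte bit by bit, logging one message per known error and a catch-all for anything outside the handled set. A self-contained model of that decode pattern — the bit positions here are invented for illustration and are not the real DPCD layout:

#include <stdint.h>
#include <stdio.h>

#define ERR_RFB_STORAGE  (1u << 1)  /* hypothetical bit assignments */
#define ERR_VSC_SDP      (1u << 2)
#define ERR_LINK_CRC     (1u << 3)

static void decode_psr_errors(uint8_t status)
{
	const uint8_t known = ERR_RFB_STORAGE | ERR_VSC_SDP | ERR_LINK_CRC;

	if (status & ERR_RFB_STORAGE)
		puts("PSR RFB storage error, disabling PSR");
	if (status & ERR_VSC_SDP)
		puts("PSR VSC SDP uncorrectable error, disabling PSR");
	if (status & ERR_LINK_CRC)
		puts("PSR Link CRC error, disabling PSR");

	/* Anything outside the known set is reported wholesale,
	 * mirroring the "unhandled errors %x" branch. */
	if (status & ~known)
		printf("unhandled errors %#x\n", status & ~known);
}

int main(void)
{
	decode_psr_errors(ERR_LINK_CRC | 0x80);
	return 0;
}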
quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) { i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; - DRM_INFO("Applying Increase DDI Disabled quirk\n"); + drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n"); } struct intel_quirk { @@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { DMI_MATCH(DMI_PRODUCT_NAME, ""), }, }, + { + .callback = intel_dmi_reverse_brightness, + .ident = "Thundersoft TST178 tablet", + /* DMI strings are too generic, also match on BIOS date */ + .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"), + }, + }, { } /* terminating entry */ }, .hook = quirk_invert_brightness, diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index e8819fd21e03..637d8fe2f8c2 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -34,7 +34,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_atomic.h" @@ -217,23 +216,23 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) int i; if (HAS_PCH_SPLIT(dev_priv)) { - I915_WRITE(intel_sdvo->sdvo_reg, val); - POSTING_READ(intel_sdvo->sdvo_reg); + intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val); + intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. */ if (HAS_PCH_IBX(dev_priv)) { - I915_WRITE(intel_sdvo->sdvo_reg, val); - POSTING_READ(intel_sdvo->sdvo_reg); + intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val); + intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg); } return; } if (intel_sdvo->port == PORT_B) - cval = I915_READ(GEN3_SDVOC); + cval = intel_de_read(dev_priv, GEN3_SDVOC); else - bval = I915_READ(GEN3_SDVOB); + bval = intel_de_read(dev_priv, GEN3_SDVOB); /* * Write the registers twice for luck. Sometimes, @@ -241,11 +240,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) * The BIOS does this too. Yay, magic */ for (i = 0; i < 2; i++) { - I915_WRITE(GEN3_SDVOB, bval); - POSTING_READ(GEN3_SDVOB); + intel_de_write(dev_priv, GEN3_SDVOB, bval); + intel_de_posting_read(dev_priv, GEN3_SDVOB); - I915_WRITE(GEN3_SDVOC, cval); - POSTING_READ(GEN3_SDVOC); + intel_de_write(dev_priv, GEN3_SDVOC, cval); + intel_de_posting_read(dev_priv, GEN3_SDVOC); } } @@ -414,12 +413,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, { const char *cmd_name; int i, pos = 0; -#define BUF_LEN 256 - char buffer[BUF_LEN]; + char buffer[64]; #define BUF_PRINT(args...) 
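[Editor's note] The new Thundersoft TST178 entry above shows why the quirk table matches on several DMI fields at once: the board strings are generic AMI defaults shared by many devices, so the BIOS date is added to narrow the match. A toy matcher illustrating the all-fields-must-match rule — the struct layout and field names are invented for the example, not the kernel's DMI API:

#include <stdio.h>
#include <string.h>

struct dmi_field { const char *key, *value; };

/* Every listed field must match exactly, like DMI_EXACT_MATCH. */
static int dmi_matches(const struct dmi_field *want, int n,
		       const struct dmi_field *sys, int sys_n)
{
	for (int i = 0; i < n; i++) {
		int hit = 0;

		for (int j = 0; j < sys_n; j++)
			if (!strcmp(want[i].key, sys[j].key) &&
			    !strcmp(want[i].value, sys[j].value))
				hit = 1;
		if (!hit)
			return 0;
	}
	return 1;
}

int main(void)
{
	const struct dmi_field want[] = {
		{ "BOARD_VENDOR", "AMI Corporation" },
		{ "BOARD_NAME",   "Aptio CRB" },
		{ "BIOS_DATE",    "04/15/2014" }, /* disambiguates generic strings */
	};
	const struct dmi_field sys[] = {
		{ "BOARD_VENDOR", "AMI Corporation" },
		{ "BOARD_NAME",   "Aptio CRB" },
		{ "BIOS_DATE",    "04/15/2014" },
	};

	printf("match: %d\n", dmi_matches(want, 3, sys, 3));
	return 0;
}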
\ - pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) - + pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args) for (i = 0; i < args_len; i++) { BUF_PRINT("%02X ", ((u8 *)args)[i]); @@ -433,9 +430,9 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, BUF_PRINT("(%s)", cmd_name); else BUF_PRINT("(%02X)", cmd); - BUG_ON(pos >= BUF_LEN - 1); + + WARN_ON(pos >= sizeof(buffer) - 1); #undef BUF_PRINT -#undef BUF_LEN DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer); } @@ -540,8 +537,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ u8 status; int i, pos = 0; -#define BUF_LEN 256 - char buffer[BUF_LEN]; + char buffer[64]; buffer[0] = '\0'; @@ -581,7 +577,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, } #define BUF_PRINT(args...) \ - pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) + pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args) cmd_status = sdvo_cmd_status(status); if (cmd_status) @@ -600,9 +596,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, goto log_fail; BUF_PRINT(" %02X", ((u8 *)response)[i]); } - BUG_ON(pos >= BUF_LEN - 1); + + WARN_ON(pos >= sizeof(buffer) - 1); #undef BUF_PRINT -#undef BUF_LEN DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer); return true; @@ -1267,6 +1263,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config) pipe_config->clock_set = true; } +static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo, + const struct drm_connector_state *conn_state) +{ + return sdvo->has_hdmi_monitor && + READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; +} + static int intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1322,12 +1325,15 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, pipe_config->pixel_multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); - if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI) - pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor; + pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state); - if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON || - (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio)) - pipe_config->has_audio = true; + if (pipe_config->has_hdmi_sink) { + if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO) + pipe_config->has_audio = intel_sdvo->has_hdmi_audio; + else + pipe_config->has_audio = + intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON; + } if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* @@ -1470,7 +1476,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, else intel_sdvo_get_dtd_from_mode(&output_dtd, mode); if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) - DRM_INFO("Setting output timings on %s failed\n", + drm_info(&dev_priv->drm, + "Setting output timings on %s failed\n", SDVO_NAME(intel_sdvo)); /* Set the input timing to the screen. Assume always input 0. 
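[Editor's note] The SDVO debug hunks above shrink the stack buffer from 256 to 64 bytes and downgrade BUG_ON to WARN_ON; the pattern stays safe because each BUF_PRINT clamps the remaining space at zero, so snprintf quietly truncates instead of overflowing while pos keeps tracking the would-be length. A runnable version of that accumulation — buffer size and byte values chosen arbitrarily:

#include <stdio.h>

int main(void)
{
	char buffer[64];
	int pos = 0;
	unsigned char args[] = { 0xDE, 0xAD, 0xBE, 0xEF };

	buffer[0] = '\0';
	for (size_t i = 0; i < sizeof(args); i++) {
		/* Clamp the size argument at 0 once the buffer fills;
		 * snprintf then writes nothing but still returns the
		 * length it would have needed, so pos can exceed the
		 * buffer size and trip the truncation warning below. */
		int room = (int)sizeof(buffer) - pos;

		pos += snprintf(buffer + pos, room > 0 ? room : 0,
				"%02X ", args[i]);
	}

	if (pos >= (int)sizeof(buffer) - 1)
		fprintf(stderr, "WARN: debug string truncated\n");

	printf("W: %s\n", buffer);
	return 0;
}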
*/ @@ -1494,12 +1501,14 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector)) input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) - DRM_INFO("Setting input timings on %s failed\n", + drm_info(&dev_priv->drm, + "Setting input timings on %s failed\n", SDVO_NAME(intel_sdvo)); switch (crtc_state->pixel_multiplier) { default: - WARN(1, "unknown pixel multiplier specified\n"); + drm_WARN(&dev_priv->drm, 1, + "unknown pixel multiplier specified\n"); /* fall through */ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; @@ -1518,7 +1527,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, if (INTEL_GEN(dev_priv) < 5) sdvox |= SDVO_BORDER_ENABLE; } else { - sdvox = I915_READ(intel_sdvo->sdvo_reg); + sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); if (intel_sdvo->port == PORT_B) sdvox &= SDVOB_PRESERVE_MASK; else @@ -1564,7 +1573,7 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, { u32 val; - val = I915_READ(sdvo_reg); + val = intel_de_read(dev_priv, sdvo_reg); /* asserts want to know the pipe even if the port is disabled */ if (HAS_PCH_CPT(dev_priv)) @@ -1607,7 +1616,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO); - sdvox = I915_READ(intel_sdvo->sdvo_reg); + sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); if (!ret) { @@ -1615,7 +1624,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, * Some sdvo encoders are not spec compliant and don't * implement the mandatory get_timings function. */ - DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n"); + drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n"); pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS; } else { if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) @@ -1667,9 +1676,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } } - WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, - "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", - pipe_config->pixel_multiplier, encoder_pixel_multiplier); + drm_WARN(dev, + encoder_pixel_multiplier != pipe_config->pixel_multiplier, + "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", + pipe_config->pixel_multiplier, encoder_pixel_multiplier); if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; @@ -1734,7 +1744,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, intel_sdvo_set_encoder_power_state(intel_sdvo, DRM_MODE_DPMS_OFF); - temp = I915_READ(intel_sdvo->sdvo_reg); + temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); temp &= ~SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); @@ -1791,7 +1801,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, int i; bool success; - temp = I915_READ(intel_sdvo->sdvo_reg); + temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg); temp |= SDVO_ENABLE; intel_sdvo_write_sdvox(intel_sdvo, temp); @@ -1806,8 +1816,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder, * a given it the status is a success, we succeeded. 
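[Editor's note] The pixel-multiplier switch in the hunk above uses a deliberate fall-through: an unknown multiplier warns and then reuses the 1x case rather than leaving the rate uninitialized. A minimal model of that control flow — the constant names are invented stand-ins for SDVO_CLOCK_RATE_MULT_*:

#include <stdio.h>

enum { RATE_1X = 1, RATE_2X, RATE_4X };  /* hypothetical constants */

static int clock_rate_mult(int multiplier)
{
	switch (multiplier) {
	default:
		fprintf(stderr, "WARN: unknown pixel multiplier\n");
		/* fall through: degrade gracefully to the 1x rate */
	case 1: return RATE_1X;
	case 2: return RATE_2X;
	case 4: return RATE_4X;
	}
}

int main(void)
{
	printf("mult 2 -> %d, mult 3 -> %d\n",
	       clock_rate_mult(2), clock_rate_mult(3));
	return 0;
}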
*/ if (success && !input1) { - DRM_DEBUG_KMS("First %s output reported failure to " - "sync\n", SDVO_NAME(intel_sdvo)); + drm_dbg_kms(&dev_priv->drm, + "First %s output reported failure to " + "sync\n", SDVO_NAME(intel_sdvo)); } if (0) @@ -2219,8 +2230,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); + drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); /* * Fetch modes from VBT. For SDVO prefer the VBT mode since some @@ -2709,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) * Some SDVO devices have one-shot hotplug interrupts. * Ensure that they get re-enabled when an interrupt happens. */ + intel_connector->polled = DRM_CONNECTOR_POLL_HPD; intel_encoder->hotplug = intel_sdvo_hotplug; intel_sdvo_enable_hotplug(intel_encoder); } else { @@ -3229,9 +3241,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv, enum port port) { if (HAS_PCH_SPLIT(dev_priv)) - WARN_ON(port != PORT_B); + drm_WARN_ON(&dev_priv->drm, port != PORT_B); else - WARN_ON(port != PORT_B && port != PORT_C); + drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C); } bool intel_sdvo_init(struct drm_i915_private *dev_priv, @@ -3269,8 +3281,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, u8 byte; if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) { - DRM_DEBUG_KMS("No SDVO device found on %s\n", - SDVO_NAME(intel_sdvo)); + drm_dbg_kms(&dev_priv->drm, + "No SDVO device found on %s\n", + SDVO_NAME(intel_sdvo)); goto err; } } @@ -3293,8 +3306,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { - DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", - SDVO_NAME(intel_sdvo)); + drm_dbg_kms(&dev_priv->drm, + "SDVO output failed to setup on %s\n", + SDVO_NAME(intel_sdvo)); /* Output_setup can leave behind connectors! 
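[Editor's note] assert_sdvo_port_valid() above now routes through drm_WARN_ON but keeps the same rule: PCH-split platforms only expose SDVO on port B, while older platforms allow B or C. A tiny model of that validity check — the enum values are invented for the example:

#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C };

static bool sdvo_port_valid(bool has_pch_split, enum port port)
{
	if (has_pch_split)
		return port == PORT_B;            /* PCH split: B only */
	return port == PORT_B || port == PORT_C;  /* legacy: B or C */
}

int main(void)
{
	printf("PCH split, port C valid? %d\n", sdvo_port_valid(true, PORT_C));
	printf("legacy,    port C valid? %d\n", sdvo_port_valid(false, PORT_C));
	return 0;
}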
*/ goto err_output; } @@ -3331,7 +3345,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, &intel_sdvo->pixel_clock_max)) goto err_output; - DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " + drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, " "clock range %dMHz - %dMHz, " "input 1: %c, input 2: %c, " "output 1: %c, output 2: %c\n", diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h index a66f224aa17d..72065e4360d5 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo.h @@ -8,8 +8,6 @@ #include <linux/types.h> -#include <drm/i915_drm.h> - #include "i915_reg.h" struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index fca77ec1e0dd..33d886141138 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -37,10 +37,10 @@ #include <drm/drm_fourcc.h> #include <drm/drm_plane_helper.h> #include <drm/drm_rect.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" +#include "i915_vgpu.h" #include "intel_atomic_plane.h" #include "intel_display_types.h" #include "intel_frontbuffer.h" @@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) if (min <= 0 || max <= 0) goto irq_disable; - if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) + if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base))) goto irq_disable; /* @@ -113,8 +113,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) * re-entry as well. */ if (intel_psr_wait_for_idle(new_crtc_state, &psr_status)) - DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n", - psr_status); + drm_err(&dev_priv->drm, + "PSR idle timed out 0x%x, atomic update may fail\n", + psr_status); local_irq_disable(); @@ -135,8 +136,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) break; if (!timeout) { - DRM_ERROR("Potential atomic update failure on pipe %c\n", - pipe_name(crtc->pipe)); + drm_err(&dev_priv->drm, + "Potential atomic update failure on pipe %c\n", + pipe_name(crtc->pipe)); break; } @@ -204,7 +206,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) * event outside of the critical section - the spinlock might spin for a * while ... 
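[Editor's note] The intel_pipe_update_start() hunks that follow convert the vblank-evasion diagnostics to device-aware logging; the loop itself is unchanged: if the beam is inside the critical window just before vblank, wait for it to pass so the register update cannot straddle the latch point. A simplified, self-contained model of that loop — the scanline numbers and the iteration-count timeout are stand-ins for the real vblank wait:

#include <stdbool.h>
#include <stdio.h>

#define VTOTAL        1125
#define MIN_SCANLINE  1080  /* hypothetical evasion window ... */
#define MAX_SCANLINE  1124  /* ... just before vblank start */

static int scanline = 1079;  /* fake beam position */

static int read_scanline(void)
{
	return scanline = (scanline + 7) % VTOTAL;
}

int main(void)
{
	int timeout = 400;  /* iterations standing in for the vblank wait */

	/* Spin until the beam leaves the critical window, like the
	 * evasion loop in intel_pipe_update_start(). */
	while (timeout--) {
		int line = read_scanline();
		bool in_window = line >= MIN_SCANLINE && line <= MAX_SCANLINE;

		if (!in_window) {
			printf("safe to update at scanline %d\n", line);
			return 0;
		}
	}
	fprintf(stderr, "Potential atomic update failure\n");
	return 1;
}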
*/ if (new_crtc_state->uapi.event) { - WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); + drm_WARN_ON(&dev_priv->drm, + drm_crtc_vblank_get(&crtc->base) != 0); spin_lock(&crtc->base.dev->event_lock); drm_crtc_arm_vblank_event(&crtc->base, @@ -221,17 +224,20 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) if (crtc->debug.start_vbl_count && crtc->debug.start_vbl_count != end_vbl_count) { - DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n", - pipe_name(pipe), crtc->debug.start_vbl_count, - end_vbl_count, - ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time), - crtc->debug.min_vbl, crtc->debug.max_vbl, - crtc->debug.scanline_start, scanline_end); + drm_err(&dev_priv->drm, + "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n", + pipe_name(pipe), crtc->debug.start_vbl_count, + end_vbl_count, + ktime_us_delta(end_vbl_time, + crtc->debug.start_vbl_time), + crtc->debug.min_vbl, crtc->debug.max_vbl, + crtc->debug.scanline_start, scanline_end); } #ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) > VBLANK_EVASION_TIME_US) - DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", + drm_warn(&dev_priv->drm, + "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", pipe_name(pipe), ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time), VBLANK_EVASION_TIME_US); @@ -278,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); /* + * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS + * abuses hsub/vsub so we can't use them here. But as they + * are limited to 32bpp RGB formats we don't actually need + * to check anything. + */ + if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) + return 0; + + /* * Hardware doesn't handle subpixel coordinates. * Adjust to (macro)pixel boundary, but be careful not to * increase the source viewport size, because that could @@ -291,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) drm_rect_init(src, src_x << 16, src_y << 16, src_w << 16, src_h << 16); - if (!fb->format->is_yuv) - return 0; - - /* YUV specific checks */ - if (!rotated) { + if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { + hsub = 2; + vsub = 2; + } else { hsub = fb->format->hsub; vsub = fb->format->vsub; - } else { - hsub = vsub = max(fb->format->hsub, fb->format->vsub); } + if (rotated) + hsub = vsub = max(hsub, vsub); + if (src_x % hsub || src_w % hsub) { - DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n", - src_x, src_w, hsub, rotated ? "rotated " : ""); + DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_x, src_w, hsub, yesno(rotated)); return -EINVAL; } if (src_y % vsub || src_h % vsub) { - DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n", - src_y, src_h, vsub, rotated ? 
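[Editor's note] The reworked intel_plane_check_src_coordinates() in the next hunk folds the rotated-RGB565 special case into the same hsub/vsub path, but the underlying rule is unchanged: source x/w (and y/h) must land on macropixel boundaries, enforced with a modulo test. A minimal model of that check — the subsampling factor 2 corresponds to an NV12-style 2x2 layout and is chosen arbitrarily:

#include <stdio.h>

static int check_src_alignment(unsigned int x, unsigned int w,
			       unsigned int hsub)
{
	if (x % hsub || w % hsub) {
		fprintf(stderr,
			"src x/w (%u, %u) must be a multiple of %u\n",
			x, w, hsub);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("aligned:   %d\n", check_src_alignment(64, 128, 2));
	printf("unaligned: %d\n", check_src_alignment(65, 128, 2));
	return 0;
}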
"rotated " : ""); + DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_y, src_h, vsub, yesno(rotated)); return -EINVAL; } @@ -349,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev); - unsigned int pixel_rate = crtc_state->pixel_rate; - unsigned int src_w, src_h, dst_w, dst_h; unsigned int num, den; + unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state); skl_plane_ratio(crtc_state, plane_state, &num, &den); @@ -359,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) den *= 2; - src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - dst_w = drm_rect_width(&plane_state->uapi.dst); - dst_h = drm_rect_height(&plane_state->uapi.dst); - - /* Downscaling limits the maximum pixel rate */ - dst_w = min(src_w, dst_w); - dst_h = min(src_h, dst_h); - - return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h), - mul_u32_u32(den, dst_w * dst_h)); + return DIV_ROUND_UP(pixel_rate * num, den); } static unsigned int @@ -434,14 +439,16 @@ skl_program_scaler(struct intel_plane *plane, uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); } - I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), - PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode); - I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), - PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); - I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), - PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); - I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); - I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h); + intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id), + PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode); + intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id), + PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id), + PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); + intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id), + (crtc_x << 16) | crtc_y); + intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id), + (crtc_w << 16) | crtc_h); } /* Preoffset values for YUV to RGB Conversion */ @@ -547,28 +554,37 @@ icl_program_input_csc(struct intel_plane *plane, else csc = input_csc_matrix_lr[plane_state->hw.color_encoding]; - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | - GOFF(csc[1])); - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2])); - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) | - GOFF(csc[4])); - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5])); - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) | - GOFF(csc[7])); - I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8])); - - I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), - PREOFF_YUV_TO_RGB_HI); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), + ROFF(csc[0]) | GOFF(csc[1])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), + BOFF(csc[2])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), + ROFF(csc[3]) | GOFF(csc[4])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), + 
BOFF(csc[5])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), + ROFF(csc[6]) | GOFF(csc[7])); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), + BOFF(csc[8])); + + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), + PREOFF_YUV_TO_RGB_HI); if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + 0); else - I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); - I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), - PREOFF_YUV_TO_RGB_LO); - I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); - I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); - I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), + PREOFF_YUV_TO_RGB_LO); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); + intel_de_write_fw(dev_priv, + PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); } static void @@ -623,44 +639,49 @@ skl_program_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); - I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); + intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), + (src_h << 16) | src_w); if (INTEL_GEN(dev_priv) < 12) aux_dist |= aux_stride; - I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); if (icl_is_hdr_plane(dev_priv, plane_id)) - I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); + intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), + plane_state->cus_ctl); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) - I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), + plane_color_ctl); if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); skl_write_plane_wm(plane, crtc_state); - I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); - I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk); - I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), + key->min_value); + intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); + intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); - I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), + (y << 16) | x); if (INTEL_GEN(dev_priv) < 11) - I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), - (plane_state->color_plane[1].y << 16) | - plane_state->color_plane[1].x); + intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), + (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x); /* * The control register self-arms if the plane was previously * disabled. 
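[Editor's note] The skl_plane_min_cdclk() rework above collapses the separate src/dst downscale arithmetic into intel_plane_pixel_rate(), leaving only a ceiling division of the adjusted pixel rate by the num/den ratio. A runnable sketch of that final computation — the rate and ratio values are hypothetical:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical numbers: 594000 kHz pixel rate, ratio num/den
	 * of 8/9, with den doubled as on gen10+ in the real code. */
	unsigned long long pixel_rate = 594000;  /* kHz */
	unsigned int num = 8, den = 9 * 2;

	printf("min cdclk = %llu kHz\n",
	       DIV_ROUND_UP(pixel_rate * num, den));
	return 0;
}

Rounding up matters here: rounding down could report a cdclk floor one step too low and underprovision the display clock.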
Try to make the plane enable atomic by writing * the control register just before the surface register. */ - I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); - I915_WRITE_FW(PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + intel_plane_ggtt_offset(plane_state) + surf_addr); if (plane_state->scaler_id >= 0) skl_program_scaler(plane, crtc_state, plane_state); @@ -693,12 +714,12 @@ skl_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (icl_is_hdr_plane(dev_priv, plane_id)) - I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0); skl_write_plane_wm(plane, crtc_state); - I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); - I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -718,7 +739,7 @@ skl_plane_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; + ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; *pipe = plane->pipe; @@ -774,23 +795,36 @@ chv_update_csc(const struct intel_plane_state *plane_state) if (!fb->format->is_yuv) return; - I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); - - I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0])); - I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2])); - I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4])); - I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6])); - I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8])); - - I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0)); - I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); - I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512)); - - I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); - I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id), + SPCSC_OOFF(0) | SPCSC_IOFF(0)); + intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id), + SPCSC_OOFF(0) | SPCSC_IOFF(0)); + intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id), + SPCSC_OOFF(0) | SPCSC_IOFF(0)); + + intel_de_write_fw(dev_priv, SPCSCC01(plane_id), + SPCSC_C1(csc[1]) | SPCSC_C0(csc[0])); + intel_de_write_fw(dev_priv, SPCSCC23(plane_id), + SPCSC_C1(csc[3]) | SPCSC_C0(csc[2])); + intel_de_write_fw(dev_priv, SPCSCC45(plane_id), + SPCSC_C1(csc[5]) | SPCSC_C0(csc[4])); + intel_de_write_fw(dev_priv, SPCSCC67(plane_id), + SPCSC_C1(csc[7]) | SPCSC_C0(csc[6])); + intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8])); + + intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id), + SPCSC_IMAX(1023) | SPCSC_IMIN(0)); + intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id), + SPCSC_IMAX(512) | SPCSC_IMIN(-512)); + intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id), + SPCSC_IMAX(512) | SPCSC_IMIN(-512)); + + intel_de_write_fw(dev_priv, 
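[Editor's note] The skl, vlv, ivb and g4x update paths in the hunks around here all end the same way after the conversion: every other register is staged first and PLANE_SURF (or SPSURF/SPRSURF/DVSSURF) is written last, because the surface write is what arms the double-buffered update. A toy model of that latch-on-surface-write behaviour — the register names and hardware semantics are deliberately simplified:

#include <stdint.h>
#include <stdio.h>

/* Fake double-buffered plane: writing SURF latches everything. */
static uint32_t plane_ctl, plane_surf;   /* staged values */
static uint32_t live_ctl, live_surf;     /* what the "hardware" scans out */

static void write_surf(uint32_t addr)
{
	plane_surf = addr;
	/* Hardware model: the surface write arms *all* staged state,
	 * which is why CTL must already hold its final value. */
	live_ctl = plane_ctl;
	live_surf = plane_surf;
}

int main(void)
{
	plane_ctl = 0x80000000;  /* enable bit staged first */
	write_surf(0x1000);      /* surface write commits the update */

	printf("live: ctl=%#x surf=%#x\n", live_ctl, live_surf);
	return 0;
}

Writing SURF before CTL in this model would scan out one frame with a stale control value, which is exactly the non-atomic enable the comment warns about.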
SPCSCYGOCLAMP(plane_id), + SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id), + SPCSC_OMAX(1023) | SPCSC_OMIN(0)); + intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id), + SPCSC_OMAX(1023) | SPCSC_OMIN(0)); } #define SIN_0 0 @@ -829,10 +863,10 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) } /* FIXME these register are single buffered :( */ - I915_WRITE_FW(SPCLRC0(pipe, plane_id), - SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness)); - I915_WRITE_FW(SPCLRC1(pipe, plane_id), - SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); + intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id), + SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness)); + intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id), + SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } static void @@ -1019,10 +1053,8 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) - I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1), - gamma[i] << 16 | - gamma[i] << 8 | - gamma[i]); + intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1), + gamma[i] << 16 | gamma[i] << 8 | gamma[i]); } static void @@ -1055,32 +1087,37 @@ vlv_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(SPSTRIDE(pipe, plane_id), - plane_state->color_plane[0].stride); - I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); - I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), + plane_state->color_plane[0].stride); + intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), + (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) chv_update_csc(plane_state); if (key->flags) { - I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value); - I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask); - I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value); + intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), + key->min_value); + intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id), + key->channel_mask); + intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id), + key->max_value); } - I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); - I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); + intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. 
*/ - I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); - I915_WRITE_FW(SPSURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + sprsurf_offset); + intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl); + intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); vlv_update_clrc(plane_state); vlv_update_gamma(plane_state); @@ -1099,8 +1136,8 @@ vlv_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(SPCNTR(pipe, plane_id), 0); - I915_WRITE_FW(SPSURF(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1120,7 +1157,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; + ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; *pipe = plane->pipe; @@ -1424,19 +1461,17 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) - I915_WRITE_FW(SPRGAMC(pipe, i), - gamma[i] << 20 | - gamma[i] << 10 | - gamma[i]); - - I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]); - I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]); - I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC(pipe, i), + gamma[i] << 20 | gamma[i] << 10 | gamma[i]); + + intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]); i++; - I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]); - I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]); - I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]); + intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]); i++; } @@ -1476,25 +1511,27 @@ ivb_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride); - I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), + plane_state->color_plane[0].stride); + intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE_FW(SPRSCALE(pipe), sprscale); + intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); if (key->flags) { - I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value); - I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask); - I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value); + intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); + intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), + key->channel_mask); + intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value); } /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x); } else { - I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); - I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset); + intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | 
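[Editor's note] The gamma loops in the ivb/vlv/g4x hunks here replicate one channel value into all three colour fields of each register: gamma[i] << 20 | gamma[i] << 10 | gamma[i] for 10-bit channels, or shifts of 16/8/0 for 8-bit ones. A one-function sketch of that packing:

#include <stdint.h>
#include <stdio.h>

/* Replicate one channel value into the R/G/B fields of a gamma
 * register: 10 bits per channel, as in the ivb/ilk variants. */
static uint32_t pack_gamma10(uint16_t v)
{
	return ((uint32_t)v << 20) | ((uint32_t)v << 10) | v;
}

int main(void)
{
	printf("%#x\n", pack_gamma10(0x3ff));  /* full scale -> 0x3fffffff */
	return 0;
}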
x); } /* @@ -1502,9 +1539,9 @@ ivb_update_plane(struct intel_plane *plane, * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ - I915_WRITE_FW(SPRCTL(pipe), sprctl); - I915_WRITE_FW(SPRSURF(pipe), - intel_plane_ggtt_offset(plane_state) + sprsurf_offset); + intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl); + intel_de_write_fw(dev_priv, SPRSURF(pipe), + intel_plane_ggtt_offset(plane_state) + sprsurf_offset); ivb_update_gamma(plane_state); @@ -1521,11 +1558,11 @@ ivb_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(SPRCTL(pipe), 0); + intel_de_write_fw(dev_priv, SPRCTL(pipe), 0); /* Disable the scaler */ if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE_FW(SPRSCALE(pipe), 0); - I915_WRITE_FW(SPRSURF(pipe), 0); + intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0); + intel_de_write_fw(dev_priv, SPRSURF(pipe), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1544,7 +1581,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE; + ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE; *pipe = plane->pipe; @@ -1710,10 +1747,8 @@ static void g4x_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ /* The two end points are implicit (0.0 and 1.0) */ for (i = 1; i < 8 - 1; i++) - I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1), - gamma[i] << 16 | - gamma[i] << 8 | - gamma[i]); + intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1), + gamma[i] << 16 | gamma[i] << 8 | gamma[i]); } static void ilk_sprite_linear_gamma(u16 gamma[17]) @@ -1741,14 +1776,12 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state) /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) - I915_WRITE_FW(DVSGAMC_ILK(pipe, i), - gamma[i] << 20 | - gamma[i] << 10 | - gamma[i]); - - I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]); - I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]); - I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]); + intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i), + gamma[i] << 20 | gamma[i] << 10 | gamma[i]); + + intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]); + intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]); + intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]); i++; } @@ -1788,28 +1821,30 @@ g4x_update_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); - I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); - I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); - I915_WRITE_FW(DVSSCALE(pipe), dvsscale); + intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), + plane_state->color_plane[0].stride); + intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); + intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); if (key->flags) { - I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value); - I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask); - I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value); + intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); + intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), + key->channel_mask); + intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value); } - I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); - I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); + 
intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); + intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x); /* * The control register self-arms if the plane was previously * disabled. Try to make the plane enable atomic by writing * the control register just before the surface register. */ - I915_WRITE_FW(DVSCNTR(pipe), dvscntr); - I915_WRITE_FW(DVSSURF(pipe), - intel_plane_ggtt_offset(plane_state) + dvssurf_offset); + intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr); + intel_de_write_fw(dev_priv, DVSSURF(pipe), + intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) g4x_update_gamma(plane_state); @@ -1829,10 +1864,10 @@ g4x_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - I915_WRITE_FW(DVSCNTR(pipe), 0); + intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0); /* Disable the scaler */ - I915_WRITE_FW(DVSSCALE(pipe), 0); - I915_WRITE_FW(DVSSURF(pipe), 0); + intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0); + intel_de_write_fw(dev_priv, DVSSURF(pipe), 0); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1851,7 +1886,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane, if (!wakeref) return false; - ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE; + ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE; *pipe = plane->pipe; @@ -1999,7 +2034,8 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state) if (IS_CHERRYVIEW(dev_priv) && rotation & DRM_MODE_ROTATE_180 && rotation & DRM_MODE_REFLECT_X) { - DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n"); + drm_dbg_kms(&dev_priv->drm, + "Cannot rotate and reflect at the same time\n"); return -EINVAL; } @@ -2040,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, return 0; } +static bool intel_format_is_p01x(u32 format) +{ + switch (format) { + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return true; + default: + return false; + } +} + static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -2054,21 +2102,24 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && is_ccs_modifier(fb->modifier)) { - DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n", - rotation); + drm_dbg_kms(&dev_priv->drm, + "RC support only with 0/180 degree rotation (%x)\n", + rotation); return -EINVAL; } if (rotation & DRM_MODE_REFLECT_X && fb->modifier == DRM_FORMAT_MOD_LINEAR) { - DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n"); + drm_dbg_kms(&dev_priv->drm, + "horizontal flip is not supported with linear surface formats\n"); return -EINVAL; } if (drm_rotation_90_or_270(rotation)) { if (fb->modifier != I915_FORMAT_MOD_Y_TILED && fb->modifier != I915_FORMAT_MOD_Yf_TILED) { - DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n"); + drm_dbg_kms(&dev_priv->drm, + "Y/Yf tiling required for 90/270!\n"); return -EINVAL; } @@ -2091,9 +2142,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: - DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", - drm_get_format_name(fb->format->format, - &format_name)); + drm_dbg_kms(&dev_priv->drm, + "Unsupported pixel format %s for 90/270!\n", + drm_get_format_name(fb->format->format, + &format_name)); return -EINVAL; default: break; @@ -2109,7 +2161,17 @@ static 
int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) { - DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n"); + drm_dbg_kms(&dev_priv->drm, + "Y/Yf tiling not supported in IF-ID mode\n"); + return -EINVAL; + } + + /* Wa_1606054188:tgl */ + if (IS_TIGERLAKE(dev_priv) && + plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE && + intel_format_is_p01x(fb->format->format)) { + drm_dbg_kms(&dev_priv->drm, + "Source color keying not supported with P01x formats\n"); return -EINVAL; } @@ -2136,10 +2198,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s */ if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && (crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) { - DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n", - crtc_x + crtc_w < 4 ? "end" : "start", - crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x, - 4, pipe_src_w - 4); + drm_dbg_kms(&dev_priv->drm, + "requested plane X %s position %d invalid (valid range %d-%d)\n", + crtc_x + crtc_w < 4 ? "end" : "start", + crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x, + 4, pipe_src_w - 4); return -ERANGE; } @@ -2754,19 +2817,25 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_supports_mc_ccs(enum plane_id plane_id) +static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { + /* Wa_14010477008:tgl[a0..c0] */ + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_C0)) + return false; + return plane_id < PLANE_SPRITE4; } static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { + struct drm_i915_private *dev_priv = to_i915(_plane->dev); struct intel_plane *plane = to_intel_plane(_plane); switch (modifier) { case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(plane->id)) + if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) return false; /* fall through */ case DRM_FORMAT_MOD_LINEAR: @@ -2935,9 +3004,10 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } -static const u64 *gen12_get_plane_modifiers(enum plane_id plane_id) +static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, + enum plane_id plane_id) { - if (gen12_plane_supports_mc_ccs(plane_id)) + if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) return gen12_plane_format_modifiers_mc_ccs; else return gen12_plane_format_modifiers_rc_ccs; @@ -2968,7 +3038,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, struct intel_plane *plane; enum drm_plane_type plane_type; unsigned int supported_rotations; - unsigned int possible_crtcs; const u64 *modifiers; const u32 *formats; int num_formats; @@ -3008,7 +3077,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (INTEL_GEN(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(plane_id); + modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); plane_funcs = &gen12_plane_funcs; } else { if (plane->has_ccs) @@ -3023,10 +3092,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, else plane_type = DRM_PLANE_TYPE_OVERLAY; - possible_crtcs = BIT(pipe); - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, modifiers, 
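[Editor's note] The Wa_14010477008 hunk above gates media-compression CCS support on the hardware stepping: gen12_plane_supports_mc_ccs() now takes dev_priv so it can refuse the modifier on early Tigerlake revisions. A compact model of that revision-gated feature check — the stepping enum and the plane-count limit of 5 mirror the shape of the code but are simplified stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stepping IDs standing in for TGL_REVID_A0..C0. */
enum revid { REV_A0, REV_B0, REV_C0, REV_D0 };

static bool supports_mc_ccs(enum revid rev, int plane_id)
{
	/* Workaround-style gate: the feature is off for the early
	 * steppings listed in the workaround, on for later ones. */
	if (rev >= REV_A0 && rev <= REV_C0)
		return false;
	return plane_id < 5;  /* only the first planes support it */
}

int main(void)
{
	printf("C0, plane 0: %d\n", supports_mc_ccs(REV_C0, 0));
	printf("D0, plane 0: %d\n", supports_mc_ccs(REV_D0, 0));
	return 0;
}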
plane_type, "plane %d%c", plane_id + 1, @@ -3077,7 +3144,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, { struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; - unsigned long possible_crtcs; unsigned int supported_rotations; const u64 *modifiers; const u32 *formats; @@ -3162,10 +3228,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->id = PLANE_SPRITE0 + sprite; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - possible_crtcs = BIT(pipe); - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, plane_funcs, + 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "sprite %c", sprite_name(pipe, sprite)); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 7773169b7331..9b850c11aa78 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -61,7 +61,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) lane_mask = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); - WARN_ON(lane_mask == 0xffffffff); + drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); @@ -76,7 +76,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) pin_mask = intel_uncore_read(uncore, PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); - WARN_ON(pin_mask == 0xffffffff); + drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); @@ -120,7 +120,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, struct intel_uncore *uncore = &i915->uncore; u32 val; - WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + drm_WARN_ON(&i915->drm, + lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); @@ -181,8 +182,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); if (val == 0xffffffff) { - DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, nothing connected\n", + dig_port->tc_port_name); return mask; } @@ -195,7 +197,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) mask |= BIT(TC_PORT_LEGACY); /* The sink can be connected only in a single mode. 
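[Editor's note] The intel_tc.c hunks above repeatedly test register reads against 0xffffffff: a Type-C PHY in TCCOLD floats the bus, so every read returns all ones, and that value is treated as "nothing connected" rather than as real status. A small model of that sentinel check — the 4-bit status field is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A PHY in TCCOLD floats the bus, so reads come back as all ones.
 * Model: treat 0xffffffff as "no valid status available". */
static bool read_live_status(uint32_t reg_val, uint32_t *mask)
{
	if (reg_val == 0xffffffff) {
		fprintf(stderr, "PHY in TCCOLD, nothing connected\n");
		*mask = 0;
		return false;
	}
	*mask = reg_val & 0xf;  /* hypothetical status field */
	return true;
}

int main(void)
{
	uint32_t mask;

	read_live_status(0xffffffff, &mask);
	read_live_status(0x00000005, &mask);
	printf("mask=%#x\n", mask);
	return 0;
}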
*/ - if (!WARN_ON(hweight32(mask) > 1)) + if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1)) tc_port_fixup_legacy_flag(dig_port, mask); return mask; @@ -210,8 +212,9 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) val = intel_uncore_read(uncore, PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { - DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, assuming not complete\n", + dig_port->tc_port_name); return false; } @@ -228,8 +231,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { - DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", - dig_port->tc_port_name, + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", + dig_port->tc_port_name, enableddisabled(enable)); return false; @@ -243,8 +247,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) - DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, + "Port %s: PHY complete clear timed out\n", + dig_port->tc_port_name); return true; } @@ -258,8 +263,9 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { - DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n", - dig_port->tc_port_name); + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, assume safe mode\n", + dig_port->tc_port_name); return true; } @@ -409,16 +415,17 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, enum tc_port_mode old_tc_mode = dig_port->tc_mode; intel_display_power_flush_work(i915); - WARN_ON(intel_display_power_is_enabled(i915, - intel_aux_power_domain(dig_port))); + drm_WARN_ON(&i915->drm, + intel_display_power_is_enabled(i915, + intel_aux_power_domain(dig_port))); icl_tc_phy_disconnect(dig_port); icl_tc_phy_connect(dig_port, required_lanes); - DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n", - dig_port->tc_port_name, - tc_port_mode_name(old_tc_mode), - tc_port_mode_name(dig_port->tc_mode)); + drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", + dig_port->tc_port_name, + tc_port_mode_name(old_tc_mode), + tc_port_mode_name(dig_port->tc_mode)); } static void @@ -503,7 +510,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port, intel_tc_port_needs_reset(dig_port)) intel_tc_port_reset_mode(dig_port, required_lanes); - WARN_ON(dig_port->tc_lock_wakeref); + drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); dig_port->tc_lock_wakeref = wakeref; } @@ -550,7 +557,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(i915, port); - if (WARN_ON(tc_port == PORT_TC_NONE)) + if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE)) return; snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index c75e0ceecee6..d2e3a3a323e9 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -33,7 +33,6 @@ #include 
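[Editor's note] The live-status hunk above asserts that at most one mode bit is set before fixing up the legacy flag, using hweight32() as a population count. A sketch of that single-bit invariant check — __builtin_popcount stands in for hweight32() here, and the mask values are arbitrary:

#include <stdio.h>

/* A sink can be connected in at most one mode at a time, so more
 * than one set bit in the live-status mask indicates a bug. */
static int fixup_single_mode(unsigned int mask)
{
	if (__builtin_popcount(mask) > 1) {
		fprintf(stderr, "WARN: multiple live modes %#x\n", mask);
		return -1;
	}
	return 0;  /* safe to apply the legacy-flag fixup */
}

int main(void)
{
	printf("%d %d\n", fixup_single_mode(0x4), fixup_single_mode(0x5));
	return 0;
}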
<drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_connector.h" @@ -907,7 +906,7 @@ static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 tmp = I915_READ(TV_CTL); + u32 tmp = intel_de_read(dev_priv, TV_CTL); *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT; @@ -926,7 +925,8 @@ intel_enable_tv(struct intel_encoder *encoder, intel_wait_for_vblank(dev_priv, to_intel_crtc(pipe_config->uapi.crtc)->pipe); - I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); + intel_de_write(dev_priv, TV_CTL, + intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); } static void @@ -937,7 +937,8 @@ intel_disable_tv(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); + intel_de_write(dev_priv, TV_CTL, + intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE); } static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) @@ -1095,11 +1096,11 @@ intel_tv_get_config(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT); - tv_ctl = I915_READ(TV_CTL); - hctl1 = I915_READ(TV_H_CTL_1); - hctl3 = I915_READ(TV_H_CTL_3); - vctl1 = I915_READ(TV_V_CTL_1); - vctl2 = I915_READ(TV_V_CTL_2); + tv_ctl = intel_de_read(dev_priv, TV_CTL); + hctl1 = intel_de_read(dev_priv, TV_H_CTL_1); + hctl3 = intel_de_read(dev_priv, TV_H_CTL_3); + vctl1 = intel_de_read(dev_priv, TV_V_CTL_1); + vctl2 = intel_de_read(dev_priv, TV_V_CTL_2); tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT; tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT; @@ -1134,17 +1135,17 @@ intel_tv_get_config(struct intel_encoder *encoder, break; } - tmp = I915_READ(TV_WIN_POS); + tmp = intel_de_read(dev_priv, TV_WIN_POS); xpos = tmp >> 16; ypos = tmp & 0xffff; - tmp = I915_READ(TV_WIN_SIZE); + tmp = intel_de_read(dev_priv, TV_WIN_SIZE); xsize = tmp >> 16; ysize = tmp & 0xffff; intel_tv_mode_to_mode(&mode, &tv_mode); - DRM_DEBUG_KMS("TV mode:\n"); + drm_dbg_kms(&dev_priv->drm, "TV mode:\n"); drm_mode_debug_printmodeline(&mode); intel_tv_scale_mode_horiz(&mode, hdisplay, @@ -1200,7 +1201,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; - DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); + drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; pipe_config->port_clock = tv_mode->clock; @@ -1215,7 +1216,8 @@ intel_tv_compute_config(struct intel_encoder *encoder, extra = adjusted_mode->crtc_vdisplay - vdisplay; if (extra < 0) { - DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n"); + drm_dbg_kms(&dev_priv->drm, + "No vertical scaling for >1024 pixel wide modes\n"); return -EINVAL; } @@ -1248,7 +1250,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, tv_conn_state->bypass_vfilter = false; } - DRM_DEBUG_KMS("TV mode:\n"); + drm_dbg_kms(&dev_priv->drm, "TV mode:\n"); drm_mode_debug_printmodeline(adjusted_mode); /* @@ -1380,16 +1382,16 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv, vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) | (tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT); - I915_WRITE(TV_H_CTL_1, hctl1); - I915_WRITE(TV_H_CTL_2, hctl2); - I915_WRITE(TV_H_CTL_3, hctl3); - I915_WRITE(TV_V_CTL_1, vctl1); - 
I915_WRITE(TV_V_CTL_2, vctl2); - I915_WRITE(TV_V_CTL_3, vctl3); - I915_WRITE(TV_V_CTL_4, vctl4); - I915_WRITE(TV_V_CTL_5, vctl5); - I915_WRITE(TV_V_CTL_6, vctl6); - I915_WRITE(TV_V_CTL_7, vctl7); + intel_de_write(dev_priv, TV_H_CTL_1, hctl1); + intel_de_write(dev_priv, TV_H_CTL_2, hctl2); + intel_de_write(dev_priv, TV_H_CTL_3, hctl3); + intel_de_write(dev_priv, TV_V_CTL_1, vctl1); + intel_de_write(dev_priv, TV_V_CTL_2, vctl2); + intel_de_write(dev_priv, TV_V_CTL_3, vctl3); + intel_de_write(dev_priv, TV_V_CTL_4, vctl4); + intel_de_write(dev_priv, TV_V_CTL_5, vctl5); + intel_de_write(dev_priv, TV_V_CTL_6, vctl6); + intel_de_write(dev_priv, TV_V_CTL_7, vctl7); } static void set_color_conversion(struct drm_i915_private *dev_priv, @@ -1398,18 +1400,18 @@ static void set_color_conversion(struct drm_i915_private *dev_priv, if (!color_conversion) return; - I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) | - color_conversion->gy); - I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) | - color_conversion->ay); - I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) | - color_conversion->gu); - I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) | - color_conversion->au); - I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) | - color_conversion->gv); - I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) | - color_conversion->av); + intel_de_write(dev_priv, TV_CSC_Y, + (color_conversion->ry << 16) | color_conversion->gy); + intel_de_write(dev_priv, TV_CSC_Y2, + (color_conversion->by << 16) | color_conversion->ay); + intel_de_write(dev_priv, TV_CSC_U, + (color_conversion->ru << 16) | color_conversion->gu); + intel_de_write(dev_priv, TV_CSC_U2, + (color_conversion->bu << 16) | color_conversion->au); + intel_de_write(dev_priv, TV_CSC_V, + (color_conversion->rv << 16) | color_conversion->gv); + intel_de_write(dev_priv, TV_CSC_V2, + (color_conversion->bv << 16) | color_conversion->av); } static void intel_tv_pre_enable(struct intel_encoder *encoder, @@ -1434,7 +1436,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, if (!tv_mode) return; /* can't happen (mode_prepare prevents this) */ - tv_ctl = I915_READ(TV_CTL); + tv_ctl = intel_de_read(dev_priv, TV_CTL); tv_ctl &= TV_CTL_SAVE; switch (intel_tv->type) { @@ -1511,21 +1513,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, set_tv_mode_timings(dev_priv, tv_mode, burst_ena); - I915_WRITE(TV_SC_CTL_1, scctl1); - I915_WRITE(TV_SC_CTL_2, scctl2); - I915_WRITE(TV_SC_CTL_3, scctl3); + intel_de_write(dev_priv, TV_SC_CTL_1, scctl1); + intel_de_write(dev_priv, TV_SC_CTL_2, scctl2); + intel_de_write(dev_priv, TV_SC_CTL_3, scctl3); set_color_conversion(dev_priv, color_conversion); if (INTEL_GEN(dev_priv) >= 4) - I915_WRITE(TV_CLR_KNOBS, 0x00404000); + intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000); else - I915_WRITE(TV_CLR_KNOBS, 0x00606000); + intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000); if (video_levels) - I915_WRITE(TV_CLR_LEVEL, - ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | - (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); + intel_de_write(dev_priv, TV_CLR_LEVEL, + ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder); @@ -1533,7 +1534,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, tv_filter_ctl = TV_AUTO_SCALE; if (tv_conn_state->bypass_vfilter) tv_filter_ctl |= TV_V_FILTER_BYPASS; - I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl); + intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl); xsize = 
tv_mode->hblank_start - tv_mode->hblank_end; ysize = intel_tv_mode_vdisplay(tv_mode); @@ -1544,20 +1545,25 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, conn_state->tv.margins.right); ysize -= (tv_conn_state->margins.top + tv_conn_state->margins.bottom); - I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos); - I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize); + intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos); + intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize); j = 0; for (i = 0; i < 60; i++) - I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]); + intel_de_write(dev_priv, TV_H_LUMA(i), + tv_mode->filter_table[j++]); for (i = 0; i < 60; i++) - I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]); + intel_de_write(dev_priv, TV_H_CHROMA(i), + tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) - I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]); + intel_de_write(dev_priv, TV_V_LUMA(i), + tv_mode->filter_table[j++]); for (i = 0; i < 43; i++) - I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]); - I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); - I915_WRITE(TV_CTL, tv_ctl); + intel_de_write(dev_priv, TV_V_CHROMA(i), + tv_mode->filter_table[j++]); + intel_de_write(dev_priv, TV_DAC, + intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE); + intel_de_write(dev_priv, TV_CTL, tv_ctl); } static int @@ -1581,8 +1587,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv, spin_unlock_irq(&dev_priv->irq_lock); } - save_tv_dac = tv_dac = I915_READ(TV_DAC); - save_tv_ctl = tv_ctl = I915_READ(TV_CTL); + save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC); + save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL); /* Poll for TV detection */ tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK); @@ -1608,15 +1614,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv, tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL | TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL); - I915_WRITE(TV_CTL, tv_ctl); - I915_WRITE(TV_DAC, tv_dac); - POSTING_READ(TV_DAC); + intel_de_write(dev_priv, TV_CTL, tv_ctl); + intel_de_write(dev_priv, TV_DAC, tv_dac); + intel_de_posting_read(dev_priv, TV_DAC); intel_wait_for_vblank(dev_priv, intel_crtc->pipe); type = -1; - tv_dac = I915_READ(TV_DAC); - DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); + tv_dac = intel_de_read(dev_priv, TV_DAC); + drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac); /* * A B C * 0 1 1 Composite @@ -1624,22 +1630,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv, * 0 0 0 Component */ if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { - DRM_DEBUG_KMS("Detected Composite TV connection\n"); + drm_dbg_kms(&dev_priv->drm, + "Detected Composite TV connection\n"); type = DRM_MODE_CONNECTOR_Composite; } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { - DRM_DEBUG_KMS("Detected S-Video TV connection\n"); + drm_dbg_kms(&dev_priv->drm, + "Detected S-Video TV connection\n"); type = DRM_MODE_CONNECTOR_SVIDEO; } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { - DRM_DEBUG_KMS("Detected Component TV connection\n"); + drm_dbg_kms(&dev_priv->drm, + "Detected Component TV connection\n"); type = DRM_MODE_CONNECTOR_Component; } else { - DRM_DEBUG_KMS("Unrecognised TV connection\n"); + drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n"); type = -1; } - I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); - I915_WRITE(TV_CTL, save_tv_ctl); - POSTING_READ(TV_CTL); + intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + intel_de_write(dev_priv, TV_CTL, 
save_tv_ctl); + intel_de_posting_read(dev_priv, TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. */ intel_wait_for_vblank(dev_priv, intel_crtc->pipe); @@ -1794,7 +1803,7 @@ intel_tv_get_modes(struct drm_connector *connector) */ intel_tv_mode_to_mode(mode, tv_mode); if (count == 0) { - DRM_DEBUG_KMS("TV mode:\n"); + drm_dbg_kms(&dev_priv->drm, "TV mode:\n"); drm_mode_debug_printmodeline(mode); } intel_tv_scale_mode_horiz(mode, input->w, 0, 0); @@ -1870,11 +1879,11 @@ intel_tv_init(struct drm_i915_private *dev_priv) int i, initial_mode = 0; struct drm_connector_state *state; - if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) + if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) return; if (!intel_bios_is_tv_present(dev_priv)) { - DRM_DEBUG_KMS("Integrated TV is not present.\n"); + drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n"); return; } @@ -1882,15 +1891,15 @@ intel_tv_init(struct drm_i915_private *dev_priv) * Sanity check the TV output by checking to see if the * DAC register holds a value */ - save_tv_dac = I915_READ(TV_DAC); + save_tv_dac = intel_de_read(dev_priv, TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); - tv_dac_on = I915_READ(TV_DAC); + intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN); + tv_dac_on = intel_de_read(dev_priv, TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); - tv_dac_off = I915_READ(TV_DAC); + intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); + tv_dac_off = intel_de_read(dev_priv, TV_DAC); - I915_WRITE(TV_DAC, save_tv_dac); + intel_de_write(dev_priv, TV_DAC, save_tv_dac); /* * If the register does not hold the state change enable diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 4d0c23b29248..05c7cbe32eb4 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -111,7 +111,7 @@ enum bdb_block_id { BDB_LVDS_LFP_DATA_PTRS = 41, BDB_LVDS_LFP_DATA = 42, BDB_LVDS_BACKLIGHT = 43, - BDB_LVDS_POWER = 44, + BDB_LFP_POWER = 44, BDB_MIPI_CONFIG = 52, BDB_MIPI_SEQUENCE = 53, BDB_COMPRESSION_PARAMETERS = 56, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 9e6aaa302e40..95ad87d4ccb3 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -6,8 +6,6 @@ * Manasi Navare <manasi.d.navare@intel.com> */ -#include <drm/i915_drm.h> - #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dsi.h" @@ -374,7 +372,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state) return false; /* There's no pipe A DSC engine on ICL */ - WARN_ON(crtc->pipe == PIPE_A); + drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A); return true; } @@ -518,119 +516,149 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; - DRM_INFO("PPS0 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0, + pps_val); } else { - 
I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_1 registers */ pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); - DRM_INFO("PPS1 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_2 registers */ pps_val = 0; pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); - DRM_INFO("PPS2 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_3 registers */ pps_val = 0; pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); - DRM_INFO("PPS3 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_4 registers */ pps_val = 0; pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); - DRM_INFO("PPS4 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val); + 
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_5 registers */ pps_val = 0; pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); - DRM_INFO("PPS5 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_6 registers */ @@ -639,80 +667,100 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); - DRM_INFO("PPS6 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_7 registers */ pps_val = 0; pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); - DRM_INFO("PPS7 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - 
I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_8 registers */ pps_val = 0; pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); - DRM_INFO("PPS8 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_9 registers */ pps_val = 0; pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); - DRM_INFO("PPS9 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val); + intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9, + pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), + pps_val); } /* Populate PICTURE_PARAMETER_SET_10 registers */ @@ -721,20 +769,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); - DRM_INFO("PPS10 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val); + intel_de_write(dev_priv, + DSCC_PICTURE_PARAMETER_SET_10, pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), + pps_val); } /* Populate Picture parameter set 16 */ @@ -744,20 +797,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, vdsc_cfg->slice_width) | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); - DRM_INFO("PPS16 = 0x%08x\n", pps_val); + drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); if 
(!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val); + intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, + pps_val); /* * If 2 VDSC instances are needed, configure PPS for second * VDSC */ if (crtc_state->dsc.dsc_split) - I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val); + intel_de_write(dev_priv, + DSCC_PICTURE_PARAMETER_SET_16, pps_val); } else { - I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); + intel_de_write(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), + pps_val); if (crtc_state->dsc.dsc_split) - I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), - pps_val); + intel_de_write(dev_priv, + ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), + pps_val); } /* Populate the RC_BUF_THRESH registers */ @@ -766,42 +824,50 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * (i % 4)); - DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i, + drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i, rc_buf_thresh_dword[i / 4]); } if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); - I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); - I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); - I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); + intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, + rc_buf_thresh_dword[0]); + intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW, + rc_buf_thresh_dword[1]); + intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1, + rc_buf_thresh_dword[2]); + intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW, + rc_buf_thresh_dword[3]); if (crtc_state->dsc.dsc_split) { - I915_WRITE(DSCC_RC_BUF_THRESH_0, - rc_buf_thresh_dword[0]); - I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW, - rc_buf_thresh_dword[1]); - I915_WRITE(DSCC_RC_BUF_THRESH_1, - rc_buf_thresh_dword[2]); - I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW, - rc_buf_thresh_dword[3]); + intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0, + rc_buf_thresh_dword[0]); + intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW, + rc_buf_thresh_dword[1]); + intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1, + rc_buf_thresh_dword[2]); + intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW, + rc_buf_thresh_dword[3]); } } else { - I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe), - rc_buf_thresh_dword[0]); - I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), - rc_buf_thresh_dword[1]); - I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe), - rc_buf_thresh_dword[2]); - I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), - rc_buf_thresh_dword[3]); + intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe), + rc_buf_thresh_dword[0]); + intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe), + rc_buf_thresh_dword[1]); + intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe), + rc_buf_thresh_dword[2]); + intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), + rc_buf_thresh_dword[3]); if (crtc_state->dsc.dsc_split) { - I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe), - rc_buf_thresh_dword[0]); - I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), - rc_buf_thresh_dword[1]); - I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe), - rc_buf_thresh_dword[2]); - I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe), - rc_buf_thresh_dword[3]); + intel_de_write(dev_priv, + ICL_DSC1_RC_BUF_THRESH_0(pipe), + rc_buf_thresh_dword[0]); + intel_de_write(dev_priv, + ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), + rc_buf_thresh_dword[1]); + intel_de_write(dev_priv, + ICL_DSC1_RC_BUF_THRESH_1(pipe), + rc_buf_thresh_dword[2]); + intel_de_write(dev_priv, + 
ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe), + rc_buf_thresh_dword[3]); } } @@ -815,78 +881,94 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder, RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); - DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i, + drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i, rc_range_params_dword[i / 2]); } if (!is_pipe_dsc(crtc_state)) { - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0, - rc_range_params_dword[0]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW, - rc_range_params_dword[1]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1, - rc_range_params_dword[2]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW, - rc_range_params_dword[3]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2, - rc_range_params_dword[4]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW, - rc_range_params_dword[5]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3, - rc_range_params_dword[6]); - I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW, - rc_range_params_dword[7]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, + rc_range_params_dword[0]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW, + rc_range_params_dword[1]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1, + rc_range_params_dword[2]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW, + rc_range_params_dword[3]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2, + rc_range_params_dword[4]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW, + rc_range_params_dword[5]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3, + rc_range_params_dword[6]); + intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW, + rc_range_params_dword[7]); if (crtc_state->dsc.dsc_split) { - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0, - rc_range_params_dword[0]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW, - rc_range_params_dword[1]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1, - rc_range_params_dword[2]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW, - rc_range_params_dword[3]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2, - rc_range_params_dword[4]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW, - rc_range_params_dword[5]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3, - rc_range_params_dword[6]); - I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW, - rc_range_params_dword[7]); + intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0, + rc_range_params_dword[0]); + intel_de_write(dev_priv, + DSCC_RC_RANGE_PARAMETERS_0_UDW, + rc_range_params_dword[1]); + intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1, + rc_range_params_dword[2]); + intel_de_write(dev_priv, + DSCC_RC_RANGE_PARAMETERS_1_UDW, + rc_range_params_dword[3]); + intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2, + rc_range_params_dword[4]); + intel_de_write(dev_priv, + DSCC_RC_RANGE_PARAMETERS_2_UDW, + rc_range_params_dword[5]); + intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3, + rc_range_params_dword[6]); + intel_de_write(dev_priv, + DSCC_RC_RANGE_PARAMETERS_3_UDW, + rc_range_params_dword[7]); } } else { - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), - rc_range_params_dword[0]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe), - rc_range_params_dword[1]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe), - rc_range_params_dword[2]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe), - rc_range_params_dword[3]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe), - rc_range_params_dword[4]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe), - rc_range_params_dword[5]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe), 
- rc_range_params_dword[6]); - I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), - rc_range_params_dword[7]); + intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe), + rc_range_params_dword[0]); + intel_de_write(dev_priv, + ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe), + rc_range_params_dword[1]); + intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe), + rc_range_params_dword[2]); + intel_de_write(dev_priv, + ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe), + rc_range_params_dword[3]); + intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe), + rc_range_params_dword[4]); + intel_de_write(dev_priv, + ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe), + rc_range_params_dword[5]); + intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe), + rc_range_params_dword[6]); + intel_de_write(dev_priv, + ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), + rc_range_params_dword[7]); if (crtc_state->dsc.dsc_split) { - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), - rc_range_params_dword[0]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), - rc_range_params_dword[1]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe), - rc_range_params_dword[2]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe), - rc_range_params_dword[3]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe), - rc_range_params_dword[4]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe), - rc_range_params_dword[5]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe), - rc_range_params_dword[6]); - I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe), - rc_range_params_dword[7]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), + rc_range_params_dword[0]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), + rc_range_params_dword[1]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe), + rc_range_params_dword[2]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe), + rc_range_params_dword[3]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe), + rc_range_params_dword[4]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe), + rc_range_params_dword[5]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe), + rc_range_params_dword[6]); + intel_de_write(dev_priv, + ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe), + rc_range_params_dword[7]); } } } @@ -912,11 +994,11 @@ void intel_dsc_get_config(struct intel_encoder *encoder, return; if (!is_pipe_dsc(crtc_state)) { - dss_ctl1 = I915_READ(DSS_CTL1); - dss_ctl2 = I915_READ(DSS_CTL2); + dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); + dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); } else { - dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe)); - dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe)); + dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe)); } crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE; @@ -930,9 +1012,10 @@ void intel_dsc_get_config(struct intel_encoder *encoder, /* PPS1 */ if (!is_pipe_dsc(crtc_state)) - val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1); + val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); else - val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); + val = intel_de_read(dev_priv, + ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); vdsc_cfg->bits_per_pixel = val; crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; out: @@ -1013,8 +1096,8 @@ void intel_dsc_enable(struct intel_encoder *encoder, dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; 
dss_ctl1_val |= JOINER_ENABLE; } - I915_WRITE(dss_ctl1_reg, dss_ctl1_val); - I915_WRITE(dss_ctl2_reg, dss_ctl2_val); + intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val); + intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val); } void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) @@ -1035,17 +1118,17 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe); dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); } - dss_ctl1_val = I915_READ(dss_ctl1_reg); + dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg); if (dss_ctl1_val & JOINER_ENABLE) dss_ctl1_val &= ~JOINER_ENABLE; - I915_WRITE(dss_ctl1_reg, dss_ctl1_val); + intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val); - dss_ctl2_val = I915_READ(dss_ctl2_reg); + dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg); if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE || dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE) dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE | RIGHT_BRANCH_VDSC_ENABLE); - I915_WRITE(dss_ctl2_reg, dss_ctl2_val); + intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val); /* Disable Power wells for VDSC/joining */ intel_display_power_put_unchecked(dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index 2ff7293986d4..be333699c515 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -9,6 +9,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" +#include "intel_de.h" #include "intel_vga.h" static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) @@ -36,16 +37,17 @@ void intel_vga_disable(struct drm_i915_private *dev_priv) vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); - I915_WRITE(vga_reg, VGA_DISP_DISABLE); - POSTING_READ(vga_reg); + intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE); + intel_de_posting_read(dev_priv, vga_reg); } void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv) { i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); - if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { - DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); + if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) { + drm_dbg_kms(&dev_priv->drm, + "Something enabled VGA plane, disabling it\n"); intel_vga_disable(dev_priv); } } @@ -98,7 +100,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode) u16 gmch_ctrl; if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) { - DRM_ERROR("failed to read control word\n"); + drm_err(&i915->drm, "failed to read control word\n"); return -EIO; } @@ -111,7 +113,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode) gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) { - DRM_ERROR("failed to write control word\n"); + drm_err(&i915->drm, "failed to write control word\n"); return -EIO; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index daf4fc3dab6f..f4c362dc6e15 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -85,7 +85,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port), mask, 100)) - DRM_ERROR("DPI FIFOs are not empty\n"); + drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n"); } static void write_data(struct drm_i915_private *dev_priv, @@ -100,7 +100,7 @@ static void write_data(struct drm_i915_private *dev_priv, for (j = 0; j < min_t(u32, len - 
i, 4); j++) val |= *data++ << 8 * j; - I915_WRITE(reg, val); + intel_de_write(dev_priv, reg, val); } } @@ -111,7 +111,7 @@ static void read_data(struct drm_i915_private *dev_priv, u32 i, j; for (i = 0; i < len; i += 4) { - u32 val = I915_READ(reg); + u32 val = intel_de_read(dev_priv, reg); for (j = 0; j < min_t(u32, len - i, 4); j++) *data++ = val >> 8 * j; @@ -154,29 +154,34 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, if (packet.payload_length) { if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), data_mask, 50)) - DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); + drm_err(&dev_priv->drm, + "Timeout waiting for HS/LP DATA FIFO !full\n"); write_data(dev_priv, data_reg, packet.payload, packet.payload_length); } if (msg->rx_len) { - I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); + intel_de_write(dev_priv, MIPI_INTR_STAT(port), + GEN_READ_DATA_AVAIL); } if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port), ctrl_mask, 50)) { - DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); + drm_err(&dev_priv->drm, + "Timeout waiting for HS/LP CTRL FIFO !full\n"); } - I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]); + intel_de_write(dev_priv, ctrl_reg, + header[2] << 16 | header[1] << 8 | header[0]); /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), data_mask, 50)) - DRM_ERROR("Timeout waiting for read data.\n"); + drm_err(&dev_priv->drm, + "Timeout waiting for read data.\n"); read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); } @@ -223,17 +228,19 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, cmd |= DPI_LP_MODE; /* clear bit */ - I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT); + intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT); /* XXX: old code skips write if control unchanged */ - if (cmd == I915_READ(MIPI_DPI_CONTROL(port))) - DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd); + if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port))) + drm_dbg_kms(&dev_priv->drm, + "Same special packet %02x twice in a row.\n", cmd); - I915_WRITE(MIPI_DPI_CONTROL(port), cmd); + intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100)) - DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); + drm_err(&dev_priv->drm, + "Video mode command 0x%08x send failed.\n", cmd); return 0; } @@ -265,7 +272,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; if (fixed_mode) { @@ -328,36 +335,37 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) * Power ON MIPI IO first and then write into IO reset and LP wake bits */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(MIPI_CTRL(port)); - I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE); + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); + intel_de_write(dev_priv, MIPI_CTRL(port), + tmp | GLK_MIPIIO_ENABLE); } /* Put the IO into reset */ - tmp = I915_READ(MIPI_CTRL(PORT_A)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); tmp &= ~GLK_MIPIIO_RESET_RELEASED; - I915_WRITE(MIPI_CTRL(PORT_A), tmp); + intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); /* Program LP Wake */ 
for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(MIPI_CTRL(port)); - if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); + if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) tmp &= ~GLK_LP_WAKE; else tmp |= GLK_LP_WAKE; - I915_WRITE(MIPI_CTRL(port), tmp); + intel_de_write(dev_priv, MIPI_CTRL(port), tmp); } /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 20)) - DRM_ERROR("MIPIO port is powergated\n"); + drm_err(&dev_priv->drm, "MIPIO port is powergated\n"); } /* Check for cold boot scenario */ for_each_dsi_port(port, intel_dsi->ports) { cold_boot |= - !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY); + !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY); } return cold_boot; @@ -374,48 +382,49 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) - DRM_ERROR("PHY is not ON\n"); + drm_err(&dev_priv->drm, "PHY is not ON\n"); } /* Get IO out of reset */ - val = I915_READ(MIPI_CTRL(PORT_A)); - I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED); + val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); + intel_de_write(dev_priv, MIPI_CTRL(PORT_A), + val | GLK_MIPIIO_RESET_RELEASED); /* Get IO out of Low power state*/ for_each_dsi_port(port, intel_dsi->ports) { - if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) { - val = I915_READ(MIPI_DEVICE_READY(port)); + if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; val |= DEVICE_READY; - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); usleep_range(10, 15); } else { /* Enter ULPS */ - val = I915_READ(MIPI_DEVICE_READY(port)); + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; val |= (ULPS_STATE_ENTER | DEVICE_READY); - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); /* Wait for ULPS active */ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 20)) - DRM_ERROR("ULPS not active\n"); + drm_err(&dev_priv->drm, "ULPS not active\n"); /* Exit ULPS */ - val = I915_READ(MIPI_DEVICE_READY(port)); + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; val |= (ULPS_STATE_EXIT | DEVICE_READY); - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); /* Enter Normal Mode */ - val = I915_READ(MIPI_DEVICE_READY(port)); + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); - val = I915_READ(MIPI_CTRL(port)); + val = intel_de_read(dev_priv, MIPI_CTRL(port)); val &= ~GLK_LP_WAKE; - I915_WRITE(MIPI_CTRL(port), val); + intel_de_write(dev_priv, MIPI_CTRL(port), val); } } @@ -423,14 +432,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE, 20)) - DRM_ERROR("Date lane not in STOP state\n"); + drm_err(&dev_priv->drm, + "Date lane not in STOP state\n"); } /* Wait for AFE 
LATCH */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT, 20)) - DRM_ERROR("D-PHY not entering LP-11 state\n"); + drm_err(&dev_priv->drm, + "D-PHY not entering LP-11 state\n"); } } @@ -441,23 +452,24 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) enum port port; u32 val; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); /* Enable MIPI PHY transparent latch */ for_each_dsi_port(port, intel_dsi->ports) { - val = I915_READ(BXT_MIPI_PORT_CTRL(port)); - I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD); + val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); + intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port), + val | LP_OUTPUT_HOLD); usleep_range(2000, 2500); } /* Clear ULPS and set device ready */ for_each_dsi_port(port, intel_dsi->ports) { - val = I915_READ(MIPI_DEVICE_READY(port)); + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); usleep_range(2000, 2500); val |= DEVICE_READY; - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); } } @@ -468,7 +480,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) enum port port; u32 val; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); vlv_flisdsi_get(dev_priv); /* program rcomp for compliance, reduce from 50 ohms to 45 ohms @@ -481,21 +493,25 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_ENTER); usleep_range(2500, 3000); /* Enable MIPI PHY transparent latch * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ - val = I915_READ(MIPI_PORT_CTRL(PORT_A)); - I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD); + val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A)); + intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A), + val | LP_OUTPUT_HOLD); usleep_range(1000, 1500); - I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_EXIT); usleep_range(2500, 3000); - I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + DEVICE_READY); usleep_range(2500, 3000); } } @@ -521,24 +537,25 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Enter ULPS */ for_each_dsi_port(port, intel_dsi->ports) { - val = I915_READ(MIPI_DEVICE_READY(port)); + val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); val &= ~ULPS_STATE_MASK; val |= (ULPS_STATE_ENTER | DEVICE_READY); - I915_WRITE(MIPI_DEVICE_READY(port), val); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); } /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) - DRM_ERROR("PHY is not turning OFF\n"); + drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); } /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 20)) - DRM_ERROR("MIPI IO Port is not powergated\n"); + drm_err(&dev_priv->drm, + "MIPI IO Port is not powergated\n"); } } @@ -550,22 +567,22 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) 
u32 tmp; /* Put the IO into reset */ - tmp = I915_READ(MIPI_CTRL(PORT_A)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); tmp &= ~GLK_MIPIIO_RESET_RELEASED; - I915_WRITE(MIPI_CTRL(PORT_A), tmp); + intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 20)) - DRM_ERROR("PHY is not turning OFF\n"); + drm_err(&dev_priv->drm, "PHY is not turning OFF\n"); } /* Clear MIPI mode */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(MIPI_CTRL(port)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= ~GLK_MIPIIO_ENABLE; - I915_WRITE(MIPI_CTRL(port), tmp); + intel_de_write(dev_priv, MIPI_CTRL(port), tmp); } } @@ -581,23 +598,23 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); u32 val; - I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | - ULPS_STATE_ENTER); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | - ULPS_STATE_EXIT); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + DEVICE_READY | ULPS_STATE_EXIT); usleep_range(2000, 2500); - I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | - ULPS_STATE_ENTER); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), + DEVICE_READY | ULPS_STATE_ENTER); usleep_range(2000, 2500); /* @@ -607,14 +624,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) if ((IS_GEN9_LP(dev_priv) || port == PORT_A) && intel_de_wait_for_clear(dev_priv, port_ctrl, AFE_LATCHOUT, 30)) - DRM_ERROR("DSI LP not going Low\n"); + drm_err(&dev_priv->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ - val = I915_READ(port_ctrl); - I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD); + val = intel_de_read(dev_priv, port_ctrl); + intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD); usleep_range(1000, 1500); - I915_WRITE(MIPI_DEVICE_READY(port), 0x00); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); usleep_range(2000, 2500); } } @@ -631,18 +648,20 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, u32 temp; if (IS_GEN9_LP(dev_priv)) { for_each_dsi_port(port, intel_dsi->ports) { - temp = I915_READ(MIPI_CTRL(port)); + temp = intel_de_read(dev_priv, + MIPI_CTRL(port)); temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK | intel_dsi->pixel_overlap << BXT_PIXEL_OVERLAP_CNT_SHIFT; - I915_WRITE(MIPI_CTRL(port), temp); + intel_de_write(dev_priv, MIPI_CTRL(port), + temp); } } else { - temp = I915_READ(VLV_CHICKEN_3); + temp = intel_de_read(dev_priv, VLV_CHICKEN_3); temp &= ~PIXEL_OVERLAP_CNT_MASK | intel_dsi->pixel_overlap << PIXEL_OVERLAP_CNT_SHIFT; - I915_WRITE(VLV_CHICKEN_3, temp); + intel_de_write(dev_priv, VLV_CHICKEN_3, temp); } } @@ -651,7 +670,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); u32 temp; - temp = I915_READ(port_ctrl); + temp = intel_de_read(dev_priv, port_ctrl); temp &= ~LANE_CONFIGURATION_MASK; temp &= ~DUAL_LINK_MODE_MASK; @@ -671,8 +690,8 @@ static void intel_dsi_port_enable(struct intel_encoder 
*encoder, temp |= DITHERING_ENABLE; /* assert ip_tg_enable signal */ - I915_WRITE(port_ctrl, temp | DPI_ENABLE); - POSTING_READ(port_ctrl); + intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE); + intel_de_posting_read(dev_priv, port_ctrl); } } @@ -689,9 +708,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) u32 temp; /* de-assert ip_tg_enable signal */ - temp = I915_READ(port_ctrl); - I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); - POSTING_READ(port_ctrl); + temp = intel_de_read(dev_priv, port_ctrl); + intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE); + intel_de_posting_read(dev_priv, port_ctrl); } } @@ -753,7 +772,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, u32 val; bool glk_cold_boot = false; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); @@ -771,22 +790,22 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ - val = I915_READ(BXT_P_CR_GT_DISP_PWRON); - I915_WRITE(BXT_P_CR_GT_DISP_PWRON, - val | MIPIO_RST_CTRL); + val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); + intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, + val | MIPIO_RST_CTRL); /* Power up DSI regulator */ - I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); - I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0); + intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); + intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0); } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 val; /* Disable DPOunit clock gating, can stall pipe */ - val = I915_READ(DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D); val |= DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D, val); } if (!IS_GEMINILAKE(dev_priv)) @@ -820,7 +839,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, * recommendation, port should be enabled befor plane & pipe */ if (is_cmd_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) - I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4); + intel_de_write(dev_priv, + MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); } else { @@ -838,6 +858,15 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); } +static void bxt_dsi_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + WARN_ON(crtc_state->has_pch_encoder); + + intel_crtc_vblank_on(crtc_state); +} + /* * DSI port disable has to be done after pipe and plane disable, so we do it in * the post_disable hook. 
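
The vlv_dsi.c hunks above repeat the two mechanical substitutions this series applies everywhere: the raw I915_READ()/I915_WRITE()/POSTING_READ() accessors become intel_de_read()/intel_de_write()/intel_de_posting_read(), and the global DRM_DEBUG_KMS()/DRM_ERROR() printers become the device-aware drm_dbg_kms()/drm_err(). A minimal sketch of the display-engine MMIO helper idiom being converted to, assuming the intel_de.h definitions of this period (paraphrased for illustration, not quoted from the patch):

	/* Display-engine MMIO helpers: thin wrappers over the uncore accessors. */
	static inline u32
	intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		return intel_uncore_read(&i915->uncore, reg);
	}

	static inline void
	intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
	{
		intel_uncore_write(&i915->uncore, reg, val);
	}

	/* Read the register back to flush posted writes out to the device. */
	static inline void
	intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		intel_uncore_posting_read(&i915->uncore, reg);
	}

The design point of the conversion is that the device pointer becomes explicit: the old macros expanded an implicit dev_priv from the enclosing scope, whereas the helpers take struct drm_i915_private directly, and that same pointer hands drm_dbg_kms()/drm_err() a struct drm_device to attribute messages to, which matters once more than one GPU is logging.
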
@@ -886,7 +915,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, enum port port; u32 val; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); if (IS_GEN9_LP(dev_priv)) { intel_crtc_vblank_off(old_crtc_state); @@ -917,13 +946,14 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, if (IS_BROXTON(dev_priv)) { /* Power down DSI regulator to save power */ - I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); - I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT); + intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); + intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, + HS_IO_CTRL_SELECT); /* Add MIPI IO reset programming for modeset */ - val = I915_READ(BXT_P_CR_GT_DISP_PWRON); - I915_WRITE(BXT_P_CR_GT_DISP_PWRON, - val & ~MIPIO_RST_CTRL); + val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); + intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, + val & ~MIPIO_RST_CTRL); } if (IS_GEN9_LP(dev_priv)) { @@ -933,9 +963,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder, vlv_dsi_pll_disable(encoder); - val = I915_READ(DSPCLK_GATE_D); + val = intel_de_read(dev_priv, DSPCLK_GATE_D); val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); + intel_de_write(dev_priv, DSPCLK_GATE_D, val); } /* Assert reset */ @@ -960,7 +990,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum port port; bool active = false; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); wakeref = intel_display_power_get_if_enabled(dev_priv, encoder->power_domain); @@ -979,7 +1009,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); - bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; + bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE; /* * Due to some hardware limitations on VLV/CHV, the DPI enable @@ -988,26 +1018,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port == PORT_C) - enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { - u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port)); + u32 tmp = intel_de_read(dev_priv, + MIPI_DSI_FUNC_PRG(port)); enabled = tmp & CMD_MODE_DATA_WIDTH_MASK; } if (!enabled) continue; - if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) + if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) continue; if (IS_GEN9_LP(dev_priv)) { - u32 tmp = I915_READ(MIPI_CTRL(port)); + u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; - if (WARN_ON(tmp > PIPE_C)) + if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C)) continue; *pipe = tmp; @@ -1051,11 +1082,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, * encoder->get_hw_state() returns true. 
*/ for_each_dsi_port(port, intel_dsi->ports) { - if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) + if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) break; } - fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; + fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; bpp = mipi_dsi_pixel_format_to_bpp( pixel_format_from_register_bits(fmt)); @@ -1067,21 +1098,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, /* In terms of pixels */ adjusted_mode->crtc_hdisplay = - I915_READ(BXT_MIPI_TRANS_HACTIVE(port)); + intel_de_read(dev_priv, + BXT_MIPI_TRANS_HACTIVE(port)); adjusted_mode->crtc_vdisplay = - I915_READ(BXT_MIPI_TRANS_VACTIVE(port)); + intel_de_read(dev_priv, + BXT_MIPI_TRANS_VACTIVE(port)); adjusted_mode->crtc_vtotal = - I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); + intel_de_read(dev_priv, + BXT_MIPI_TRANS_VTOTAL(port)); hactive = adjusted_mode->crtc_hdisplay; - hfp = I915_READ(MIPI_HFP_COUNT(port)); + hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port)); /* * Meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ - hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port)); - hbp = I915_READ(MIPI_HBP_COUNT(port)); + hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port)); + hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port)); /* harizontal values are in terms of high speed byte clock */ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, @@ -1098,9 +1132,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, } /* vertical values are in terms of lines */ - vfp = I915_READ(MIPI_VFP_COUNT(port)); - vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); - vbp = I915_READ(MIPI_VBP_COUNT(port)); + vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port)); + vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port)); + vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port)); adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; @@ -1191,7 +1225,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 pclk; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); @@ -1268,26 +1302,29 @@ static void set_dsi_timings(struct drm_encoder *encoder, * vactive, as they are calculated per channel basis, * whereas these values should be based on resolution. 
*/ - I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port), - adjusted_mode->crtc_hdisplay); - I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port), - adjusted_mode->crtc_vdisplay); - I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port), - adjusted_mode->crtc_vtotal); + intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port), + adjusted_mode->crtc_hdisplay); + intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port), + adjusted_mode->crtc_vdisplay); + intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port), + adjusted_mode->crtc_vtotal); } - I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive); - I915_WRITE(MIPI_HFP_COUNT(port), hfp); + intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port), + hactive); + intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp); /* meaningful for video mode non-burst sync pulse mode only, * can be zero for non-burst sync events and burst modes */ - I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync); - I915_WRITE(MIPI_HBP_COUNT(port), hbp); + intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port), + hsync); + intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp); /* vertical values are in terms of lines */ - I915_WRITE(MIPI_VFP_COUNT(port), vfp); - I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync); - I915_WRITE(MIPI_VBP_COUNT(port), vbp); + intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp); + intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port), + vsync); + intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp); } } @@ -1322,7 +1359,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, u32 val, tmp; u16 mode_hdisplay; - DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe)); + drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe)); mode_hdisplay = adjusted_mode->crtc_hdisplay; @@ -1338,35 +1375,35 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? */ - tmp = I915_READ(MIPI_CTRL(PORT_A)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; - I915_WRITE(MIPI_CTRL(PORT_A), tmp | - ESCAPE_CLOCK_DIVIDER_1); + intel_de_write(dev_priv, MIPI_CTRL(PORT_A), + tmp | ESCAPE_CLOCK_DIVIDER_1); /* read request priority is per pipe */ - tmp = I915_READ(MIPI_CTRL(port)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= ~READ_REQUEST_PRIORITY_MASK; - I915_WRITE(MIPI_CTRL(port), tmp | - READ_REQUEST_PRIORITY_HIGH); + intel_de_write(dev_priv, MIPI_CTRL(port), + tmp | READ_REQUEST_PRIORITY_HIGH); } else if (IS_GEN9_LP(dev_priv)) { enum pipe pipe = intel_crtc->pipe; - tmp = I915_READ(MIPI_CTRL(port)); + tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); tmp &= ~BXT_PIPE_SELECT_MASK; tmp |= BXT_PIPE_SELECT(pipe); - I915_WRITE(MIPI_CTRL(port), tmp); + intel_de_write(dev_priv, MIPI_CTRL(port), tmp); } /* XXX: why here, why like this? handling in irq handler?! 
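Each of the MIPI_CTRL updates above follows the same read-modify-write shape: read, clear the field mask, OR in the new value, write back. Spelled out once as a hypothetical local helper (de_rmw is not a name from this patch):

static void de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
		   u32 clear, u32 set)
{
	u32 val;

	val = intel_de_read(i915, reg);
	val &= ~clear;
	val |= set;
	intel_de_write(i915, reg, val);
}

/*
 * e.g. the escape-clock divider update above collapses to:
 *	de_rmw(dev_priv, MIPI_CTRL(PORT_A),
 *	       ESCAPE_CLOCK_DIVIDER_MASK, ESCAPE_CLOCK_DIVIDER_1);
 */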
*/ - I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff); - I915_WRITE(MIPI_INTR_EN(port), 0xffffffff); + intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff); + intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff); - I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg); + intel_de_write(dev_priv, MIPI_DPHY_PARAM(port), + intel_dsi->dphy_reg); - I915_WRITE(MIPI_DPI_RESOLUTION(port), - adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | - mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); + intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port), + adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); } set_dsi_timings(encoder, adjusted_mode); @@ -1393,7 +1430,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, } for_each_dsi_port(port, intel_dsi->ports) { - I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); + intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); /* timeouts for recovery. one frame IIUC. if counter expires, * EOT and stop state. */ @@ -1414,28 +1451,24 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, if (is_vid_mode(intel_dsi) && intel_dsi->video_mode_format == VIDEO_MODE_BURST) { - I915_WRITE(MIPI_HS_TX_TIMEOUT(port), - txbyteclkhs(adjusted_mode->crtc_htotal, bpp, - intel_dsi->lane_count, - intel_dsi->burst_mode_ratio) + 1); + intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), + txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } else { - I915_WRITE(MIPI_HS_TX_TIMEOUT(port), - txbyteclkhs(adjusted_mode->crtc_vtotal * - adjusted_mode->crtc_htotal, - bpp, intel_dsi->lane_count, - intel_dsi->burst_mode_ratio) + 1); + intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port), + txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1); } - I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout); - I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port), - intel_dsi->turn_arnd_val); - I915_WRITE(MIPI_DEVICE_RESET_TIMER(port), - intel_dsi->rst_timer_val); + intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port), + intel_dsi->lp_rx_timeout); + intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port), + intel_dsi->turn_arnd_val); + intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port), + intel_dsi->rst_timer_val); /* dphy stuff */ /* in terms of low power clock */ - I915_WRITE(MIPI_INIT_COUNT(port), - txclkesc(intel_dsi->escape_clk_div, 100)); + intel_de_write(dev_priv, MIPI_INIT_COUNT(port), + txclkesc(intel_dsi->escape_clk_div, 100)); if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) { /* @@ -1444,24 +1477,25 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * getting used. So write the other port * if not in dual link mode. */ - I915_WRITE(MIPI_INIT_COUNT(port == - PORT_A ? PORT_C : PORT_A), - intel_dsi->init_count); + intel_de_write(dev_priv, + MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A), + intel_dsi->init_count); } /* recovery disables */ - I915_WRITE(MIPI_EOT_DISABLE(port), tmp); + intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp); /* in terms of low power clock */ - I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count); + intel_de_write(dev_priv, MIPI_INIT_COUNT(port), + intel_dsi->init_count); /* in terms of txbyteclkhs. actual high to low switch + * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK. * * XXX: write MIPI_STOP_STATE_STALL? 
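MIPI_INIT_COUNT above is programmed in escape-clock ticks, and txclkesc() translates a microsecond budget into ticks for the chosen divider. A sketch under the assumption of a 20 MHz escape clock, i.e. 20 ticks per microsecond at divider 1:

/* convert microseconds to escape-clock ticks (20 MHz base clock) */
static u32 txclkesc(u32 divider, unsigned int us)
{
	switch (divider) {
	case ESCAPE_CLOCK_DIVIDER_1:
	default:
		return 20 * us;
	case ESCAPE_CLOCK_DIVIDER_2:
		return 10 * us;
	case ESCAPE_CLOCK_DIVIDER_4:
		return 5 * us;
	}
}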
*/ - I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port), - intel_dsi->hs_to_lp_count); + intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port), + intel_dsi->hs_to_lp_count); /* XXX: low power clock equivalence in terms of byte clock. * the number of byte clocks occupied in one low power clock. @@ -1469,14 +1503,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL * ) / 105.??? */ - I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk); + intel_de_write(dev_priv, MIPI_LP_BYTECLK(port), + intel_dsi->lp_byte_clk); if (IS_GEMINILAKE(dev_priv)) { - I915_WRITE(MIPI_TLPX_TIME_COUNT(port), - intel_dsi->lp_byte_clk); + intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port), + intel_dsi->lp_byte_clk); /* Shadow of DPHY reg */ - I915_WRITE(MIPI_CLK_LANE_TIMING(port), - intel_dsi->dphy_reg); + intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port), + intel_dsi->dphy_reg); } /* the bw essential for transmitting 16 long packets containing @@ -1484,21 +1519,18 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, * this register in terms of byte clocks. based on dsi transfer * rate and the number of lanes configured the time taken to * transmit 16 long packets in a dsi stream varies. */ - I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer); + intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port), + intel_dsi->bw_timer); - I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port), - intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | - intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); + intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port), + intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); if (is_vid_mode(intel_dsi)) /* Some panels might have resolution which is not a * multiple of 64 like 1366 x 768. 
Enable RANDOM * resolution support for such panels by default */ - I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port), - intel_dsi->video_frmt_cfg_bits | - intel_dsi->video_mode_format | - IP_TG_CONFIG | - RANDOM_DPI_DISPLAY_RESOLUTION); + intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port), + intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION); } } @@ -1514,19 +1546,19 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { /* Panel commands can be sent when clock is in LP11 */ - I915_WRITE(MIPI_DEVICE_READY(port), 0x0); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0); if (IS_GEN9_LP(dev_priv)) bxt_dsi_reset_clocks(encoder, port); else vlv_dsi_reset_clocks(encoder, port); - I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); + intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); - val = I915_READ(MIPI_DSI_FUNC_PRG(port)); + val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)); val &= ~VID_MODE_FORMAT_MASK; - I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); + intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); - I915_WRITE(MIPI_DEVICE_READY(port), 0x1); + intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); } } @@ -1559,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; -static enum drm_panel_orientation -vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_encoder *encoder = connector->encoder; - enum intel_display_power_domain power_domain; - enum drm_panel_orientation orientation; - struct intel_plane *plane; - struct intel_crtc *crtc; - intel_wakeref_t wakeref; - enum pipe pipe; - u32 val; - - if (!encoder->get_hw_state(encoder, &pipe)) - return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; - - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - plane = to_intel_plane(crtc->base.primary); - - power_domain = POWER_DOMAIN_PIPE(pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; - - val = I915_READ(DSPCNTR(plane->i9xx_plane)); - - if (!(val & DISPLAY_PLANE_ENABLE)) - orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; - else if (val & DISPPLANE_ROTATE_180) - orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; - else - orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; - - intel_display_power_put(dev_priv, power_domain, wakeref); - - return orientation; -} - -static enum drm_panel_orientation -vlv_dsi_get_panel_orientation(struct intel_connector *connector) -{ - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum drm_panel_orientation orientation; - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - orientation = vlv_dsi_get_hw_panel_orientation(connector); - if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) - return orientation; - } - - return intel_dsi_get_panel_orientation(connector); -} - static void vlv_dsi_add_properties(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1628,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector) connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; - connector->base.display_info.panel_orientation = - vlv_dsi_get_panel_orientation(connector); - drm_connector_init_panel_orientation_property( + 
drm_connector_set_panel_orientation_with_quirk( &connector->base, + intel_dsi_get_panel_orientation(connector), connector->panel.fixed_mode->hdisplay, connector->panel.fixed_mode->vdisplay); } @@ -1703,7 +1681,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); if (prepare_cnt > PREPARE_CNT_MAX) { - DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt); + drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n", + prepare_cnt); prepare_cnt = PREPARE_CNT_MAX; } @@ -1723,7 +1702,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) exit_zero_cnt += 1; if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt); + drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n", + exit_zero_cnt); exit_zero_cnt = EXIT_ZERO_CNT_MAX; } @@ -1733,7 +1713,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) * ui_den, ui_num * mul); if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { - DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt); + drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n", + clk_zero_cnt); clk_zero_cnt = CLK_ZERO_CNT_MAX; } @@ -1742,7 +1723,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); if (trail_cnt > TRAIL_CNT_MAX) { - DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt); + drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n", + trail_cnt); trail_cnt = TRAIL_CNT_MAX; } @@ -1817,7 +1799,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) enum port port; enum pipe pipe; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); /* There is no detection method for MIPI so rely on VBT */ if (!intel_bios_is_dsi_present(dev_priv, &port)) @@ -1849,6 +1831,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_encoder->compute_config = intel_dsi_compute_config; intel_encoder->pre_enable = intel_dsi_pre_enable; + if (IS_GEN9_LP(dev_priv)) + intel_encoder->enable = bxt_dsi_enable; intel_encoder->disable = intel_dsi_disable; intel_encoder->post_disable = intel_dsi_post_disable; intel_encoder->get_hw_state = intel_dsi_get_hw_state; @@ -1894,18 +1878,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - DRM_DEBUG_KMS("no device found\n"); + drm_dbg_kms(&dev_priv->drm, "no device found\n"); goto err; } /* Use clock read-back from current hw-state for fastboot */ current_mode = intel_encoder_current_mode(intel_encoder); if (current_mode) { - DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n", - intel_dsi->pclk, current_mode->clock); + drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n", + intel_dsi->pclk, current_mode->clock); if (intel_fuzzy_clock_check(intel_dsi->pclk, current_mode->clock)) { - DRM_DEBUG_KMS("Using GOP pclk\n"); + drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n"); intel_dsi->pclk = current_mode->clock; } @@ -1933,7 +1917,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) mutex_unlock(&dev->mode_config.mutex); if (!fixed_mode) { - DRM_DEBUG_KMS("no fixed mode\n"); + drm_dbg_kms(&dev_priv->drm, "no fixed mode\n"); goto err_cleanup_connector; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 6b89e67b120f..d0a514301575 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -64,7 +64,7 @@ static int dsi_calc_mnp(struct drm_i915_private 
*dev_priv, /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { - DRM_ERROR("DSI CLK Out of Range\n"); + drm_err(&dev_priv->drm, "DSI CLK Out of Range\n"); return -ECHRNG; } @@ -126,7 +126,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, ret = dsi_calc_mnp(dev_priv, config, dsi_clk); if (ret) { - DRM_DEBUG_KMS("dsi_calc_mnp failed\n"); + drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n"); return ret; } @@ -138,8 +138,8 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, config->dsi_pll.ctrl |= DSI_PLL_VCO_EN; - DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n", - config->dsi_pll.div, config->dsi_pll.ctrl); + drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", + config->dsi_pll.div, config->dsi_pll.ctrl); return 0; } @@ -149,7 +149,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); @@ -169,12 +169,12 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, DSI_PLL_LOCK, 20)) { vlv_cck_put(dev_priv); - DRM_ERROR("DSI PLL lock failed\n"); + drm_err(&dev_priv->drm, "DSI PLL lock failed\n"); return; } vlv_cck_put(dev_priv); - DRM_DEBUG_KMS("DSI PLL locked\n"); + drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); } void vlv_dsi_pll_disable(struct intel_encoder *encoder) @@ -182,7 +182,7 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); @@ -201,7 +201,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) u32 mask; mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED; - val = I915_READ(BXT_DSI_PLL_ENABLE); + val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); enabled = (val & mask) == mask; if (!enabled) @@ -215,15 +215,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) * times, and since accessing DSI registers with invalid dividers * causes a system hang. */ - val = I915_READ(BXT_DSI_PLL_CTL); + val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); if (IS_GEMINILAKE(dev_priv)) { if (!(val & BXT_DSIA_16X_MASK)) { - DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val); + drm_dbg(&dev_priv->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } else { if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) { - DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val); + drm_dbg(&dev_priv->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } @@ -236,11 +238,11 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); - val = I915_READ(BXT_DSI_PLL_ENABLE); + val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); val &= ~BXT_DSI_PLL_DO_ENABLE; - I915_WRITE(BXT_DSI_PLL_ENABLE, val); + intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); /* * PLL lock should deassert within 200us. @@ -248,7 +250,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) */ if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) - DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); + drm_err(&dev_priv->drm, + "Timeout waiting for PLL lock deassertion\n"); } u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, @@ -263,7 +266,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int refclk = IS_CHERRYVIEW(dev_priv) ? 
100000 : 25000; int i; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); vlv_cck_get(dev_priv); pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); @@ -292,7 +295,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, p--; if (!p) { - DRM_ERROR("wrong P1 divisor\n"); + drm_err(&dev_priv->drm, "wrong P1 divisor\n"); return 0; } @@ -302,7 +305,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, } if (i == ARRAY_SIZE(lfsr_converts)) { - DRM_ERROR("wrong m_seed programmed\n"); + drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); return 0; } @@ -325,7 +328,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); - config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL); + config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK; @@ -333,7 +336,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp); - DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk); + drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); return pclk; } @@ -343,11 +346,10 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - temp = I915_READ(MIPI_CTRL(port)); + temp = intel_de_read(dev_priv, MIPI_CTRL(port)); temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; - I915_WRITE(MIPI_CTRL(port), temp | - intel_dsi->escape_clk_div << - ESCAPE_CLOCK_DIVIDER_SHIFT); + intel_de_write(dev_priv, MIPI_CTRL(port), + temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT); } static void glk_dsi_program_esc_clock(struct drm_device *dev, @@ -393,8 +395,10 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); + intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, + (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, + (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ @@ -412,7 +416,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, u32 mipi_8by3_divider; /* Clear old configurations */ - tmp = I915_READ(BXT_MIPI_CLOCK_CTL); + tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); @@ -448,7 +452,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower); tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper); - I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); + intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); } int bxt_dsi_pll_compute(struct intel_encoder *encoder, @@ -478,10 +482,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, } if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { - DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n"); + drm_err(&dev_priv->drm, + "Cant get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else - DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n"); + drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); /* * Program DSI 
ratio and Select MIPIC and MIPIA PLL output as 8x @@ -507,11 +512,11 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, enum port port; u32 val; - DRM_DEBUG_KMS("\n"); + drm_dbg_kms(&dev_priv->drm, "\n"); /* Configure PLL vales */ - I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); - POSTING_READ(BXT_DSI_PLL_CTL); + intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); + intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL); /* Program TX, RX, Dphy clocks */ if (IS_BROXTON(dev_priv)) { @@ -522,18 +527,19 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, } /* Enable DSI PLL */ - val = I915_READ(BXT_DSI_PLL_ENABLE); + val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); val |= BXT_DSI_PLL_DO_ENABLE; - I915_WRITE(BXT_DSI_PLL_ENABLE, val); + intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); /* Timeout and fail if PLL not locked */ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) { - DRM_ERROR("Timed out waiting for DSI PLL to lock\n"); + drm_err(&dev_priv->drm, + "Timed out waiting for DSI PLL to lock\n"); return; } - DRM_DEBUG_KMS("DSI PLL locked\n"); + drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); } void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) @@ -544,20 +550,20 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) /* Clear old configurations */ if (IS_BROXTON(dev_priv)) { - tmp = I915_READ(BXT_MIPI_CLOCK_CTL); + tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); - I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); + intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); } else { - tmp = I915_READ(MIPIO_TXESC_CLK_DIV1); + tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1); tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp); + intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp); - tmp = I915_READ(MIPIO_TXESC_CLK_DIV2); + tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2); tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK; - I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp); + intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp); } - I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); + intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 81366aa4812b..0598e5382a1d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work) 0); out_request: if (unlikely(err)) { - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err = 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 151a1e8ae36a..68326ad3b2e0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -67,14 +67,11 @@ #include <linux/log2.h> #include <linux/nospec.h> -#include <drm/i915_drm.h> - #include "gt/gen6_ppgtt.h" #include "gt/intel_context.h" +#include "gt/intel_context_param.h" #include "gt/intel_engine_heartbeat.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" -#include "gt/intel_lrc_reg.h" #include "gt/intel_ring.h" #include "i915_gem_context.h" @@ -245,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count) if 
(!e->engines[count]) continue; - RCU_INIT_POINTER(e->engines[count]->gem_context, NULL); intel_context_put(e->engines[count]); } kfree(e); @@ -258,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e) static void free_engines_rcu(struct rcu_head *rcu) { - free_engines(container_of(rcu, struct i915_gem_engines, rcu)); + struct i915_gem_engines *engines = + container_of(rcu, struct i915_gem_engines, rcu); + + i915_sw_fence_fini(&engines->fence); + free_engines(engines); +} + +static int __i915_sw_fence_call +engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) +{ + struct i915_gem_engines *engines = + container_of(fence, typeof(*engines), fence); + + switch (state) { + case FENCE_COMPLETE: + if (!list_empty(&engines->link)) { + struct i915_gem_context *ctx = engines->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->stale.lock, flags); + list_del(&engines->link); + spin_unlock_irqrestore(&ctx->stale.lock, flags); + } + i915_gem_context_put(engines->ctx); + break; + + case FENCE_FREE: + init_rcu_head(&engines->rcu); + call_rcu(&engines->rcu, free_engines_rcu); + break; + } + + return NOTIFY_DONE; +} + +static struct i915_gem_engines *alloc_engines(unsigned int count) +{ + struct i915_gem_engines *e; + + e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); + if (!e) + return NULL; + + i915_sw_fence_init(&e->fence, engines_notify); + return e; } static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) @@ -268,11 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) struct i915_gem_engines *e; enum intel_engine_id id; - e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); + e = alloc_engines(I915_NUM_ENGINES); if (!e) return ERR_PTR(-ENOMEM); - init_rcu_head(&e->rcu); for_each_engine(engine, gt, id) { struct intel_context *ce; @@ -306,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); spin_unlock(&ctx->i915->gem.contexts.lock); - free_engines(rcu_access_pointer(ctx->engines)); mutex_destroy(&ctx->engines_mutex); if (ctx->timeline) @@ -421,7 +459,7 @@ static struct intel_engine_cs *__active_engine(struct i915_request *rq) } engine = NULL; - if (i915_request_is_active(rq) && !rq->fence.error) + if (i915_request_is_active(rq) && rq->fence.error != -EIO) engine = rq->engine; spin_unlock_irq(&locked->active.lock); @@ -452,7 +490,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) return engine; } -static void kill_context(struct i915_gem_context *ctx) +static void kill_engines(struct i915_gem_engines *engines) { struct i915_gem_engines_iter it; struct intel_context *ce; @@ -464,7 +502,7 @@ static void kill_context(struct i915_gem_context *ctx) * However, we only care about pending requests, so only include * engines on which there are incomplete requests. */ - for_each_gem_engine(ce, __context_engines_static(ctx), it) { + for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine; if (intel_context_set_banned(ce)) @@ -486,8 +524,82 @@ static void kill_context(struct i915_gem_context *ctx) * the context from the GPU, we have to resort to a full * reset. We hope the collateral damage is worth it. 
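The fence embedded in i915_gem_engines above behaves like a reference count with a completion callback: holders pin the engine set with i915_sw_fence_await() and release it with i915_sw_fence_complete(); once the fence is committed and every hold is dropped, engines_notify() sees FENCE_COMPLETE and then FENCE_FREE. A condensed sketch of that usage pattern, assuming the i915_sw_fence API as declared in i915_sw_fence.h:

static void engines_keepalive_example(struct i915_gem_engines *e)
{
	i915_sw_fence_init(&e->fence, engines_notify);

	if (i915_sw_fence_await(&e->fence)) {
		/* e is guaranteed to stay alive for this window */
		/* ... operate on e->engines[] ... */
		i915_sw_fence_complete(&e->fence); /* drop our hold */
	}

	/* arm the fence; it signals once all outstanding holds are gone */
	i915_sw_fence_commit(&e->fence);
}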
*/ - __reset_context(ctx, engine); + __reset_context(engines->ctx, engine); + } +} + +static void kill_stale_engines(struct i915_gem_context *ctx) +{ + struct i915_gem_engines *pos, *next; + + spin_lock_irq(&ctx->stale.lock); + GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { + if (!i915_sw_fence_await(&pos->fence)) { + list_del_init(&pos->link); + continue; + } + + spin_unlock_irq(&ctx->stale.lock); + + kill_engines(pos); + + spin_lock_irq(&ctx->stale.lock); + GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); + list_safe_reset_next(pos, next, link); + list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ + + i915_sw_fence_complete(&pos->fence); } + spin_unlock_irq(&ctx->stale.lock); +} + +static void kill_context(struct i915_gem_context *ctx) +{ + kill_stale_engines(ctx); +} + +static void engines_idle_release(struct i915_gem_context *ctx, + struct i915_gem_engines *engines) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + + INIT_LIST_HEAD(&engines->link); + + engines->ctx = i915_gem_context_get(ctx); + + for_each_gem_engine(ce, engines, it) { + struct dma_fence *fence; + int err = 0; + + /* serialises with execbuf */ + set_bit(CONTEXT_CLOSED_BIT, &ce->flags); + if (!intel_context_pin_if_active(ce)) + continue; + + fence = i915_active_fence_get(&ce->timeline->last_request); + if (fence) { + err = i915_sw_fence_await_dma_fence(&engines->fence, + fence, 0, + GFP_KERNEL); + dma_fence_put(fence); + } + intel_context_unpin(ce); + if (err < 0) + goto kill; + } + + spin_lock_irq(&ctx->stale.lock); + if (!i915_gem_context_is_closed(ctx)) + list_add_tail(&engines->link, &ctx->stale.engines); + spin_unlock_irq(&ctx->stale.lock); + +kill: + if (list_empty(&engines->link)) /* raced, already closed */ + kill_engines(engines); + + i915_sw_fence_commit(&engines->fence); } static void set_closed_name(struct i915_gem_context *ctx) @@ -511,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx) { struct i915_address_space *vm; + /* Flush any concurrent set_engines() */ + mutex_lock(&ctx->engines_mutex); + engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); - set_closed_name(ctx); + mutex_unlock(&ctx->engines_mutex); mutex_lock(&ctx->mutex); + set_closed_name(ctx); + vm = i915_gem_context_vm(ctx); if (vm) i915_vm_close(vm); @@ -604,6 +721,9 @@ __create_context(struct drm_i915_private *i915) ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); mutex_init(&ctx->mutex); + spin_lock_init(&ctx->stale.lock); + INIT_LIST_HEAD(&ctx->stale.engines); + mutex_init(&ctx->engines_mutex); e = default_engines(ctx); if (IS_ERR(e)) { @@ -637,23 +757,30 @@ err_free: return ERR_PTR(err); } -static void +static int context_apply_all(struct i915_gem_context *ctx, - void (*fn)(struct intel_context *ce, void *data), + int (*fn)(struct intel_context *ce, void *data), void *data) { struct i915_gem_engines_iter it; struct intel_context *ce; + int err = 0; - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) - fn(ce, data); + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + err = fn(ce, data); + if (err) + break; + } i915_gem_context_unlock_engines(ctx); + + return err; } -static void __apply_ppgtt(struct intel_context *ce, void *vm) +static int __apply_ppgtt(struct intel_context *ce, void *vm) { i915_vm_put(ce->vm); ce->vm = i915_vm_get(vm); + return 0; } static struct i915_address_space * @@ -691,9 +818,10 @@ static void 
__set_timeline(struct intel_timeline **dst, intel_timeline_put(old); } -static void __apply_timeline(struct intel_context *ce, void *timeline) +static int __apply_timeline(struct intel_context *ce, void *timeline) { __set_timeline(&ce->timeline, timeline); + return 0; } static void __assign_timeline(struct i915_gem_context *ctx, @@ -724,8 +852,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) ppgtt = i915_ppgtt_create(&i915->gt); if (IS_ERR(ppgtt)) { - DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", - PTR_ERR(ppgtt)); + drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", + PTR_ERR(ppgtt)); context_close(ctx); return ERR_CAST(ppgtt); } @@ -767,20 +895,15 @@ static void init_contexts(struct i915_gem_contexts *gc) void i915_gem_init__contexts(struct drm_i915_private *i915) { init_contexts(&i915->gem.contexts); - DRM_DEBUG_DRIVER("%s context support initialized\n", - DRIVER_CAPS(i915)->has_logical_contexts ? - "logical" : "fake"); + drm_dbg(&i915->drm, "%s context support initialized\n", + DRIVER_CAPS(i915)->has_logical_contexts ? + "logical" : "fake"); } void i915_gem_driver_release__contexts(struct drm_i915_private *i915) { flush_work(&i915->gem.contexts.free_work); -} - -static int vm_idr_cleanup(int id, void *p, void *data) -{ - i915_vm_put(p); - return 0; + rcu_barrier(); /* and flush the left over RCU frees */ } static int gem_context_register(struct i915_gem_context *ctx, @@ -820,8 +943,8 @@ int i915_gem_context_open(struct drm_i915_private *i915, xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); - mutex_init(&file_priv->vm_idr_lock); - idr_init_base(&file_priv->vm_idr, 1); + /* 0 reserved for invalid/unassigned ppgtt */ + xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) { @@ -839,9 +962,8 @@ int i915_gem_context_open(struct drm_i915_private *i915, err_ctx: context_close(ctx); err: - idr_destroy(&file_priv->vm_idr); + xa_destroy(&file_priv->vm_xa); xa_destroy(&file_priv->context_xa); - mutex_destroy(&file_priv->vm_idr_lock); return err; } @@ -849,6 +971,7 @@ void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_private *i915 = file_priv->dev_priv; + struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; @@ -856,9 +979,9 @@ void i915_gem_context_close(struct drm_file *file) context_close(ctx); xa_destroy(&file_priv->context_xa); - idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); - idr_destroy(&file_priv->vm_idr); - mutex_destroy(&file_priv->vm_idr_lock); + xa_for_each(&file_priv->vm_xa, idx, vm) + i915_vm_put(vm); + xa_destroy(&file_priv->vm_xa); contexts_flush_free(&i915->gem.contexts); } @@ -870,6 +993,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_vm_control *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_ppgtt *ppgtt; + u32 id; int err; if (!HAS_FULL_PPGTT(i915)) @@ -892,23 +1016,15 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, goto err_put; } - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, + xa_limit_32b, GFP_KERNEL); if (err) goto err_put; - err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); - if (err < 0) - goto err_unlock; - - GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ - - mutex_unlock(&file_priv->vm_idr_lock); - - args->vm_id = err; + GEM_BUG_ON(id == 0); /* reserved for 
invalid/unassigned ppgtt */ + args->vm_id = id; return 0; -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); err_put: i915_vm_put(&ppgtt->vm); return err; @@ -920,8 +1036,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_vm_control *args = data; struct i915_address_space *vm; - int err; - u32 id; if (args->flags) return -EINVAL; @@ -929,17 +1043,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, if (args->extensions) return -EINVAL; - id = args->vm_id; - if (!id) - return -ENOENT; - - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - - vm = idr_remove(&file_priv->vm_idr, id); - - mutex_unlock(&file_priv->vm_idr_lock); + vm = xa_erase(&file_priv->vm_xa, args->vm_id); if (!vm) return -ENOENT; @@ -965,6 +1069,30 @@ static void cb_retire(struct i915_active *base) kfree(cb); } +static inline struct i915_gem_engines * +__context_engines_await(const struct i915_gem_context *ctx) +{ + struct i915_gem_engines *engines; + + rcu_read_lock(); + do { + engines = rcu_dereference(ctx->engines); + if (unlikely(!engines)) + break; + + if (unlikely(!i915_sw_fence_await(&engines->fence))) + continue; + + if (likely(engines == rcu_access_pointer(ctx->engines))) + break; + + i915_sw_fence_complete(&engines->fence); + } while (1); + rcu_read_unlock(); + + return engines; +} + I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); static int context_barrier_task(struct i915_gem_context *ctx, intel_engine_mask_t engines, @@ -975,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, { struct context_barrier_task *cb; struct i915_gem_engines_iter it; + struct i915_gem_engines *e; struct intel_context *ce; int err = 0; @@ -991,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx, return err; } - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + e = __context_engines_await(ctx); + if (!e) { + i915_active_release(&cb->base); + return -ENOENT; + } + + for_each_gem_engine(ce, e, it) { struct i915_request *rq; if (I915_SELFTEST_ONLY(context_barrier_inject_fault & @@ -1022,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (err) break; } - i915_gem_context_unlock_engines(ctx); + i915_sw_fence_complete(&e->fence); cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ cb->data = data; @@ -1037,7 +1172,8 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, struct drm_i915_gem_context_param *args) { struct i915_address_space *vm; - int ret; + int err; + u32 id; if (!rcu_access_pointer(ctx->vm)) return -ENODEV; @@ -1045,27 +1181,22 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, rcu_read_lock(); vm = context_get_vm_rcu(ctx); rcu_read_unlock(); + if (!vm) + return -ENODEV; - ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (ret) + err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); + if (err) goto err_put; - ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); - GEM_BUG_ON(!ret); - if (ret < 0) - goto err_unlock; - i915_vm_open(vm); + GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ + args->value = id; args->size = 0; - args->value = ret; - ret = 0; -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); err_put: i915_vm_put(vm); - return ret; + return err; } static void set_ppgtt_barrier(void *data) @@ -1167,7 +1298,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, return -ENOENT; rcu_read_lock(); - vm = idr_find(&file_priv->vm_idr, args->value); + vm = xa_load(&file_priv->vm_xa, args->value); if (vm && !kref_get_unless_zero(&vm->ref)) vm = NULL; rcu_read_unlock(); @@ -1213,87 +1344,61 @@ out: return err; } -static int gen8_emit_rpcs_config(struct i915_request *rq, - struct intel_context *ce, - struct intel_sseu sseu) +static int __apply_ringsize(struct intel_context *ce, void *sz) { - u64 offset; - u32 *cs; + return intel_context_set_ring_size(ce, (unsigned long)sz); +} - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); +static int set_ringsize(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) + return -ENODEV; + + if (args->size) + return -EINVAL; - offset = i915_ggtt_offset(ce->state) + - LRC_STATE_PN * PAGE_SIZE + - CTX_R_PWR_CLK_STATE * 4; + if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE)) + return -EINVAL; - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); + if (args->value < I915_GTT_PAGE_SIZE) + return -EINVAL; - intel_ring_advance(rq, cs); + if (args->value > 128 * I915_GTT_PAGE_SIZE) + return -EINVAL; - return 0; + return context_apply_all(ctx, + __apply_ringsize, + __intel_context_ring_size(args->value)); } -static int -gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) +static int __get_ringsize(struct intel_context *ce, void *arg) { - struct i915_request *rq; - int ret; - - lockdep_assert_held(&ce->pin_mutex); - - /* - * If the context is not idle, we have to submit an ordered request to - * modify its context image via the kernel context (writing to our own - * image, or into the registers directory, does not stick). Pristine - * and idle contexts will be configured on pinning. 
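The idr-plus-mutex pair for VM handles is replaced above by an xarray, whose allocating insert is atomic and needs no external lock; XA_FLAGS_ALLOC1 at init time keeps id 0 reserved for the invalid/unassigned ppgtt. A minimal sketch of the allocate/lookup/erase lifecycle these hunks rely on:

#include <linux/xarray.h>

static int vm_handle_example(struct xarray *vm_xa,
			     struct i915_address_space *vm)
{
	u32 id;
	int err;

	/* allocating insert; with XA_FLAGS_ALLOC1, ids start from 1 */
	err = xa_alloc(vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	/* lockless lookup under RCU; take a reference before use */
	rcu_read_lock();
	vm = xa_load(vm_xa, id);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();

	/* removal returns the old entry, or NULL if absent */
	vm = xa_erase(vm_xa, id);
	return vm ? 0 : -ENOENT;
}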
- */ - if (!intel_context_pin_if_active(ce)) - return 0; + long sz; - rq = intel_engine_create_kernel_request(ce->engine); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - goto out_unpin; - } - - /* Serialise with the remote context */ - ret = intel_context_prepare_remote_request(ce, rq); - if (ret == 0) - ret = gen8_emit_rpcs_config(rq, ce, sseu); + sz = intel_context_get_ring_size(ce); + GEM_BUG_ON(sz > INT_MAX); - i915_request_add(rq); -out_unpin: - intel_context_unpin(ce); - return ret; + return sz; /* stop on first engine */ } -static int -intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) +static int get_ringsize(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) { - int ret; + int sz; - GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); - - ret = intel_context_lock_pinned(ce); - if (ret) - return ret; + if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) + return -ENODEV; - /* Nothing to do if unmodified. */ - if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) - goto unlock; + if (args->size) + return -EINVAL; - ret = gen8_modify_rpcs(ce, sseu); - if (!ret) - ce->sseu = sseu; + sz = context_apply_all(ctx, __get_ringsize, NULL); + if (sz < 0) + return sz; -unlock: - intel_context_unlock_pinned(ce); - return ret; + args->value = sz; + return 0; } static int @@ -1460,6 +1565,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data) struct i915_context_engines_load_balance __user *ext = container_of_user(base, typeof(*ext), base); const struct set_engines *set = data; + struct drm_i915_private *i915 = set->ctx->i915; struct intel_engine_cs *stack[16]; struct intel_engine_cs **siblings; struct intel_context *ce; @@ -1467,24 +1573,25 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data) unsigned int n; int err; - if (!HAS_EXECLISTS(set->ctx->i915)) + if (!HAS_EXECLISTS(i915)) return -ENODEV; - if (USES_GUC_SUBMISSION(set->ctx->i915)) + if (intel_uc_uses_guc_submission(&i915->gt.uc)) return -ENODEV; /* not implement yet */ if (get_user(idx, &ext->engine_index)) return -EFAULT; if (idx >= set->engines->num_engines) { - DRM_DEBUG("Invalid placement value, %d >= %d\n", - idx, set->engines->num_engines); + drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", + idx, set->engines->num_engines); return -EINVAL; } idx = array_index_nospec(idx, set->engines->num_engines); if (set->engines->engines[idx]) { - DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); + drm_dbg(&i915->drm, + "Invalid placement[%d], already occupied\n", idx); return -EEXIST; } @@ -1516,12 +1623,13 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data) goto out_siblings; } - siblings[n] = intel_engine_lookup_user(set->ctx->i915, + siblings[n] = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!siblings[n]) { - DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", - n, ci.engine_class, ci.engine_instance); + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); err = -EINVAL; goto out_siblings; } @@ -1554,6 +1662,7 @@ set_engines__bond(struct i915_user_extension __user *base, void *data) struct i915_context_engines_bond __user *ext = container_of_user(base, typeof(*ext), base); const struct set_engines *set = data; + struct drm_i915_private *i915 = set->ctx->i915; struct i915_engine_class_instance ci; struct intel_engine_cs *virtual; struct intel_engine_cs *master; @@ -1564,14 +1673,15 @@ set_engines__bond(struct 
i915_user_extension __user *base, void *data) return -EFAULT; if (idx >= set->engines->num_engines) { - DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", - idx, set->engines->num_engines); + drm_dbg(&i915->drm, + "Invalid index for virtual engine: %d >= %d\n", + idx, set->engines->num_engines); return -EINVAL; } idx = array_index_nospec(idx, set->engines->num_engines); if (!set->engines->engines[idx]) { - DRM_DEBUG("Invalid engine at %d\n", idx); + drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); return -EINVAL; } virtual = set->engines->engines[idx]->engine; @@ -1589,11 +1699,12 @@ set_engines__bond(struct i915_user_extension __user *base, void *data) if (copy_from_user(&ci, &ext->master, sizeof(ci))) return -EFAULT; - master = intel_engine_lookup_user(set->ctx->i915, + master = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!master) { - DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", - ci.engine_class, ci.engine_instance); + drm_dbg(&i915->drm, + "Unrecognised master engine: { class:%u, instance:%u }\n", + ci.engine_class, ci.engine_instance); return -EINVAL; } @@ -1606,12 +1717,13 @@ set_engines__bond(struct i915_user_extension __user *base, void *data) if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) return -EFAULT; - bond = intel_engine_lookup_user(set->ctx->i915, + bond = intel_engine_lookup_user(i915, ci.engine_class, ci.engine_instance); if (!bond) { - DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", - n, ci.engine_class, ci.engine_instance); + drm_dbg(&i915->drm, + "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", + n, ci.engine_class, ci.engine_instance); return -EINVAL; } @@ -1640,6 +1752,7 @@ static int set_engines(struct i915_gem_context *ctx, const struct drm_i915_gem_context_param *args) { + struct drm_i915_private *i915 = ctx->i915; struct i915_context_param_engines __user *user = u64_to_user_ptr(args->value); struct set_engines set = { .ctx = ctx }; @@ -1661,8 +1774,8 @@ set_engines(struct i915_gem_context *ctx, BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); if (args->size < sizeof(*user) || !IS_ALIGNED(args->size, sizeof(*user->engines))) { - DRM_DEBUG("Invalid size for engine array: %d\n", - args->size); + drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", + args->size); return -EINVAL; } @@ -1671,13 +1784,10 @@ set_engines(struct i915_gem_context *ctx, * first 64 engines defined here. 
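Each user-supplied engine index above is bounds-checked and then clamped with array_index_nospec(), so a mispredicted bounds check cannot be exploited to speculatively index past the engine array. The idiom, isolated into a small illustrative helper:

#include <linux/nospec.h>

static struct intel_context *
lookup_user_engine(struct i915_gem_engines *e, unsigned int idx)
{
	if (idx >= e->num_engines)
		return NULL;

	/* clamp idx under speculation to the validated range */
	idx = array_index_nospec(idx, e->num_engines);

	return e->engines[idx];
}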
*/ num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); - - set.engines = kmalloc(struct_size(set.engines, engines, num_engines), - GFP_KERNEL); + set.engines = alloc_engines(num_engines); if (!set.engines) return -ENOMEM; - init_rcu_head(&set.engines->rcu); for (n = 0; n < num_engines; n++) { struct i915_engine_class_instance ci; struct intel_engine_cs *engine; @@ -1698,8 +1808,9 @@ set_engines(struct i915_gem_context *ctx, ci.engine_class, ci.engine_instance); if (!engine) { - DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", - n, ci.engine_class, ci.engine_instance); + drm_dbg(&i915->drm, + "Invalid engine[%d]: { class:%d, instance:%d }\n", + n, ci.engine_class, ci.engine_instance); __free_engines(set.engines, n); return -ENOENT; } @@ -1729,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx, replace: mutex_lock(&ctx->engines_mutex); + if (i915_gem_context_is_closed(ctx)) { + mutex_unlock(&ctx->engines_mutex); + free_engines(set.engines); + return -ENOENT; + } if (args->size) i915_gem_context_set_user_engines(ctx); else @@ -1736,7 +1852,8 @@ replace: set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); mutex_unlock(&ctx->engines_mutex); - call_rcu(&set.engines->rcu, free_engines_rcu); + /* Keep track of old engine sets for kill_context() */ + engines_idle_release(ctx, set.engines); return 0; } @@ -1747,11 +1864,10 @@ __copy_engines(struct i915_gem_engines *e) struct i915_gem_engines *copy; unsigned int n; - copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); + copy = alloc_engines(e->num_engines); if (!copy) return ERR_PTR(-ENOMEM); - init_rcu_head(©->rcu); for (n = 0; n < e->num_engines; n++) { if (e->engines[n]) copy->engines[n] = intel_context_get(e->engines[n]); @@ -1852,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx, return __context_set_persistence(ctx, args->value); } -static void __apply_priority(struct intel_context *ce, void *arg) +static int __apply_priority(struct intel_context *ce, void *arg) { struct i915_gem_context *ctx = arg; if (!intel_engine_has_semaphores(ce->engine)) - return; + return 0; if (ctx->sched.priority >= I915_PRIORITY_NORMAL) intel_context_set_use_semaphores(ce); else intel_context_clear_use_semaphores(ce); + + return 0; } static int set_priority(struct i915_gem_context *ctx, @@ -1955,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_persistence(ctx, args); break; + case I915_CONTEXT_PARAM_RINGSIZE: + ret = set_ringsize(ctx, args); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; @@ -1983,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data) return ctx_setparam(arg->fpriv, arg->ctx, &local.param); } +static int copy_ring_size(struct intel_context *dst, + struct intel_context *src) +{ + long sz; + + sz = intel_context_get_ring_size(src); + if (sz < 0) + return sz; + + return intel_context_set_ring_size(dst, sz); +} + static int clone_engines(struct i915_gem_context *dst, struct i915_gem_context *src) { @@ -1991,11 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst, bool user_engines; unsigned long n; - clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); + clone = alloc_engines(e->num_engines); if (!clone) goto err_unlock; - init_rcu_head(&clone->rcu); for (n = 0; n < e->num_engines; n++) { struct intel_engine_cs *engine; @@ -2025,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst, } intel_context_set_gem(clone->engines[n], dst); + + /* Copy 
across the preferred ringsize */ + if (copy_ring_size(clone->engines[n], e->engines[n])) { + __free_engines(clone, n + 1); + goto err_unlock; + } } clone->num_engines = n; @@ -2032,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst, i915_gem_context_unlock_engines(src); /* Serialised by constructor */ - free_engines(__context_engines_static(dst)); - RCU_INIT_POINTER(dst->engines, clone); + engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); if (user_engines) i915_gem_context_set_user_engines(dst); else @@ -2213,8 +2351,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ext_data.fpriv = file->driver_priv; if (client_is_banned(ext_data.fpriv)) { - DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, task_pid_nr(current)); + drm_dbg(&i915->drm, + "client %s[%d] banned from creating ctx\n", + current->comm, task_pid_nr(current)); return -EIO; } @@ -2236,7 +2375,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, goto err_ctx; args->ctx_id = id; - DRM_DEBUG("HW context %d created\n", args->ctx_id); + drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id); return 0; @@ -2386,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->value = i915_gem_context_is_persistent(ctx); break; + case I915_CONTEXT_PARAM_RINGSIZE: + ret = get_ringsize(ctx, args); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; @@ -2459,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) const struct i915_gem_engines *e = it->engines; struct intel_context *ctx; + if (unlikely(!e)) + return NULL; + do { if (it->idx >= e->num_engines) return NULL; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 3ae61a355d87..f1d884d304bd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -192,12 +192,16 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx) static inline struct intel_context * i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx) { - struct intel_context *ce = ERR_PTR(-EINVAL); + struct intel_context *ce; rcu_read_lock(); { struct i915_gem_engines *e = rcu_dereference(ctx->engines); - if (likely(idx < e->num_engines && e->engines[idx])) + if (unlikely(!e)) /* context was closed! 
*/ + ce = ERR_PTR(-ENOENT); + else if (likely(idx < e->num_engines && e->engines[idx])) ce = intel_context_get(e->engines[idx]); + else + ce = ERR_PTR(-EINVAL); } rcu_read_unlock(); return ce; @@ -207,7 +211,6 @@ static inline void i915_gem_engines_iter_init(struct i915_gem_engines_iter *it, struct i915_gem_engines *engines) { - GEM_BUG_ON(!engines); it->engines = engines; it->idx = 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 017ca803ab47..28760bd03265 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -20,6 +20,7 @@ #include "gt/intel_context_types.h" #include "i915_scheduler.h" +#include "i915_sw_fence.h" struct pid; @@ -30,7 +31,12 @@ struct intel_timeline; struct intel_ring; struct i915_gem_engines { - struct rcu_head rcu; + union { + struct list_head link; + struct rcu_head rcu; + }; + struct i915_sw_fence fence; + struct i915_gem_context *ctx; unsigned int num_engines; struct intel_context *engines[]; }; @@ -173,6 +179,11 @@ struct i915_gem_context { * context in messages. */ char name[TASK_COMM_LEN + 8]; + + struct { + spinlock_t lock; + struct list_head engines; + } stale; }; #endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 372b57ca0efc..7db5a793739d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -48,7 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme src = sg_next(src); } - if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { + if (!dma_map_sg_attrs(attachment->dev, + st->sgl, st->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) { ret = -ENOMEM; goto err_free_sg; } @@ -71,7 +73,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, { struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); + dma_unmap_sg_attrs(attachment->dev, + sg->sgl, sg->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sg); kfree(sg); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 0cc40e77bbd2..4f96c8788a2e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_vma *vma; - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); if (!atomic_read(&obj->bind_count)) return; @@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) { - struct drm_i915_gem_object *obj = vma->obj; - - assert_object_held(obj); - /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(obj); + i915_gem_object_bump_inactive_ggtt(vma->obj); i915_vma_unpin(vma); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 7643a30ba4cd..b7440f06c5e2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -10,7 +10,6 @@ #include <linux/uaccess.h> #include <drm/drm_syncobj.h> -#include <drm/i915_drm.h> #include "display/intel_frontbuffer.h" @@ -28,6 +27,19 
@@ #include "i915_sw_fence_work.h" #include "i915_trace.h" +struct eb_vma { + struct i915_vma *vma; + unsigned int flags; + + /** This vma's place in the execbuf reservation list */ + struct drm_i915_gem_exec_object2 *exec; + struct list_head bind_link; + struct list_head reloc_link; + + struct hlist_node node; + u32 handle; +}; + enum { FORCE_CPU_RELOC = 1, FORCE_GTT_RELOC, @@ -35,17 +47,15 @@ enum { #define DBG_FORCE_RELOC 0 /* choose one of the above! */ }; -#define __EXEC_OBJECT_HAS_REF BIT(31) -#define __EXEC_OBJECT_HAS_PIN BIT(30) -#define __EXEC_OBJECT_HAS_FENCE BIT(29) -#define __EXEC_OBJECT_NEEDS_MAP BIT(28) -#define __EXEC_OBJECT_NEEDS_BIAS BIT(27) -#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */ +#define __EXEC_OBJECT_HAS_PIN BIT(31) +#define __EXEC_OBJECT_HAS_FENCE BIT(30) +#define __EXEC_OBJECT_NEEDS_MAP BIT(29) +#define __EXEC_OBJECT_NEEDS_BIAS BIT(28) +#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */ #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) #define __EXEC_HAS_RELOC BIT(31) -#define __EXEC_VALIDATED BIT(30) -#define __EXEC_INTERNAL_FLAGS (~0u << 30) +#define __EXEC_INTERNAL_FLAGS (~0u << 31) #define UPDATE PIN_OFFSET_FIXED #define BATCH_OFFSET_BIAS (256*1024) @@ -220,15 +230,14 @@ struct i915_execbuffer { struct drm_file *file; /** per-file lookup tables and limits */ struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */ struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */ - struct i915_vma **vma; - unsigned int *flags; + struct eb_vma *vma; struct intel_engine_cs *engine; /** engine to queue the request to */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ struct i915_request *request; /** our request to build */ - struct i915_vma *batch; /** identity of the batch obj/vma */ + struct eb_vma *batch; /** identity of the batch obj/vma */ struct i915_vma *trampoline; /** trampoline used for chaining */ /** actual size of execobj[] as we may extend it for the cmdparser */ @@ -276,8 +285,6 @@ struct i915_execbuffer { struct hlist_head *buckets; /** ht for relocation handles */ }; -#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags]) - static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { return intel_engine_requires_cmd_parser(eb->engine) || @@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, static inline bool eb_pin_vma(struct i915_execbuffer *eb, const struct drm_i915_gem_exec_object2 *entry, - struct i915_vma *vma) + struct eb_vma *ev) { - unsigned int exec_flags = *vma->exec_flags; + struct i915_vma *vma = ev->vma; u64 pin_flags; if (vma->node.size) @@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb, pin_flags = entry->offset & PIN_OFFSET_MASK; pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED; - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT)) + if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT)) pin_flags |= PIN_GLOBAL; if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags))) return false; - if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) { + if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) { if (unlikely(i915_vma_pin_fence(vma))) { i915_vma_unpin(vma); return false; } if (vma->fence) - exec_flags |= __EXEC_OBJECT_HAS_FENCE; + ev->flags |= __EXEC_OBJECT_HAS_FENCE; } - *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; - return !eb_vma_misplaced(entry, vma, exec_flags); + ev->flags |= 
__EXEC_OBJECT_HAS_PIN; + return !eb_vma_misplaced(entry, vma, ev->flags); } static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) @@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags) } static inline void -eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags) +eb_unreserve_vma(struct eb_vma *ev) { - if (!(*flags & __EXEC_OBJECT_HAS_PIN)) + if (!(ev->flags & __EXEC_OBJECT_HAS_PIN)) return; - __eb_unreserve_vma(vma, *flags); - *flags &= ~__EXEC_OBJECT_RESERVED; + __eb_unreserve_vma(ev->vma, ev->flags); + ev->flags &= ~__EXEC_OBJECT_RESERVED; } static int @@ -442,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb, } else { entry->pad_to_size = 0; } - - if (unlikely(vma->exec_flags)) { - DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n", - entry->handle, (int)(entry - eb->exec)); - return -EINVAL; - } - /* * From drm_mm perspective address space is continuous, * so from this point we're always using non-canonical @@ -471,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb, return 0; } -static int +static void eb_add_vma(struct i915_execbuffer *eb, unsigned int i, unsigned batch_idx, struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; - int err; + struct eb_vma *ev = &eb->vma[i]; GEM_BUG_ON(i915_vma_is_closed(vma)); - if (!(eb->args->flags & __EXEC_VALIDATED)) { - err = eb_validate_vma(eb, entry, vma); - if (unlikely(err)) - return err; - } + ev->vma = i915_vma_get(vma); + ev->exec = entry; + ev->flags = entry->flags; if (eb->lut_size > 0) { - vma->exec_handle = entry->handle; - hlist_add_head(&vma->exec_node, + ev->handle = entry->handle; + hlist_add_head(&ev->node, &eb->buckets[hash_32(entry->handle, eb->lut_size)]); } if (entry->relocation_count) - list_add_tail(&vma->reloc_link, &eb->relocs); - - /* - * Stash a pointer from the vma to execobj, so we can query its flags, - * size, alignment etc as provided by the user. Also we stash a pointer - * to the vma inside the execobj so that we can use a direct lookup - * to find the right target VMA when doing relocations. 
- */ - eb->vma[i] = vma; - eb->flags[i] = entry->flags; - vma->exec_flags = &eb->flags[i]; + list_add_tail(&ev->reloc_link, &eb->relocs); /* * SNA is doing fancy tricks with compressing batch buffers, which leads @@ -518,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb, */ if (i == batch_idx) { if (entry->relocation_count && - !(eb->flags[i] & EXEC_OBJECT_PINNED)) - eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; + !(ev->flags & EXEC_OBJECT_PINNED)) + ev->flags |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) - eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; + ev->flags |= EXEC_OBJECT_NEEDS_FENCE; - eb->batch = vma; + eb->batch = ev; } - err = 0; - if (eb_pin_vma(eb, entry, vma)) { + if (eb_pin_vma(eb, entry, ev)) { if (entry->offset != vma->node.start) { entry->offset = vma->node.start | UPDATE; eb->args->flags |= __EXEC_HAS_RELOC; } } else { - eb_unreserve_vma(vma, vma->exec_flags); - - list_add_tail(&vma->exec_link, &eb->unbound); - if (drm_mm_node_allocated(&vma->node)) - err = i915_vma_unbind(vma); - if (unlikely(err)) - vma->exec_flags = NULL; + eb_unreserve_vma(ev); + list_add_tail(&ev->bind_link, &eb->unbound); } - return err; } static inline int use_cpu_reloc(const struct reloc_cache *cache, @@ -562,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache, } static int eb_reserve_vma(const struct i915_execbuffer *eb, - struct i915_vma *vma) + struct eb_vma *ev, + u64 pin_flags) { - struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); - unsigned int exec_flags = *vma->exec_flags; - u64 pin_flags; + struct drm_i915_gem_exec_object2 *entry = ev->exec; + unsigned int exec_flags = ev->flags; + struct i915_vma *vma = ev->vma; int err; - pin_flags = PIN_USER | PIN_NONBLOCK; if (exec_flags & EXEC_OBJECT_NEEDS_GTT) pin_flags |= PIN_GLOBAL; @@ -583,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, if (exec_flags & __EXEC_OBJECT_NEEDS_MAP) pin_flags |= PIN_MAPPABLE; - if (exec_flags & EXEC_OBJECT_PINNED) { + if (exec_flags & EXEC_OBJECT_PINNED) pin_flags |= entry->offset | PIN_OFFSET_FIXED; - pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */ - } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) { + else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + + if (drm_mm_node_allocated(&vma->node) && + eb_vma_misplaced(entry, vma, ev->flags)) { + err = i915_vma_unbind(vma); + if (err) + return err; } err = i915_vma_pin(vma, @@ -612,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, exec_flags |= __EXEC_OBJECT_HAS_FENCE; } - *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN; - GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags)); + ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN; + GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags)); return 0; } @@ -621,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb, static int eb_reserve(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; + unsigned int pin_flags = PIN_USER | PIN_NONBLOCK; struct list_head last; - struct i915_vma *vma; + struct eb_vma *ev; unsigned int i, pass; - int err; + int err = 0; /* * Attempt to pin all of the buffers into the GTT. @@ -640,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb) * room for the earlier objects *unless* we need to defragment. 
*/ + if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex)) + return -EINTR; + pass = 0; - err = 0; do { - list_for_each_entry(vma, &eb->unbound, exec_link) { - err = eb_reserve_vma(eb, vma); + list_for_each_entry(ev, &eb->unbound, bind_link) { + err = eb_reserve_vma(eb, ev, pin_flags); if (err) break; } - if (err != -ENOSPC) - return err; + if (!(err == -ENOSPC || err == -EAGAIN)) + break; /* Resort *all* the objects into priority order */ INIT_LIST_HEAD(&eb->unbound); INIT_LIST_HEAD(&last); for (i = 0; i < count; i++) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; + unsigned int flags; + ev = &eb->vma[i]; + flags = ev->flags; if (flags & EXEC_OBJECT_PINNED && flags & __EXEC_OBJECT_HAS_PIN) continue; - eb_unreserve_vma(vma, &eb->flags[i]); + eb_unreserve_vma(ev); if (flags & EXEC_OBJECT_PINNED) /* Pinned must have their slot */ - list_add(&vma->exec_link, &eb->unbound); + list_add(&ev->bind_link, &eb->unbound); else if (flags & __EXEC_OBJECT_NEEDS_MAP) /* Map require the lowest 256MiB (aperture) */ - list_add_tail(&vma->exec_link, &eb->unbound); + list_add_tail(&ev->bind_link, &eb->unbound); else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) /* Prioritise 4GiB region for restricted bo */ - list_add(&vma->exec_link, &last); + list_add(&ev->bind_link, &last); else - list_add_tail(&vma->exec_link, &last); + list_add_tail(&ev->bind_link, &last); } list_splice_tail(&last, &eb->unbound); + if (err == -EAGAIN) { + mutex_unlock(&eb->i915->drm.struct_mutex); + flush_workqueue(eb->i915->mm.userptr_wq); + mutex_lock(&eb->i915->drm.struct_mutex); + continue; + } + switch (pass++) { case 0: break; @@ -688,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb) err = i915_gem_evict_vm(eb->context->vm); mutex_unlock(&eb->context->vm->mutex); if (err) - return err; + goto unlock; break; default: - return -ENOSPC; + err = -ENOSPC; + goto unlock; } + + pin_flags = PIN_USER; } while (1); + +unlock: + mutex_unlock(&eb->i915->drm.struct_mutex); + return err; } static unsigned int eb_batch_index(const struct i915_execbuffer *eb) @@ -731,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) unsigned int i, batch; int err; + if (unlikely(i915_gem_context_is_closed(eb->gem_context))) + return -ENOENT; + INIT_LIST_HEAD(&eb->relocs); INIT_LIST_HEAD(&eb->unbound); batch = eb_batch_index(eb); - mutex_lock(&eb->gem_context->mutex); - if (unlikely(i915_gem_context_is_closed(eb->gem_context))) { - err = -ENOENT; - goto err_ctx; - } - for (i = 0; i < eb->buffer_count; i++) { u32 handle = eb->exec[i].handle; struct i915_lut_handle *lut; @@ -786,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) i915_gem_object_unlock(obj); add_vma: - err = eb_add_vma(eb, i, batch, vma); + err = eb_validate_vma(eb, &eb->exec[i], vma); if (unlikely(err)) goto err_vma; - GEM_BUG_ON(vma != eb->vma[i]); - GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); - GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && - eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); + eb_add_vma(eb, i, batch, vma); } - mutex_unlock(&eb->gem_context->mutex); - - eb->args->flags |= __EXEC_VALIDATED; - return eb_reserve(eb); + return 0; err_obj: i915_gem_object_put(obj); err_vma: - eb->vma[i] = NULL; -err_ctx: - mutex_unlock(&eb->gem_context->mutex); + eb->vma[i].vma = NULL; return err; } -static struct i915_vma * +static struct eb_vma * eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) { if (eb->lut_size < 0) { if (handle >= -eb->lut_size) return NULL; - return eb->vma[handle]; + 
return &eb->vma[handle]; } else { struct hlist_head *head; - struct i915_vma *vma; + struct eb_vma *ev; head = &eb->buckets[hash_32(handle, eb->lut_size)]; - hlist_for_each_entry(vma, head, exec_node) { - if (vma->exec_handle == handle) - return vma; + hlist_for_each_entry(ev, head, node) { + if (ev->handle == handle) + return ev; } return NULL; } @@ -836,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb) unsigned int i; for (i = 0; i < count; i++) { - struct i915_vma *vma = eb->vma[i]; - unsigned int flags = eb->flags[i]; + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; if (!vma) break; - GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); - vma->exec_flags = NULL; - eb->vma[i] = NULL; + eb->vma[i].vma = NULL; - if (flags & __EXEC_OBJECT_HAS_PIN) - __eb_unreserve_vma(vma, flags); + if (ev->flags & __EXEC_OBJECT_HAS_PIN) + __eb_unreserve_vma(vma, ev->flags); - if (flags & __EXEC_OBJECT_HAS_REF) - i915_vma_put(vma); + i915_vma_put(vma); } } -static void eb_reset_vmas(const struct i915_execbuffer *eb) -{ - eb_release_vmas(eb); - if (eb->lut_size > 0) - memset(eb->buckets, 0, - sizeof(struct hlist_head) << eb->lut_size); -} - static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); @@ -914,11 +896,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) static void reloc_gpu_flush(struct reloc_cache *cache) { - GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); + struct drm_i915_gem_object *obj = cache->rq->batch->obj; + + GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); - i915_gem_object_unpin_map(cache->rq->batch->obj); + __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); + i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(cache->rq->engine->gt); @@ -1197,7 +1181,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto out_pool; skip_request: - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1328,10 +1312,11 @@ out: static u64 eb_relocate_entry(struct i915_execbuffer *eb, - struct i915_vma *vma, + struct eb_vma *ev, const struct drm_i915_gem_relocation_entry *reloc) { - struct i915_vma *target; + struct drm_i915_private *i915 = eb->i915; + struct eb_vma *target; int err; /* we've already hold a reference to all valid objects */ @@ -1341,7 +1326,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, /* Validate that the target is in a valid r/w GPU domain */ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { - DRM_DEBUG("reloc with multiple write domains: " + drm_dbg(&i915->drm, "reloc with multiple write domains: " "target %d offset %d " "read %08x write %08x", reloc->target_handle, @@ -1352,7 +1337,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, } if (unlikely((reloc->write_domain | reloc->read_domains) & ~I915_GEM_GPU_DOMAINS)) { - DRM_DEBUG("reloc with read/write non-GPU domains: " + drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: " "target %d offset %d " "read %08x write %08x", reloc->target_handle, @@ -1363,7 +1348,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, } if (reloc->write_domain) { - *target->exec_flags |= EXEC_OBJECT_WRITE; + target->flags |= EXEC_OBJECT_WRITE; /* * Sandybridge PPGTT errata: We need a global gtt mapping @@ -1373,7 +1358,8 @@ eb_relocate_entry(struct i915_execbuffer *eb, */ 
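For reference, the write-domain validation earlier in eb_relocate_entry() relies on the classic power-of-two bit test: x & (x - 1) clears the lowest set bit, so the expression is nonzero exactly when more than one domain bit is set. A minimal stand-alone sketch of that check (hypothetical helper name, user-space C, not part of the driver):

        #include <stdbool.h>
        #include <stdint.h>

        /* True exactly when more than one bit of x is set, i.e. when a
         * relocation would name more than one GPU write domain; zero and
         * single-bit values (read-only, or a single write domain) pass. */
        static bool more_than_one_bit_set(uint32_t x)
        {
                return (x & (x - 1)) != 0;
        }

The gen6 branch just below then applies the Sandybridge PPGTT errata described in the comment above, forcing a global GTT binding (PIN_GLOBAL) for instruction writes.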
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && IS_GEN(eb->i915, 6)) { - err = i915_vma_bind(target, target->obj->cache_level, + err = i915_vma_bind(target->vma, + target->vma->obj->cache_level, PIN_GLOBAL, NULL); if (WARN_ONCE(err, "Unexpected failure to bind target VMA!")) @@ -1386,21 +1372,21 @@ eb_relocate_entry(struct i915_execbuffer *eb, * more work needs to be done. */ if (!DBG_FORCE_RELOC && - gen8_canonical_addr(target->node.start) == reloc->presumed_offset) + gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset) return 0; /* Check that the relocation address is valid... */ if (unlikely(reloc->offset > - vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { - DRM_DEBUG("Relocation beyond object bounds: " + ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) { + drm_dbg(&i915->drm, "Relocation beyond object bounds: " "target %d offset %d size %d.\n", reloc->target_handle, (int)reloc->offset, - (int)vma->size); + (int)ev->vma->size); return -EINVAL; } if (unlikely(reloc->offset & 3)) { - DRM_DEBUG("Relocation not 4-byte aligned: " + drm_dbg(&i915->drm, "Relocation not 4-byte aligned: " "target %d offset %d.\n", reloc->target_handle, (int)reloc->offset); @@ -1415,18 +1401,18 @@ eb_relocate_entry(struct i915_execbuffer *eb, * do relocations we are already stalling, disable the user's opt * out of our synchronisation. */ - *vma->exec_flags &= ~EXEC_OBJECT_ASYNC; + ev->flags &= ~EXEC_OBJECT_ASYNC; /* and update the user's relocation entry */ - return relocate_entry(vma, reloc, eb, target); + return relocate_entry(ev->vma, reloc, eb, target->vma); } -static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) +static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack[N_RELOC(512)]; struct drm_i915_gem_relocation_entry __user *urelocs; - const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); + const struct drm_i915_gem_exec_object2 *entry = ev->exec; unsigned int remain; urelocs = u64_to_user_ptr(entry->relocs_ptr); @@ -1456,9 +1442,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * we would try to acquire the struct mutex again. Obviously * this is bad and so lockdep complains vehemently. */ - pagefault_disable(); - copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0])); - pagefault_enable(); + copied = __copy_from_user(r, urelocs, count * sizeof(r[0])); if (unlikely(copied)) { remain = -EFAULT; goto out; @@ -1466,7 +1450,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) remain -= count; do { - u64 offset = eb_relocate_entry(eb, vma, r); + u64 offset = eb_relocate_entry(eb, ev, r); if (likely(offset == 0)) { } else if ((s64)offset < 0) { @@ -1495,10 +1479,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) * can read from this userspace address. 
*/ offset = gen8_canonical_addr(offset & ~UPDATE); - if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) { - remain = -EFAULT; - goto out; - } + __put_user(offset, + &urelocs[r - stack].presumed_offset); } } while (r++, --count); urelocs += ARRAY_SIZE(stack); @@ -1508,281 +1490,34 @@ out: return remain; } -static int -eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma) -{ - const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); - struct drm_i915_gem_relocation_entry *relocs = - u64_to_ptr(typeof(*relocs), entry->relocs_ptr); - unsigned int i; - int err; - - for (i = 0; i < entry->relocation_count; i++) { - u64 offset = eb_relocate_entry(eb, vma, &relocs[i]); - - if ((s64)offset < 0) { - err = (int)offset; - goto err; - } - } - err = 0; -err: - reloc_cache_reset(&eb->reloc_cache); - return err; -} - -static int check_relocations(const struct drm_i915_gem_exec_object2 *entry) -{ - const char __user *addr, *end; - unsigned long size; - char __maybe_unused c; - - size = entry->relocation_count; - if (size == 0) - return 0; - - if (size > N_RELOC(ULONG_MAX)) - return -EINVAL; - - addr = u64_to_user_ptr(entry->relocs_ptr); - size *= sizeof(struct drm_i915_gem_relocation_entry); - if (!access_ok(addr, size)) - return -EFAULT; - - end = addr + size; - for (; addr < end; addr += PAGE_SIZE) { - int err = __get_user(c, addr); - if (err) - return err; - } - return __get_user(c, end - 1); -} - -static int eb_copy_relocations(const struct i915_execbuffer *eb) +static int eb_relocate(struct i915_execbuffer *eb) { - struct drm_i915_gem_relocation_entry *relocs; - const unsigned int count = eb->buffer_count; - unsigned int i; int err; - for (i = 0; i < count; i++) { - const unsigned int nreloc = eb->exec[i].relocation_count; - struct drm_i915_gem_relocation_entry __user *urelocs; - unsigned long size; - unsigned long copied; - - if (nreloc == 0) - continue; - - err = check_relocations(&eb->exec[i]); - if (err) - goto err; - - urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); - size = nreloc * sizeof(*relocs); - - relocs = kvmalloc_array(size, 1, GFP_KERNEL); - if (!relocs) { - err = -ENOMEM; - goto err; - } - - /* copy_from_user is limited to < 4GiB */ - copied = 0; - do { - unsigned int len = - min_t(u64, BIT_ULL(31), size - copied); - - if (__copy_from_user((char *)relocs + copied, - (char __user *)urelocs + copied, - len)) - goto end; - - copied += len; - } while (copied < size); - - /* - * As we do not update the known relocation offsets after - * relocating (due to the complexities in lock handling), - * we need to mark them as invalid now so that we force the - * relocation processing next time. Just in case the target - * object is evicted and then rebound into its old - * presumed_offset before the next execbuffer - if that - * happened we would make the mistake of assuming that the - * relocations were valid. 
- */ - if (!user_access_begin(urelocs, size)) - goto end; - - for (copied = 0; copied < nreloc; copied++) - unsafe_put_user(-1, - &urelocs[copied].presumed_offset, - end_user); - user_access_end(); - - eb->exec[i].relocs_ptr = (uintptr_t)relocs; - } - - return 0; - -end_user: - user_access_end(); -end: - kvfree(relocs); - err = -EFAULT; -err: - while (i--) { - relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); - if (eb->exec[i].relocation_count) - kvfree(relocs); - } - return err; -} - -static int eb_prefault_relocations(const struct i915_execbuffer *eb) -{ - const unsigned int count = eb->buffer_count; - unsigned int i; - - if (unlikely(i915_modparams.prefault_disable)) - return 0; - - for (i = 0; i < count; i++) { - int err; - - err = check_relocations(&eb->exec[i]); - if (err) - return err; - } - - return 0; -} - -static noinline int eb_relocate_slow(struct i915_execbuffer *eb) -{ - struct drm_device *dev = &eb->i915->drm; - bool have_copy = false; - struct i915_vma *vma; - int err = 0; - -repeat: - if (signal_pending(current)) { - err = -ERESTARTSYS; - goto out; - } - - /* We may process another execbuffer during the unlock... */ - eb_reset_vmas(eb); - mutex_unlock(&dev->struct_mutex); - - /* - * We take 3 passes through the slowpatch. - * - * 1 - we try to just prefault all the user relocation entries and - * then attempt to reuse the atomic pagefault disabled fast path again. - * - * 2 - we copy the user entries to a local buffer here outside of the - * local and allow ourselves to wait upon any rendering before - * relocations - * - * 3 - we already have a local copy of the relocation entries, but - * were interrupted (EAGAIN) whilst waiting for the objects, try again. - */ - if (!err) { - err = eb_prefault_relocations(eb); - } else if (!have_copy) { - err = eb_copy_relocations(eb); - have_copy = err == 0; - } else { - cond_resched(); - err = 0; - } - if (err) { - mutex_lock(&dev->struct_mutex); - goto out; - } - - /* A frequent cause for EAGAIN are currently unavailable client pages */ - flush_workqueue(eb->i915->mm.userptr_wq); - - err = i915_mutex_lock_interruptible(dev); - if (err) { - mutex_lock(&dev->struct_mutex); - goto out; - } - - /* reacquire the objects */ + mutex_lock(&eb->gem_context->mutex); err = eb_lookup_vmas(eb); + mutex_unlock(&eb->gem_context->mutex); if (err) - goto err; - - GEM_BUG_ON(!eb->batch); - - list_for_each_entry(vma, &eb->relocs, reloc_link) { - if (!have_copy) { - pagefault_disable(); - err = eb_relocate_vma(eb, vma); - pagefault_enable(); - if (err) - goto repeat; - } else { - err = eb_relocate_vma_slow(eb, vma); - if (err) - goto err; - } - } - - /* - * Leave the user relocations as are, this is the painfully slow path, - * and we want to avoid the complication of dropping the lock whilst - * having buffers reserved in the aperture and so causing spurious - * ENOSPC for random operations. 
- */ - -err: - if (err == -EAGAIN) - goto repeat; - -out: - if (have_copy) { - const unsigned int count = eb->buffer_count; - unsigned int i; - - for (i = 0; i < count; i++) { - const struct drm_i915_gem_exec_object2 *entry = - &eb->exec[i]; - struct drm_i915_gem_relocation_entry *relocs; - - if (!entry->relocation_count) - continue; + return err; - relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr); - kvfree(relocs); - } + if (!list_empty(&eb->unbound)) { + err = eb_reserve(eb); + if (err) + return err; } - return err; -} - -static int eb_relocate(struct i915_execbuffer *eb) -{ - if (eb_lookup_vmas(eb)) - goto slow; - /* The objects are in their final locations, apply the relocations. */ if (eb->args->flags & __EXEC_HAS_RELOC) { - struct i915_vma *vma; + struct eb_vma *ev; - list_for_each_entry(vma, &eb->relocs, reloc_link) { - if (eb_relocate_vma(eb, vma)) - goto slow; + list_for_each_entry(ev, &eb->relocs, reloc_link) { + err = eb_relocate_vma(eb, ev); + if (err) + return err; } } return 0; - -slow: - return eb_relocate_slow(eb); } static int eb_move_to_gpu(struct i915_execbuffer *eb) @@ -1795,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) ww_acquire_init(&acquire, &reservation_ww_class); for (i = 0; i < count; i++) { - struct i915_vma *vma = eb->vma[i]; + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire); - if (!err) - continue; - - GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */ - if (err == -EDEADLK) { GEM_BUG_ON(i == 0); do { int j = i - 1; - ww_mutex_unlock(&eb->vma[j]->resv->lock); + ww_mutex_unlock(&eb->vma[j].vma->resv->lock); - swap(eb->flags[i], eb->flags[j]); swap(eb->vma[i], eb->vma[j]); - eb->vma[i]->exec_flags = &eb->flags[i]; } while (--i); - GEM_BUG_ON(vma != eb->vma[0]); - vma->exec_flags = &eb->flags[0]; err = ww_mutex_lock_slow_interruptible(&vma->resv->lock, &acquire); @@ -1826,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) ww_acquire_done(&acquire); while (i--) { - unsigned int flags = eb->flags[i]; - struct i915_vma *vma = eb->vma[i]; + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + unsigned int flags = ev->flags; struct drm_i915_gem_object *obj = vma->obj; assert_vma_held(vma); @@ -1871,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) i915_vma_unlock(vma); __eb_unreserve_vma(vma, flags); - vma->exec_flags = NULL; + i915_vma_put(vma); - if (unlikely(flags & __EXEC_OBJECT_HAS_REF)) - i915_vma_put(vma); + ev->vma = NULL; } ww_acquire_fini(&acquire); @@ -1888,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) return 0; err_skip: - i915_request_skip(eb->request, err); + i915_request_set_error_once(eb->request, err); return err; } @@ -1922,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) int i; if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { - DRM_DEBUG("sol reset is gen7/rcs only\n"); + drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -2009,7 +1736,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, if (!pw) return -ENOMEM; - err = i915_active_acquire(&eb->batch->active); + err = i915_active_acquire(&eb->batch->vma->active); if (err) goto err_free; @@ -2026,7 +1753,7 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb, dma_fence_work_init(&pw->base, &eb_parse_ops); pw->engine = eb->engine; - pw->batch = eb->batch; + pw->batch = eb->batch->vma; pw->batch_offset = eb->batch_start_offset; pw->batch_length = 
eb->batch_len; pw->shadow = shadow; @@ -2068,7 +1795,7 @@ err_trampoline: err_shadow: i915_active_release(&shadow->active); err_batch: - i915_active_release(&eb->batch->active); + i915_active_release(&eb->batch->vma->active); err_free: kfree(pw); return err; @@ -2076,6 +1803,7 @@ err_free: static int eb_parse(struct i915_execbuffer *eb) { + struct drm_i915_private *i915 = eb->i915; struct intel_engine_pool_node *pool; struct i915_vma *shadow, *trampoline; unsigned int len; @@ -2091,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb) * post-scan tampering */ if (!eb->context->vm->has_read_only) { - DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n"); + drm_dbg(&i915->drm, + "Cannot prevent post-scan tampering without RO capable vm\n"); return -EINVAL; } } else { @@ -2129,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb) if (err) goto err_trampoline; - eb->vma[eb->buffer_count] = i915_vma_get(shadow); - eb->flags[eb->buffer_count] = - __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF; - shadow->exec_flags = &eb->flags[eb->buffer_count]; - eb->buffer_count++; + eb->vma[eb->buffer_count].vma = i915_vma_get(shadow); + eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN; + eb->batch = &eb->vma[eb->buffer_count++]; eb->trampoline = trampoline; eb->batch_start_offset = 0; - eb->batch = shadow; shadow->private = pool; return 0; @@ -2164,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file) spin_unlock(&file_priv->mm.lock); } -static int eb_submit(struct i915_execbuffer *eb) +static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) { int err; @@ -2191,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb) } err = eb->engine->emit_bb_start(eb->request, - eb->batch->node.start + + batch->node.start + eb->batch_start_offset, eb->batch_len, eb->batch_flags); @@ -2326,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) intel_context_timeline_unlock(tl); if (rq) { - if (i915_request_wait(rq, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT) < 0) { - i915_request_put(rq); - err = -EINTR; - goto err_exit; - } + bool nonblock = eb->file->filp->f_flags & O_NONBLOCK; + long timeout; + timeout = MAX_SCHEDULE_TIMEOUT; + if (nonblock) + timeout = 0; + + timeout = i915_request_wait(rq, + I915_WAIT_INTERRUPTIBLE, + timeout); i915_request_put(rq); + + if (timeout < 0) { + err = nonblock ? 
-EWOULDBLOCK : timeout; + goto err_exit; + } } eb->engine = ce->engine; @@ -2372,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb, if (user_ring_id != I915_EXEC_BSD && (args->flags & I915_EXEC_BSD_MASK)) { - DRM_DEBUG("execbuf with non bsd ring but with invalid " - "bsd dispatch flags: %d\n", (int)(args->flags)); + drm_dbg(&i915->drm, + "execbuf with non bsd ring but with invalid " + "bsd dispatch flags: %d\n", (int)(args->flags)); return -1; } @@ -2387,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb, bsd_idx >>= I915_EXEC_BSD_SHIFT; bsd_idx--; } else { - DRM_DEBUG("execbuf with unknown bsd ring: %u\n", - bsd_idx); + drm_dbg(&i915->drm, + "execbuf with unknown bsd ring: %u\n", + bsd_idx); return -1; } @@ -2396,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb, } if (user_ring_id >= ARRAY_SIZE(user_ring_map)) { - DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id); + drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n", + user_ring_id); return -1; } @@ -2556,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb, } } +static void retire_requests(struct intel_timeline *tl, struct i915_request *end) +{ + struct i915_request *rq, *rn; + + list_for_each_entry_safe(rq, rn, &tl->requests, link) + if (rq == end || !i915_request_retire(rq)) + break; +} + +static void eb_request_add(struct i915_execbuffer *eb) +{ + struct i915_request *rq = eb->request; + struct intel_timeline * const tl = i915_request_timeline(rq); + struct i915_sched_attr attr = {}; + struct i915_request *prev; + + lockdep_assert_held(&tl->mutex); + lockdep_unpin_lock(&tl->mutex, rq->cookie); + + trace_i915_request_add(rq); + + prev = __i915_request_commit(rq); + + /* Check that the context wasn't destroyed before submission */ + if (likely(!intel_context_is_closed(eb->context))) { + attr = eb->gem_context->sched; + + /* + * Boost actual workloads past semaphores! + * + * With semaphores we spin on one engine waiting for another, + * simply to reduce the latency of starting our work when + * the signaler completes. However, if there is any other + * work that we could be doing on this engine instead, that + * is better utilisation and will reduce the overall duration + * of the current work. To avoid PI boosting a semaphore + * far in the distance past over useful work, we keep a history + * of any semaphore use along our dependency chain. + */ + if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN)) + attr.priority |= I915_PRIORITY_NOSEMAPHORE; + + /* + * Boost priorities to new clients (new request flows). + * + * Allow interactive/synchronous clients to jump ahead of + * the bulk clients. 
(FQ_CODEL) + */ + if (list_empty(&rq->sched.signalers_list)) + attr.priority |= I915_PRIORITY_WAIT; + } else { + /* Serialise with context_close via the add_to_timeline */ + i915_request_set_error_once(rq, -ENOENT); + __i915_request_skip(rq); + } + + local_bh_disable(); + __i915_request_queue(rq, &attr); + local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ + + /* Try to clean up the client's timeline after submitting the request */ + if (prev) + retire_requests(tl, prev); + + mutex_unlock(&tl->mutex); +} + static int i915_gem_do_execbuffer(struct drm_device *dev, struct drm_file *file, @@ -2568,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, struct dma_fence *in_fence = NULL; struct dma_fence *exec_fence = NULL; struct sync_file *out_fence = NULL; + struct i915_vma *batch; int out_fence_fd = -1; int err; @@ -2582,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, args->flags |= __EXEC_HAS_RELOC; eb.exec = exec; - eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1); - eb.vma[0] = NULL; - eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); + eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1); + eb.vma[0].vma = NULL; eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; reloc_cache_init(&eb.reloc_cache, eb.i915); @@ -2652,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (unlikely(err)) goto err_context; - err = i915_mutex_lock_interruptible(dev); - if (err) - goto err_engine; - err = eb_relocate(&eb); if (err) { /* @@ -2669,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) { - DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); + if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) { + drm_dbg(&i915->drm, + "Attempting to use self-modifying batch buffer\n"); err = -EINVAL; goto err_vma; } - if (eb.batch_start_offset > eb.batch->size || - eb.batch_len > eb.batch->size - eb.batch_start_offset) { - DRM_DEBUG("Attempting to use out-of-bounds batch\n"); + + if (range_overflows_t(u64, + eb.batch_start_offset, eb.batch_len, + eb.batch->vma->size)) { + drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n"); err = -EINVAL; goto err_vma; } if (eb.batch_len == 0) - eb.batch_len = eb.batch->size - eb.batch_start_offset; + eb.batch_len = eb.batch->vma->size - eb.batch_start_offset; err = eb_parse(&eb); if (err) @@ -2692,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure * batch" bit. Hence we need to pin secure batches into the global gtt. * hsw should have this fixed, but bdw mucks it up again. */ + batch = eb.batch->vma; if (eb.batch_flags & I915_DISPATCH_SECURE) { struct i915_vma *vma; @@ -2705,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, * fitting due to fragmentation. * So this is actually safe. */ - vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); + vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err_vma; + goto err_parse; } - eb.batch = vma; + batch = vma; } /* All GPU relocation batches must be submitted prior to the user rq */ @@ -2758,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, * inactive_list and lose its active reference. Hence we do not need * to explicitly hold another reference here. 
*/ - eb.request->batch = eb.batch; - if (eb.batch->private) - intel_engine_pool_mark_active(eb.batch->private, eb.request); + eb.request->batch = batch; + if (batch->private) + intel_engine_pool_mark_active(batch->private, eb.request); trace_i915_request_queue(eb.request, eb.batch_flags); - err = eb_submit(&eb); + err = eb_submit(&eb, batch); err_request: add_to_client(eb.request, file); i915_request_get(eb.request); - i915_request_add(eb.request); + eb_request_add(&eb); if (fences) signal_fence_array(&eb, fences); @@ -2786,16 +2589,15 @@ err_request: err_batch_unpin: if (eb.batch_flags & I915_DISPATCH_SECURE) - i915_vma_unpin(eb.batch); - if (eb.batch->private) - intel_engine_pool_put(eb.batch->private); + i915_vma_unpin(batch); +err_parse: + if (batch->private) + intel_engine_pool_put(batch->private); err_vma: if (eb.exec) eb_release_vmas(&eb); if (eb.trampoline) i915_vma_unpin(eb.trampoline); - mutex_unlock(&dev->struct_mutex); -err_engine: eb_unpin_engine(&eb); err_context: i915_gem_context_put(eb.gem_context); @@ -2813,9 +2615,7 @@ err_in_fence: static size_t eb_element_size(void) { - return (sizeof(struct drm_i915_gem_exec_object2) + - sizeof(struct i915_vma *) + - sizeof(unsigned int)); + return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma); } static bool check_buffer_count(size_t count) @@ -2839,6 +2639,7 @@ int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_exec_object *exec_list = NULL; @@ -2848,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, int err; if (!check_buffer_count(count)) { - DRM_DEBUG("execbuf2 with %zd buffers\n", count); + drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); return -EINVAL; } @@ -2873,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, exec2_list = kvmalloc_array(count + 1, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec_list == NULL || exec2_list == NULL) { - DRM_DEBUG("Failed to allocate exec list for %d buffers\n", - args->buffer_count); + drm_dbg(&i915->drm, + "Failed to allocate exec list for %d buffers\n", + args->buffer_count); kvfree(exec_list); kvfree(exec2_list); return -ENOMEM; @@ -2883,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, u64_to_user_ptr(args->buffers_ptr), sizeof(*exec_list) * count); if (err) { - DRM_DEBUG("copy %d exec entries failed %d\n", - args->buffer_count, err); + drm_dbg(&i915->drm, "copy %d exec entries failed %d\n", + args->buffer_count, err); kvfree(exec_list); kvfree(exec2_list); return -EFAULT; @@ -2931,6 +2733,7 @@ int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list; struct drm_syncobj **fences = NULL; @@ -2938,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, int err; if (!check_buffer_count(count)) { - DRM_DEBUG("execbuf2 with %zd buffers\n", count); + drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count); return -EINVAL; } @@ -2950,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, exec2_list = kvmalloc_array(count + 1, eb_element_size(), __GFP_NOWARN | GFP_KERNEL); if (exec2_list == NULL) { - DRM_DEBUG("Failed to allocate exec list for %zd buffers\n", - count); + 
drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n", + count); return -ENOMEM; } if (copy_from_user(exec2_list, u64_to_user_ptr(args->buffers_ptr), sizeof(*exec2_list) * count)) { - DRM_DEBUG("copy %zd exec entries failed\n", count); + drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count); kvfree(exec2_list); return -EFAULT; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 9cfb0e41ff06..cbbff81aa0af 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -8,8 +8,6 @@ #include <linux/slab.h> #include <linux/swiotlb.h> -#include <drm/i915_drm.h> - #include "i915_drv.h" #include "i915_gem.h" #include "i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 0b6a442108de..b39c24dae64e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -613,8 +613,7 @@ __assign_mmap_offset(struct drm_file *file, if (!obj) return -ENOENT; - if (mmap_type == I915_MMAP_TYPE_GTT && - i915_gem_object_never_bind_ggtt(obj)) { + if (i915_gem_object_never_mmap(obj)) { err = -ENODEV; goto out; } @@ -776,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915) struct file *file; rcu_read_lock(); - file = i915->gem.mmap_singleton; + file = READ_ONCE(i915->gem.mmap_singleton); if (file && !get_file_rcu(file)) file = NULL; rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9c86f2dea947..2faa481cc18f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -11,8 +11,6 @@ #include <drm/drm_file.h> #include <drm/drm_device.h> -#include <drm/i915_drm.h> - #include "display/intel_frontbuffer.h" #include "i915_gem_object_types.h" #include "i915_gem_gtt.h" @@ -194,9 +192,9 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) } static inline bool -i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj) +i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj) { - return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT); + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP); } static inline bool diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index 70809d8897cd..e00792158f13 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, 0); out_request: if (unlikely(err)) - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); i915_request_add(rq); out_batch: @@ -196,6 +196,17 @@ out_unpin: return err; } +/* Wa_1209644611:icl,ehl */ +static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size) +{ + u32 height = size >> PAGE_SHIFT; + + if (!IS_GEN(i915, 11)) + return false; + + return height % 4 == 3 && height <= 8; +} + struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, struct i915_vma *src, struct i915_vma *dst) @@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, size = min_t(u64, rem, block_size); GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); - if (INTEL_GEN(i915) >= 9) { + if (INTEL_GEN(i915) >= 9 && + !wa_1209644611_applies(i915, size)) { *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2); *cmd++ = BLT_DEPTH_32 | 
PAGE_SIZE; *cmd++ = 0; @@ -385,7 +397,7 @@ out_unlock: drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire); out_request: if (unlikely(err)) - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); i915_request_add(rq); out_batch: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index c2174da35bb0..a0b10bcd8d8a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -34,7 +34,7 @@ struct drm_i915_gem_object_ops { #define I915_GEM_OBJECT_HAS_IOMEM BIT(1) #define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2) #define I915_GEM_OBJECT_IS_PROXY BIT(3) -#define I915_GEM_OBJECT_NO_GGTT BIT(4) +#define I915_GEM_OBJECT_NO_MMAP BIT(4) #define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5) /* Interface between the GEM object and its backing storage. diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 54aca5c9101e..24f4cadea114 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -83,10 +83,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); int err; if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { - DRM_DEBUG("Attempting to obtain a purgeable object\n"); + drm_dbg(&i915->drm, + "Attempting to obtain a purgeable object\n"); return -EFAULT; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index b07bb40edd5a..698e22420dc5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -194,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) /* Perma-pin (until release) the physical set of pages */ __i915_gem_object_pin_pages(obj); - if (!IS_ERR_OR_NULL(pages)) { + if (!IS_ERR_OR_NULL(pages)) i915_gem_shmem_ops.put_pages(obj, pages); - i915_gem_object_release_memory_region(obj); - } + + i915_gem_object_release_memory_region(obj); + mutex_unlock(&obj->mm.lock); return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index c8264eb036bf..3d215164dd5a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -85,7 +85,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) spin_unlock_irqrestore(&i915->mm.obj_lock, flags); i915_gem_object_lock(obj); - WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + drm_WARN_ON(&i915->drm, + i915_gem_object_set_to_gtt_domain(obj, false)); i915_gem_object_unlock(obj); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index a2a980d9d241..5d5d7eef3f43 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -148,7 +148,8 @@ rebuild_st: last_pfn = page_to_pfn(page); /* Check that the i965g/gm workaround works. 
*/ - WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); + drm_WARN_ON(&i915->drm, + (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); } if (sg) { /* loop terminated early; short sg table */ sg_page_sizes |= sg->length; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 59b387ade49c..03e5eb4c99d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -12,7 +12,6 @@ #include <linux/pci.h> #include <linux/dma-buf.h> #include <linux/vmalloc.h> -#include <drm/i915_drm.h> #include "i915_trace.h" @@ -401,19 +400,22 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915) i915->mm.shrinker.count_objects = i915_gem_shrinker_count; i915->mm.shrinker.seeks = DEFAULT_SEEKS; i915->mm.shrinker.batch = 4096; - WARN_ON(register_shrinker(&i915->mm.shrinker)); + drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker)); i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - WARN_ON(register_oom_notifier(&i915->mm.oom_notifier)); + drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier)); i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; - WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier)); + drm_WARN_ON(&i915->drm, + register_vmap_purge_notifier(&i915->mm.vmap_notifier)); } void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915) { - WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); - WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); + drm_WARN_ON(&i915->drm, + unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); + drm_WARN_ON(&i915->drm, + unregister_oom_notifier(&i915->mm.oom_notifier)); unregister_shrinker(&i915->mm.shrinker); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 451f3078d60d..5557dfa83a7b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -13,6 +13,7 @@ #include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_stolen.h" +#include "i915_vgpu.h" /* * The BIOS typically reserves some of the system's memory for the exclusive @@ -110,8 +111,11 @@ static int i915_adjust_stolen(struct drm_i915_private *i915, if (stolen[0].start != stolen[1].start || stolen[0].end != stolen[1].end) { - DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res); - DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm); + drm_dbg(&i915->drm, + "GTT within stolen memory at %pR\n", + &ggtt_res); + drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n", + dsm); } } @@ -142,8 +146,9 @@ static int i915_adjust_stolen(struct drm_i915_private *i915, * range. Apparently this works. */ if (!r && !IS_GEN(i915, 3)) { - DRM_ERROR("conflict detected with stolen region: %pR\n", - dsm); + drm_err(&i915->drm, + "conflict detected with stolen region: %pR\n", + dsm); return -EBUSY; } @@ -171,8 +176,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915, ELK_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.end + 1; - DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n", - IS_GM45(i915) ? "CTG" : "ELK", reg_val); + drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n", + IS_GM45(i915) ? "CTG" : "ELK", reg_val); if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) return; @@ -181,14 +186,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915, * Whether ILK really reuses the ELK register for this is unclear. 
* Let's see if we catch anyone with this supposedly enabled on ILK. */ - WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n", - reg_val); + drm_WARN(&i915->drm, IS_GEN(i915, 5), + "ILK stolen reserved found? 0x%08x\n", + reg_val); if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK)) return; *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; - WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); + drm_WARN_ON(&i915->drm, + (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base); *size = stolen_top - *base; } @@ -200,7 +207,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *i915, { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; @@ -234,7 +241,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915, u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.end + 1; - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; @@ -262,7 +269,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *i915, { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; @@ -289,7 +296,7 @@ static void chv_get_stolen_reserved(struct drm_i915_private *i915, { u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; @@ -323,7 +330,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915, u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED); resource_size_t stolen_top = i915->dsm.end + 1; - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val); if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE)) return; @@ -342,7 +349,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915, { u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED); - DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); + drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; @@ -453,8 +460,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) * it likely means we failed to read the registers correctly. 
*/ if (!reserved_base) { - DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n", - &reserved_base, &reserved_size); + drm_err(&i915->drm, + "inconsistent reservation %pa + %pa; ignoring\n", + &reserved_base, &reserved_size); reserved_base = stolen_top; reserved_size = 0; } @@ -463,8 +471,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size); if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) { - DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n", - &i915->dsm_reserved, &i915->dsm); + drm_err(&i915->drm, + "Stolen reserved area %pR outside stolen memory %pR\n", + &i915->dsm_reserved, &i915->dsm); return 0; } @@ -472,9 +481,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915) * memory, so just consider the start. */ reserved_total = stolen_top - reserved_base; - DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n", - (u64)resource_size(&i915->dsm) >> 10, - ((u64)resource_size(&i915->dsm) - reserved_total) >> 10); + drm_dbg(&i915->drm, + "Memory reserved for graphics device: %lluK, usable: %lluK\n", + (u64)resource_size(&i915->dsm) >> 10, + ((u64)resource_size(&i915->dsm) - reserved_total) >> 10); i915->stolen_usable_size = resource_size(&i915->dsm) - reserved_total; @@ -677,26 +687,24 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) struct drm_i915_gem_object * i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, resource_size_t stolen_offset, - resource_size_t gtt_offset, resource_size_t size) { struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN]; - struct i915_ggtt *ggtt = &i915->ggtt; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; - struct i915_vma *vma; int ret; if (!drm_mm_initialized(&i915->mm.stolen)) return ERR_PTR(-ENODEV); - DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", - &stolen_offset, &gtt_offset, &size); + drm_dbg(&i915->drm, + "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n", + &stolen_offset, &size); /* KISS and expect everything to be page-aligned */ - if (WARN_ON(size == 0) || - WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || - WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) + if (GEM_WARN_ON(size == 0) || + GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || + GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) return ERR_PTR(-EINVAL); stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); @@ -709,68 +717,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915, ret = drm_mm_reserve_node(&i915->mm.stolen, stolen); mutex_unlock(&i915->mm.stolen_lock); if (ret) { - DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); - kfree(stolen); - return ERR_PTR(ret); + obj = ERR_PTR(ret); + goto err_free; } obj = __i915_gem_object_create_stolen(mem, stolen); - if (IS_ERR(obj)) { - DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); - i915_gem_stolen_remove_node(i915, stolen); - kfree(stolen); - return obj; - } - - /* Some objects just need physical mem from stolen space */ - if (gtt_offset == I915_GTT_OFFSET_NONE) - return obj; - - ret = i915_gem_object_pin_pages(obj); - if (ret) - goto err; - - vma = i915_vma_instance(obj, &ggtt->vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_pages; - } - - /* To simplify the initialisation sequence between KMS and GTT, * we allow construction of the stolen object prior
to - * setting up the GTT space. The actual reservation will occur - * later. - */ - mutex_lock(&ggtt->vm.mutex); - ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, - size, gtt_offset, obj->cache_level, - 0); - if (ret) { - DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n"); - mutex_unlock(&ggtt->vm.mutex); - goto err_pages; - } - - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - - GEM_BUG_ON(vma->pages); - vma->pages = obj->mm.pages; - atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE); - - set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); - __i915_vma_set_map_and_fenceable(vma); - - list_add_tail(&vma->vm_link, &ggtt->vm.bound_list); - mutex_unlock(&ggtt->vm.mutex); - - GEM_BUG_ON(i915_gem_object_is_shrinkable(obj)); - atomic_inc(&obj->bind_count); + if (IS_ERR(obj)) + goto err_stolen; + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); return obj; -err_pages: - i915_gem_object_unpin_pages(obj); -err: - i915_gem_object_put(obj); - return ERR_PTR(ret); +err_stolen: + i915_gem_stolen_remove_node(i915, stolen); +err_free: + kfree(stolen); + return obj; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index c1040627fbf3..e15c0adad8af 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -28,7 +28,6 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, struct drm_i915_gem_object * i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv, resource_size_t stolen_offset, - resource_size_t gtt_offset, resource_size_t size); #endif /* __I915_GEM_STOLEN_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 6c7825a2dc2a..0158e49bf9bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -6,7 +6,6 @@ #include <linux/string.h> #include <linux/bitops.h> -#include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem.h" @@ -183,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode, unsigned int stride) { struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - struct i915_vma *vma; + struct i915_vma *vma, *vn; + LIST_HEAD(unbind); int ret = 0; if (tiling_mode == I915_TILING_NONE) return 0; mutex_lock(&ggtt->vm.mutex); + + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { + GEM_BUG_ON(vma->vm != &ggtt->vm); + if (i915_vma_fence_prepare(vma, tiling_mode, stride)) continue; + list_move(&vma->vm_link, &unbind); + } + spin_unlock(&obj->vma.lock); + + list_for_each_entry_safe(vma, vn, &unbind, vm_link) { ret = __i915_vma_unbind(vma); - if (ret) + if (ret) { + /* Restore the remaining vma on an error */ + list_splice(&unbind, &ggtt->vm.bound_list); break; + } } + mutex_unlock(&ggtt->vm.mutex); return ret; @@ -269,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, } mutex_unlock(&obj->mm.lock); + spin_lock(&obj->vma.lock); for_each_ggtt_vma(vma, obj) { vma->fence_size = i915_gem_fence_size(i915, vma->size, tiling, stride); @@ -279,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, if (vma->fence) vma->fence->dirty = true; } + spin_unlock(&obj->vma.lock); obj->tiling_and_stride = tiling | stride; i915_gem_object_unlock(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 580319b7bf1a..7ffd7afeb7a5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -10,8 +10,6 @@ #include <linux/swap.h> #include <linux/sched/mm.h> -#include <drm/i915_drm.h> - #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -704,7 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_IS_SHRINKABLE | - I915_GEM_OBJECT_NO_GGTT | + I915_GEM_OBJECT_NO_MMAP | I915_GEM_OBJECT_ASYNC_CANCEL, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, @@ -770,6 +768,23 @@ i915_gem_userptr_ioctl(struct drm_device *dev, I915_USERPTR_UNSYNCHRONIZED)) return -EINVAL; + /* + * XXX: There is a prevalence of the assumption that we fit the + * object's page count inside a 32bit _signed_ variable. Let's document + * this and catch if we ever need to fix it. In the meantime, if you do + * spot such a local variable, please consider fixing! + * + * Aside from our own locals (for which we have no excuse!): + * - sg_table embeds unsigned int for num_pages + * - get_user_pages*() mixed ints with longs + */ + + if (args->user_size >> PAGE_SHIFT > INT_MAX) + return -E2BIG; + + if (overflows_type(args->user_size, obj->base.size)) + return -E2BIG; + if (!args->user_size) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 9311250d7d6f..d4f94ca9ae0d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1208,107 +1208,6 @@ static int igt_write_huge(struct i915_gem_context *ctx, return err; } -static int igt_ppgtt_exhaust_huge(void *arg) -{ - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - unsigned long supported = INTEL_INFO(i915)->page_sizes; - static unsigned int pages[ARRAY_SIZE(page_sizes)]; - struct drm_i915_gem_object *obj; - unsigned int size_mask; - unsigned int page_mask; - int n, i; - int err = -ENODEV; - - if (supported == I915_GTT_PAGE_SIZE_4K) - return 0; - - /* - * Sanity check creating objects with a varying mix of page sizes -- - * ensuring that our writes lands in the right place. - */ - - n = 0; - for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) - pages[n++] = BIT(i); - - for (size_mask = 2; size_mask < BIT(n); size_mask++) { - unsigned int size = 0; - - for (i = 0; i < n; i++) { - if (size_mask & BIT(i)) - size |= pages[i]; - } - - /* - * For our page mask we want to enumerate all the page-size - * combinations which will fit into our chosen object size. - */ - for (page_mask = 2; page_mask <= size_mask; page_mask++) { - unsigned int page_sizes = 0; - - for (i = 0; i < n; i++) { - if (page_mask & BIT(i)) - page_sizes |= pages[i]; - } - - /* - * Ensure that we can actually fill the given object - * with our chosen page mask. 
- */ - if (!IS_ALIGNED(size, BIT(__ffs(page_sizes)))) - continue; - - obj = huge_pages_object(i915, size, page_sizes); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_device; - } - - err = i915_gem_object_pin_pages(obj); - if (err) { - i915_gem_object_put(obj); - - if (err == -ENOMEM) { - pr_info("unable to get pages, size=%u, pages=%u\n", - size, page_sizes); - err = 0; - break; - } - - pr_err("pin_pages failed, size=%u, pages=%u\n", - size_mask, page_mask); - - goto out_device; - } - - /* Force the page-size for the gtt insertion */ - obj->mm.page_sizes.sg = page_sizes; - - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("exhaust write-huge failed with size=%u\n", - size); - goto out_unpin; - } - - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj); - i915_gem_object_put(obj); - } - } - - goto out_device; - -out_unpin: - i915_gem_object_unpin_pages(obj); - i915_gem_object_put(obj); -out_device: - mkwrite_device_info(i915)->page_sizes = supported; - - return err; -} - typedef struct drm_i915_gem_object * (*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags); @@ -1578,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg) unsigned int page_size = BIT(first); obj = i915_gem_object_create_internal(dev_priv, page_size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { @@ -1632,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg) } obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { @@ -1900,7 +1803,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_shrink_thp), SUBTEST(igt_ppgtt_pin_update), SUBTEST(igt_tmpfs_fallback), - SUBTEST(igt_ppgtt_exhaust_huge), SUBTEST(igt_ppgtt_smoke_huge), SUBTEST(igt_ppgtt_sanity_check), }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 7fc46861a54d..54b86cf7f5d2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, return 0; skip_request: - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err_request: i915_request_add(rq); err_batch: @@ -1465,9 +1465,12 @@ out_file: static int check_scratch(struct i915_address_space *vm, u64 offset) { - struct drm_mm_node *node = - __drm_mm_interval_first(&vm->mm, - offset, offset + sizeof(u32) - 1); + struct drm_mm_node *node; + + mutex_lock(&vm->mutex); + node = __drm_mm_interval_first(&vm->mm, + offset, offset + sizeof(u32) - 1); + mutex_unlock(&vm->mutex); if (!node || node->start > offset) return 0; @@ -1492,6 +1495,10 @@ static int write_to_scratch(struct i915_gem_context *ctx, GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + err = check_scratch(ctx_vm(ctx), offset); + if (err) + return err; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -1528,10 +1535,6 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto out_vm; - err = check_scratch(vm, offset); - if (err) - goto err_unpin; - rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); @@ -1556,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, goto out_vm; 
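/*
 * Editor's note: an illustrative sketch, not part of this patch. The
 * skip_request unwind paths in these selftests move from
 * i915_request_skip() to i915_request_set_error_once(), whose point is
 * that only the first error recorded against a request sticks, however
 * many unwind paths race to report one. A minimal userspace analogue of
 * that idiom, using hypothetical names (fake_request, set_error_once)
 * and C11 atomics:
 */
#include <stdatomic.h>

struct fake_request {
	_Atomic int error;	/* stand-in for rq->fence.error */
};

static void set_error_once(struct fake_request *rq, int err)
{
	int expected = 0;

	/* Only the first caller installs its errno; later calls are no-ops. */
	atomic_compare_exchange_strong(&rq->error, &expected, err);
}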
skip_request: - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1575,64 +1578,95 @@ static int read_from_scratch(struct i915_gem_context *ctx, struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_object *obj; struct i915_address_space *vm; - const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */ const u32 result = 0x100; struct i915_request *rq; struct i915_vma *vma; + unsigned int flags; u32 *cmd; int err; GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); + err = check_scratch(ctx_vm(ctx), offset); + if (err) + return err; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(cmd)) { - err = PTR_ERR(cmd); - goto out; - } - - memset(cmd, POISON_INUSE, PAGE_SIZE); if (INTEL_GEN(i915) >= 8) { + const u32 GPR0 = engine->mmio_base + 0x600; + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_vm; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); + if (err) + goto out_vm; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto out; + } + + memset(cmd, POISON_INUSE, PAGE_SIZE); *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; - *cmd++ = RCS_GPR0; + *cmd++ = GPR0; *cmd++ = lower_32_bits(offset); *cmd++ = upper_32_bits(offset); *cmd++ = MI_STORE_REGISTER_MEM_GEN8; - *cmd++ = RCS_GPR0; + *cmd++ = GPR0; *cmd++ = result; *cmd++ = 0; + *cmd = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + flags = 0; } else { + const u32 reg = engine->mmio_base + 0x420; + + /* hsw: register access even to 3DPRIM! 
is protected */ + vm = i915_vm_get(&engine->gt->ggtt->vm); + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_vm; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto out_vm; + + cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto out; + } + + memset(cmd, POISON_INUSE, PAGE_SIZE); *cmd++ = MI_LOAD_REGISTER_MEM; - *cmd++ = RCS_GPR0; + *cmd++ = reg; *cmd++ = offset; - *cmd++ = MI_STORE_REGISTER_MEM; - *cmd++ = RCS_GPR0; - *cmd++ = result; - } - *cmd = MI_BATCH_BUFFER_END; + *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT; + *cmd++ = reg; + *cmd++ = vma->node.start + result; + *cmd = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); - intel_gt_chipset_flush(engine->gt); - - vm = i915_gem_context_get_vm_rcu(ctx); - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_vm; + flags = I915_DISPATCH_SECURE; } - err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); - if (err) - goto out_vm; - - err = check_scratch(vm, offset); - if (err) - goto err_unpin; + intel_gt_chipset_flush(engine->gt); rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { @@ -1640,7 +1674,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, goto err_unpin; } - err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); + err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags); if (err) goto err_request; @@ -1674,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, goto out_vm; skip_request: - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1686,6 +1720,39 @@ out: return err; } +static int check_scratch_page(struct i915_gem_context *ctx, u32 *out) +{ + struct i915_address_space *vm; + struct page *page; + u32 *vaddr; + int err = 0; + + vm = ctx_vm(ctx); + if (!vm) + return -ENODEV; + + page = vm->scratch[0].base.page; + if (!page) { + pr_err("No scratch page!\n"); + return -EINVAL; + } + + vaddr = kmap(page); + if (!vaddr) { + pr_err("No (mappable) scratch page!\n"); + return -EINVAL; + } + + memcpy(out, vaddr, sizeof(*out)); + if (memchr_inv(vaddr, *out, PAGE_SIZE)) { + pr_err("Inconsistent initial state of scratch page!\n"); + err = -EINVAL; + } + kunmap(page); + + return err; +} + static int igt_vm_isolation(void *arg) { struct drm_i915_private *i915 = arg; @@ -1696,6 +1763,7 @@ static int igt_vm_isolation(void *arg) I915_RND_STATE(prng); struct file *file; u64 vm_total; + u32 expected; int err; if (INTEL_GEN(i915) < 7) @@ -1730,9 +1798,17 @@ static int igt_vm_isolation(void *arg) if (ctx_vm(ctx_a) == ctx_vm(ctx_b)) goto out_file; + /* Read the initial state of the scratch page */ + err = check_scratch_page(ctx_a, &expected); + if (err) + goto out_file; + + err = check_scratch_page(ctx_b, &expected); + if (err) + goto out_file; + vm_total = ctx_vm(ctx_a)->total; GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total); - vm_total -= I915_GTT_PAGE_SIZE; count = 0; num_engines = 0; @@ -1743,14 +1819,18 @@ static int igt_vm_isolation(void *arg) if (!intel_engine_can_store_dword(engine)) continue; + /* Not all engines have their own GPR! 
*/ + if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS) + continue; + while (!__igt_timeout(end_time, NULL)) { u32 value = 0xc5c5c5c5; u64 offset; - div64_u64_rem(i915_prandom_u64_state(&prng), - vm_total, &offset); - offset = round_down(offset, alignof_dword); - offset += I915_GTT_PAGE_SIZE; + /* Leave enough space at offset 0 for the batch */ + offset = igt_random_offset(&prng, + I915_GTT_PAGE_SIZE, vm_total, + sizeof(u32), alignof_dword); err = write_to_scratch(ctx_a, engine, offset, 0xdeadbeef); @@ -1760,7 +1840,7 @@ static int igt_vm_isolation(void *arg) if (err) goto out_file; - if (value) { + if (value != expected) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", engine->name, value, upper_32_bits(offset), diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index 62077fe46715..31549ad83fa6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg) struct intel_context *ce; unsigned int prio; IGT_TIMEOUT(end); + u64 total, max; int err; ctx = thread->ctx; @@ -225,27 +226,32 @@ static int igt_fill_blt_thread(void *arg) ce = i915_gem_context_get_engine(ctx, BCS0); GEM_BUG_ON(IS_ERR(ce)); + /* + * If we have a tiny shared address space, like for the GGTT + * then we can't be too greedy. + */ + max = ce->vm->total; + if (i915_is_ggtt(ce->vm) || thread->ctx) + max = div_u64(max, thread->n_cpus); + max >>= 4; + + total = PAGE_SIZE; do { - const u32 max_block_size = S16_MAX * PAGE_SIZE; + /* Aim to keep the runtime under reasonable bounds! */ + const u32 max_phys_size = SZ_64K; u32 val = prandom_u32_state(prng); - u64 total = ce->vm->total; u32 phys_sz; u32 sz; u32 *vaddr; u32 i; - /* - * If we have a tiny shared address space, like for the GGTT - * then we can't be too greedy. 
- */ - if (i915_is_ggtt(ce->vm)) - total = div64_u64(total, thread->n_cpus); - - sz = min_t(u64, total >> 4, prandom_u32_state(prng)); - phys_sz = sz % (max_block_size + 1); + total = min(total, max); + sz = i915_prandom_u32_max_state(total, prng) + 1; + phys_sz = sz % max_phys_size + 1; sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); + phys_sz = min(phys_sz, sz); pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__, phys_sz, sz, val); @@ -276,13 +282,14 @@ static int igt_fill_blt_thread(void *arg) if (err) goto err_unpin; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_cpu_domain(obj, false); - i915_gem_object_unlock(obj); + err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT); if (err) goto err_unpin; - for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) { + for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) { + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i])); + if (vaddr[i] != val) { pr_err("vaddr[%u]=%x, expected=%x\n", i, vaddr[i], val); @@ -293,6 +300,8 @@ static int igt_fill_blt_thread(void *arg) i915_gem_object_unpin_map(obj); i915_gem_object_put(obj); + + total <<= 1; } while (!time_after(jiffies, end)); goto err_flush; @@ -319,6 +328,7 @@ static int igt_copy_blt_thread(void *arg) struct intel_context *ce; unsigned int prio; IGT_TIMEOUT(end); + u64 total, max; int err; ctx = thread->ctx; @@ -334,23 +344,32 @@ static int igt_copy_blt_thread(void *arg) ce = i915_gem_context_get_engine(ctx, BCS0); GEM_BUG_ON(IS_ERR(ce)); + /* + * If we have a tiny shared address space, like for the GGTT + * then we can't be too greedy. + */ + max = ce->vm->total; + if (i915_is_ggtt(ce->vm) || thread->ctx) + max = div_u64(max, thread->n_cpus); + max >>= 4; + + total = PAGE_SIZE; do { - const u32 max_block_size = S16_MAX * PAGE_SIZE; + /* Aim to keep the runtime under reasonable bounds! 
*/ + const u32 max_phys_size = SZ_64K; u32 val = prandom_u32_state(prng); - u64 total = ce->vm->total; u32 phys_sz; u32 sz; u32 *vaddr; u32 i; - if (i915_is_ggtt(ce->vm)) - total = div64_u64(total, thread->n_cpus); - - sz = min_t(u64, total >> 4, prandom_u32_state(prng)); - phys_sz = sz % (max_block_size + 1); + total = min(total, max); + sz = i915_prandom_u32_max_state(total, prng) + 1; + phys_sz = sz % max_phys_size + 1; sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); + phys_sz = min(phys_sz, sz); pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__, phys_sz, sz, val); @@ -397,13 +416,14 @@ static int igt_copy_blt_thread(void *arg) if (err) goto err_unpin; - i915_gem_object_lock(dst); - err = i915_gem_object_set_to_cpu_domain(dst, false); - i915_gem_object_unlock(dst); + err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT); if (err) goto err_unpin; - for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) { + for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) { + if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) + drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i])); + if (vaddr[i] != val) { pr_err("vaddr[%u]=%x, expected=%x\n", i, vaddr[i], val); @@ -416,6 +436,8 @@ static int igt_copy_blt_thread(void *arg) i915_gem_object_put(src); i915_gem_object_put(dst); + + total <<= 1; } while (!time_after(jiffies, end)); goto err_flush; diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 6718da20f35d..772d8cba7da9 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce, return 0; skip_request: - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); err_request: i915_request_add(rq); err_batch: diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index 384143aa7776..e7e3c620f542 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + spin_lock_init(&ctx->stale.lock); + INIT_LIST_HEAD(&ctx->stale.engines); + i915_gem_context_set_persistence(ctx); mutex_init(&ctx->engines_mutex); @@ -37,7 +40,7 @@ mock_context(struct drm_i915_private *i915, if (name) { struct i915_ppgtt *ppgtt; - strncpy(ctx->name, name, sizeof(ctx->name)); + strncpy(ctx->name, name, sizeof(ctx->name) - 1); ppgtt = mock_ppgtt(i915, name); if (!ppgtt) @@ -83,6 +86,8 @@ live_context(struct drm_i915_private *i915, struct file *file) if (IS_ERR(ctx)) return ctx; + i915_gem_context_set_no_error_capture(ctx); + err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id); if (err < 0) goto err_ctx; @@ -105,6 +110,7 @@ kernel_context(struct drm_i915_private *i915) i915_gem_context_clear_bannable(ctx); i915_gem_context_set_persistence(ctx); + i915_gem_context_set_no_error_capture(ctx); return ctx; } diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c new file mode 100644 index 000000000000..de595b66a746 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "gen7_renderclear.h" +#include "i915_drv.h" +#include "intel_gpu_commands.h" + +#define 
MAX_URB_ENTRIES 64 +#define STATE_SIZE (4 * 1024) +#define GT3_INLINE_DATA_DELAYS 0x1E00 +#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS)) + +struct cb_kernel { + const void *data; + u32 size; +}; + +#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) } + +#include "ivb_clear_kernel.c" +static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel); + +#include "hsw_clear_kernel.c" +static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel); + +struct batch_chunk { + struct i915_vma *vma; + u32 offset; + u32 *start; + u32 *end; + u32 max_items; +}; + +struct batch_vals { + u32 max_primitives; + u32 max_urb_entries; + u32 cmd_size; + u32 state_size; + u32 state_start; + u32 batch_size; + u32 surface_height; + u32 surface_width; + u32 scratch_size; + u32 max_size; +}; + +static void +batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) +{ + if (IS_HASWELL(i915)) { + bv->max_primitives = 280; + bv->max_urb_entries = MAX_URB_ENTRIES; + bv->surface_height = 16 * 16; + bv->surface_width = 32 * 2 * 16; + } else { + bv->max_primitives = 128; + bv->max_urb_entries = MAX_URB_ENTRIES / 2; + bv->surface_height = 16 * 8; + bv->surface_width = 32 * 16; + } + bv->cmd_size = bv->max_primitives * 4096; + bv->state_size = STATE_SIZE; + bv->state_start = bv->cmd_size; + bv->batch_size = bv->cmd_size + bv->state_size; + bv->scratch_size = bv->surface_height * bv->surface_width; + bv->max_size = bv->batch_size + bv->scratch_size; +} + +static void batch_init(struct batch_chunk *bc, + struct i915_vma *vma, + u32 *start, u32 offset, u32 max_bytes) +{ + bc->vma = vma; + bc->offset = offset; + bc->start = start + bc->offset / sizeof(*bc->start); + bc->end = bc->start; + bc->max_items = max_bytes / sizeof(*bc->start); +} + +static u32 batch_offset(const struct batch_chunk *bc, u32 *cs) +{ + return (cs - bc->start) * sizeof(*bc->start) + bc->offset; +} + +static u32 batch_addr(const struct batch_chunk *bc) +{ + return bc->vma->node.start; +} + +static void batch_add(struct batch_chunk *bc, const u32 d) +{ + GEM_BUG_ON((bc->end - bc->start) >= bc->max_items); + *bc->end++ = d; +} + +static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items) +{ + u32 *map; + + if (align) { + u32 *end = PTR_ALIGN(bc->end, align); + + memset32(bc->end, 0, end - bc->end); + bc->end = end; + } + + map = bc->end; + bc->end += items; + + return map; +} + +static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes) +{ + GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start))); + return batch_alloc_items(bc, align, bytes / sizeof(*bc->start)); +} + +static u32 +gen7_fill_surface_state(struct batch_chunk *state, + const u32 dst_offset, + const struct batch_vals *bv) +{ + u32 surface_h = bv->surface_height; + u32 surface_w = bv->surface_width; + u32 *cs = batch_alloc_items(state, 32, 8); + u32 offset = batch_offset(state, cs); + +#define SURFACE_2D 1 +#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0 +#define RENDER_CACHE_READ_WRITE 1 + + *cs++ = SURFACE_2D << 29 | + (SURFACEFORMAT_B8G8R8A8_UNORM << 18) | + (RENDER_CACHE_READ_WRITE << 8); + + *cs++ = batch_addr(state) + dst_offset; + + *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1); + *cs++ = surface_w; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; +#define SHADER_CHANNELS(r, g, b, a) \ + (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16)) + *cs++ = SHADER_CHANNELS(4, 5, 6, 7); + batch_advance(state, cs); + + return offset; +} + +static u32 +gen7_fill_binding_table(struct batch_chunk *state, + const 
struct batch_vals *bv) +{ + u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv); + u32 *cs = batch_alloc_items(state, 32, 8); + u32 offset = batch_offset(state, cs); + + *cs++ = surface_start - state->offset; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + batch_advance(state, cs); + + return offset; +} + +static u32 +gen7_fill_kernel_data(struct batch_chunk *state, + const u32 *data, + const u32 size) +{ + return batch_offset(state, + memcpy(batch_alloc_bytes(state, 64, size), + data, size)); +} + +static u32 +gen7_fill_interface_descriptor(struct batch_chunk *state, + const struct batch_vals *bv, + const struct cb_kernel *kernel, + unsigned int count) +{ + u32 kernel_offset = + gen7_fill_kernel_data(state, kernel->data, kernel->size); + u32 binding_table = gen7_fill_binding_table(state, bv); + u32 *cs = batch_alloc_items(state, 32, 8 * count); + u32 offset = batch_offset(state, cs); + + *cs++ = kernel_offset; + *cs++ = (1 << 7) | (1 << 13); + *cs++ = 0; + *cs++ = (binding_table - state->offset) | 1; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + + /* 1 - 63dummy idds */ + memset32(cs, 0x00, (count - 1) * 8); + batch_advance(state, cs + (count - 1) * 8); + + return offset; +} + +static void +gen7_emit_state_base_address(struct batch_chunk *batch, + u32 surface_state_base) +{ + u32 *cs = batch_alloc_items(batch, 0, 12); + + *cs++ = STATE_BASE_ADDRESS | (12 - 2); + /* general */ + *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; + /* surface */ + *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY; + /* dynamic */ + *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; + /* indirect */ + *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; + /* instruction */ + *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; + + /* general/dynamic/indirect/instruction access Bound */ + *cs++ = 0; + *cs++ = BASE_ADDRESS_MODIFY; + *cs++ = 0; + *cs++ = BASE_ADDRESS_MODIFY; + *cs++ = 0; + *cs++ = 0; + batch_advance(batch, cs); +} + +static void +gen7_emit_vfe_state(struct batch_chunk *batch, + const struct batch_vals *bv, + u32 urb_size, u32 curbe_size, + u32 mode) +{ + u32 urb_entries = bv->max_urb_entries; + u32 threads = bv->max_primitives - 1; + u32 *cs = batch_alloc_items(batch, 32, 8); + + *cs++ = MEDIA_VFE_STATE | (8 - 2); + + /* scratch buffer */ + *cs++ = 0; + + /* number of threads & urb entries for GPGPU vs Media Mode */ + *cs++ = threads << 16 | urb_entries << 8 | mode << 2; + + *cs++ = 0; + + /* urb entry size & curbe size in 256 bits unit */ + *cs++ = urb_size << 16 | curbe_size; + + /* scoreboard */ + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + batch_advance(batch, cs); +} + +static void +gen7_emit_interface_descriptor_load(struct batch_chunk *batch, + const u32 interface_descriptor, + unsigned int count) +{ + u32 *cs = batch_alloc_items(batch, 8, 4); + + *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2); + *cs++ = 0; + *cs++ = count * 8 * sizeof(*cs); + + /* + * interface descriptor address - it is relative to the dynamics base + * address + */ + *cs++ = interface_descriptor; + batch_advance(batch, cs); +} + +static void +gen7_emit_media_object(struct batch_chunk *batch, + unsigned int media_object_index) +{ + unsigned int x_offset = (media_object_index % 16) * 64; + unsigned int y_offset = (media_object_index / 16) * 16; + unsigned int inline_data_size; + unsigned int media_batch_size; + unsigned int i; + u32 *cs; + + inline_data_size = 112 * 8; + media_batch_size = inline_data_size + 6; + + cs = batch_alloc_items(batch, 
8, media_batch_size); + + *cs++ = MEDIA_OBJECT | (media_batch_size - 2); + + /* interface descriptor offset */ + *cs++ = 0; + + /* without indirect data */ + *cs++ = 0; + *cs++ = 0; + + /* scoreboard */ + *cs++ = 0; + *cs++ = 0; + + /* inline */ + *cs++ = (y_offset << 16) | (x_offset); + *cs++ = 0; + *cs++ = GT3_INLINE_DATA_DELAYS; + for (i = 3; i < inline_data_size; i++) + *cs++ = 0; + + batch_advance(batch, cs); +} + +static void gen7_emit_pipeline_flush(struct batch_chunk *batch) +{ + u32 *cs = batch_alloc_items(batch, 0, 5); + + *cs++ = GFX_OP_PIPE_CONTROL(5); + *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE | + PIPE_CONTROL_GLOBAL_GTT_IVB; + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + batch_advance(batch, cs); +} + +static void emit_batch(struct i915_vma * const vma, + u32 *start, + const struct batch_vals *bv) +{ + struct drm_i915_private *i915 = vma->vm->i915; + unsigned int desc_count = 64; + const u32 urb_size = 112; + struct batch_chunk cmds, state; + u32 interface_descriptor; + unsigned int i; + + batch_init(&cmds, vma, start, 0, bv->cmd_size); + batch_init(&state, vma, start, bv->state_start, bv->state_size); + + interface_descriptor = + gen7_fill_interface_descriptor(&state, bv, + IS_HASWELL(i915) ? + &cb_kernel_hsw : + &cb_kernel_ivb, + desc_count); + gen7_emit_pipeline_flush(&cmds); + batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); + batch_add(&cmds, MI_NOOP); + gen7_emit_state_base_address(&cmds, interface_descriptor); + gen7_emit_pipeline_flush(&cmds); + + gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); + + gen7_emit_interface_descriptor_load(&cmds, + interface_descriptor, + desc_count); + + for (i = 0; i < bv->max_primitives; i++) + gen7_emit_media_object(&cmds, i); + + batch_add(&cmds, MI_BATCH_BUFFER_END); +} + +int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine, + struct i915_vma * const vma) +{ + struct batch_vals bv; + u32 *batch; + + batch_get_defaults(engine->i915, &bv); + if (!vma) + return bv.max_size; + + GEM_BUG_ON(vma->obj->base.size < bv.max_size); + + batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); + if (IS_ERR(batch)) + return PTR_ERR(batch); + + emit_batch(vma, memset(batch, 0, bv.max_size), &bv); + + i915_gem_object_flush_map(vma->obj); + i915_gem_object_unpin_map(vma->obj); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h new file mode 100644 index 000000000000..bb100748e2c6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __GEN7_RENDERCLEAR_H__ +#define __GEN7_RENDERCLEAR_H__ + +struct intel_engine_cs; +struct i915_vma; + +int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine, + struct i915_vma * const vma); + +#endif /* __GEN7_RENDERCLEAR_H__ */ diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 4d1de2d97d5c..94e746af8926 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -8,6 +8,7 @@ #include "gen8_ppgtt.h" #include "i915_scatterlist.h" #include "i915_trace.h" +#include "i915_pvinfo.h" #include "i915_vgpu.h" #include "intel_gt.h" #include "intel_gtt.h" @@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr, return pde; } +static u64 gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= 
~_PAGE_RW; + + switch (level) { + case I915_CACHE_NONE: + pte |= PPAT_UNCACHED; + break; + case I915_CACHE_WT: + pte |= PPAT_DISPLAY_ELLC; + break; + default: + pte |= PPAT_CACHED; + break; + } + + return pte; +} + static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { struct drm_i915_private *i915 = ppgtt->vm.i915; @@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt) ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; ppgtt->vm.clear_range = gen8_ppgtt_clear; + ppgtt->vm.pte_encode = gen8_pte_encode; + if (intel_vgpu_active(gt->i915)) gen8_ppgtt_notify_vgt(ppgtt, true); diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c new file mode 100644 index 000000000000..b47f9d4a0848 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + * + * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC + */ + +static const u32 hsw_clear_kernel[] = { + 0x00000001, 0x26020128, 0x00000024, 0x00000000, + 0x00000040, 0x20280c21, 0x00000028, 0x00000001, + 0x01000010, 0x20000c20, 0x0000002c, 0x00000000, + 0x00010220, 0x34001c00, 0x00001400, 0x00000160, + 0x00600001, 0x20600061, 0x00000000, 0x00000000, + 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c, + 0x00000005, 0x20601ca5, 0x00000060, 0x00000001, + 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d, + 0x00000005, 0x20641ca5, 0x00000064, 0x00000003, + 0x00000041, 0x207424a5, 0x00000064, 0x00000034, + 0x00000040, 0x206014a5, 0x00000060, 0x00000074, + 0x00000008, 0x20681c85, 0x00000e00, 0x00000008, + 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f, + 0x00000041, 0x20701ca5, 0x00000060, 0x00000010, + 0x00000040, 0x206814a5, 0x00000068, 0x00000070, + 0x00600001, 0x20a00061, 0x00000000, 0x00000000, + 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007, + 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004, + 0x00600001, 0x20800021, 0x008d0000, 0x00000000, + 0x00000001, 0x20800021, 0x0000006c, 0x00000000, + 0x00000001, 0x20840021, 0x00000068, 0x00000000, + 0x00000001, 0x20880061, 0x00000000, 0x00000003, + 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff, + 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001, + 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001, + 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001, + 0x02000040, 0x20281c21, 0x00000028, 0xffffffff, + 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0, + 0x00000001, 0x26020128, 0x00000024, 0x00000000, + 0x00000001, 0x220010e4, 0x00000000, 0x00000000, + 0x00000001, 0x220831ec, 0x00000000, 0x007f007f, + 0x00600001, 0x20400021, 0x008d0000, 0x00000000, + 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000, + 0x00200001, 0x20400121, 0x00450020, 0x00000000, + 0x00000001, 0x20480061, 0x00000000, 0x000f000f, + 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef, + 0x00800001, 0x20600061, 0x00000000, 0x00000000, + 0x00800001, 0x20800061, 0x00000000, 0x00000000, + 0x00800001, 0x20a00061, 0x00000000, 0x00000000, + 0x00800001, 0x20c00061, 0x00000000, 0x00000000, + 0x00800001, 0x20e00061, 0x00000000, 0x00000000, + 0x00800001, 0x21000061, 0x00000000, 0x00000000, + 0x00800001, 0x21200061, 0x00000000, 0x00000000, + 0x00800001, 0x21400061, 0x00000000, 0x00000000, + 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000, + 0x00000040, 0x20402d21, 0x00000020, 0x00100010, + 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000, + 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff, + 0x00800001, 0xa0000109, 0x00000602, 0x00000000, + 0x00000040, 0x22001c84, 0x00000200, 0x00000020, + 
0x00010220, 0x34001c00, 0x00001400, 0xffffffc0, + 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010, +};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 57e8a051ddc2..aea992e46c42 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce) return -EINTR; if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + if (intel_context_is_banned(ce)) { + err = -EIO; + goto unlock; + } + err = ce->ops->alloc(ce); if (unlikely(err)) goto unlock;
@@ -92,6 +97,8 @@ int __intel_context_do_pin(struct intel_context *ce) { int err; + GEM_BUG_ON(intel_context_is_closed(ce)); + if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { err = intel_context_alloc_state(ce); if (err)
@@ -116,7 +123,8 @@ int __intel_context_do_pin(struct intel_context *ce) if (unlikely(err)) goto err_active; - CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n", + CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n", + i915_ggtt_offset(ce->ring->vma), ce->ring->head, ce->ring->tail); smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -219,7 +227,9 @@ static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); - CE_TRACE(ce, "retire\n"); + CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n", + intel_context_get_total_runtime_ns(ce), + intel_context_get_avg_runtime_ns(ce)); set_bit(CONTEXT_VALID_BIT, &ce->flags); if (ce->state)
@@ -280,6 +290,8 @@ intel_context_init(struct intel_context *ce, ce->sseu = engine->sseu; ce->ring = __intel_context_ring_size(SZ_4K); + ewma_runtime_init(&ce->runtime.avg); + ce->vm = i915_vm_get(engine->gt->vm); INIT_LIST_HEAD(&ce->signal_link);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 30bd248827d8..07be021882cc 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@ #include <linux/types.h> #include "i915_active.h" +#include "i915_drv.h" #include "intel_context_types.h" #include "intel_engine_types.h" #include "intel_ring_types.h"
@@ -35,6 +36,9 @@ int intel_context_alloc_state(struct intel_context *ce); void intel_context_free(struct intel_context *ce); +int intel_context_reconfigure_sseu(struct intel_context *ce, + const struct intel_sseu sseu); + /** * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context * @ce - the context
@@ -169,6 +173,11 @@ static inline bool intel_context_is_barrier(const struct intel_context *ce) return test_bit(CONTEXT_BARRIER_BIT, &ce->flags); } +static inline bool intel_context_is_closed(const struct intel_context *ce) +{ + return test_bit(CONTEXT_CLOSED_BIT, &ce->flags); +} + static inline bool intel_context_use_semaphores(const struct intel_context *ce) { return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
@@ -224,4 +233,20 @@ intel_context_clear_nopreempt(struct intel_context *ce) clear_bit(CONTEXT_NOPREEMPT, &ce->flags); } +static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce) +{ + const u32 period = + RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns; + + return READ_ONCE(ce->runtime.total) * period; +} + +static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce) +{ + const u32 period = + RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns; + + return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period); +} + #endif
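/*
 * Editor's note: an illustrative sketch, not part of this patch. The
 * runtime helpers just above convert CS timestamp ticks to nanoseconds
 * and average them via DECLARE_EWMA(runtime, 3, 8): an exponentially
 * weighted moving average kept in fixed point. A minimal userspace
 * version of that update rule, with hypothetical names (ewma_add,
 * ewma_read) and the same precision/weight:
 */
#include <stdint.h>

#define EWMA_PRECISION	3	/* fixed point: stored value is x << 3 */
#define EWMA_WEIGHT	8	/* decay: new = (old * 7 + sample) / 8 */

static uint64_t ewma_add(uint64_t internal, uint64_t sample)
{
	if (!internal)
		return sample << EWMA_PRECISION;	/* first sample seeds it */

	return internal - (internal / EWMA_WEIGHT) +
	       ((sample << EWMA_PRECISION) / EWMA_WEIGHT);
}

static uint64_t ewma_read(uint64_t internal)
{
	return internal >> EWMA_PRECISION;
}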
/* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c new file mode 100644 index 000000000000..65dcd090245d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context_param.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_active.h" +#include "intel_context.h" +#include "intel_context_param.h" +#include "intel_ring.h" + +int intel_context_set_ring_size(struct intel_context *ce, long sz) +{ + int err; + + if (intel_context_lock_pinned(ce)) + return -EINTR; + + err = i915_active_wait(&ce->active); + if (err < 0) + goto unlock; + + if (intel_context_is_pinned(ce)) { + err = -EBUSY; /* In active use, come back later! */ + goto unlock; + } + + if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + struct intel_ring *ring; + + /* Replace the existing ringbuffer */ + ring = intel_engine_create_ring(ce->engine, sz); + if (IS_ERR(ring)) { + err = PTR_ERR(ring); + goto unlock; + } + + intel_ring_put(ce->ring); + ce->ring = ring; + + /* Context image will be updated on next pin */ + } else { + ce->ring = __intel_context_ring_size(sz); + } + +unlock: + intel_context_unlock_pinned(ce); + return err; +} + +long intel_context_get_ring_size(struct intel_context *ce) +{ + long sz = (unsigned long)READ_ONCE(ce->ring); + + if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { + if (intel_context_lock_pinned(ce)) + return -EINTR; + + sz = ce->ring->size; + intel_context_unlock_pinned(ce); + } + + return sz; +} diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h new file mode 100644 index 000000000000..f053d8633fe2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context_param.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_CONTEXT_PARAM_H +#define INTEL_CONTEXT_PARAM_H + +struct intel_context; + +int intel_context_set_ring_size(struct intel_context *ce, long sz); +long intel_context_get_ring_size(struct intel_context *ce); + +#endif /* INTEL_CONTEXT_PARAM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c new file mode 100644 index 000000000000..57a30956c922 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_vma.h" +#include "intel_context.h" +#include "intel_engine_pm.h" +#include "intel_gpu_commands.h" +#include "intel_lrc.h" +#include "intel_lrc_reg.h" +#include "intel_ring.h" +#include "intel_sseu.h" + +static int gen8_emit_rpcs_config(struct i915_request *rq, + const struct intel_context *ce, + const struct intel_sseu sseu) +{ + u64 offset; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + offset = i915_ggtt_offset(ce->state) + + LRC_STATE_PN * PAGE_SIZE + + CTX_R_PWR_CLK_STATE * 4; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu) +{ + struct i915_request *rq; + int ret; + + lockdep_assert_held(&ce->pin_mutex); + + /* + * If the context is not idle, we have to submit an ordered request to + * modify its context image via the kernel context (writing to 
our own + * image, or into the registers directory, does not stick). Pristine + * and idle contexts will be configured on pinning. + */ + if (!intel_context_pin_if_active(ce)) + return 0; + + rq = intel_engine_create_kernel_request(ce->engine); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto out_unpin; + } + + /* Serialise with the remote context */ + ret = intel_context_prepare_remote_request(ce, rq); + if (ret == 0) + ret = gen8_emit_rpcs_config(rq, ce, sseu); + + i915_request_add(rq); +out_unpin: + intel_context_unpin(ce); + return ret; +} + +int +intel_context_reconfigure_sseu(struct intel_context *ce, + const struct intel_sseu sseu) +{ + int ret; + + GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); + + ret = intel_context_lock_pinned(ce); + if (ret) + return ret; + + /* Nothing to do if unmodified. */ + if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) + goto unlock; + + ret = gen8_modify_rpcs(ce, sseu); + if (!ret) + ce->sseu = sseu; + +unlock: + intel_context_unlock_pinned(ce); + return ret; +} diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index ca1420fb8b53..ca0d4f4f3615 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -7,6 +7,7 @@ #ifndef __INTEL_CONTEXT_TYPES__ #define __INTEL_CONTEXT_TYPES__ +#include <linux/average.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/mutex.h> @@ -19,6 +20,8 @@ #define CONTEXT_REDZONE POISON_INUSE +DECLARE_EWMA(runtime, 3, 8); + struct i915_gem_context; struct i915_vma; struct intel_context; @@ -42,8 +45,8 @@ struct intel_context { struct intel_engine_cs *engine; struct intel_engine_cs *inflight; -#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2) -#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2) +#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2) +#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2) struct i915_address_space *vm; struct i915_gem_context __rcu *gem_context; @@ -59,15 +62,31 @@ struct intel_context { #define CONTEXT_BARRIER_BIT 0 #define CONTEXT_ALLOC_BIT 1 #define CONTEXT_VALID_BIT 2 -#define CONTEXT_USE_SEMAPHORES 3 -#define CONTEXT_BANNED 4 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 5 -#define CONTEXT_NOPREEMPT 6 +#define CONTEXT_CLOSED_BIT 3 +#define CONTEXT_USE_SEMAPHORES 4 +#define CONTEXT_BANNED 5 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 6 +#define CONTEXT_NOPREEMPT 7 u32 *lrc_reg_state; - u64 lrc_desc; + union { + struct { + u32 lrca; + u32 ccid; + }; + u64 desc; + } lrc; u32 tag; /* cookie passed to HW to track this context on submission */ + /* Time on GPU as tracked by the hw. 
*/ + struct { + struct ewma_runtime avg; + u64 total; + u32 last; + I915_SELFTEST_DECLARE(u32 num_underflow); + I915_SELFTEST_DECLARE(u32 max_underflow); + } runtime; + unsigned int active_count; /* protected by timeline->mutex */ atomic_t pin_count; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 5df003061e44..a1aa0d3e8be1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists) static inline struct i915_request * execlists_active(const struct intel_engine_execlists *execlists) { - return *READ_ONCE(execlists->active); + struct i915_request * const *cur, * const *old, *active; + + cur = READ_ONCE(execlists->active); + smp_rmb(); /* pairs with overwrite protection in process_csb() */ + do { + old = cur; + + active = READ_ONCE(*cur); + cur = READ_ONCE(execlists->active); + + smp_rmb(); /* and complete the seqlock retry */ + } while (unlikely(cur != old)); + + return active; } static inline void @@ -192,6 +205,8 @@ void intel_engines_free(struct intel_gt *gt); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); +int intel_engine_resume(struct intel_engine_cs *engine); + int intel_ring_submission_setup(struct intel_engine_cs *engine); int intel_engine_stop_cs(struct intel_engine_cs *engine); @@ -303,26 +318,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine); u32 intel_engine_context_size(struct intel_gt *gt, u8 class); -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) - -static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) -{ - if (!execlists->preempt_hang.inject_hang) - return false; - - complete(&execlists->preempt_hang.completion); - return true; -} - -#else - -static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) -{ - return false; -} - -#endif - void intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass); #define ENGINE_PHYSICAL 0 @@ -338,13 +333,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) return intel_engine_has_preemption(engine); } -static inline bool -intel_engine_has_timeslices(const struct intel_engine_cs *engine) -{ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) - return false; - - return intel_engine_has_semaphores(engine); -} - #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 06ff7695fa29..883a9b7fe88d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -35,6 +35,7 @@ #include "intel_engine_user.h" #include "intel_gt.h" #include "intel_gt_requests.h" +#include "intel_gt_pm.h" #include "intel_lrc.h" #include "intel_reset.h" #include "intel_ring.h" @@ -199,10 +200,10 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class) * out in the wash. 
*/ cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1; - DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n", - INTEL_GEN(gt->i915), - cxt_size * 64, - cxt_size - 1); + drm_dbg(&gt->i915->drm, + "gen%d CXT_SIZE = %d bytes [0x%08x]\n", + INTEL_GEN(gt->i915), cxt_size * 64, + cxt_size - 1); return round_up(cxt_size * 64, PAGE_SIZE); case 3: case 2:
@@ -274,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) { const struct engine_info *info = &intel_engines[id]; + struct drm_i915_private *i915 = gt->i915; struct intel_engine_cs *engine; BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -300,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->id = id; engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); - engine->i915 = gt->i915; + engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; engine->hw_id = engine->guc_id = info->hw_id; - engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases); + engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases); engine->class = info->class; engine->instance = info->instance;
@@ -312,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->props.heartbeat_interval_ms = CONFIG_DRM_I915_HEARTBEAT_INTERVAL; + engine->props.max_busywait_duration_ns = + CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT; engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT; engine->props.stop_timeout_ms =
@@ -319,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->props.timeslice_duration_ms = CONFIG_DRM_I915_TIMESLICE_DURATION; + /* Override to uninterruptible for OpenCL workloads. */ + if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS) + engine->props.preempt_timeout_ms = 0; + engine->context_size = intel_engine_context_size(gt, engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; if (engine->context_size) - DRIVER_CAPS(gt->i915)->has_logical_contexts = true; + DRIVER_CAPS(i915)->has_logical_contexts = true; /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL;
@@ -339,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) gt->engine_class[info->class][info->instance] = engine; gt->engine[id] = engine; - gt->i915->engine[id] = engine; + i915->engine[id] = engine; return 0; }
@@ -392,8 +400,24 @@ void intel_engines_release(struct intel_gt *gt) struct intel_engine_cs *engine; enum intel_engine_id id; + /* + * Before we release the resources held by engine, we must be certain + * that the HW is no longer accessing them -- having the GPU scribble + * to or read from a page being used for something else causes no end + * of fun. + * + * The GPU should be reset by this point, but assume the worst just + * in case we aborted before completely initialising the engines.
+ */ + GEM_BUG_ON(intel_gt_pm_is_awake(gt)); + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + __intel_gt_reset(gt, ALL_ENGINES); + /* Decouple the backend; but keep the layout for late GPU resets */ for_each_engine(engine, gt, id) { + intel_wakeref_wait_for_idle(&engine->wakeref); + GEM_BUG_ON(intel_engine_pm_is_awake(engine)); + if (!engine->release) continue; @@ -432,9 +456,9 @@ int intel_engines_init_mmio(struct intel_gt *gt) unsigned int i; int err; - WARN_ON(engine_mask == 0); - WARN_ON(engine_mask & - GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); + drm_WARN_ON(&i915->drm, engine_mask == 0); + drm_WARN_ON(&i915->drm, engine_mask & + GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); if (i915_inject_probe_failure(i915)) return -ENODEV; @@ -455,7 +479,7 @@ int intel_engines_init_mmio(struct intel_gt *gt) * are added to the driver by a warning and disabling the forgotten * engines. */ - if (WARN_ON(mask != engine_mask)) + if (drm_WARN_ON(&i915->drm, mask != engine_mask)) device_info->engine_mask = mask; RUNTIME_INFO(i915)->num_engines = hweight32(mask); @@ -510,7 +534,6 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine, { unsigned int flags; - flags = PIN_GLOBAL; if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) /* * On g33, we cannot place HWS above 256MiB, so @@ -523,11 +546,11 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine, * above the mappable region (even though we never * actually map it). */ - flags |= PIN_MAPPABLE; + flags = PIN_MAPPABLE; else - flags |= PIN_HIGH; + flags = PIN_HIGH; - return i915_vma_pin(vma, 0, 0, flags); + return i915_ggtt_pin(vma, 0, flags); } static int init_status_page(struct intel_engine_cs *engine) @@ -546,7 +569,8 @@ static int init_status_page(struct intel_engine_cs *engine) */ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); + drm_err(&engine->i915->drm, + "Failed to allocate status page\n"); return PTR_ERR(obj); } @@ -614,15 +638,15 @@ static int engine_setup_common(struct intel_engine_cs *engine) struct measure_breadcrumb { struct i915_request rq; - struct intel_timeline timeline; struct intel_ring ring; u32 cs[1024]; }; -static int measure_breadcrumb_dw(struct intel_engine_cs *engine) +static int measure_breadcrumb_dw(struct intel_context *ce) { + struct intel_engine_cs *engine = ce->engine; struct measure_breadcrumb *frame; - int dw = -ENOMEM; + int dw; GEM_BUG_ON(!engine->gt->scratch); @@ -630,39 +654,27 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) if (!frame) return -ENOMEM; - if (intel_timeline_init(&frame->timeline, - engine->gt, - engine->status_page.vma)) - goto out_frame; - - mutex_lock(&frame->timeline.mutex); + frame->rq.i915 = engine->i915; + frame->rq.engine = engine; + frame->rq.context = ce; + rcu_assign_pointer(frame->rq.timeline, ce->timeline); frame->ring.vaddr = frame->cs; frame->ring.size = sizeof(frame->cs); frame->ring.effective_size = frame->ring.size; intel_ring_update_space(&frame->ring); - - frame->rq.i915 = engine->i915; - frame->rq.engine = engine; frame->rq.ring = &frame->ring; - rcu_assign_pointer(frame->rq.timeline, &frame->timeline); - - dw = intel_timeline_pin(&frame->timeline); - if (dw < 0) - goto out_timeline; + mutex_lock(&ce->timeline->mutex); spin_lock_irq(&engine->active.lock); + dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; + spin_unlock_irq(&engine->active.lock); + mutex_unlock(&ce->timeline->mutex); 
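/*
 * Editor's note: a hedged sketch, not taken from the patch. The
 * reworked measure_breadcrumb_dw() above sizes the fini breadcrumb by
 * pointing the emitter at a throwaway ring on the stack and counting
 * how far the write cursor advanced. The underlying measure-by-emitting
 * pattern, with hypothetical names and all locking elided:
 */
#include <stddef.h>
#include <stdint.h>

typedef uint32_t *(*emit_fn)(void *ctx, uint32_t *cs);

static size_t measure_emitter_dw(emit_fn emit, void *ctx)
{
	uint32_t scratch[1024];	/* stand-in for measure_breadcrumb.cs */
	uint32_t *end = emit(ctx, scratch);

	/* Number of dwords the emitter would consume in a real ring. */
	return end - scratch;
}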
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ - intel_timeline_unpin(&frame->timeline); - -out_timeline: - mutex_unlock(&frame->timeline.mutex); - intel_timeline_fini(&frame->timeline); -out_frame: kfree(frame); return dw; } @@ -737,12 +749,6 @@ static int engine_init_common(struct intel_engine_cs *engine) engine->set_default_submission(engine); - ret = measure_breadcrumb_dw(engine); - if (ret < 0) - return ret; - - engine->emit_fini_breadcrumb_dw = ret; - /* * We may need to do things with the shrinker which * require us to immediately switch back to the default @@ -755,9 +761,18 @@ static int engine_init_common(struct intel_engine_cs *engine) if (IS_ERR(ce)) return PTR_ERR(ce); + ret = measure_breadcrumb_dw(ce); + if (ret < 0) + goto err_context; + + engine->emit_fini_breadcrumb_dw = ret; engine->kernel_context = ce; return 0; + +err_context: + intel_context_put(ce); + return ret; } int intel_engines_init(struct intel_gt *gt) @@ -824,6 +839,20 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) intel_wa_list_free(&engine->whitelist); } +/** + * intel_engine_resume - re-initializes the HW state of the engine + * @engine: Engine to resume. + * + * Returns zero on success or an error code on failure. + */ +int intel_engine_resume(struct intel_engine_cs *engine) +{ + intel_engine_apply_workarounds(engine); + intel_engine_apply_whitelist(engine); + + return engine->resume(engine); +} + u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -982,6 +1011,12 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); + if (INTEL_GEN(i915) >= 12) { + instdone->slice_common_extra[0] = + intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA); + instdone->slice_common_extra[1] = + intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2); + } for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(engine, slice, subslice, @@ -1260,6 +1295,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); + if (HAS_EXECLISTS(dev_priv)) { + drm_printf(m, "\tEL_STAT_HI: 0x%08x\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI)); + drm_printf(m, "\tEL_STAT_LO: 0x%08x\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO)); + } drm_printf(m, "\tRING_START: 0x%08x\n", ENGINE_READ(engine, RING_START)); drm_printf(m, "\tRING_HEAD: 0x%08x\n", @@ -1276,8 +1317,14 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } if (INTEL_GEN(dev_priv) >= 6) { - drm_printf(m, "\tRING_IMR: %08x\n", + drm_printf(m, "\tRING_IMR: 0x%08x\n", ENGINE_READ(engine, RING_IMR)); + drm_printf(m, "\tRING_ESR: 0x%08x\n", + ENGINE_READ(engine, RING_ESR)); + drm_printf(m, "\tRING_EMR: 0x%08x\n", + ENGINE_READ(engine, RING_EMR)); + drm_printf(m, "\tRING_EIR: 0x%08x\n", + ENGINE_READ(engine, RING_EIR)); } addr = intel_engine_get_active_head(engine); @@ -1342,25 +1389,27 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, execlists_active_lock_bh(execlists); rcu_read_lock(); for (port = execlists->active; (rq = *port); port++) { - char hdr[80]; + char hdr[160]; int len; - len = snprintf(hdr, sizeof(hdr), - "\t\tActive[%d]: ", - (int)(port - execlists->active)); + len = scnprintf(hdr, sizeof(hdr), + "\t\tActive[%d]: ", + (int)(port - 
execlists->active)); if (!i915_request_signaled(rq)) { struct intel_timeline *tl = get_timeline(rq); - len += snprintf(hdr + len, sizeof(hdr) - len, - "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", - i915_ggtt_offset(rq->ring->vma), - tl ? tl->hwsp_offset : 0, - hwsp_seqno(rq)); + len += scnprintf(hdr + len, sizeof(hdr) - len, + "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ", + i915_ggtt_offset(rq->ring->vma), + tl ? tl->hwsp_offset : 0, + hwsp_seqno(rq), + DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context), + 1000 * 1000)); if (tl) intel_timeline_put(tl); } - snprintf(hdr + len, sizeof(hdr) - len, "rq: "); + scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); } for (port = execlists->pending; (rq = *port); port++) { @@ -1657,6 +1706,23 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * we only care about the snapshot of this moment. */ lockdep_assert_held(&engine->active.lock); + + rcu_read_lock(); + request = execlists_active(&engine->execlists); + if (request) { + struct intel_timeline *tl = request->context->timeline; + + list_for_each_entry_from_reverse(request, &tl->requests, link) { + if (i915_request_completed(request)) + break; + + active = request; + } + } + rcu_read_unlock(); + if (active) + return active; + list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 6c6fd185457c..dd825718e4e5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -180,7 +180,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine) struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; struct intel_context *ce = engine->kernel_context; struct i915_request *rq; - int err = 0; + int err; if (!intel_engine_has_preemption(engine)) return -ENODEV; @@ -188,8 +188,10 @@ int intel_engine_pulse(struct intel_engine_cs *engine) if (!intel_engine_pm_get_if_awake(engine)) return 0; - if (mutex_lock_interruptible(&ce->timeline->mutex)) + if (mutex_lock_interruptible(&ce->timeline->mutex)) { + err = -EINTR; goto out_rpm; + } intel_context_enter(ce); rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); @@ -204,6 +206,8 @@ int intel_engine_pulse(struct intel_engine_cs *engine) __i915_request_commit(rq); __i915_request_queue(rq, &attr); + GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); + err = 0; out_unlock: mutex_unlock(&ce->timeline->mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index ea90ab3e396e..b6cf284e3a2d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -112,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq, { struct intel_gt_timelines *timelines = &engine->gt->timelines; - ENGINE_TRACE(engine, "\n"); + ENGINE_TRACE(engine, "parking\n"); /* * We have to serialise all potential retirement paths with our @@ -249,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf) if (!switch_to_kernel_context(engine)) return -EBUSY; - ENGINE_TRACE(engine, "\n"); + ENGINE_TRACE(engine, "parked\n"); call_idle_barriers(engine); /* cleanup after wedging */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 92be41a6903c..0be674ae1cf6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h 
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -75,6 +75,7 @@ struct intel_instdone { u32 instdone; /* The following exist only in the RCS engine */ u32 slice_common; + u32 slice_common_extra[2]; u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; }; @@ -126,7 +127,6 @@ DECLARE_EWMA(_engine_latency, 6, 4) struct st_preempt_hang { struct completion completion; unsigned int count; - bool inject_hang; }; /** @@ -157,6 +157,30 @@ struct intel_engine_execlists { struct i915_priolist default_priolist; /** + * @ccid: identifier for contexts submitted to this engine + */ + u32 ccid; + + /** + * @yield: CCID at the time of the last semaphore-wait interrupt. + * + * Instead of leaving a semaphore busy-spinning on an engine, we would + * like to switch to another ready context, i.e. yielding the semaphore + * timeslice. + */ + u32 yield; + + /** + * @error_interrupt: CS Master EIR + * + * The CS generates an interrupt when it detects an error. We capture + * the first error interrupt, record the EIR and schedule the tasklet. + * In the tasklet, we process the pending CS events to ensure we have + * the guilty request, and then reset the engine. + */ + u32 error_interrupt; + + /** * @no_priolist: priority lists disabled */ bool no_priolist; @@ -285,8 +309,7 @@ struct intel_engine_cs { u32 context_size; u32 mmio_base; - unsigned int context_tag; -#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS) + unsigned long context_tag; struct rb_node uabi_node; @@ -473,10 +496,11 @@ struct intel_engine_cs { #define I915_ENGINE_SUPPORTS_STATS BIT(1) #define I915_ENGINE_HAS_PREEMPTION BIT(2) #define I915_ENGINE_HAS_SEMAPHORES BIT(3) -#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) -#define I915_ENGINE_IS_VIRTUAL BIT(5) -#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) -#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7) +#define I915_ENGINE_HAS_TIMESLICES BIT(4) +#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5) +#define I915_ENGINE_IS_VIRTUAL BIT(6) +#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7) +#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8) unsigned int flags; /* @@ -537,6 +561,7 @@ struct intel_engine_cs { struct { unsigned long heartbeat_interval_ms; + unsigned long max_busywait_duration_ns; unsigned long preempt_timeout_ms; unsigned long stop_timeout_ms; unsigned long timeslice_duration_ms; @@ -574,6 +599,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine) } static inline bool +intel_engine_has_timeslices(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return false; + + return engine->flags & I915_ENGINE_HAS_TIMESLICES; +} + +static inline bool intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine) { return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 9e7f12bef828..848decee9066 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -278,7 +278,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915) } } - if (WARN(errors, "Invalid UABI engine mapping found")) + if (drm_WARN(&i915->drm, errors, + "Invalid UABI engine mapping found")) i915->uabi_engines = RB_ROOT; } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 531d501be01f..4c5a209cb669 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c 
@@ -8,6 +8,8 @@ #include <asm/set_memory.h> #include <asm/smp.h> +#include <drm/i915_drm.h> + #include "intel_gt.h" #include "i915_drv.h" #include "i915_scatterlist.h" @@ -104,27 +106,17 @@ static bool needs_idle_maps(struct drm_i915_private *i915) return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active(); } -static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) +void i915_ggtt_suspend(struct i915_ggtt *ggtt) { - struct drm_i915_private *i915 = ggtt->vm.i915; - - /* - * Don't bother messing with faults pre GEN6 as we have little - * documentation supporting that it's a good idea. - */ - if (INTEL_GEN(i915) < 6) - return; + struct i915_vma *vma; - intel_gt_check_and_clear_faults(ggtt->vm.gt); + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) + i915_vma_wait_for_bind(vma); ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->invalidate(ggtt); -} -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) -{ - ggtt_suspend_mappings(&i915->ggtt); + intel_gt_check_and_clear_faults(ggtt->vm.gt); } void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) @@ -167,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) intel_gtt_chipset_flush(); } +static u64 gen8_ggtt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + u32 flags) +{ + return addr | _PAGE_PRESENT; +} + static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -182,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE; - gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); + gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0)); ggtt->invalidate(ggtt); } @@ -192,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { + const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0); struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - struct sgt_iter sgt_iter; - gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); + gen8_pte_t __iomem *gte; + gen8_pte_t __iomem *end; + struct sgt_iter iter; dma_addr_t addr; /* @@ -203,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * not to allow the user to override access to a read only page. 
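Besides switching the GGTT PTE encoding to a bare _PAGE_PRESENT, the rewritten gen8 and gen6 insert_entries paths below also pad the tail of the allocated node with scratch PTEs instead of leaving stale entries behind. Stripped of the i915 types, the pattern is roughly the following sketch (write_ptes is an invented name and uint64_t stands in for gen8_pte_t; illustration only):

        #include <stddef.h>
        #include <stdint.h>

        static void write_ptes(uint64_t *gte, uint64_t *end,
                               const uint64_t *pages, size_t n,
                               uint64_t flags, uint64_t scratch_pte)
        {
                size_t i;

                for (i = 0; i < n; i++)
                        *gte++ = pages[i] | flags; /* real backing pages */

                /* Fill the allocated but unused space beyond the buffer */
                while (gte < end)
                        *gte++ = scratch_pte;
        }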
*/ - gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; - gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_daddr(addr, sgt_iter, vma->pages) - gen8_set_pte(gtt_entries++, pte_encode | addr); + gte = (gen8_pte_t __iomem *)ggtt->gsm; + gte += vma->node.start / I915_GTT_PAGE_SIZE; + end = gte + vma->node.size / I915_GTT_PAGE_SIZE; + + for_each_sgt_daddr(addr, iter, vma->pages) + gen8_set_pte(gte++, pte_encode | addr); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + gen8_set_pte(gte++, vm->scratch[0].encode); /* * We want to flush the TLBs only after we're certain all the PTE @@ -242,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); - gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; - unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; + gen6_pte_t __iomem *gte; + gen6_pte_t __iomem *end; struct sgt_iter iter; dma_addr_t addr; + gte = (gen6_pte_t __iomem *)ggtt->gsm; + gte += vma->node.start / I915_GTT_PAGE_SIZE; + end = gte + vma->node.size / I915_GTT_PAGE_SIZE; + for_each_sgt_daddr(addr, iter, vma->pages) - iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); + iowrite32(vm->pte_encode(addr, level, flags), gte++); + GEM_BUG_ON(gte > end); + + /* Fill the allocated but "unused" space beyond the end of the buffer */ + while (gte < end) + iowrite32(vm->scratch[0].encode, gte++); /* * We want to flush the TLBs only after we're certain all the PTE @@ -350,31 +366,6 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } -struct clear_range { - struct i915_address_space *vm; - u64 start; - u64 length; -}; - -static int bxt_vtd_ggtt_clear_range__cb(void *_arg) -{ - struct clear_range *arg = _arg; - - gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); - bxt_vtd_ggtt_wa(arg->vm); - - return 0; -} - -static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, - u64 start, - u64 length) -{ - struct clear_range arg = { vm, start, length }; - - stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); -} - static void gen6_ggtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { @@ -462,7 +453,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) u64 size; int ret; - if (!USES_GUC(ggtt->vm.i915)) + if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) return 0; GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP); @@ -472,7 +463,8 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE, PIN_NOEVICT); if (ret) - DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n"); + drm_dbg(&ggtt->vm.i915->drm, + "Failed to reserve top of GGTT for GuC\n"); return ret; } @@ -544,8 +536,9 @@ static int init_ggtt(struct i915_ggtt *ggtt) /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); + drm_dbg_kms(&ggtt->vm.i915->drm, + "clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); ggtt->vm.clear_range(&ggtt->vm, hole_start, hole_end - hole_start); } @@ -879,8 +872,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) { ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; - if (ggtt->vm.clear_range != 
nop_clear_range) - ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; + ggtt->vm.bind_async_flags = + I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; } ggtt->invalidate = gen8_ggtt_invalidate; @@ -890,7 +883,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.vma_ops.set_pages = ggtt_set_pages; ggtt->vm.vma_ops.clear_pages = clear_pages; - ggtt->vm.pte_encode = gen8_pte_encode; + ggtt->vm.pte_encode = gen8_ggtt_pte_encode; setup_private_pat(ggtt->vm.gt->uncore); @@ -1180,7 +1173,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) ggtt->invalidate(ggtt); } -static void ggtt_restore_mappings(struct i915_ggtt *ggtt) +void i915_ggtt_resume(struct i915_ggtt *ggtt) { struct i915_vma *vma; bool flush = false; @@ -1188,8 +1181,6 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) intel_gt_check_and_clear_faults(ggtt->vm.gt); - mutex_lock(&ggtt->vm.mutex); - /* First fill our portion of the GTT with scratch pages */ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); @@ -1216,19 +1207,10 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) atomic_set(&ggtt->vm.open, open); ggtt->invalidate(ggtt); - mutex_unlock(&ggtt->vm.mutex); - if (flush) wbinvd_on_all_cpus(); -} - -void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) -{ - struct i915_ggtt *ggtt = &i915->ggtt; - - ggtt_restore_mappings(ggtt); - if (INTEL_GEN(i915) >= 8) + if (INTEL_GEN(ggtt->vm.i915) >= 8) setup_private_pat(ggtt->vm.gt->uncore); } @@ -1267,6 +1249,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { unsigned int size = intel_rotation_info_size(rot_info); + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; int ret = -ENOMEM; @@ -1296,8 +1279,9 @@ err_sg_alloc: kfree(st); err_st_alloc: - DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); + drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", + obj->base.size, rot_info->plane[0].width, + rot_info->plane[0].height, size); return ERR_PTR(ret); } @@ -1349,6 +1333,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info, struct drm_i915_gem_object *obj) { unsigned int size = intel_remapped_info_size(rem_info); + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *st; struct scatterlist *sg; int ret = -ENOMEM; @@ -1380,8 +1365,9 @@ err_sg_alloc: kfree(st); err_st_alloc: - DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n", - obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size); + drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! 
(%ux%u tiles, %u pages)\n", + obj->base.size, rem_info->plane[0].width, + rem_info->plane[0].height, size); return ERR_PTR(ret); } @@ -1479,8 +1465,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) if (IS_ERR(vma->pages)) { ret = PTR_ERR(vma->pages); vma->pages = NULL; - DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", - vma->ggtt_view.type, ret); + drm_err(&vma->vm->i915->drm, + "Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); } return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 51b8718513bc..f04214a54f75 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -292,10 +292,21 @@ #define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) #define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0) -#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16)) -#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16)) -#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16)) +#define STATE_BASE_ADDRESS \ + ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16)) +#define BASE_ADDRESS_MODIFY REG_BIT(0) +#define PIPELINE_SELECT \ + ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16)) +#define PIPELINE_SELECT_MEDIA REG_BIT(0) +#define GFX_OP_3DSTATE_VF_STATISTICS \ + ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16)) +#define MEDIA_VFE_STATE \ + ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16)) #define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18) +#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \ + ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16)) +#define MEDIA_OBJECT \ + ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16)) #define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16)) #define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16)) #define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index da2b6e2ae692..d09f7596cb98 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -198,16 +198,16 @@ static void gen6_check_faults(struct intel_gt *gt) for_each_engine(engine, gt, id) { fault = GEN6_RING_FAULT_REG_READ(engine); if (fault & RING_FAULT_VALID) { - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08lx\n" - "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", - fault & PAGE_MASK, - fault & RING_FAULT_GTTSEL_MASK ? - "GGTT" : "PPGTT", - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); + drm_dbg(&engine->i915->drm, "Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? + "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); } } } @@ -239,18 +239,17 @@ static void gen8_check_faults(struct intel_gt *gt) fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | ((u64)fault_data0 << 12); - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), - lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? 
"GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); + drm_dbg(&uncore->i915->drm, "Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); } } @@ -345,7 +344,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) goto err_unref; } - ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + ret = i915_ggtt_pin(vma, 0, PIN_HIGH); if (ret) goto err_unref; @@ -455,6 +454,11 @@ err_rq: if (!rq) continue; + if (rq->fence.error) { + err = -EIO; + goto out; + } + GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags)); state = rq->context->state; if (!state) @@ -538,6 +542,10 @@ static int __engines_verify_workarounds(struct intel_gt *gt) err = -EIO; } + /* Flush and restore the kernel context for safety */ + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) + err = -EIO; + return err; } @@ -584,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt) if (err) goto err_engines; - intel_uc_init(>->uc); + err = intel_uc_init(>->uc); + if (err) + goto err_engines; err = intel_gt_resume(gt); if (err) @@ -634,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt) void intel_gt_driver_unregister(struct intel_gt *gt) { intel_rps_driver_unregister(>->rps); + + /* + * Upon unregistering the device to prevent any new users, cancel + * all in-flight requests so that we can quickly unbind the active + * resources. + */ + intel_gt_set_wedged(gt); } void intel_gt_driver_release(struct intel_gt *gt) @@ -650,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt) void intel_gt_driver_late_release(struct intel_gt *gt) { + /* We need to wait for inflight RCU frees to release their grip */ + rcu_barrier(); + intel_uc_driver_late_release(>->uc); intel_gt_fini_requests(gt); intel_gt_fini_reset(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 1dac441cb8f4..4fac043750aa 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -14,7 +14,7 @@ struct drm_i915_private; #define GT_TRACE(gt, fmt, ...) 
do { \ const struct intel_gt *gt__ __maybe_unused = (gt); \ - GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \ + GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \ ##__VA_ARGS__); \ } while (0) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index f796bdf1ed30..0cc7dd54f4f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -24,6 +24,30 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir) { bool tasklet = false; + if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) { + u32 eir; + + eir = ENGINE_READ(engine, RING_EIR); + ENGINE_TRACE(engine, "CS error: %x\n", eir); + + /* Disable the error interrupt until after the reset */ + if (likely(eir)) { + ENGINE_WRITE(engine, RING_EMR, ~0u); + ENGINE_WRITE(engine, RING_EIR, eir); + WRITE_ONCE(engine->execlists.error_interrupt, eir); + tasklet = true; + } + } + + if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) { + WRITE_ONCE(engine->execlists.yield, + ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); + ENGINE_TRACE(engine, "semaphore yield: %08x\n", + engine->execlists.yield); + if (del_timer(&engine->execlists.timer)) + tasklet = true; + } + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) tasklet = true; @@ -210,7 +234,11 @@ void gen11_gt_irq_reset(struct intel_gt *gt) void gen11_gt_irq_postinstall(struct intel_gt *gt) { - const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; + const u32 irqs = + GT_CS_MASTER_ERROR_INTERRUPT | + GT_RENDER_USER_INTERRUPT | + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; struct intel_uncore *uncore = gt->uncore; const u32 dmask = irqs << 16 | irqs; const u32 smask = irqs << 16; @@ -279,66 +307,56 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | - GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) + GT_CS_MASTER_ERROR_INTERRUPT)) DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); if (gt_iir & GT_PARITY_ERROR(gt->i915)) gen7_parity_error_irq_handler(gt, gt_iir); } -void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]) +void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) { void __iomem * const regs = gt->uncore->regs; + u32 iir; if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { - gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); - if (likely(gt_iir[0])) - raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); - } - - if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { - gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); - if (likely(gt_iir[1])) - raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); - } - - if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { - gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); - if (likely(gt_iir[2])) - raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); - } - - if (master_ctl & GEN8_GT_VECS_IRQ) { - gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); - if (likely(gt_iir[3])) - raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); - } -} - -void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]) -{ - if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { - cs_irq_handler(gt->engine_class[RENDER_CLASS][0], - gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); - cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0], - gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); + iir = raw_reg_read(regs, GEN8_GT_IIR(0)); + if (likely(iir)) { + cs_irq_handler(gt->engine_class[RENDER_CLASS][0], + iir >> GEN8_RCS_IRQ_SHIFT); + cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0], + iir >> GEN8_BCS_IRQ_SHIFT); 
+ raw_reg_write(regs, GEN8_GT_IIR(0), iir); + } } if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { - cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0], - gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); - cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1], - gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); + iir = raw_reg_read(regs, GEN8_GT_IIR(1)); + if (likely(iir)) { + cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0], + iir >> GEN8_VCS0_IRQ_SHIFT); + cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1], + iir >> GEN8_VCS1_IRQ_SHIFT); + raw_reg_write(regs, GEN8_GT_IIR(1), iir); + } } if (master_ctl & GEN8_GT_VECS_IRQ) { - cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], - gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); + iir = raw_reg_read(regs, GEN8_GT_IIR(3)); + if (likely(iir)) { + cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], + iir >> GEN8_VECS_IRQ_SHIFT); + raw_reg_write(regs, GEN8_GT_IIR(3), iir); + } } if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { - gen6_rps_irq_handler(>->rps, gt_iir[2]); - guc_irq_handler(>->uc.guc, gt_iir[2] >> 16); + iir = raw_reg_read(regs, GEN8_GT_IIR(2)); + if (likely(iir)) { + gen6_rps_irq_handler(>->rps, iir); + guc_irq_handler(>->uc.guc, iir >> 16); + raw_reg_write(regs, GEN8_GT_IIR(2), iir); + } } } @@ -354,25 +372,19 @@ void gen8_gt_irq_reset(struct intel_gt *gt) void gen8_gt_irq_postinstall(struct intel_gt *gt) { - struct intel_uncore *uncore = gt->uncore; - /* These are interrupts we'll toggle with the ring mask register */ - u32 gt_interrupts[] = { - (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), - - (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), - + const u32 irqs = + GT_CS_MASTER_ERROR_INTERRUPT | + GT_RENDER_USER_INTERRUPT | + GT_CONTEXT_SWITCH_INTERRUPT | + GT_WAIT_SEMAPHORE_INTERRUPT; + const u32 gt_interrupts[] = { + irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT, + irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT, 0, - - (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) + irqs << GEN8_VECS_IRQ_SHIFT, }; + struct intel_uncore *uncore = gt->uncore; gt->pm_ier = 0x0; gt->pm_imr = ~gt->pm_ier; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h index 8f37593712c9..886c5cf408a2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h @@ -36,9 +36,8 @@ void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask); void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir); -void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]); +void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl); void gen8_gt_irq_reset(struct intel_gt *gt); -void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]); void gen8_gt_irq_postinstall(struct intel_gt *gt); #endif /* INTEL_GT_IRQ_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index d1c2f034296a..8b653c0f5e5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -216,7 +216,7 @@ int intel_gt_resume(struct intel_gt *gt) intel_engine_pm_get(engine); engine->serial++; /* kernel context lost */ - err = 
engine->resume(engine); + err = intel_engine_resume(engine); intel_engine_pm_put(engine); if (err) { diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 16acdc5d6734..2a72cce63fd9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm) { struct i915_vma *vma, *vn; - mutex_lock(&vm->mutex); + if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex)) + return; + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; @@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm) i915_gem_object_put(obj); } GEM_BUG_ON(!list_empty(&vm->bound_list)); + mutex_unlock(&vm->mutex); } @@ -299,6 +302,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); } +static void poison_scratch_page(struct page *page, unsigned long size) +{ + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; + + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); + + do { + void *vaddr; + + vaddr = kmap(page); + memset(vaddr, POISON_FREE, PAGE_SIZE); + kunmap(page); + + page = pfn_to_page(page_to_pfn(page) + 1); + size -= PAGE_SIZE; + } while (size); +} + int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) { unsigned long size; @@ -331,6 +353,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!page)) goto skip; + /* + * Use a non-zero scratch page for debugging. + * + * We want a value that should be reasonably obvious + * to spot in the error state, while also causing a GPU hang + * if executed. We prefer using a clear page in production, so + * should it ever be accidentally used, the effect should be + * fairly benign. + */ + poison_scratch_page(page, size); + addr = dma_map_page_attrs(vm->dma, page, 0, size, PCI_DMA_BIDIRECTIONAL, @@ -448,36 +481,12 @@ void gtt_write_workarounds(struct intel_gt *gt) intel_uncore_write(uncore, HSW_GTT_CACHE_EN, can_use_gtt_cache ? 
GTT_CACHE_EN_ALL : 0); - WARN_ON_ONCE(can_use_gtt_cache && - intel_uncore_read(uncore, - HSW_GTT_CACHE_EN) == 0); + drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache && + intel_uncore_read(uncore, + HSW_GTT_CACHE_EN) == 0); } } -u64 gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags) -{ - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; - - if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; - - switch (level) { - case I915_CACHE_NONE: - pte |= PPAT_UNCACHED; - break; - case I915_CACHE_WT: - pte |= PPAT_DISPLAY_ELLC; - break; - default: - pte |= PPAT_CACHED; - break; - } - - return pte; -} - static void tgl_setup_private_ppat(struct intel_uncore *uncore) { /* TGL doesn't support LLC or AGE settings */ diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 7da7681c20b1..b3116fe8d180 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -429,8 +429,7 @@ static inline void i915_vm_close(struct i915_address_space *vm) { GEM_BUG_ON(!atomic_read(&vm->open)); - if (atomic_dec_and_test(&vm->open)) - __i915_vm_close(vm); + __i915_vm_close(vm); i915_vm_put(vm); } @@ -512,12 +511,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt); -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915); -void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915); - -u64 gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - u32 flags); +void i915_ggtt_suspend(struct i915_ggtt *gtt); +void i915_ggtt_resume(struct i915_ggtt *ggtt); int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p); diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index ceb785b75c25..e3f637b3650e 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -50,6 +50,9 @@ static bool get_ia_constants(struct intel_llc *llc, struct drm_i915_private *i915 = llc_to_gt(llc)->i915; struct intel_rps *rps = &llc_to_gt(llc)->rps; + if (!HAS_LLC(i915) || IS_DGFX(i915)) + return false; + if (rps->max_freq <= rps->min_freq) return false; @@ -147,8 +150,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc) void intel_llc_enable(struct intel_llc *llc) { - if (HAS_LLC(llc_to_gt(llc)->i915)) - gen6_update_ring_freq(llc); + gen6_update_ring_freq(llc); } void intel_llc_disable(struct intel_llc *llc) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 31455eceeb0c..2dfaddb8811e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -176,8 +176,6 @@ /* Typical size of the average request (2 pipecontrols and a MI_BB) */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ -#define WA_TAIL_DWORDS 2 -#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) struct virtual_engine { struct intel_engine_cs base; @@ -247,7 +245,7 @@ static void mark_eio(struct i915_request *rq) GEM_BUG_ON(i915_request_signaled(rq)); - dma_fence_set_error(&rq->fence, -EIO); + i915_request_set_error_once(rq, -EIO); i915_request_mark_complete(rq); } @@ -295,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb) static inline int rq_prio(const struct i915_request *rq) { - return rq->sched.attr.priority; + return READ_ONCE(rq->sched.attr.priority); } static int effective_prio(const struct i915_request *rq) @@ -458,10 +456,10 @@ 
assert_priority_queue(const struct i915_request *prev, * engine info, SW context ID and SW counter need to form a unique number * (Context ID) per lrc. */ -static u64 +static u32 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) { - u64 desc; + u32 desc; desc = INTEL_LEGACY_32B_CONTEXT; if (i915_vm_is_4lvl(ce->vm)) @@ -472,21 +470,7 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) if (IS_GEN(engine->i915, 8)) desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */ - /* - * The following 32bits are copied into the OA reports (dword 2). - * Consider updating oa_get_render_ctx_id in i915_perf.c when changing - * anything below. - */ - if (INTEL_GEN(engine->i915) >= 11) { - desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; - /* bits 48-53 */ - - desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; - /* bits 61-63 */ - } - - return desc; + return i915_ggtt_offset(ce->state) | desc; } static inline unsigned int dword_in_page(void *addr) @@ -1006,7 +990,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) i915_request_cancel_breadcrumb(rq); spin_unlock(&rq->lock); } - rq->engine = owner; + WRITE_ONCE(rq->engine, owner); owner->submit_request(rq); active = NULL; } @@ -1194,7 +1178,49 @@ static void reset_active(struct i915_request *rq, __execlists_update_reg_state(ce, engine, head); /* We've switched away, so this should be a no-op, but intent matters */ - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; +} + +static u32 intel_context_get_runtime(const struct intel_context *ce) +{ + /* + * We can use either ppHWSP[16] which is recorded before the context + * switch (and so excludes the cost of context switches) or use the + * value from the context image itself, which is saved/restored earlier + * and so includes the cost of the save. 
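+ * + * Note that the delta computation in intel_context_update_runtime() below relies on unsigned wraparound: last=0xfffffff0 followed by a sample of 0x00000010 yields a u32 delta of 0x20 (32 ticks), and the result is then tested as an s32 so that bogus negative deltas are reported rather than accumulated.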
+ */ + return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); +} + +static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) +{ +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + ce->runtime.num_underflow += dt < 0; + ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt); +#endif +} + +static void intel_context_update_runtime(struct intel_context *ce) +{ + u32 old; + s32 dt; + + if (intel_context_is_barrier(ce)) + return; + + old = ce->runtime.last; + ce->runtime.last = intel_context_get_runtime(ce); + dt = ce->runtime.last - old; + + if (unlikely(dt <= 0)) { + CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n", + old, ce->runtime.last, dt); + st_update_runtime_underflow(ce, dt); + return; + } + + ewma_runtime_add(&ce->runtime.avg, dt); + ce->runtime.total += dt; } static inline struct intel_engine_cs * @@ -1213,16 +1239,21 @@ __execlists_schedule_in(struct i915_request *rq) if (ce->tag) { /* Use a fixed tag for OA and friends */ - ce->lrc_desc |= (u64)ce->tag << 32; + GEM_BUG_ON(ce->tag <= BITS_PER_LONG); + ce->lrc.ccid = ce->tag; } else { /* We don't need a strict matching tag, just different values */ - ce->lrc_desc &= ~GENMASK_ULL(47, 37); - ce->lrc_desc |= - (u64)(++engine->context_tag % NUM_CONTEXT_TAG) << - GEN11_SW_CTX_ID_SHIFT; - BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); + unsigned int tag = ffs(engine->context_tag); + + GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG); + clear_bit(tag - 1, &engine->context_tag); + ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32); + + BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); } + ce->lrc.ccid |= engine->execlists.ccid; + __intel_gt_pm_get(engine->gt); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(engine); @@ -1262,7 +1293,8 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce) static inline void __execlists_schedule_out(struct i915_request *rq, - struct intel_engine_cs * const engine) + struct intel_engine_cs * const engine, + unsigned int ccid) { struct intel_context * const ce = rq->context; @@ -1276,10 +1308,19 @@ __execlists_schedule_out(struct i915_request *rq, * If we have just completed this context, the engine may now be * idle and we want to re-enter powersaving. */ - if (list_is_last(&rq->link, &ce->timeline->requests) && + if (list_is_last_rcu(&rq->link, &ce->timeline->requests) && i915_request_completed(rq)) intel_engine_add_retire(engine, ce->timeline); + ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; + ccid &= GEN12_MAX_CONTEXT_HW_ID; + if (ccid < BITS_PER_LONG) { + GEM_BUG_ON(ccid == 0); + GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); + set_bit(ccid - 1, &engine->context_tag); + } + + intel_context_update_runtime(ce); intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); intel_gt_pm_put_async(engine->gt); @@ -1304,15 +1345,17 @@ execlists_schedule_out(struct i915_request *rq) { struct intel_context * const ce = rq->context; struct intel_engine_cs *cur, *old; + u32 ccid; trace_i915_request_out(rq); + ccid = rq->context->lrc.ccid; old = READ_ONCE(ce->inflight); do cur = ptr_unmask_bits(old, 2) ? 
ptr_dec(old) : NULL; while (!try_cmpxchg(&ce->inflight, &old, cur)); if (!cur) - __execlists_schedule_out(rq, old); + __execlists_schedule_out(rq, old, ccid); i915_request_put(rq); } @@ -1320,7 +1363,7 @@ execlists_schedule_out(struct i915_request *rq) static u64 execlists_update_context(struct i915_request *rq) { struct intel_context *ce = rq->context; - u64 desc = ce->lrc_desc; + u64 desc = ce->lrc.desc; u32 tail, prev; /* @@ -1359,7 +1402,7 @@ static u64 execlists_update_context(struct i915_request *rq) */ wmb(); - ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; return desc; } @@ -1395,15 +1438,26 @@ trace_ports(const struct intel_engine_execlists *execlists, ports[1] ? ports[1]->fence.seqno : 0); } +static inline bool +reset_in_progress(const struct intel_engine_execlists *execlists) +{ + return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); +} + static __maybe_unused bool assert_pending_valid(const struct intel_engine_execlists *execlists, const char *msg) { struct i915_request * const *port, *rq; struct intel_context *ce = NULL; + bool sentinel = false; trace_ports(execlists, msg, execlists->pending); + /* We may be messing around with the lists during reset, lalala */ + if (reset_in_progress(execlists)) + return true; + if (!execlists->pending[0]) { GEM_TRACE_ERR("Nothing pending for promotion!\n"); return false; @@ -1430,6 +1484,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, } ce = rq->context; + /* + * Sentinels are supposed to be lonely so they flush the + * current execution off the HW. Check that they are the + * only request in the pending submission. + */ + if (sentinel) { + GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n", + ce->timeline->fence_context, + port - execlists->pending); + return false; + } + + sentinel = i915_request_has_sentinel(rq); + if (sentinel && port != execlists->pending) { + GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n", + ce->timeline->fence_context, + port - execlists->pending); + return false; + } + + /* Hold tightly onto the lock to prevent concurrent retires!
*/ if (!spin_trylock_irqsave(&rq->lock, flags)) continue; @@ -1525,6 +1599,11 @@ static bool can_merge_ctx(const struct intel_context *prev, return true; } +static unsigned long i915_request_flags(const struct i915_request *rq) +{ + return READ_ONCE(rq->fence.flags); +} + static bool can_merge_rq(const struct i915_request *prev, const struct i915_request *next) { @@ -1542,7 +1621,7 @@ static bool can_merge_rq(const struct i915_request *prev, if (i915_request_completed(next)) return true; - if (unlikely((prev->fence.flags ^ next->fence.flags) & + if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) & (BIT(I915_FENCE_FLAG_NOPREEMPT) | BIT(I915_FENCE_FLAG_SENTINEL)))) return false; @@ -1550,6 +1629,7 @@ static bool can_merge_rq(const struct i915_request *prev, if (!can_merge_ctx(prev->context, next->context)) return false; + GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno)); return true; } @@ -1585,7 +1665,7 @@ static bool virtual_matches(const struct virtual_engine *ve, } static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, - struct intel_engine_cs *engine) + struct i915_request *rq) { struct intel_engine_cs *old = ve->siblings[0]; @@ -1593,9 +1673,19 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, spin_lock(&old->breadcrumbs.irq_lock); if (!list_empty(&ve->context.signal_link)) { - list_move_tail(&ve->context.signal_link, - &engine->breadcrumbs.signalers); - intel_engine_signal_breadcrumbs(engine); + list_del_init(&ve->context.signal_link); + + /* + * We cannot acquire the new engine->breadcrumbs.irq_lock + * (as we are holding a breadcrumbs.irq_lock already), + * so attach this request to the signaler on submission. + * The queued irq_work will occur when we finally drop + * the engine->active.lock after dequeue. + */ + set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags); + + /* Also transfer the pending irq_work for the old breadcrumb. */ + intel_engine_signal_breadcrumbs(rq->engine); } spin_unlock(&old->breadcrumbs.irq_lock); } @@ -1605,6 +1695,11 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, &(rq__)->sched.waiters_list, \ wait_link) +#define for_each_signaler(p__, rq__) \ + list_for_each_entry_rcu(p__, \ + &(rq__)->sched.signalers_list, \ + signal_link) + static void defer_request(struct i915_request *rq, struct list_head * const pl) { LIST_HEAD(list); @@ -1626,6 +1721,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl) struct i915_request *w = container_of(p->waiter, typeof(*w), sched); + if (p->flags & I915_DEPENDENCY_WEAK) + continue; + /* Leave semaphores spinning on the other engines */ if (w->engine != rq->engine) continue; @@ -1661,7 +1759,8 @@ static void defer_active(struct intel_engine_cs *engine) } static bool -need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) +need_timeslice(const struct intel_engine_cs *engine, + const struct i915_request *rq) { int hint; @@ -1675,6 +1774,32 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) return hint >= effective_prio(rq); } +static bool +timeslice_yield(const struct intel_engine_execlists *el, + const struct i915_request *rq) +{ + /* + * Once bitten, forever smitten! + * + * If the active context ever busy-waited on a semaphore, + * it will be treated as a hog until the end of its timeslice (i.e. + * until it is scheduled out and replaced by a new submission, + * possibly even its own lite-restore). 
The HW only sends an interrupt + * on the first miss, and we do not know if that semaphore has been + * signaled, or even if it is now stuck on another semaphore. Play + * safe, yield if it might be stuck -- it will be given a fresh + * timeslice in the near future. + */ + return rq->context->lrc.ccid == READ_ONCE(el->yield); +} + +static bool +timeslice_expired(const struct intel_engine_execlists *el, + const struct i915_request *rq) +{ + return timer_expired(&el->timer) || timeslice_yield(el, rq); +} + static int switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) { @@ -1690,15 +1815,15 @@ timeslice(const struct intel_engine_cs *engine) return READ_ONCE(engine->props.timeslice_duration_ms); } -static unsigned long -active_timeslice(const struct intel_engine_cs *engine) +static unsigned long active_timeslice(const struct intel_engine_cs *engine) { - const struct i915_request *rq = *engine->execlists.active; + const struct intel_engine_execlists *execlists = &engine->execlists; + const struct i915_request *rq = *execlists->active; if (!rq || i915_request_completed(rq)) return 0; - if (engine->execlists.switch_priority_hint < effective_prio(rq)) + if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq)) return 0; return timeslice(engine); @@ -1715,8 +1840,11 @@ static void set_timeslice(struct intel_engine_cs *engine) static void start_timeslice(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; + int prio = queue_prio(execlists); - execlists->switch_priority_hint = execlists->queue_priority_hint; + WRITE_ONCE(execlists->switch_priority_hint, prio); + if (prio == INT_MIN) + return; if (timer_pending(&execlists->timer)) return; @@ -1849,13 +1977,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last = NULL; } else if (need_timeslice(engine, last) && - timer_expired(&engine->execlists.timer)) { + timeslice_expired(execlists, last)) { ENGINE_TRACE(engine, - "expired last=%llx:%lld, prio=%d, hint=%d\n", + "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", last->fence.context, last->fence.seqno, last->sched.attr.priority, - execlists->queue_priority_hint); + execlists->queue_priority_hint, + yesno(timeslice_yield(execlists, last))); ring_set_paused(engine, 1); defer_active(engine); @@ -1938,13 +2067,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) "", yesno(engine != ve->siblings[0])); - ve->request = NULL; - ve->base.execlists.queue_priority_hint = INT_MIN; + WRITE_ONCE(ve->request, NULL); + WRITE_ONCE(ve->base.execlists.queue_priority_hint, + INT_MIN); rb_erase_cached(rb, &execlists->virtual); RB_CLEAR_NODE(rb); GEM_BUG_ON(!(rq->execution_mask & engine->mask)); - rq->engine = engine; + WRITE_ONCE(rq->engine, engine); if (engine != ve->siblings[0]) { u32 *regs = ve->context.lrc_reg_state; @@ -1957,7 +2087,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) engine); if (!list_empty(&ve->context.signals)) - virtual_xfer_breadcrumbs(ve, engine); + virtual_xfer_breadcrumbs(ve, rq); /* * Move the bound engine to the top of the list @@ -2064,6 +2194,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(last && !can_merge_ctx(last->context, rq->context)); + GEM_BUG_ON(last && + i915_seqno_passed(last->fence.seqno, + rq->fence.seqno)); submit = true; last = rq; @@ -2112,6 +2245,7 @@ done: } clear_ports(port + 1, last_port - port); + WRITE_ONCE(execlists->yield, -1); execlists_submit_ports(engine); set_preempt_timeout(engine, *active); } else { @@
-2134,6 +2268,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists) execlists_schedule_out(*port); clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); + smp_wmb(); /* complete the seqlock for execlists_active() */ WRITE_ONCE(execlists->active, execlists->inflight); } @@ -2144,12 +2279,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last) clflush((void *)last); } -static inline bool -reset_in_progress(const struct intel_engine_execlists *execlists) -{ - return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); -} - /* * Starting with Gen12, the status has a new format: * @@ -2240,7 +2369,6 @@ static void process_csb(struct intel_engine_cs *engine) */ head = execlists->csb_head; tail = READ_ONCE(*execlists->csb_write); - ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); if (unlikely(head == tail)) return; @@ -2254,6 +2382,7 @@ static void process_csb(struct intel_engine_cs *engine) */ rmb(); + ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); do { bool promote; @@ -2288,11 +2417,13 @@ static void process_csb(struct intel_engine_cs *engine) if (promote) { struct i915_request * const *old = execlists->active; + GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); + + ring_set_paused(engine, 0); + /* Point active to the new ELSP; prevent overwriting */ WRITE_ONCE(execlists->active, execlists->pending); - - if (!inject_preempt_hang(execlists)) - ring_set_paused(engine, 0); + smp_wmb(); /* notify execlists_active() */ /* cancel old inflight, prepare for switch */ trace_ports(execlists, "preempted", old); @@ -2300,12 +2431,12 @@ static void process_csb(struct intel_engine_cs *engine) execlists_schedule_out(*old++); /* switch pending to inflight */ - GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - WRITE_ONCE(execlists->active, - memcpy(execlists->inflight, - execlists->pending, - execlists_num_ports(execlists) * - sizeof(*execlists->pending))); + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending)); + smp_wmb(); /* complete the seqlock */ + WRITE_ONCE(execlists->active, execlists->inflight); WRITE_ONCE(execlists->pending[0], NULL); } else { @@ -2320,8 +2451,37 @@ static void process_csb(struct intel_engine_cs *engine) * coherent (visible from the CPU) before the * user interrupt and CSB is processed. 
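Both smp_wmb() barriers in the hunks above close a writer-side update of execlists->active; the matching reader, execlists_active(), is expected to retry in seqlock fashion until it observes a stable pointer. A sketch of such a reader, inferred from the barrier comments rather than copied from the driver (first_active is an invented name):

        static inline struct i915_request *
        first_active(const struct intel_engine_execlists *el)
        {
                struct i915_request * const *cur, * const *old, *active;

                cur = READ_ONCE(el->active);
                smp_rmb(); /* pairs with the writer's smp_wmb() */
                do {
                        old = cur;
                        active = READ_ONCE(*cur);
                        cur = READ_ONCE(el->active);
                        smp_rmb(); /* retry if active was republished */
                } while (cur != old);

                return active;
        }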
*/ - GEM_BUG_ON(!i915_request_completed(*execlists->active) && - !reset_in_progress(execlists)); + if (GEM_SHOW_DEBUG() && + !i915_request_completed(*execlists->active) && + !reset_in_progress(execlists)) { + struct i915_request *rq __maybe_unused = + *execlists->active; + const u32 *regs __maybe_unused = + rq->context->lrc_reg_state; + + ENGINE_TRACE(engine, + "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", + ENGINE_READ(engine, RING_START), + ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR, + ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR, + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_MI_MODE)); + ENGINE_TRACE(engine, + "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ", + i915_ggtt_offset(rq->ring->vma), + rq->head, rq->tail, + rq->fence.context, + lower_32_bits(rq->fence.seqno), + hwsp_seqno(rq)); + ENGINE_TRACE(engine, + "ctx:{start:%08x, head:%04x, tail:%04x}, ", + regs[CTX_RING_START], + regs[CTX_RING_HEAD], + regs[CTX_RING_TAIL]); + + GEM_BUG_ON("context completed before request"); + } + execlists_schedule_out(*execlists->active++); GEM_BUG_ON(execlists->active - execlists->inflight > @@ -2349,7 +2509,7 @@ static void process_csb(struct intel_engine_cs *engine) static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) { lockdep_assert_held(&engine->active.lock); - if (!engine->execlists.pending[0]) { + if (!READ_ONCE(engine->execlists.pending[0])) { rcu_read_lock(); /* protect peeking at execlists->active */ execlists_dequeue(engine); rcu_read_unlock(); @@ -2366,12 +2526,12 @@ static void __execlists_hold(struct i915_request *rq) if (i915_request_is_active(rq)) __i915_request_unsubmit(rq); - RQ_TRACE(rq, "on hold\n"); clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); list_move_tail(&rq->sched.link, &rq->engine->active.hold); i915_request_set_hold(rq); + RQ_TRACE(rq, "on hold\n"); - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + for_each_waiter(p, rq) { struct i915_request *w = container_of(p->waiter, typeof(*w), sched); @@ -2385,7 +2545,7 @@ static void __execlists_hold(struct i915_request *rq) if (i915_request_completed(w)) continue; - if (i915_request_on_hold(rq)) + if (i915_request_on_hold(w)) continue; list_move_tail(&w->sched.link, &list); @@ -2443,6 +2603,7 @@ static bool execlists_hold(struct intel_engine_cs *engine, GEM_BUG_ON(i915_request_on_hold(rq)); GEM_BUG_ON(rq->engine != engine); __execlists_hold(rq); + GEM_BUG_ON(list_empty(&engine->active.hold)); unlock: spin_unlock_irq(&engine->active.lock); @@ -2452,23 +2613,27 @@ unlock: static bool hold_request(const struct i915_request *rq) { struct i915_dependency *p; + bool result = false; /* * If one of our ancestors is on hold, we must also be on hold, * otherwise we will bypass it and execute before it. 
*/ - list_for_each_entry(p, &rq->sched.signalers_list, signal_link) { + rcu_read_lock(); + for_each_signaler(p, rq) { const struct i915_request *s = container_of(p->signaler, typeof(*s), sched); if (s->engine != rq->engine) continue; - if (i915_request_on_hold(s)) - return true; + result = i915_request_on_hold(s); + if (result) + break; } + rcu_read_unlock(); - return false; + return result; } static void __execlists_unhold(struct i915_request *rq) @@ -2478,6 +2643,8 @@ static void __execlists_unhold(struct i915_request *rq) do { struct i915_dependency *p; + RQ_TRACE(rq, "hold release\n"); + GEM_BUG_ON(!i915_request_on_hold(rq)); GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); @@ -2486,21 +2653,24 @@ static void __execlists_unhold(struct i915_request *rq) i915_sched_lookup_priolist(rq->engine, rq_prio(rq))); set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); - RQ_TRACE(rq, "hold release\n"); /* Also release any children on this engine that are ready */ - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + for_each_waiter(p, rq) { struct i915_request *w = container_of(p->waiter, typeof(*w), sched); + /* Propagate any change in error status */ + if (rq->fence.error) + i915_request_set_error_once(w, rq->fence.error); + if (w->engine != rq->engine) continue; - if (!i915_request_on_hold(rq)) + if (!i915_request_on_hold(w)) continue; /* Check that no other parents are also on hold */ - if (hold_request(rq)) + if (hold_request(w)) continue; list_move_tail(&w->sched.link, &list); @@ -2615,13 +2785,13 @@ static bool execlists_capture(struct intel_engine_cs *engine) if (!cap) return true; + spin_lock_irq(&engine->active.lock); cap->rq = execlists_active(&engine->execlists); - GEM_BUG_ON(!cap->rq); - - rcu_read_lock(); - cap->rq = active_request(cap->rq->context->timeline, cap->rq); - cap->rq = i915_request_get_rcu(cap->rq); - rcu_read_unlock(); + if (cap->rq) { + cap->rq = active_request(cap->rq->context->timeline, cap->rq); + cap->rq = i915_request_get_rcu(cap->rq); + } + spin_unlock_irq(&engine->active.lock); if (!cap->rq) goto err_free; @@ -2660,27 +2830,25 @@ err_free: return false; } -static noinline void preempt_reset(struct intel_engine_cs *engine) +static void execlists_reset(struct intel_engine_cs *engine, const char *msg) { const unsigned int bit = I915_RESET_ENGINE + engine->id; unsigned long *lock = &engine->gt->reset.flags; - if (i915_modparams.reset < 3) + if (!intel_has_reset_engine(engine->gt)) return; if (test_and_set_bit(bit, lock)) return; + ENGINE_TRACE(engine, "reset for %s\n", msg); + /* Mark this tasklet as disabled to avoid waiting for it to complete */ tasklet_disable_nosync(&engine->execlists.tasklet); - ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n", - READ_ONCE(engine->props.preempt_timeout_ms), - jiffies_to_msecs(jiffies - engine->execlists.preempt.expires)); - ring_set_paused(engine, 1); /* Freeze the current request in place */ if (execlists_capture(engine)) - intel_engine_reset(engine, "preemption time out"); + intel_engine_reset(engine, msg); else ring_set_paused(engine, 0); @@ -2711,6 +2879,13 @@ static void execlists_submission_tasklet(unsigned long data) bool timeout = preempt_timeout(engine); process_csb(engine); + + if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { + engine->execlists.error_interrupt = 0; + if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */ + execlists_reset(engine, "CS error"); + } + if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { unsigned long flags; @@ -2719,8 +2894,8 @@ static void 
execlists_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->active.lock, flags); /* Recheck after serialising with direct-submission */ - if (timeout && preempt_timeout(engine)) - preempt_reset(engine); + if (unlikely(timeout && preempt_timeout(engine))) + execlists_reset(engine, "preemption time out"); } } @@ -2793,6 +2968,7 @@ static void execlists_submit_request(struct i915_request *request) spin_lock_irqsave(&engine->active.lock, flags); if (unlikely(ancestor_on_hold(engine, request))) { + RQ_TRACE(request, "ancestor on hold\n"); list_add_tail(&request->sched.link, &engine->active.hold); i915_request_set_hold(request); } else { @@ -2874,6 +3050,7 @@ __execlists_update_reg_state(const struct intel_context *ce, regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); regs[CTX_RING_HEAD] = head; regs[CTX_RING_TAIL] = ring->tail; + regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; /* RPCS */ if (engine->class == RENDER_CLASS) { @@ -2899,7 +3076,7 @@ __execlists_context_pin(struct intel_context *ce, if (IS_ERR(vaddr)) return PTR_ERR(vaddr); - ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; + ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; __execlists_update_reg_state(ce, engine, ce->ring->tail); @@ -2921,22 +3098,6 @@ static void execlists_context_reset(struct intel_context *ce) CE_TRACE(ce, "reset\n"); GEM_BUG_ON(!intel_context_is_pinned(ce)); - /* - * Because we emit WA_TAIL_DWORDS there may be a disparity - * between our bookkeeping in ce->ring->head and ce->ring->tail and - * that stored in context. As we only write new commands from - * ce->ring->tail onwards, everything before that is junk. If the GPU - * starts reading from its RING_HEAD in the context, it may try to - * execute that junk and die. - * - * The contexts that are still pinned on resume belong to the - * kernel, and are local to each engine. All other contexts will - * have their head/tail sanitized upon pinning before use, so they - * will never see garbage. - * - * So to avoid that we reset the context images upon resume. For - * simplicity, we just zero everything out.
- */ intel_ring_reset(ce->ring, ce->ring->emit); /* Scrub away the garbage */ @@ -2944,7 +3105,7 @@ static void execlists_context_reset(struct intel_context *ce) ce, ce->engine, ce->ring, true); __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; } static const struct intel_context_ops execlists_context_ops = { @@ -2964,7 +3125,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) { u32 *cs; - GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb); + if (!i915_request_timeline(rq)->has_initial_breadcrumb) + return 0; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -3257,7 +3419,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) goto err; } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + err = i915_ggtt_pin(vma, 0, PIN_HIGH); if (err) goto err; @@ -3346,6 +3508,49 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } +static void enable_error_interrupt(struct intel_engine_cs *engine) +{ + u32 status; + + engine->execlists.error_interrupt = 0; + ENGINE_WRITE(engine, RING_EMR, ~0u); + ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */ + + status = ENGINE_READ(engine, RING_ESR); + if (unlikely(status)) { + dev_err(engine->i915->drm.dev, + "engine '%s' resumed still in error: %08x\n", + engine->name, status); + __intel_gt_reset(engine->gt, engine->mask); + } + + /* + * On current gen8+, we have 2 signals to play with + * + * - I915_ERROR_INSTRUCTION (bit 0) + * + * Generate an error if the command parser encounters an invalid + * instruction + * + * This is a fatal error. + * + * - CP_PRIV (bit 2) + * + * Generate an error on privilege violation (where the CP replaces + * the instruction with a no-op). This also fires for writes into + * read-only scratch pages. + * + * This is a non-fatal error, parsing continues. + * + * * there are a few others defined for odd HW that we do not use + * + * Since CP_PRIV fires for cases where we have chosen to ignore the + * error (as the HW is validating and suppressing the mistakes), we + * only unmask the instruction error bit.
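+ *
+ * For reference, the consumer added to execlists_submission_tasklet()
+ * by this patch reduces to:
+ *
+ *	if (unlikely(READ_ONCE(execlists->error_interrupt))) {
+ *		execlists->error_interrupt = 0;
+ *		if (ENGINE_READ(engine, RING_ESR))
+ *			execlists_reset(engine, "CS error");
+ *	}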
+ */ + ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION); +} + static void enable_execlists(struct intel_engine_cs *engine) { u32 mode; @@ -3367,7 +3572,9 @@ static void enable_execlists(struct intel_engine_cs *engine) i915_ggtt_offset(engine->status_page.vma)); ENGINE_POSTING_READ(engine, RING_HWS_PGA); - engine->context_tag = 0; + enable_error_interrupt(engine); + + engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); } static bool unexpected_starting_state(struct intel_engine_cs *engine) @@ -3384,9 +3591,6 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) static int execlists_resume(struct intel_engine_cs *engine) { - intel_engine_apply_workarounds(engine); - intel_engine_apply_whitelist(engine); - intel_mocs_init_engine(engine); intel_engine_reset_breadcrumbs(engine); @@ -3517,9 +3721,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) if (!rq) goto unwind; - /* We still have requests in-flight; the engine should be active */ - GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); - ce = rq->context; GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); @@ -3529,8 +3730,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) goto out_replay; } + /* We still have requests in-flight; the engine should be active */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + /* Context has requests still in-flight; it should not be idle! */ GEM_BUG_ON(i915_active_is_idle(&ce->active)); + rq = active_request(ce->timeline, rq); head = intel_ring_wrap(ce->ring, rq->head); GEM_BUG_ON(head == ce->ring->tail); @@ -3581,7 +3786,7 @@ out_replay: head, ce->ring->tail); __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine, head); - ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ + ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ unwind: /* Push back any incomplete requests for replay after the reset. */ @@ -3604,7 +3809,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) static void nop_submission_tasklet(unsigned long data) { + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + /* The driver is wedged; don't process any more events. 
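 *
 * (Resetting queue_priority_hint to INT_MIN below restores the
 * idle-engine convention used elsewhere in this file; the motive is
 * inferred, not spelled out by the patch.)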
*/ + WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN); } static void execlists_reset_cancel(struct intel_engine_cs *engine) @@ -4194,8 +4402,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_SUPPORTS_STATS; if (!intel_vgpu_active(engine->i915)) { engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) + if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { engine->flags |= I915_ENGINE_HAS_PREEMPTION; + if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + engine->flags |= I915_ENGINE_HAS_TIMESLICES; + } } if (INTEL_GEN(engine->i915) >= 12) @@ -4273,6 +4484,8 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; + engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; + engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; } static void rcs_submission_override(struct intel_engine_cs *engine) @@ -4340,6 +4553,11 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) else execlists->csb_size = GEN11_CSB_ENTRIES; + if (INTEL_GEN(engine->i915) >= 11) { + execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); + execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); + } + reset_csb_pointers(engine); /* Finally, take ownership and responsibility for cleanup! */ @@ -4514,8 +4732,13 @@ populate_lr_context(struct intel_context *ce, inhibit = false; } - /* The second page of the context object contains some fields which must - * be set up prior to the first execution. */ + /* Clear the ppHWSP (inc. per-context counters) */ + memset(vaddr, 0, PAGE_SIZE); + + /* + * The second page of the context object contains some registers which + * must be set up prior to the first execution. + */ execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE, ce, engine, ring, inhibit); @@ -4553,8 +4776,17 @@ static int __execlists_context_alloc(struct intel_context *ce, if (!ce->timeline) { struct intel_timeline *tl; + struct i915_vma *hwsp; + + /* + * Use the static global HWSP for the kernel context, and + * a dynamically allocated cacheline for everyone else. 
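+ *
+ * i.e. the selection below is equivalent to:
+ *
+ *	hwsp = intel_context_is_barrier(ce) ?
+ *	       engine->status_page.vma : NULL;
+ *
+ * where a NULL hwsp asks intel_timeline_create() to allocate a
+ * cacheline of its own.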
+ */ + hwsp = NULL; + if (unlikely(intel_context_is_barrier(ce))) + hwsp = engine->status_page.vma; - tl = intel_timeline_create(engine->gt, NULL); + tl = intel_timeline_create(engine->gt, hwsp); if (IS_ERR(tl)) { ret = PTR_ERR(tl); goto error_deref_obj; @@ -4723,7 +4955,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) mask = rq->execution_mask; if (unlikely(!mask)) { /* Invalid selection, submit to a random engine in error */ - i915_request_skip(rq, -ENODEV); + i915_request_set_error_once(rq, -ENODEV); mask = ve->siblings[0]->mask; } @@ -4737,7 +4969,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) static void virtual_submission_tasklet(unsigned long data) { struct virtual_engine * const ve = (struct virtual_engine *)data; - const int prio = ve->base.execlists.queue_priority_hint; + const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint); intel_engine_mask_t mask; unsigned int n; @@ -5133,11 +5365,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine, show_request(m, last, "\t\tE "); } - last = NULL; - count = 0; + if (execlists->switch_priority_hint != INT_MIN) + drm_printf(m, "\t\tSwitch priority hint: %d\n", + READ_ONCE(execlists->switch_priority_hint)); if (execlists->queue_priority_hint != INT_MIN) drm_printf(m, "\t\tQueue priority hint: %d\n", - execlists->queue_priority_hint); + READ_ONCE(execlists->queue_priority_hint)); + + last = NULL; + count = 0; for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); int i; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index 08a3be65f700..d39b72590e40 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -17,6 +17,7 @@ #define CTX_RING_CTL (0x0a + 1) #define CTX_BB_STATE (0x10 + 1) #define CTX_BB_PER_CTX_PTR (0x18 + 1) +#define CTX_TIMESTAMP (0x22 + 1) #define CTX_PDP3_UDW (0x24 + 1) #define CTX_PDP3_LDW (0x26 + 1) #define CTX_PDP2_UDW (0x28 + 1) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index eeef90b55c64..632e08a4592b 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -280,9 +280,32 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = { GEN11_MOCS_ENTRIES }; -static bool get_mocs_settings(const struct drm_i915_private *i915, - struct drm_i915_mocs_table *table) +enum { + HAS_GLOBAL_MOCS = BIT(0), + HAS_ENGINE_MOCS = BIT(1), + HAS_RENDER_L3CC = BIT(2), +}; + +static bool has_l3cc(const struct drm_i915_private *i915) { + return true; +} + +static bool has_global_mocs(const struct drm_i915_private *i915) +{ + return HAS_GLOBAL_MOCS_REGISTERS(i915); +} + +static bool has_mocs(const struct drm_i915_private *i915) +{ + return !IS_DGFX(i915); +} + +static unsigned int get_mocs_settings(const struct drm_i915_private *i915, + struct drm_i915_mocs_table *table) +{ + unsigned int flags; + if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; @@ -300,13 +323,13 @@ static bool get_mocs_settings(const struct drm_i915_private *i915, table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; } else { - WARN_ONCE(INTEL_GEN(i915) >= 9, - "Platform that should have a MOCS table does not.\n"); - return false; + drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9, + "Platform that should have a MOCS table does not.\n"); + return 0; } if 
(GEM_DEBUG_WARN_ON(table->size > table->n_entries)) - return false; + return 0; /* WaDisableSkipCaching:skl,bxt,kbl,glk */ if (IS_GEN(i915, 9)) { @@ -315,10 +338,20 @@ static bool get_mocs_settings(const struct drm_i915_private *i915, for (i = 0; i < table->size; i++) if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value & (L3_ESC(1) | L3_SCC(0x7)))) - return false; + return 0; } - return true; + flags = 0; + if (has_mocs(i915)) { + if (has_global_mocs(i915)) + flags |= HAS_GLOBAL_MOCS; + else + flags |= HAS_ENGINE_MOCS; + } + if (has_l3cc(i915)) + flags |= HAS_RENDER_L3CC; + + return flags; } /* @@ -411,18 +444,20 @@ static void init_l3cc_table(struct intel_engine_cs *engine, void intel_mocs_init_engine(struct intel_engine_cs *engine) { struct drm_i915_mocs_table table; + unsigned int flags; /* Called under a blanket forcewake */ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); - if (!get_mocs_settings(engine->i915, &table)) + flags = get_mocs_settings(engine->i915, &table); + if (!flags) return; /* Platforms with global MOCS do not need per-engine initialization. */ - if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915)) + if (flags & HAS_ENGINE_MOCS) init_mocs_table(engine, &table); - if (engine->class == RENDER_CLASS) + if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS) init_l3cc_table(engine, &table); } @@ -431,26 +466,17 @@ static u32 global_mocs_offset(void) return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0)); } -static void init_global_mocs(struct intel_gt *gt) +void intel_mocs_init(struct intel_gt *gt) { struct drm_i915_mocs_table table; + unsigned int flags; /* * LLC and eDRAM control values are not applicable to dgfx */ - if (IS_DGFX(gt->i915)) - return; - - if (!get_mocs_settings(gt->i915, &table)) - return; - - __init_mocs_table(gt->uncore, &table, global_mocs_offset()); -} - -void intel_mocs_init(struct intel_gt *gt) -{ - if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) - init_global_mocs(gt); + flags = get_mocs_settings(gt->i915, &table); + if (flags & HAS_GLOBAL_MOCS) + __init_mocs_table(gt->uncore, &table, global_mocs_offset()); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 9e303c29d6e3..3847ee44b181 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -7,6 +7,7 @@ #include <linux/pm_runtime.h> #include "i915_drv.h" +#include "i915_vgpu.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_rc6.h" @@ -226,10 +227,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) set(uncore, GEN6_RC_SLEEP, 0); set(uncore, GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(i915)) - set(uncore, GEN6_RC6_THRESHOLD, 125000); - else - set(uncore, GEN6_RC6_THRESHOLD, 50000); + set(uncore, GEN6_RC6_THRESHOLD, 50000); set(uncore, GEN6_RC6p_THRESHOLD, 150000); set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */ @@ -299,7 +297,6 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) pcbr_offset = (pcbr & ~4095) - i915->dsm.start; pctx = i915_gem_object_create_stolen_for_preallocated(i915, pcbr_offset, - I915_GTT_OFFSET_NONE, pctx_size); if (IS_ERR(pctx)) return PTR_ERR(pctx); @@ -323,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6) return PTR_ERR(pctx); } - GEM_BUG_ON(range_overflows_t(u64, - i915->dsm.start, - pctx->stolen->start, - U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, + i915->dsm.start, + pctx->stolen->start, + U32_MAX)); pctx_paddr = i915->dsm.start + pctx->stolen->start; intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); @@ -542,6 
+539,8 @@ void intel_rc6_init(struct intel_rc6 *rc6) void intel_rc6_sanitize(struct intel_rc6 *rc6) { + memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency)); + if (rc6->enabled) { /* unbalanced suspend/resume */ rpm_get(rc6); rc6->enabled = false; @@ -604,6 +603,7 @@ void intel_rc6_unpark(struct intel_rc6 *rc6) void intel_rc6_park(struct intel_rc6 *rc6) { struct intel_uncore *uncore = rc6_to_uncore(rc6); + unsigned int target; if (!rc6->enabled) return; @@ -618,7 +618,14 @@ void intel_rc6_park(struct intel_rc6 *rc6) /* Turn off the HW timers and go directly to rc6 */ set(uncore, GEN6_RC_CONTROL, GEN6_RC_CTL_RC6_ENABLE); - set(uncore, GEN6_RC_STATE, 0x4 << RC_SW_TARGET_STATE_SHIFT); + + if (HAS_RC6pp(rc6_to_i915(rc6))) + target = 0x6; /* deepest rc6 */ + else if (HAS_RC6p(rc6_to_i915(rc6))) + target = 0x5; /* deep rc6 */ + else + target = 0x4; /* normal rc6 */ + set(uncore, GEN6_RC_STATE, target << RC_SW_TARGET_STATE_SHIFT); } void intel_rc6_disable(struct intel_rc6 *rc6) @@ -713,7 +720,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg) */ i = (i915_mmio_reg_offset(reg) - i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); - if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency))) + if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency))) return 0; fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index beee0cf89bce..80db3c9d785e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq) lockdep_assert_held(&engine->active.lock); list_for_each_entry_continue(rq, &engine->active.requests, sched.link) - if (rq->context == hung_ctx) - i915_request_skip(rq, -EIO); + if (rq->context == hung_ctx) { + i915_request_set_error_once(rq, -EIO); + __i915_request_skip(rq); + } } static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) @@ -72,9 +74,10 @@ static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) if (score) { atomic_add(score, &file_priv->ban_score); - DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", - ctx->name, score, - atomic_read(&file_priv->ban_score)); + drm_dbg(&ctx->i915->drm, + "client %s: gained %u ban score, now %u\n", + ctx->name, score, + atomic_read(&file_priv->ban_score)); } } @@ -85,19 +88,18 @@ static bool mark_guilty(struct i915_request *rq) bool banned; int i; + if (intel_context_is_closed(rq->context)) { + intel_context_set_banned(rq->context); + return true; + } + rcu_read_lock(); ctx = rcu_dereference(rq->context->gem_context); if (ctx && !kref_get_unless_zero(&ctx->ref)) ctx = NULL; rcu_read_unlock(); if (!ctx) - return false; - - if (i915_gem_context_is_closed(ctx)) { - intel_context_set_banned(rq->context); - banned = true; - goto out; - } + return intel_context_is_banned(rq->context); atomic_inc(&ctx->guilty_count); @@ -122,8 +124,8 @@ static bool mark_guilty(struct i915_request *rq) if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)) banned = true; if (banned) { - DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n", - ctx->name, atomic_read(&ctx->guilty_count)); + drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n", + ctx->name, atomic_read(&ctx->guilty_count)); intel_context_set_banned(rq->context); } @@ -153,11 +155,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) rcu_read_lock(); /* protect the GEM 
context */ if (guilty) { - i915_request_skip(rq, -EIO); + i915_request_set_error_once(rq, -EIO); + __i915_request_skip(rq); if (mark_guilty(rq)) engine_skip_context(rq); } else { - dma_fence_set_error(&rq->fence, -EAGAIN); + i915_request_set_error_once(rq, -EAGAIN); mark_innocent(rq); } rcu_read_unlock(); @@ -226,7 +229,7 @@ static int g4x_do_reset(struct intel_gt *gt, GRDOM_MEDIA | GRDOM_RESET_ENABLE); ret = wait_for_atomic(g4x_reset_complete(pdev), 50); if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + drm_dbg(>->i915->drm, "Wait for media reset failed\n"); goto out; } @@ -234,7 +237,7 @@ static int g4x_do_reset(struct intel_gt *gt, GRDOM_RENDER | GRDOM_RESET_ENABLE); ret = wait_for_atomic(g4x_reset_complete(pdev), 50); if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + drm_dbg(>->i915->drm, "Wait for render reset failed\n"); goto out; } @@ -260,7 +263,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, 5000, 0, NULL); if (ret) { - DRM_DEBUG_DRIVER("Wait for render reset failed\n"); + drm_dbg(>->i915->drm, "Wait for render reset failed\n"); goto out; } @@ -271,7 +274,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, 5000, 0, NULL); if (ret) { - DRM_DEBUG_DRIVER("Wait for media reset failed\n"); + drm_dbg(>->i915->drm, "Wait for media reset failed\n"); goto out; } @@ -300,8 +303,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) 500, 0, NULL); if (err) - DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n", - hw_domain_mask); + drm_dbg(>->i915->drm, + "Wait for 0x%08x engines reset failed\n", + hw_domain_mask); return err; } @@ -401,7 +405,8 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) return 0; if (ret) { - DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + drm_dbg(&engine->i915->drm, + "Wait for SFC forced lock ack failed\n"); return ret; } @@ -515,9 +520,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) ret = __intel_wait_for_register_fw(uncore, reg, mask, ack, 700, 0, NULL); if (ret) - DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n", - engine->name, request, - intel_uncore_read_fw(uncore, reg)); + drm_err(&engine->i915->drm, + "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n", + engine->name, request, + intel_uncore_read_fw(uncore, reg)); return ret; } @@ -781,7 +787,7 @@ static void nop_submit_request(struct i915_request *request) unsigned long flags; RQ_TRACE(request, "-EIO\n"); - dma_fence_set_error(&request->fence, -EIO); + i915_request_set_error_once(request, -EIO); spin_lock_irqsave(&engine->active.lock, flags); __i915_request_submit(request); @@ -800,13 +806,6 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) if (test_bit(I915_WEDGED, >->reset.flags)) return; - if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) { - struct drm_printer p = drm_debug_printer(__func__); - - for_each_engine(engine, gt, id) - intel_engine_dump(engine, &p, "%s\n", engine->name); - } - GT_TRACE(gt, "start\n"); /* @@ -845,10 +844,30 @@ void intel_gt_set_wedged(struct intel_gt *gt) { intel_wakeref_t wakeref; + if (test_bit(I915_WEDGED, >->reset.flags)) + return; + + wakeref = intel_runtime_pm_get(gt->uncore->rpm); mutex_lock(>->reset.mutex); - with_intel_runtime_pm(gt->uncore->rpm, wakeref) - __intel_gt_set_wedged(gt); + + if (GEM_SHOW_DEBUG()) { + struct drm_printer p = drm_debug_printer(__func__); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + 
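/* Log the caller, then dump each engine that is still busy */
+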
drm_printf(&p, "called from %pS\n", (void *)_RET_IP_); + for_each_engine(engine, gt, id) { + if (intel_engine_is_idle(engine)) + continue; + + intel_engine_dump(engine, &p, "%s\n", engine->name); + } + } + + __intel_gt_set_wedged(gt); + mutex_unlock(>->reset.mutex); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); } static bool __intel_gt_unset_wedged(struct intel_gt *gt) @@ -969,7 +988,7 @@ static int resume(struct intel_gt *gt) int ret; for_each_engine(engine, gt, id) { - ret = engine->resume(engine); + ret = intel_engine_resume(engine); if (ret) return ret; } @@ -1022,7 +1041,7 @@ void intel_gt_reset(struct intel_gt *gt, if (i915_modparams.reset) dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); else - DRM_DEBUG_DRIVER("GPU reset disabled\n"); + drm_dbg(>->i915->drm, "GPU reset disabled\n"); goto error; } @@ -1049,8 +1068,9 @@ void intel_gt_reset(struct intel_gt *gt, */ ret = intel_gt_init_hw(gt); if (ret) { - DRM_ERROR("Failed to initialise HW following reset (%d)\n", - ret); + drm_err(>->i915->drm, + "Failed to initialise HW following reset (%d)\n", + ret); goto taint; } @@ -1126,9 +1146,8 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ - DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - uses_guc ? "GuC " : "", - engine->name, ret); + drm_dbg(>->i915->drm, "%sFailed to reset %s, ret=%d\n", + uses_guc ? "GuC " : "", engine->name, ret); goto out; } @@ -1144,7 +1163,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) * have been reset to their default values. Follow the init_ring * process to program RING_MODE, HWSP and re-enable submission. */ - ret = engine->resume(engine); + ret = intel_engine_resume(engine); out: intel_engine_cancel_stop_cs(engine); @@ -1165,7 +1184,7 @@ static void intel_gt_reset_global(struct intel_gt *gt, kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); - DRM_DEBUG_DRIVER("resetting chip\n"); + drm_dbg(>->i915->drm, "resetting chip, engines=%x\n", engine_mask); kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* Use a watchdog to ensure that our reset completes */ diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 6ff803f397c4..8cda1b7e17ba 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -31,17 +31,15 @@ int intel_ring_pin(struct intel_ring *ring) if (atomic_fetch_inc(&ring->pin_count)) return 0; - flags = PIN_GLOBAL; - /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
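 * (Hence the bias below: PIN_OFFSET_BIAS with i915_ggtt_pin_bias(vma)
 * as the minimum start keeps the ring away from offset 0.)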
*/ - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); if (vma->obj->stolen) flags |= PIN_MAPPABLE; else flags |= PIN_HIGH; - ret = i915_vma_pin(vma, 0, 0, flags); + ret = i915_ggtt_pin(vma, 0, flags); if (unlikely(ret)) goto err_unpin; diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index bc44fe8e5ffa..fdc3f10e12aa 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -29,11 +29,10 @@ #include <linux/log2.h> -#include <drm/i915_drm.h> - #include "gem/i915_gem_context.h" #include "gen6_ppgtt.h" +#include "gen7_renderclear.h" #include "i915_drv.h" #include "i915_trace.h" #include "intel_context.h" @@ -568,7 +567,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine) return; /* ring should be idle before issuing a sync flush*/ - WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + drm_WARN_ON(&dev_priv->drm, + (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); ENGINE_WRITE(engine, RING_INSTPM, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | @@ -626,6 +626,27 @@ static bool stop_ring(struct intel_engine_cs *engine) return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; } +static struct i915_address_space *vm_alias(struct i915_address_space *vm) +{ + if (i915_is_ggtt(vm)) + vm = &i915_vm_to_ggtt(vm)->alias->vm; + + return vm; +} + +static void set_pp_dir(struct intel_engine_cs *engine) +{ + struct i915_address_space *vm = vm_alias(engine->gt->vm); + + if (vm) { + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + + ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); + ENGINE_WRITE(engine, RING_PP_DIR_BASE, + px_base(ppgtt->pd)->ggtt_offset << 10); + } +} + static int xcs_resume(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -684,6 +705,8 @@ static int xcs_resume(struct intel_engine_cs *engine) GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); intel_ring_update_space(ring); + set_pp_dir(engine); + /* First wake the ring up to an empty/idle ring */ ENGINE_WRITE(engine, RING_HEAD, ring->head); ENGINE_WRITE(engine, RING_TAIL, ring->head); @@ -857,43 +880,6 @@ static int rcs_resume(struct intel_engine_cs *engine) intel_uncore_write(uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE)); - /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (IS_GEN_RANGE(i915, 4, 6)) - intel_uncore_write(uncore, MI_MODE, - _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); - - /* We need to disable the AsyncFlip performance optimisations in order - * to use MI_WAIT_FOR_EVENT within the CS. It should already be - * programmed to '1' on all products. - * - * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv - */ - if (IS_GEN_RANGE(i915, 6, 7)) - intel_uncore_write(uncore, MI_MODE, - _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); - - /* Required for the hardware to program scanline values for waiting */ - /* WaEnableFlushTlbInvalidationMode:snb */ - if (IS_GEN(i915, 6)) - intel_uncore_write(uncore, GFX_MODE, - _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); - - /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN(i915, 7)) - intel_uncore_write(uncore, GFX_MODE_GEN7, - _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | - _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - - if (IS_GEN(i915, 6)) { - /* From the Sandybridge PRM, volume 1 part 3, page 24: - * "If this bit is set, STCunit will have LRA as replacement - * policy. [...] This bit must be reset. 
LRA replacement - * policy is not supported." - */ - intel_uncore_write(uncore, CACHE_MODE_0, - _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); - } - if (IS_GEN_RANGE(i915, 6, 7)) intel_uncore_write(uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); @@ -910,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. */ list_for_each_entry(request, &engine->active.requests, sched.link) { - if (!i915_request_signaled(request)) - dma_fence_set_error(&request->fence, -EIO); - + i915_request_set_error_once(request, -EIO); i915_request_mark_complete(request); } @@ -1197,23 +1181,12 @@ static void ring_context_destroy(struct kref *ref) intel_context_free(ce); } -static struct i915_address_space *vm_alias(struct intel_context *ce) -{ - struct i915_address_space *vm; - - vm = ce->vm; - if (i915_is_ggtt(vm)) - vm = &i915_vm_to_ggtt(vm)->alias->vm; - - return vm; -} - static int __context_pin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; int err = 0; - vm = vm_alias(ce); + vm = vm_alias(ce->vm); if (vm) err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); @@ -1224,7 +1197,7 @@ static void __context_unpin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; - vm = vm_alias(ce); + vm = vm_alias(ce->vm); if (vm) gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); } @@ -1384,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq, return rq->engine->emit_flush(rq, EMIT_FLUSH); } -static inline int mi_set_context(struct i915_request *rq, u32 flags) +static inline int mi_set_context(struct i915_request *rq, + struct intel_context *ce, + u32 flags) { struct drm_i915_private *i915 = rq->i915; struct intel_engine_cs *engine = rq->engine; @@ -1459,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) *cs++ = MI_NOOP; *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(rq->context->state) | flags; + *cs++ = i915_ggtt_offset(ce->state) | flags; /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -1574,21 +1549,64 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) return rq->engine->emit_flush(rq, EMIT_INVALIDATE); } +static int clear_residuals(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + int ret; + + ret = switch_mm(rq, vm_alias(engine->kernel_context->vm)); + if (ret) + return ret; + + if (engine->kernel_context->state) { + ret = mi_set_context(rq, + engine->kernel_context, + MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT); + if (ret) + return ret; + } + + ret = engine->emit_bb_start(rq, + engine->wa_ctx.vma->node.start, 0, + 0); + if (ret) + return ret; + + ret = engine->emit_flush(rq, EMIT_FLUSH); + if (ret) + return ret; + + /* Always invalidate before the next switch_mm() */ + return engine->emit_flush(rq, EMIT_INVALIDATE); +} + static int switch_context(struct i915_request *rq) { + struct intel_engine_cs *engine = rq->engine; struct intel_context *ce = rq->context; + void **residuals = NULL; int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); - ret = switch_mm(rq, vm_alias(ce)); + if (engine->wa_ctx.vma && ce != engine->kernel_context) { + if (engine->wa_ctx.vma->private != ce) { + ret = clear_residuals(rq); + if (ret) + return ret; + + residuals = &engine->wa_ctx.vma->private; + } + } + + ret = switch_mm(rq, vm_alias(ce->vm)); if (ret) return ret; if (ce->state) { u32 flags; - GEM_BUG_ON(rq->engine->id != RCS0); + GEM_BUG_ON(engine->id != RCS0); /* For resource streamer on HSW+ and power context elsewhere */ 
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); @@ -1600,7 +1618,7 @@ static int switch_context(struct i915_request *rq) else flags |= MI_RESTORE_INHIBIT; - ret = mi_set_context(rq, flags); + ret = mi_set_context(rq, ce, flags); if (ret) return ret; } @@ -1609,6 +1627,20 @@ static int switch_context(struct i915_request *rq) if (ret) return ret; + /* + * Now past the point of no return, this request _will_ be emitted. + * + * Or at least this preamble will be emitted, the request may be + * interrupted prior to submitting the user payload. If so, we + * still submit the "empty" request in order to preserve global + * state tracking such as this, our tracking of the current + * dirty context. + */ + if (residuals) { + intel_context_put(*residuals); + *residuals = intel_context_get(ce); + } + return 0; } @@ -1662,7 +1694,8 @@ static void gen6_bsd_submit_request(struct i915_request *request) GEN6_BSD_SLEEP_INDICATOR, 0, 1000, 0, NULL)) - DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); + drm_err(&uncore->i915->drm, + "timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ i9xx_submit_request(request); @@ -1787,11 +1820,16 @@ static void ring_release(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - WARN_ON(INTEL_GEN(dev_priv) > 2 && - (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 && + (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); intel_engine_cleanup_common(engine); + if (engine->wa_ctx.vma) { + intel_context_put(engine->wa_ctx.vma->private); + i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); + } + intel_ring_unpin(engine->legacy.ring); intel_ring_put(engine->legacy.ring); @@ -1939,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine) engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; } +static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine, + struct i915_vma * const vma) +{ + return gen7_setup_clear_gpr_bb(engine, vma); +} + +static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int size; + int err; + + size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */); + if (size <= 0) + return size; + + size = ALIGN(size, PAGE_SIZE); + obj = i915_gem_object_create_internal(engine->i915, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, engine->gt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vma->private = intel_context_create(engine); /* dummy residuals */ + if (IS_ERR(vma->private)) { + err = PTR_ERR(vma->private); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH); + if (err) + goto err_private; + + err = i915_vma_sync(vma); + if (err) + goto err_unpin; + + err = gen7_ctx_switch_bb_setup(engine, vma); + if (err) + goto err_unpin; + + engine->wa_ctx.vma = vma; + return 0; + +err_unpin: + i915_vma_unpin(vma); +err_private: + intel_context_put(vma->private); +err_obj: + i915_gem_object_put(obj); + return err; +} + int intel_ring_submission_setup(struct intel_engine_cs *engine) { struct intel_timeline *timeline; @@ -1992,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); + if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) { + err = gen7_ctx_switch_bb_init(engine); + if (err) + goto err_ring_unpin; + } 
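+	/*
+	 * (The wa_ctx batch constructed above is what clear_residuals()
+	 * replays whenever a different user context takes over the
+	 * engine; see switch_context().)
+	 */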
+ /* Finally, take ownership and responsibility for cleanup! */ engine->release = ring_release; return 0; +err_ring_unpin: + intel_ring_unpin(ring); err_ring: intel_ring_put(ring); err_timeline_unpin: @@ -2007,3 +2111,7 @@ err: intel_engine_cleanup_common(engine); return err; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_ring_submission.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index d2a3d935d186..19542fd9e207 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -4,6 +4,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/i915_drm.h> + #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_irq.h" @@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val) if (val < rps->max_freq_softlimit) mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; - mask &= rps->pm_events; + mask &= READ_ONCE(rps->pm_events); return rps_pm_sanitize_mask(rps, ~mask); } @@ -68,22 +70,25 @@ static void rps_reset_ei(struct intel_rps *rps) static void rps_enable_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); + u32 events; rps_reset_ei(rps); if (IS_VALLEYVIEW(gt->i915)) /* WaGsvRC0ResidencyMethod:vlv */ - rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; + events = GEN6_PM_RP_UP_EI_EXPIRED; else - rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_DOWN_TIMEOUT); + events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + WRITE_ONCE(rps->pm_events, events); spin_lock_irq(>->irq_lock); gen6_gt_pm_enable_irq(gt, rps->pm_events); spin_unlock_irq(>->irq_lock); - set(gt->uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, rps->cur_freq)); + intel_uncore_write(gt->uncore, + GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); } static void gen6_rps_reset_interrupts(struct intel_rps *rps) @@ -115,9 +120,10 @@ static void rps_disable_interrupts(struct intel_rps *rps) { struct intel_gt *gt = rps_to_gt(rps); - rps->pm_events = 0; + WRITE_ONCE(rps->pm_events, 0); - set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); + intel_uncore_write(gt->uncore, + GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); spin_lock_irq(>->irq_lock); gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); @@ -642,7 +648,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) { mutex_lock(&rps->power.mutex); if (interactive) { - if (!rps->power.interactive++ && rps->active) + if (!rps->power.interactive++ && READ_ONCE(rps->active)) rps_set_power(rps, HIGH_POWER); } else { GEM_BUG_ON(!rps->power.interactive); @@ -719,11 +725,15 @@ void intel_rps_unpark(struct intel_rps *rps) * performance, jump directly to RPe as our starting frequency. 
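 *
 * In outline (matching the code below):
 *
 *	freq = clamp(max(rps->cur_freq, rps->efficient_freq),
 *		     rps->min_freq_softlimit, rps->max_freq_softlimit);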
*/ mutex_lock(&rps->lock); - rps->active = true; + + WRITE_ONCE(rps->active, true); + freq = max(rps->cur_freq, rps->efficient_freq), freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit); intel_rps_set(rps, freq); + rps->last_adj = 0; + mutex_unlock(&rps->lock); if (INTEL_GEN(rps_to_i915(rps)) >= 6) @@ -743,7 +753,7 @@ void intel_rps_park(struct intel_rps *rps) if (INTEL_GEN(i915) >= 6) rps_disable_interrupts(rps); - rps->active = false; + WRITE_ONCE(rps->active, false); if (rps->last_freq <= rps->idle_freq) return; @@ -763,14 +773,27 @@ void intel_rps_park(struct intel_rps *rps) intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); rps_set(rps, rps->idle_freq, false); intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); + + /* + * Since we will try and restart from the previously requested + * frequency on unparking, treat this idle point as a downclock + * interrupt and reduce the frequency for resume. If we park/unpark + * more frequently than the rps worker can run, we will not respond + * to any EI and never see a change in frequency. + * + * (Note we accommodate Cherryview's limitation of only using an + * even bin by applying it to all.) + */ + rps->cur_freq = + max_t(int, round_down(rps->cur_freq - 1, 2), rps->min_freq); } void intel_rps_boost(struct i915_request *rq) { - struct intel_rps *rps = &rq->engine->gt->rps; + struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; unsigned long flags; - if (i915_request_signaled(rq) || !rps->active) + if (i915_request_signaled(rq) || !READ_ONCE(rps->active)) return; /* Serializes with i915_request_retire() */ @@ -1026,7 +1049,8 @@ static bool chv_rps_enable(struct intel_rps *rps) vlv_punit_put(i915); /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, + "GPLL not enabled\n"); DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); @@ -1123,7 +1147,8 @@ static bool vlv_rps_enable(struct intel_rps *rps) vlv_punit_put(i915); /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0, + "GPLL not enabled\n"); DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); @@ -1191,11 +1216,11 @@ void intel_rps_enable(struct intel_rps *rps) if (!rps->enabled) return; - WARN_ON(rps->max_freq < rps->min_freq); - WARN_ON(rps->idle_freq > rps->max_freq); + drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq); + drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq); - WARN_ON(rps->efficient_freq < rps->min_freq); - WARN_ON(rps->efficient_freq > rps->max_freq); + drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq); + drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq); } static void gen6_rps_disable(struct intel_rps *rps) @@ -1390,9 +1415,9 @@ static void chv_rps_init(struct intel_rps *rps) BIT(VLV_IOSF_SB_NC) | BIT(VLV_IOSF_SB_CCK)); - WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | - rps->min_freq) & 1, - "Odd GPU freq values\n"); + drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq | + rps->rp1_freq | rps->min_freq) & 1, + "Odd GPU freq values\n"); } static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) @@ -1451,12 +1476,12 @@ static void rps_work(struct work_struct *work) u32 pm_iir = 0; spin_lock_irq(>->irq_lock); - pm_iir = fetch_and_zero(&rps->pm_iir); + pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events); client_boost = atomic_read(&rps->num_waiters); spin_unlock_irq(>->irq_lock); /* Make sure we didn't queue anything we're not going to process. */ - if ((pm_iir & rps->pm_events) == 0 && !client_boost) + if (!pm_iir && !client_boost) goto out; mutex_lock(&rps->lock); @@ -1552,11 +1577,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) { struct intel_gt *gt = rps_to_gt(rps); + u32 events; - if (pm_iir & rps->pm_events) { + events = pm_iir & READ_ONCE(rps->pm_events); + if (events) { spin_lock(>->irq_lock); - gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events); - rps->pm_iir |= pm_iir & rps->pm_events; + + gen6_gt_pm_mask_irq(gt, events); + rps->pm_iir |= events; + schedule_work(&rps->work); spin_unlock(>->irq_lock); } diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index d8d9f1179c2b..08b56d7ab4f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -312,7 +312,7 @@ int intel_timeline_pin(struct intel_timeline *tl) if (atomic_add_unless(&tl->pin_count, 1, 0)) return 0; - err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); + err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH); if (err) return err; @@ -410,6 +410,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, void *vaddr; int err; + might_lock(&tl->gt->ggtt->vm.mutex); + /* * If there is an outstanding GPU reference to this cacheline, * such as it being sampled by a HW semaphore on another timeline, @@ -435,7 +437,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, goto err_rollback; } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + err = i915_ggtt_pin(vma, 0, PIN_HIGH); if (err) { __idle_hwsp_free(vma->private, cacheline); goto err_rollback; @@ -519,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from, rcu_read_lock(); cl = rcu_dereference(from->hwsp_cacheline); + if (i915_request_completed(from)) /* confirm cacheline is valid */ + goto unlock; if (unlikely(!i915_active_acquire_if_busy(&cl->active))) goto unlock; /* seqno wrapped and completed! 
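 * (If the cacheline is no longer busy, every seqno it carried has
 * been retired and the line may already have been recycled for
 * another timeline.)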
*/ if (unlikely(i915_request_completed(from))) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 6c2f8462e0f3..5176ad1a3976 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -116,17 +116,17 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) } else { wa_ = &wal->list[mid]; - if ((wa->mask & ~wa_->mask) == 0) { - DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", + if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) { + DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n", i915_mmio_reg_offset(wa_->reg), - wa_->mask, wa_->val); + wa_->clr, wa_->set); - wa_->val &= ~wa->mask; + wa_->set &= ~wa->clr; } wal->wa_count++; - wa_->val |= wa->val; - wa_->mask |= wa->mask; + wa_->set |= wa->set; + wa_->clr |= wa->clr; wa_->read |= wa->read; return; } @@ -147,13 +147,13 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) } } -static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, - u32 val, u32 read_mask) +static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, + u32 clear, u32 set, u32 read_mask) { struct i915_wa wa = { .reg = reg, - .mask = mask, - .val = val, + .clr = clear, + .set = set, .read = read_mask, }; @@ -161,38 +161,43 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, } static void -wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, - u32 val) +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set) { - wa_add(wal, reg, mask, val, mask); + wa_add(wal, reg, clear, set, clear); } static void -wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set) { - wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val)); + wa_write_masked_or(wal, reg, ~0, set); } static void -wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set) { - wa_write_masked_or(wal, reg, ~0, val); + wa_write_masked_or(wal, reg, set, set); } static void -wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { - wa_write_masked_or(wal, reg, val, val); + wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val); +} + +static void +wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val) +{ + wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val); } #define WA_SET_BIT_MASKED(addr, mask) \ - wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) + wa_masked_en(wal, (addr), (mask)) #define WA_CLR_BIT_MASKED(addr, mask) \ - wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask)) + wa_masked_dis(wal, (addr), (mask)) #define WA_SET_FIELD_MASKED(addr, mask, value) \ - wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value))) + wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value))) static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) @@ -570,12 +575,29 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* allow headerless messages for preemptible GPGPU context */ WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE, GEN11_SAMPLER_ENABLE_HEADLESS_MSG); + + /* Wa_1604278689:icl,ehl */ + wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID); + wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER, + 0, /* write-only register; skip 
validation */ + 0xFFFFFFFF); + + /* Wa_1406306137:icl,ehl */ + wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); } static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - /* Wa_1409142259:tgl */ + /* + * Wa_1409142259:tgl + * Wa_1409347922:tgl + * Wa_1409252684:tgl + * Wa_1409217633:tgl + * Wa_1409207793:tgl + * Wa_1409178076:tgl + * Wa_1408979724:tgl + */ WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); @@ -588,6 +610,11 @@ static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, */ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, FF_MODE2_TDS_TIMER_128, 0); + + /* WaDisableGPGPUMidThreadPreemption:tgl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } static void @@ -657,7 +684,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq) *cs++ = MI_LOAD_REGISTER_IMM(wal->count); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { *cs++ = i915_mmio_reg_offset(wa->reg); - *cs++ = wa->val; + *cs++ = wa->set; } *cs++ = MI_NOOP; @@ -822,7 +849,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", intel_sseu_get_subslices(sseu, slice), l3_en); subslice = fls(l3_en); - WARN_ON(!subslice); + drm_WARN_ON(&i915->drm, !subslice); } subslice--; @@ -893,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) SLICE_UNIT_LEVEL_CLKGATE, MSCUNIT_CLKGATE_DIS); - /* Wa_1406680159:icl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* Wa_1406838659:icl (pre-prod) */ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) wa_write_or(wal, @@ -926,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); - /* Wa_1409180338:tgl */ + /* Wa_1607087056:tgl also known as BUG:1409180338 */ if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, @@ -986,11 +1008,10 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal) static bool wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) { - if ((cur ^ wa->val) & wa->read) { - DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n", + if ((cur ^ wa->set) & wa->read) { + DRM_ERROR("%s workaround lost on %s!
(%x=%x/%x, expected %x)\n", name, from, i915_mmio_reg_offset(wa->reg), - cur, cur & wa->read, - wa->val, wa->mask); + cur, cur & wa->read, wa->set); return false; } @@ -1015,7 +1036,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal) intel_uncore_forcewake_get__locked(uncore, fw); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { - intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val); + if (wa->clr) + intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set); + else + intel_uncore_write_fw(uncore, wa->reg, wa->set); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) wa_verify(wa, intel_uncore_read_fw(uncore, wa->reg), @@ -1239,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) case RENDER_CLASS: /* * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl + * Wa_1408556865:tgl * * This covers 4 registers which are next to one another : * - PS_INVOCATION_COUNT @@ -1249,6 +1274,12 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) whitelist_reg_ext(w, PS_INVOCATION_COUNT, RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); + + /* Wa_1808121037:tgl */ + whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); + + /* Wa_1806527549:tgl */ + whitelist_reg(w, HIZ_CHICKEN); break; default: break; @@ -1315,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { - /* Wa_1606700617:tgl */ - wa_masked_en(wal, - GEN9_CS_DEBUG_MODE1, - FF_DOP_CLOCK_GATE_DISABLE); - - /* Wa_1607138336:tgl */ + /* + * Wa_1607138336:tgl + * Wa_1607063988:tgl + */ wa_write_or(wal, GEN9_CTX_PREEMPT_REG, GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); - /* Wa_1607030317:tgl */ - /* Wa_1607186500:tgl */ - /* Wa_1607297627:tgl */ + /* + * Wa_1607030317:tgl + * Wa_1607186500:tgl + * Wa_1607297627:tgl there are 3 entries for this WA on BSpec: 2 + * of them say it is fixed on B0, the other one says it is + * permanent + */ wa_masked_en(wal, GEN6_RC_SLEEP_PSMI_CONTROL, GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | @@ -1340,6 +1373,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, GEN7_SARCHKMD, GEN7_DISABLE_SAMPLER_PREFETCH); + + /* Wa_1407928979:tgl */ + wa_write_or(wal, + GEN7_FF_THREAD_MODE, + GEN12_FF_TESSELATION_DOP_GATE_DISABLE); + + /* + * Wa_1409085225:tgl + * Wa_14010229206:tgl + */ + wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); + + /* Wa_1408615072:tgl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + VSUNIT_CLKGATE_DIS_TGL); + } + + if (IS_TIGERLAKE(i915)) { + /* Wa_1606931601:tgl */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ); + + /* Wa_1409804808:tgl */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + GEN12_PUSH_CONST_DEREF_HOLD_DIS); + + /* Wa_1606700617:tgl */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); } if (IS_GEN(i915, 11)) { @@ -1405,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN11_SCRATCH2, GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, 0); + + /* WaEnable32PlaneMode:icl */ + wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, + GEN11_ENABLE_32_PLANE_MODE); + + /* + * Wa_1408615072:icl,ehl (vsunit) + * Wa_1407596294:icl,ehl (hsunit) + */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); + + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, +
SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + + /* + * Wa_1408767742:icl[a2..forever],ehl[all] + * Wa_1605460711:icl[a0..c0] + */ + wa_write_or(wal, + GEN7_FF_THREAD_MODE, + GEN12_FF_TESSELATION_DOP_GATE_DISABLE); } - if (IS_GEN_RANGE(i915, 9, 11)) { - /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */ + if (IS_GEN_RANGE(i915, 9, 12)) { + /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */ wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1, GEN9_FFSC_PERCTX_PREEMPT_CTRL); @@ -1452,6 +1542,52 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_L3SQCREG4, GEN8_LQSC_FLUSH_COHERENT_LINES); } + + if (IS_GEN(i915, 7)) + /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ + wa_masked_en(wal, + GFX_MODE_GEN7, + GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE); + + if (IS_GEN_RANGE(i915, 6, 7)) + /* + * We need to disable the AsyncFlip performance optimisations in + * order to use MI_WAIT_FOR_EVENT within the CS. It should + * already be programmed to '1' on all products. + * + * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv + */ + wa_masked_en(wal, + MI_MODE, + ASYNC_FLIP_PERF_DISABLE); + + if (IS_GEN(i915, 6)) { + /* + * Required for the hardware to program scanline values for + * waiting + * WaEnableFlushTlbInvalidationMode:snb + */ + wa_masked_en(wal, + GFX_MODE, + GFX_TLB_INVALIDATE_EXPLICIT); + + /* + * From the Sandybridge PRM, volume 1 part 3, page 24: + * "If this bit is set, STCunit will have LRA as replacement + * policy. [...] This bit must be reset. LRA replacement + * policy is not supported." + */ + wa_masked_dis(wal, + CACHE_MODE_0, + CM0_STC_EVICT_DISABLE_LRA_SNB); + } + + if (IS_GEN_RANGE(i915, 4, 6)) + /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ + wa_add(wal, MI_MODE, + 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH), + /* XXX bit doesn't stick on Broadwater */ + IS_I965G(i915) ? 
0 : VS_TIMER_DISPATCH); } static void @@ -1470,7 +1606,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) static void engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) + if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4)) return; if (engine->class == RENDER_CLASS) @@ -1483,7 +1619,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; - if (INTEL_GEN(engine->i915) < 8) + if (INTEL_GEN(engine->i915) < 4) return; wa_init_start(wal, "engine", engine->name); @@ -1626,6 +1762,16 @@ static int engine_wa_list_verify(struct intel_context *ce, goto err_vma; } + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, true); + if (err == 0) + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + if (err) { + i915_request_add(rq); + goto err_vma; + } + err = wa_list_srm(rq, wal, vma); if (err) goto err_vma; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h index e27ab1b710b3..d166a7145720 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h @@ -13,8 +13,8 @@ struct i915_wa { i915_reg_t reg; - u32 mask; - u32 val; + u32 clr; + u32 set; u32 read; }; diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c new file mode 100644 index 000000000000..610ca7687735 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + * + * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC + */ + +static const u32 ivb_clear_kernel[] = { + 0x00000001, 0x26020128, 0x00000024, 0x00000000, + 0x00000040, 0x20280c21, 0x00000028, 0x00000001, + 0x01000010, 0x20000c20, 0x0000002c, 0x00000000, + 0x00010220, 0x34001c00, 0x00001400, 0x0000002c, + 0x00600001, 0x20600061, 0x00000000, 0x00000000, + 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c, + 0x00000005, 0x20601ca5, 0x00000060, 0x00000001, + 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d, + 0x00000005, 0x20641ca5, 0x00000064, 0x00000003, + 0x00000041, 0x207424a5, 0x00000064, 0x00000034, + 0x00000040, 0x206014a5, 0x00000060, 0x00000074, + 0x00000008, 0x20681c85, 0x00000e00, 0x00000008, + 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f, + 0x00000041, 0x20701ca5, 0x00000060, 0x00000010, + 0x00000040, 0x206814a5, 0x00000068, 0x00000070, + 0x00600001, 0x20a00061, 0x00000000, 0x00000000, + 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007, + 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004, + 0x00600001, 0x20800021, 0x008d0000, 0x00000000, + 0x00000001, 0x20800021, 0x0000006c, 0x00000000, + 0x00000001, 0x20840021, 0x00000068, 0x00000000, + 0x00000001, 0x20880061, 0x00000000, 0x00000003, + 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff, + 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001, + 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001, + 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001, + 0x02000040, 0x20281c21, 0x00000028, 0xffffffff, + 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc, + 0x00000001, 0x26020128, 0x00000024, 0x00000000, + 0x00000001, 0x220010e4, 0x00000000, 0x00000000, + 0x00000001, 0x220831ec, 0x00000000, 0x007f007f, + 0x00600001, 0x20400021, 0x008d0000, 0x00000000, + 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000, + 0x00200001, 0x20400121, 0x00450020, 0x00000000, + 0x00000001, 
0x20480061, 0x00000000, 0x000f000f, + 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef, + 0x00800001, 0x20600061, 0x00000000, 0x00000000, + 0x00800001, 0x20800061, 0x00000000, 0x00000000, + 0x00800001, 0x20a00061, 0x00000000, 0x00000000, + 0x00800001, 0x20c00061, 0x00000000, 0x00000000, + 0x00800001, 0x20e00061, 0x00000000, 0x00000000, + 0x00800001, 0x21000061, 0x00000000, 0x00000000, + 0x00800001, 0x21200061, 0x00000000, 0x00000000, + 0x00800001, 0x21400061, 0x00000000, 0x00000000, + 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000, + 0x00000040, 0x20402d21, 0x00000020, 0x00100010, + 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000, + 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff, + 0x00800001, 0xa0000109, 0x00000602, 0x00000000, + 0x00000040, 0x22001c84, 0x00000200, 0x00000020, + 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8, + 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010, +}; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index f2806381733f..4a53ded7c2dd 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -65,6 +65,9 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) return NULL; } i915_active_init(&ring->vma->active, NULL, NULL); + __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); + __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); + ring->vma->node.size = sz; intel_ring_update_space(ring); @@ -241,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. */ list_for_each_entry(request, &engine->active.requests, sched.link) { - if (!i915_request_signaled(request)) - dma_fence_set_error(&request->fence, -EIO); - + i915_request_set_error_once(request, -EIO); i915_request_mark_complete(request); } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index 43d4d589749f..697114dd1f47 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -142,6 +142,24 @@ out: return err; } +static void engine_heartbeat_disable(struct intel_engine_cs *engine, + unsigned long *saved) +{ + *saved = engine->props.heartbeat_interval_ms; + engine->props.heartbeat_interval_ms = 0; + + intel_engine_pm_get(engine); + intel_engine_park_heartbeat(engine); +} + +static void engine_heartbeat_enable(struct intel_engine_cs *engine, + unsigned long saved) +{ + intel_engine_pm_put(engine); + + engine->props.heartbeat_interval_ms = saved; +} + static int live_idle_flush(void *arg) { struct intel_gt *gt = arg; @@ -152,9 +170,11 @@ static int live_idle_flush(void *arg) /* Check that we can flush the idle barriers */ for_each_engine(engine, gt, id) { - intel_engine_pm_get(engine); + unsigned long heartbeat; + + engine_heartbeat_disable(engine, &heartbeat); err = __live_idle_pulse(engine, intel_engine_flush_barriers); - intel_engine_pm_put(engine); + engine_heartbeat_enable(engine, heartbeat); if (err) break; } @@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg) /* Check that heartbeat pulses flush the idle barriers */ for_each_engine(engine, gt, id) { - intel_engine_pm_get(engine); + unsigned long heartbeat; + + engine_heartbeat_disable(engine, &heartbeat); err = __live_idle_pulse(engine, intel_engine_pulse); - intel_engine_pm_put(engine); + engine_heartbeat_enable(engine, heartbeat); if (err && err != -ENODEV) break; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c 
b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 3e5e6c86e843..2b2efff6e19d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) cancel_rq: if (err) { - i915_request_skip(rq, err); + i915_request_set_error_once(rq, err); i915_request_add(rq); } unpin_hws: @@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg) if (!intel_has_reset_engine(gt)) return 0; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; igt_global_reset_lock(gt); diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index fd3770e48ac7..a912159693fd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -18,10 +18,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm); - if (!get_ia_constants(llc, &consts)) { - err = -ENODEV; + if (!get_ia_constants(llc, &consts)) goto out_rpm; - } for (gpu_freq = consts.min_gpu_freq; gpu_freq <= consts.max_gpu_freq; @@ -71,10 +69,5 @@ out_rpm: int st_llc_verify(struct intel_llc *llc) { - int err = 0; - - if (HAS_LLC(llc_to_gt(llc)->i915)) - err = gen6_verify_ring_freq(llc); - - return err; + return gen6_verify_ring_freq(llc); } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index b292f8cbd0bf..f95ae15ce865 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -68,6 +68,71 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine, engine->props.heartbeat_interval_ms = saved; } +static int wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) +{ + timeout += jiffies; + do { + cond_resched(); + intel_engine_flush_submission(engine); + + if (READ_ONCE(engine->execlists.pending[0])) + continue; + + if (i915_request_is_active(rq)) + return 0; + + if (i915_request_started(rq)) /* that was quick!
*/ + return 0; + } while (time_before(jiffies, timeout)); + + return -ETIME; +} + +static int wait_for_reset(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) +{ + timeout += jiffies; + + do { + cond_resched(); + intel_engine_flush_submission(engine); + + if (READ_ONCE(engine->execlists.pending[0])) + continue; + + if (i915_request_completed(rq)) + break; + + if (READ_ONCE(rq->fence.error)) + break; + } while (time_before(jiffies, timeout)); + + flush_scheduled_work(); + + if (rq->fence.error != -EIO) { + pr_err("%s: hanging request %llx:%lld not reset\n", + engine->name, + rq->fence.context, + rq->fence.seqno); + return -EINVAL; + } + + /* Give the request a jiffie to complete after flushing the worker */ + if (i915_request_wait(rq, 0, + max(0l, (long)(timeout - jiffies)) + 1) < 0) { + pr_err("%s: hanging request %llx:%lld did not complete\n", + engine->name, + rq->fence.context, + rq->fence.seqno); + return -ETIME; + } + + return 0; +} + static int live_sanitycheck(void *arg) { struct intel_gt *gt = arg; @@ -285,6 +350,84 @@ static int live_unlite_preempt(void *arg) return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX)); } +static int live_pin_rewind(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * We have to be careful not to trust intel_ring too much, for example + * ring->head is updated upon retire which is out of sync with pinning + * the context. Thus we cannot use ring->head to set CTX_RING_HEAD, + * or else we risk writing an older, stale value. + * + * To simulate this, let's apply a bit of deliberate sabotage. + */ + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + struct intel_ring *ring; + struct igt_live_test t; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + break; + } + + /* Keep the context awake while we play games */ + err = i915_active_acquire(&ce->active); + if (err) { + intel_context_unpin(ce); + intel_context_put(ce); + break; + } + ring = ce->ring; + + /* Poison the ring, and offset the next request from HEAD */ + memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32)); + ring->emit = ring->size / 2; + ring->tail = ring->emit; + GEM_BUG_ON(ring->head); + + intel_context_unpin(ce); + + /* Submit a simple nop request */ + GEM_BUG_ON(intel_context_is_pinned(ce)); + rq = intel_context_create_request(ce); + i915_active_release(&ce->active); /* e.g. async retire */ + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + GEM_BUG_ON(!rq->head); + i915_request_add(rq); + + /* Expect not to hang! */ + if (igt_live_test_end(&t)) { + err = -EIO; + break; + } + } + + return err; +} + static int live_hold_reset(void *arg) { struct intel_gt *gt = arg; @@ -386,6 +529,152 @@ out: return err; } +static const char *error_repr(int err) +{ + return err ?
"bad" : "good"; +} + +static int live_error_interrupt(void *arg) +{ + static const struct error_phase { + enum { GOOD = 0, BAD = -EIO } error[2]; + } phases[] = { + { { BAD, GOOD } }, + { { BAD, BAD } }, + { { BAD, GOOD } }, + { { GOOD, GOOD } }, /* sentinel */ + }; + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning + * of invalid commands in user batches that will cause a GPU hang. + * This is a faster mechanism than using hangcheck/heartbeats, but + * only detects problems the HW knows about -- it will not warn when + * we kill the HW! + * + * To verify our detection and reset, we throw some invalid commands + * at the HW and wait for the interrupt. + */ + + if (!intel_has_reset_engine(gt)) + return 0; + + for_each_engine(engine, gt, id) { + const struct error_phase *p; + unsigned long heartbeat; + int err = 0; + + engine_heartbeat_disable(engine, &heartbeat); + + for (p = phases; p->error[0] != GOOD; p++) { + struct i915_request *client[ARRAY_SIZE(phases->error)]; + u32 *cs; + int i; + + memset(client, 0, sizeof(*client)); + for (i = 0; i < ARRAY_SIZE(client); i++) { + struct intel_context *ce; + struct i915_request *rq; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + rq = intel_context_create_request(ce); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) { + i915_request_add(rq); + goto out; + } + } + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto out; + } + + if (p->error[i]) { + *cs++ = 0xdeadbeef; + *cs++ = 0xdeadbeef; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + client[i] = i915_request_get(rq); + i915_request_add(rq); + } + + err = wait_for_submit(engine, client[0], HZ / 2); + if (err) { + pr_err("%s: first request did not start within time!\n", + engine->name); + err = -ETIME; + goto out; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) { + if (i915_request_wait(client[i], 0, HZ / 5) < 0) + pr_debug("%s: %s request incomplete!\n", + engine->name, + error_repr(p->error[i])); + + if (!i915_request_started(client[i])) { + pr_debug("%s: %s request not stated!\n", + engine->name, + error_repr(p->error[i])); + err = -ETIME; + goto out; + } + + /* Kick the tasklet to process the error */ + intel_engine_flush_submission(engine); + if (client[i]->fence.error != p->error[i]) { + pr_err("%s: %s request completed with wrong error code: %d\n", + engine->name, + error_repr(p->error[i]), + client[i]->fence.error); + err = -EINVAL; + goto out; + } + } + +out: + for (i = 0; i < ARRAY_SIZE(client); i++) + if (client[i]) + i915_request_put(client[i]); + if (err) { + pr_err("%s: failed at phase[%zd] { %d, %d }\n", + engine->name, p - phases, + p->error[0], p->error[1]); + break; + } + } + + engine_heartbeat_enable(engine, heartbeat); + if (err) { + intel_gt_set_wedged(gt); + return err; + } + } + + return 0; +} + static int emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) { @@ -580,6 +869,10 @@ static int live_timeslice_preempt(void *arg) if (err) goto err_map; + err = i915_vma_sync(vma); + if (err) + goto err_pin; + for_each_prime_number_from(count, 1, 16) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -614,33 +907,229 @@ err_obj: return err; } -static struct i915_request *nop_request(struct 
intel_engine_cs *engine) +static struct i915_request * +create_rewinder(struct intel_context *ce, + struct i915_request *wait, + void *slot, int idx) { + const u32 offset = + i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(slot); struct i915_request *rq; + u32 *cs; + int err; - rq = intel_engine_create_kernel_request(engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) return rq; + if (wait) { + err = i915_request_await_dma_fence(rq, &wait->fence); + if (err) + goto err; + } + + cs = intel_ring_begin(rq, 14); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = idx; + *cs++ = offset; + *cs++ = 0; + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); + *cs++ = offset + idx * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset; + *cs++ = 0; + *cs++ = idx + 1; + + intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_MASK; + err = 0; +err: i915_request_get(rq); i915_request_add(rq); + if (err) { + i915_request_put(rq); + return ERR_PTR(err); + } return rq; } -static int wait_for_submit(struct intel_engine_cs *engine, - struct i915_request *rq, - unsigned long timeout) +static int live_timeslice_rewind(void *arg) { - timeout += jiffies; - do { - cond_resched(); - intel_engine_flush_submission(engine); - if (i915_request_is_active(rq)) - return 0; - } while (time_before(jiffies, timeout)); + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; - return -ETIME; + /* + * The usual presumption on timeslice expiration is that we replace + * the active context with another. However, given a chain of + * dependencies we may end up with replacing the context with itself, + * but only a few of those requests, forcing us to rewind the + * RING_TAIL of the original request. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + for_each_engine(engine, gt, id) { + enum { A1, A2, B1 }; + enum { X = 1, Z, Y }; + struct i915_request *rq[3] = {}; + struct intel_context *ce; + unsigned long heartbeat; + unsigned long timeslice; + int i, err = 0; + u32 *slot; + + if (!intel_engine_has_timeslices(engine)) + continue; + + /* + * A:rq1 -- semaphore wait, timestamp X + * A:rq2 -- write timestamp Y + * + * B:rq1 [await A:rq1] -- write timestamp Z + * + * Force timeslice, release semaphore. 
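
The XZY expectation being set up here boils down to wrap-safe arithmetic on free-running u32 timestamps; the test's final check phrases it as slot[Z] - slot[X] < slot[Y] - slot[X]. A minimal standalone sketch of that comparison, with invented timestamp samples chosen to straddle a counter wrap:

#include <assert.h>
#include <stdint.h>

/* Wrap-safe "a happened no later than b" on a free-running u32 counter. */
static int ts_before_eq(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) >= 0;
}

int main(void)
{
	/* Invented RING_TIMESTAMP samples for X (A:rq1), Z (B:rq1) and
	 * Y (A:rq2), chosen so the counter wraps between X and Z.
	 */
	uint32_t x = 0xfffffff0u, z = 0x00000020u, y = 0x00000200u;

	/* Execution order must be X, then Z, then Y ... */
	assert(ts_before_eq(x, z) && ts_before_eq(z, y));
	/* ... which is the condition the selftest phrases as XZ < XY. */
	assert(z - x < y - x);
	return 0;
}

Casting the u32 difference to s32 is what keeps the ordering test immune to the counter wrapping between samples.
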
+ * + * Expect execution/evaluation order XZY + */ + + engine_heartbeat_disable(engine, &heartbeat); + timeslice = xchg(&engine->props.timeslice_duration_ms, 1); + + slot = memset32(engine->status_page.addr + 1000, 0, 4); + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err; + } + + rq[0] = create_rewinder(ce, NULL, slot, X); + if (IS_ERR(rq[0])) { + intel_context_put(ce); + goto err; + } + + rq[1] = create_rewinder(ce, NULL, slot, Y); + intel_context_put(ce); + if (IS_ERR(rq[1])) + goto err; + + err = wait_for_submit(engine, rq[1], HZ / 2); + if (err) { + pr_err("%s: failed to submit first context\n", + engine->name); + goto err; + } + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err; + } + + rq[2] = create_rewinder(ce, rq[0], slot, Z); + intel_context_put(ce); + if (IS_ERR(rq[2])) + goto err; + + err = wait_for_submit(engine, rq[2], HZ / 2); + if (err) { + pr_err("%s: failed to submit second context\n", + engine->name); + goto err; + } + GEM_BUG_ON(!timer_pending(&engine->execlists.timer)); + + /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ + if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */ + /* Wait for the timeslice to kick in */ + del_timer(&engine->execlists.timer); + tasklet_hi_schedule(&engine->execlists.tasklet); + intel_engine_flush_submission(engine); + } + /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ + GEM_BUG_ON(!i915_request_is_active(rq[A1])); + GEM_BUG_ON(!i915_request_is_active(rq[B1])); + GEM_BUG_ON(i915_request_is_active(rq[A2])); + + /* Release the hounds! */ + slot[0] = 1; + wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */ + + for (i = 1; i <= 3; i++) { + unsigned long timeout = jiffies + HZ / 2; + + while (!READ_ONCE(slot[i]) && + time_before(jiffies, timeout)) + ; + + if (!time_before(jiffies, timeout)) { + pr_err("%s: rq[%d] timed out\n", + engine->name, i - 1); + err = -ETIME; + goto err; + } + + pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]); + } + + /* XZY: XZ < XY */ + if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { + pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n", + engine->name, + slot[Z] - slot[X], + slot[Y] - slot[X]); + err = -EINVAL; + } + +err: + memset32(&slot[0], -1, 4); + wmb(); + + engine->props.timeslice_duration_ms = timeslice; + engine_heartbeat_enable(engine, heartbeat); + for (i = 0; i < 3; i++) + i915_request_put(rq[i]); + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static struct i915_request *nop_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + + return rq; } static long timeslice_threshold(const struct intel_engine_cs *engine) @@ -688,6 +1177,10 @@ static int live_timeslice_queue(void *arg) if (err) goto err_map; + err = i915_vma_sync(vma); + if (err) + goto err_pin; + for_each_engine(engine, gt, id) { struct i915_sched_attr attr = { .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), @@ -774,6 +1267,7 @@ err_heartbeat: break; } +err_pin: i915_vma_unpin(vma); err_map: i915_gem_object_unpin_map(obj); @@ -832,6 +1326,10 @@ static int live_busywait_preempt(void *arg) if (err) goto err_map; + err = i915_vma_sync(vma); + if (err) + goto err_vma; + for_each_engine(engine, gt, id) { struct i915_request *lo, *hi; struct igt_live_test t; @@ -1352,14 +1850,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg) 
if (err) goto out; - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - err = -EIO; - goto out; - } - - if (rq->fence.error != -EIO) { - pr_err("Cancelled inflight0 request did not report -EIO\n"); - err = -EINVAL; + err = wait_for_reset(arg->engine, rq, HZ / 2); + if (err) { + pr_err("Cancelled inflight0 request did not reset\n"); goto out; } @@ -1417,10 +1910,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg) goto out; igt_spinner_end(&arg->a.spin); - if (i915_request_wait(rq[1], 0, HZ / 5) < 0) { - err = -EIO; + err = wait_for_reset(arg->engine, rq[1], HZ / 2); + if (err) goto out; - } if (rq[0]->fence.error != 0) { pr_err("Normal inflight0 request did not complete\n"); @@ -1500,10 +1992,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg) if (err) goto out; - if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { - err = -EIO; + err = wait_for_reset(arg->engine, rq[2], HZ / 2); + if (err) goto out; - } if (rq[0]->fence.error != -EIO) { pr_err("Cancelled inflight0 request did not report -EIO\n"); @@ -1561,14 +2052,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg) if (err) goto out; - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - err = -EIO; - goto out; - } - - if (rq->fence.error != -EIO) { - pr_err("Cancelled inflight0 request did not report -EIO\n"); - err = -EINVAL; + err = wait_for_reset(arg->engine, rq, HZ / 2); + if (err) { + pr_err("Cancelled inflight0 request did not reset\n"); goto out; } @@ -1656,7 +2142,7 @@ static int live_suppress_self_preempt(void *arg) if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; /* presume black box */ - if (intel_vgpu_active(gt->i915)) @@ -2279,117 +2765,6 @@ static int live_preempt_gang(void *arg) return 0; } -static int live_preempt_hang(void *arg) -{ - struct intel_gt *gt = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (!intel_has_reset_engine(gt)) - return 0; - - if (igt_spinner_init(&spin_hi, gt)) - return -ENOMEM; - - if (igt_spinner_init(&spin_lo, gt)) - goto err_spin_hi; - - ctx_hi = kernel_context(gt->i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(gt->i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, gt, id) { - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_ctx_lo; - } - - rq = spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - init_completion(&engine->execlists.preempt_hang.completion); - engine->execlists.preempt_hang.inject_hang = true; - - i915_request_add(rq); - - if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, - HZ / 10)) { - pr_err("Preemption did not occur within timeout!"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err =
-EIO; - goto err_ctx_lo; - } - - set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); - intel_engine_reset(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); - - engine->execlists.preempt_hang.inject_hang = false; - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - if (igt_flush_test(gt->i915)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); - return err; -} - static int live_preempt_timeout(void *arg) { struct intel_gt *gt = arg; @@ -2882,7 +3257,7 @@ static int live_virtual_engine(void *arg) unsigned int class, inst; int err; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; for_each_engine(engine, gt, id) { @@ -3015,7 +3390,7 @@ static int live_virtual_mask(void *arg) unsigned int class, inst; int err; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; for (class = 0; class <= MAX_ENGINE_CLASS; class++) { @@ -3055,6 +3430,10 @@ static int preserved_virtual_engine(struct intel_gt *gt, if (IS_ERR(scratch)) return PTR_ERR(scratch); + err = i915_vma_sync(scratch); + if (err) + goto out_scratch; + ve = intel_execlists_create_virtual(siblings, nsibling); if (IS_ERR(ve)) { err = PTR_ERR(ve); @@ -3153,7 +3532,7 @@ static int live_virtual_preserved(void *arg) * are preserved. */ - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; /* As we use CS_GPR we cannot run before they existed on all engines. */ @@ -3243,15 +3622,21 @@ static int bond_virtual_engine(struct intel_gt *gt, rq[0] = ERR_PTR(-ENOMEM); for_each_engine(master, gt, id) { struct i915_sw_fence fence = {}; + struct intel_context *ce; if (master->class == class) continue; + ce = intel_context_create(master); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); - rq[0] = igt_spinner_create_request(&spin, - master->kernel_context, - MI_NOOP); + rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP); + intel_context_put(ce); if (IS_ERR(rq[0])) { err = PTR_ERR(rq[0]); goto out; @@ -3377,7 +3762,7 @@ static int live_virtual_bond(void *arg) unsigned int class, inst; int err; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; for (class = 0; class <= MAX_ENGINE_CLASS; class++) { @@ -3538,7 +3923,7 @@ static int live_virtual_reset(void *arg) * forgotten.
*/ - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; if (!intel_has_reset_engine(gt)) @@ -3571,8 +3956,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sanitycheck), SUBTEST(live_unlite_switch), SUBTEST(live_unlite_preempt), + SUBTEST(live_pin_rewind), SUBTEST(live_hold_reset), + SUBTEST(live_error_interrupt), SUBTEST(live_timeslice_preempt), + SUBTEST(live_timeslice_rewind), SUBTEST(live_timeslice_queue), SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), @@ -3583,7 +3971,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), SUBTEST(live_preempt_gang), - SUBTEST(live_preempt_hang), SUBTEST(live_preempt_timeout), SUBTEST(live_preempt_smoke), SUBTEST(live_virtual_engine), @@ -3631,6 +4018,62 @@ static void hexdump(const void *buf, size_t len) } } +static int emit_semaphore_signal(struct intel_context *ce, void *slot) +{ + const u32 offset = + i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(slot); + struct i915_request *rq; + u32 *cs; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset; + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_add(rq); + return 0; +} + +static int context_flush(struct intel_context *ce, long timeout) +{ + struct i915_request *rq; + struct dma_fence *fence; + int err = 0; + + rq = intel_engine_create_kernel_request(ce->engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + fence = i915_active_fence_get(&ce->timeline->last_request); + if (fence) { + i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } + + rq = i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, timeout) < 0) + err = -ETIME; + i915_request_put(rq); + + rmb(); /* We know the request is written, make sure all state is too!
*/ + return err; +} + static int live_lrc_layout(void *arg) { struct intel_gt *gt = arg; @@ -3797,6 +4240,11 @@ static int live_lrc_fixed(void *arg) CTX_BB_STATE - 1, "BB_STATE" }, + { + i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)), + CTX_TIMESTAMP - 1, + "RING_CTX_TIMESTAMP" + }, { }, }, *t; u32 *hw; @@ -3880,8 +4328,16 @@ static int __live_lrc_state(struct intel_engine_cs *engine, *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); *cs++ = 0; + i915_vma_lock(scratch); + err = i915_request_await_object(rq, scratch->obj, true); + if (!err) + err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(scratch); + i915_request_get(rq); i915_request_add(rq); + if (err) + goto err_rq; intel_engine_flush_submission(engine); expected[RING_TAIL_IDX] = ce->ring->tail; @@ -3947,13 +4403,13 @@ static int live_lrc_state(void *arg) return err; } -static int gpr_make_dirty(struct intel_engine_cs *engine) +static int gpr_make_dirty(struct intel_context *ce) { struct i915_request *rq; u32 *cs; int n; - rq = intel_engine_create_kernel_request(engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -3965,20 +4421,79 @@ static int gpr_make_dirty(struct intel_engine_cs *engine) *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW); for (n = 0; n < NUM_GPR_DW; n++) { - *cs++ = CS_GPR(engine, n); + *cs++ = CS_GPR(ce->engine, n); *cs++ = STACK_MAGIC; } *cs++ = MI_NOOP; intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; i915_request_add(rq); return 0; } -static int __live_gpr_clear(struct intel_engine_cs *engine, - struct i915_vma *scratch) +static struct i915_request * +__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot) { + const u32 offset = + i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(slot); + struct i915_request *rq; + u32 *cs; + int err; + int n; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW); + if (IS_ERR(cs)) { + i915_request_add(rq); + return ERR_CAST(cs); + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = offset; + *cs++ = 0; + + for (n = 0; n < NUM_GPR_DW; n++) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = CS_GPR(ce->engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + } + + i915_vma_lock(scratch); + err = i915_request_await_object(rq, scratch->obj, true); + if (!err) + err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(scratch); + + i915_request_get(rq); + i915_request_add(rq); + if (err) { + i915_request_put(rq); + rq = ERR_PTR(err); + } + + return rq; +} + +static int __live_lrc_gpr(struct intel_engine_cs *engine, + struct i915_vma *scratch, + bool preempt) +{ + u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4); struct intel_context *ce; struct i915_request *rq; u32 *cs; @@ -3988,7 +4503,7 @@ static int __live_gpr_clear(struct intel_engine_cs *engine, if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS) return 0; /* GPR only on rcs0 for gen8 */ - err = gpr_make_dirty(engine); + err = gpr_make_dirty(engine->kernel_context); if (err) return err; @@ -3996,28 +4511,28 @@ static int __live_gpr_clear(struct intel_engine_cs *engine, if (IS_ERR(ce)) return PTR_ERR(ce); - rq = intel_context_create_request(ce); + rq = __gpr_read(ce, scratch, slot); 
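
__gpr_read parks its register reads behind an MI_SEMAPHORE_WAIT (SAD_NEQ_SDD) polling a status-page dword, so the CPU decides exactly when the SRM loop runs by writing the slot and issuing a barrier. The same gating idiom, modeled with C11 atomics standing in for the GGTT slot and the GPU's poll loop (a sketch, not driver code):

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t slot;           /* stands in for the status-page dword */
static uint32_t gpr[16], snapshot[16];  /* stands in for the CS_GPR registers */

/* MI_SEMAPHORE_SAD_NEQ_SDD: poll until the slot is no longer zero. */
static void *gpu_side(void *arg)
{
	int n;

	(void)arg;
	while (atomic_load_explicit(&slot, memory_order_acquire) == 0)
		;
	for (n = 0; n < 16; n++)	/* the SRM loop in __gpr_read */
		snapshot[n] = gpr[n];
	return NULL;
}

int main(void)
{
	pthread_t t;
	int n;

	for (n = 0; n < 16; n++)	/* gpr_make_dirty */
		gpr[n] = 0xdeadbeef;

	pthread_create(&t, NULL, gpu_side, NULL);

	/* "slot[0] = 1; wmb();" -- release the semaphore. */
	atomic_store_explicit(&slot, 1, memory_order_release);

	pthread_join(&t, NULL);
	for (n = 0; n < 16; n++)
		assert(snapshot[n] == 0xdeadbeef);
	return 0;
}

The release store plays the role of slot[0] = 1 followed by wmb(): the reader cannot observe the slot change without also observing the values written before it.
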
if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_put; } - cs = intel_ring_begin(rq, 4 * NUM_GPR_DW); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(rq); - goto err_put; - } + err = wait_for_submit(engine, rq, HZ / 2); + if (err) + goto err_rq; - for (n = 0; n < NUM_GPR_DW; n++) { - *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; - *cs++ = CS_GPR(engine, n); - *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); - *cs++ = 0; - } + if (preempt) { + err = gpr_make_dirty(engine->kernel_context); + if (err) + goto err_rq; - i915_request_get(rq); - i915_request_add(rq); + err = emit_semaphore_signal(engine->kernel_context, slot); + if (err) + goto err_rq; + } else { + slot[0] = 1; + wmb(); + } if (i915_request_wait(rq, 0, HZ / 5) < 0) { err = -ETIME; @@ -4044,13 +4559,15 @@ static int __live_gpr_clear(struct intel_engine_cs *engine, i915_gem_object_unpin_map(scratch->obj); err_rq: + memset32(&slot[0], -1, 4); + wmb(); i915_request_put(rq); err_put: intel_context_put(ce); return err; } -static int live_gpr_clear(void *arg) +static int live_lrc_gpr(void *arg) { struct intel_gt *gt = arg; struct intel_engine_cs *engine; @@ -4068,7 +4585,971 @@ static int live_gpr_clear(void *arg) return PTR_ERR(scratch); for_each_engine(engine, gt, id) { - err = __live_gpr_clear(engine, scratch); + unsigned long heartbeat; + + engine_heartbeat_disable(engine, &heartbeat); + + err = __live_lrc_gpr(engine, scratch, false); + if (err) + goto err; + + err = __live_lrc_gpr(engine, scratch, true); + if (err) + goto err; + +err: + engine_heartbeat_enable(engine, heartbeat); + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + break; + } + + i915_vma_unpin_and_release(&scratch, 0); + return err; +} + +static struct i915_request * +create_timestamp(struct intel_context *ce, void *slot, int idx) +{ + const u32 offset = + i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(slot); + struct i915_request *rq; + u32 *cs; + int err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + cs = intel_ring_begin(rq, 10); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = offset; + *cs++ = 0; + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base)); + *cs++ = offset + idx * sizeof(u32); + *cs++ = 0; + + intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_MASK; + err = 0; +err: + i915_request_get(rq); + i915_request_add(rq); + if (err) { + i915_request_put(rq); + return ERR_PTR(err); + } + + return rq; +} + +struct lrc_timestamp { + struct intel_engine_cs *engine; + struct intel_context *ce[2]; + u32 poison; +}; + +static bool timestamp_advanced(u32 start, u32 end) +{ + return (s32)(end - start) > 0; +} + +static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt) +{ + u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4); + struct i915_request *rq; + u32 timestamp; + int err = 0; + + arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison; + rq = create_timestamp(arg->ce[0], slot, 1); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + err = wait_for_submit(rq->engine, rq, HZ / 2); + if (err) + goto err; + + if (preempt) { + arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef; + err = emit_semaphore_signal(arg->ce[1], slot); + if (err) + goto err; + } else { + slot[0] = 1; + 
wmb(); + } + + /* And wait for switch to kernel (to save our context to memory) */ + err = context_flush(arg->ce[0], HZ / 2); + if (err) + goto err; + + if (!timestamp_advanced(arg->poison, slot[1])) { + pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n", + arg->engine->name, preempt ? "preempt" : "simple", + arg->poison, slot[1]); + err = -EINVAL; + } + + timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]); + if (!timestamp_advanced(slot[1], timestamp)) { + pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n", + arg->engine->name, preempt ? "preempt" : "simple", + slot[1], timestamp); + err = -EINVAL; + } + +err: + memset32(slot, -1, 4); + i915_request_put(rq); + return err; +} + +static int live_lrc_timestamp(void *arg) +{ + struct lrc_timestamp data = {}; + struct intel_gt *gt = arg; + enum intel_engine_id id; + const u32 poison[] = { + 0, + S32_MAX, + (u32)S32_MAX + 1, + U32_MAX, + }; + + /* + * We want to verify that the timestamp is saved and restored across + * context switches and is monotonic. + * + * So we do this with a little bit of LRC poisoning to check various + * boundary conditions, and see what happens if we preempt the context + * with a second request (carrying more poison into the timestamp). + */ + + for_each_engine(data.engine, gt, id) { + unsigned long heartbeat; + int i, err = 0; + + engine_heartbeat_disable(data.engine, &heartbeat); + + for (i = 0; i < ARRAY_SIZE(data.ce); i++) { + struct intel_context *tmp; + + tmp = intel_context_create(data.engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err; + } + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err; + } + + data.ce[i] = tmp; + } + + for (i = 0; i < ARRAY_SIZE(poison); i++) { + data.poison = poison[i]; + + err = __lrc_timestamp(&data, false); + if (err) + break; + + err = __lrc_timestamp(&data, true); + if (err) + break; + } + +err: + engine_heartbeat_enable(data.engine, heartbeat); + for (i = 0; i < ARRAY_SIZE(data.ce); i++) { + if (!data.ce[i]) + break; + + intel_context_unpin(data.ce[i]); + intel_context_put(data.ce[i]); + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static struct i915_vma * +create_user_vma(struct i915_address_space *vm, unsigned long size) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(vm->i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + return vma; +} + +static struct i915_vma * +store_context(struct intel_context *ce, struct i915_vma *scratch) +{ + struct i915_vma *batch; + u32 dw, x, *cs, *hw; + + batch = create_user_vma(ce->vm, SZ_64K); + if (IS_ERR(batch)) + return batch; + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + i915_vma_put(batch); + return ERR_CAST(cs); + } + + x = 0; + dw = 0; + hw = ce->engine->pinned_default_state; + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + do { + u32 len = hw[dw] & 0x7f; + + if (hw[dw] == 0) { + dw++; + continue; + } + + if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + dw += len + 2; + continue; + } + + dw++; + len = (len + 1) / 2; + while (len--) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = hw[dw]; + *cs++ = lower_32_bits(scratch->node.start + x); + *cs++ =
upper_32_bits(scratch->node.start + x); + + dw += 2; + x += 4; + } + } while (dw < PAGE_SIZE / sizeof(u32) && + (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + i915_gem_object_unpin_map(batch->obj); + + return batch; +} + +static int move_to_active(struct i915_request *rq, + struct i915_vma *vma, + unsigned int flags) +{ + int err; + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, flags); + if (!err) + err = i915_vma_move_to_active(vma, rq, flags); + i915_vma_unlock(vma); + + return err; +} + +static struct i915_request * +record_registers(struct intel_context *ce, + struct i915_vma *before, + struct i915_vma *after, + u32 *sema) +{ + struct i915_vma *b_before, *b_after; + struct i915_request *rq; + u32 *cs; + int err; + + b_before = store_context(ce, before); + if (IS_ERR(b_before)) + return ERR_CAST(b_before); + + b_after = store_context(ce, after); + if (IS_ERR(b_after)) { + rq = ERR_CAST(b_after); + goto err_before; + } + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + goto err_after; + + err = move_to_active(rq, before, EXEC_OBJECT_WRITE); + if (err) + goto err_rq; + + err = move_to_active(rq, b_before, 0); + if (err) + goto err_rq; + + err = move_to_active(rq, after, EXEC_OBJECT_WRITE); + if (err) + goto err_rq; + + err = move_to_active(rq, b_after, 0); + if (err) + goto err_rq; + + cs = intel_ring_begin(rq, 14); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); + *cs++ = lower_32_bits(b_before->node.start); + *cs++ = upper_32_bits(b_before->node.start); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(sema); + *cs++ = 0; + *cs++ = MI_NOOP; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); + *cs++ = lower_32_bits(b_after->node.start); + *cs++ = upper_32_bits(b_after->node.start); + + intel_ring_advance(rq, cs); + + WRITE_ONCE(*sema, 0); + i915_request_get(rq); + i915_request_add(rq); +err_after: + i915_vma_put(b_after); +err_before: + i915_vma_put(b_before); + return rq; + +err_rq: + i915_request_add(rq); + rq = ERR_PTR(err); + goto err_after; +} + +static struct i915_vma *load_context(struct intel_context *ce, u32 poison) +{ + struct i915_vma *batch; + u32 dw, *cs, *hw; + + batch = create_user_vma(ce->vm, SZ_64K); + if (IS_ERR(batch)) + return batch; + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + i915_vma_put(batch); + return ERR_CAST(cs); + } + + dw = 0; + hw = ce->engine->pinned_default_state; + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + do { + u32 len = hw[dw] & 0x7f; + + if (hw[dw] == 0) { + dw++; + continue; + } + + if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + dw += len + 2; + continue; + } + + dw++; + len = (len + 1) / 2; + *cs++ = MI_LOAD_REGISTER_IMM(len); + while (len--) { + *cs++ = hw[dw]; + *cs++ = poison; + dw += 2; + } + } while (dw < PAGE_SIZE / sizeof(u32) && + (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + i915_gem_object_unpin_map(batch->obj); + + return batch; +} + +static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema) +{ + struct i915_request *rq; + struct i915_vma *batch; + u32 *cs; + 
int err; + + batch = load_context(ce, poison); + if (IS_ERR(batch)) + return PTR_ERR(batch); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_batch; + } + + err = move_to_active(rq, batch, 0); + if (err) + goto err_rq; + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); + *cs++ = lower_32_bits(batch->node.start); + *cs++ = upper_32_bits(batch->node.start); + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(sema); + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; +err_rq: + i915_request_add(rq); +err_batch: + i915_vma_put(batch); + return err; +} + +static bool is_moving(u32 a, u32 b) +{ + return a != b; +} + +static int compare_isolation(struct intel_engine_cs *engine, + struct i915_vma *ref[2], + struct i915_vma *result[2], + struct intel_context *ce, + u32 poison) +{ + u32 x, dw, *hw, *lrc; + u32 *A[2], *B[2]; + int err = 0; + + A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC); + if (IS_ERR(A[0])) + return PTR_ERR(A[0]); + + A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC); + if (IS_ERR(A[1])) { + err = PTR_ERR(A[1]); + goto err_A0; + } + + B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC); + if (IS_ERR(B[0])) { + err = PTR_ERR(B[0]); + goto err_A1; + } + + B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC); + if (IS_ERR(B[1])) { + err = PTR_ERR(B[1]); + goto err_B0; + } + + lrc = i915_gem_object_pin_map(ce->state->obj, + i915_coherent_map_type(engine->i915)); + if (IS_ERR(lrc)) { + err = PTR_ERR(lrc); + goto err_B1; + } + lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + x = 0; + dw = 0; + hw = engine->pinned_default_state; + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + do { + u32 len = hw[dw] & 0x7f; + + if (hw[dw] == 0) { + dw++; + continue; + } + + if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + dw += len + 2; + continue; + } + + dw++; + len = (len + 1) / 2; + while (len--) { + if (!is_moving(A[0][x], A[1][x]) && + (A[0][x] != B[0][x] || A[1][x] != B[1][x])) { + switch (hw[dw] & 4095) { + case 0x30: /* RING_HEAD */ + case 0x34: /* RING_TAIL */ + break; + + default: + pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n", + engine->name, dw, + hw[dw], hw[dw + 1], + A[0][x], B[0][x], B[1][x], + poison, lrc[dw + 1]); + err = -EINVAL; + break; + } + } + dw += 2; + x++; + } + } while (dw < PAGE_SIZE / sizeof(u32) && + (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + i915_gem_object_unpin_map(ce->state->obj); +err_B1: + i915_gem_object_unpin_map(result[1]->obj); +err_B0: + i915_gem_object_unpin_map(result[0]->obj); +err_A1: + i915_gem_object_unpin_map(ref[1]->obj); +err_A0: + i915_gem_object_unpin_map(ref[0]->obj); + return err; +} + +static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) +{ + u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); + struct i915_vma *ref[2], *result[2]; + struct intel_context *A, *B; + struct i915_request *rq; + int err; + + A = intel_context_create(engine); + if (IS_ERR(A)) + return PTR_ERR(A); + + B = intel_context_create(engine); + if (IS_ERR(B)) { + err = PTR_ERR(B); + goto err_A; + } + + ref[0] = create_user_vma(A->vm, SZ_64K); + if (IS_ERR(ref[0])) { + err = PTR_ERR(ref[0]); + goto err_B; + } + + ref[1] = 
create_user_vma(A->vm, SZ_64K); + if (IS_ERR(ref[1])) { + err = PTR_ERR(ref[1]); + goto err_ref0; + } + + rq = record_registers(A, ref[0], ref[1], sema); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ref1; + } + + WRITE_ONCE(*sema, 1); + wmb(); + + if (i915_request_wait(rq, 0, HZ / 2) < 0) { + i915_request_put(rq); + err = -ETIME; + goto err_ref1; + } + i915_request_put(rq); + + result[0] = create_user_vma(A->vm, SZ_64K); + if (IS_ERR(result[0])) { + err = PTR_ERR(result[0]); + goto err_ref1; + } + + result[1] = create_user_vma(A->vm, SZ_64K); + if (IS_ERR(result[1])) { + err = PTR_ERR(result[1]); + goto err_result0; + } + + rq = record_registers(A, result[0], result[1], sema); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_result1; + } + + err = poison_registers(B, poison, sema); + if (err) { + WRITE_ONCE(*sema, -1); + i915_request_put(rq); + goto err_result1; + } + + if (i915_request_wait(rq, 0, HZ / 2) < 0) { + i915_request_put(rq); + err = -ETIME; + goto err_result1; + } + i915_request_put(rq); + + err = compare_isolation(engine, ref, result, A, poison); + +err_result1: + i915_vma_put(result[1]); +err_result0: + i915_vma_put(result[0]); +err_ref1: + i915_vma_put(ref[1]); +err_ref0: + i915_vma_put(ref[0]); +err_B: + intel_context_put(B); +err_A: + intel_context_put(A); + return err; +} + +static bool skip_isolation(const struct intel_engine_cs *engine) +{ + if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9) + return true; + + if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11) + return true; + + return false; +} + +static int live_lrc_isolation(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + const u32 poison[] = { + STACK_MAGIC, + 0x3a3a3a3a, + 0x5c5c5c5c, + 0xffffffff, + 0xffff0000, + }; + + /* + * Our goal is to try and verify that per-context state cannot be + * tampered with by another non-privileged client. + * + * We take the list of context registers from the LRI in the default + * context image and attempt to modify that list from a remote context.
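
The pass criterion compare_isolation applies per register can be stated compactly: a value counts as leaked only if the two reference snapshots agree (so it is not naturally moving, unlike RING_HEAD/RING_TAIL, which are exempted) and the poisoned run changed it. A self-contained sketch of that predicate over invented snapshots:

#include <assert.h>
#include <stdint.h>

/* Leaked: stable across the reference pair, yet changed by the poison run. */
static int leaked(uint32_t a0, uint32_t a1, uint32_t b0, uint32_t b1)
{
	int moving = a0 != a1;	/* e.g. head/tail pointers, timestamps */

	return !moving && (b0 != a0 || b1 != a1);
}

int main(void)
{
	/* ref[0]/ref[1] vs result[0]/result[1] for three invented registers */
	uint32_t A0[] = { 0x10, 0x1111, 0xabcd };
	uint32_t A1[] = { 0x20, 0x1111, 0xabcd };
	uint32_t B0[] = { 0x30, 0x1111, 0x5c5c };
	uint32_t B1[] = { 0x40, 0x1111, 0x5c5c };

	assert(!leaked(A0[0], A1[0], B0[0], B1[0])); /* moving: exempt   */
	assert(!leaked(A0[1], A1[1], B0[1], B1[1])); /* stable, intact   */
	assert(leaked(A0[2], A1[2], B0[2], B1[2]));  /* stable, poisoned */
	return 0;
}

Taking two snapshots on each side is what distinguishes registers that legitimately change between runs from genuine cross-context leaks.
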
+ */ + + for_each_engine(engine, gt, id) { + int err = 0; + int i; + + /* Just don't even ask */ + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) && + skip_isolation(engine)) + continue; + + intel_engine_pm_get(engine); + if (engine->pinned_default_state) { + for (i = 0; i < ARRAY_SIZE(poison); i++) { + err = __lrc_isolation(engine, poison[i]); + if (err) + break; + + err = __lrc_isolation(engine, ~poison[i]); + if (err) + break; + } + } + intel_engine_pm_put(engine); + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static void garbage_reset(struct intel_engine_cs *engine, + struct i915_request *rq) +{ + const unsigned int bit = I915_RESET_ENGINE + engine->id; + unsigned long *lock = &engine->gt->reset.flags; + + if (test_and_set_bit(bit, lock)) + return; + + tasklet_disable(&engine->execlists.tasklet); + + if (!rq->fence.error) + intel_engine_reset(engine, NULL); + + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(bit, lock); +} + +static struct i915_request *garbage(struct intel_context *ce, + struct rnd_state *prng) +{ + struct i915_request *rq; + int err; + + err = intel_context_pin(ce); + if (err) + return ERR_PTR(err); + + prandom_bytes_state(prng, + ce->lrc_reg_state, + ce->engine->context_size - + LRC_STATE_PN * PAGE_SIZE); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + i915_request_get(rq); + i915_request_add(rq); + return rq; + +err_unpin: + intel_context_unpin(ce); + return ERR_PTR(err); +} + +static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng) +{ + struct intel_context *ce; + struct i915_request *hang; + int err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + hang = garbage(ce, prng); + if (IS_ERR(hang)) { + err = PTR_ERR(hang); + goto err_ce; + } + + if (wait_for_submit(engine, hang, HZ / 2)) { + i915_request_put(hang); + err = -ETIME; + goto err_ce; + } + + intel_context_set_banned(ce); + garbage_reset(engine, hang); + + intel_engine_flush_submission(engine); + if (!hang->fence.error) { + i915_request_put(hang); + pr_err("%s: corrupted context was not reset\n", + engine->name); + err = -EINVAL; + goto err_ce; + } + + if (i915_request_wait(hang, 0, HZ / 2) < 0) { + pr_err("%s: corrupted context did not recover\n", + engine->name); + i915_request_put(hang); + err = -EIO; + goto err_ce; + } + i915_request_put(hang); + +err_ce: + intel_context_put(ce); + return err; +} + +static int live_lrc_garbage(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * Verify that we can recover if one context state is completely + * corrupted. 
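
garbage_reset above leans on the gt->reset.flags bit behaving as a try-lock: the caller that wins test_and_set_bit performs the reset, a concurrent caller backs off, and clear_and_wake_up_bit releases the bit for the next attempt. A rough model of that discipline with a C11 atomic flag (the wake-up side of clear_and_wake_up_bit is not modeled):

#include <assert.h>
#include <stdatomic.h>

static atomic_flag reset_in_flight = ATOMIC_FLAG_INIT;

/* test_and_set_bit(): only the first caller performs the reset. */
static int try_reset(void)
{
	if (atomic_flag_test_and_set(&reset_in_flight))
		return 0;	/* somebody else is resetting: back off */

	/* intel_engine_reset() would run here, tasklet disabled. */
	return 1;
}

int main(void)
{
	assert(try_reset() == 1);	/* first caller wins */
	assert(try_reset() == 0);	/* concurrent caller skips */

	/* clear_and_wake_up_bit(): release for the next attempt. */
	atomic_flag_clear(&reset_in_flight);
	assert(try_reset() == 1);
	return 0;
}
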
+ */ + + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN)) + return 0; + + for_each_engine(engine, gt, id) { + I915_RND_STATE(prng); + int err = 0, i; + + if (!intel_has_reset_engine(engine->gt)) + continue; + + intel_engine_pm_get(engine); + for (i = 0; i < 3; i++) { + err = __lrc_garbage(engine, &prng); + if (err) + break; + } + intel_engine_pm_put(engine); + + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static int __live_pphwsp_runtime(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + IGT_TIMEOUT(end_time); + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + ce->runtime.num_underflow = 0; + ce->runtime.max_underflow = 0; + + do { + unsigned int loop = 1024; + + while (loop) { + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_rq; + } + + if (--loop == 0) + i915_request_get(rq); + + i915_request_add(rq); + } + + if (__igt_timeout(end_time, NULL)) + break; + + i915_request_put(rq); + } while (1); + + err = i915_request_wait(rq, 0, HZ / 5); + if (err < 0) { + pr_err("%s: request not completed!\n", engine->name); + goto err_wait; + } + + igt_flush_test(engine->i915); + + pr_info("%s: pphwsp runtime %lluns, average %lluns\n", + engine->name, + intel_context_get_total_runtime_ns(ce), + intel_context_get_avg_runtime_ns(ce)); + + err = 0; + if (ce->runtime.num_underflow) { + pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n", + engine->name, + ce->runtime.num_underflow, + ce->runtime.max_underflow); + GEM_TRACE_DUMP(); + err = -EOVERFLOW; + } + +err_wait: + i915_request_put(rq); +err_rq: + intel_context_put(ce); + return err; +} + +static int live_pphwsp_runtime(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * Check that cumulative context runtime as stored in the pphwsp[16] + * is monotonic. 
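
Monotonicity here means every per-switch delta of the u32 runtime counter is non-negative; a negative step is recorded as an underflow, which is the num_underflow/max_underflow bookkeeping the test reports. A standalone sketch of that accounting, with invented counter samples:

#include <assert.h>
#include <stdint.h>

struct runtime {
	uint64_t total;		/* accumulated cycles */
	uint32_t last;		/* previous counter sample */
	unsigned int num_underflow;
	uint32_t max_underflow;
};

static void runtime_sample(struct runtime *rt, uint32_t now)
{
	int32_t dt = (int32_t)(now - rt->last);

	if (dt < 0) {		/* counter went backwards: underflow */
		rt->num_underflow++;
		if ((uint32_t)-dt > rt->max_underflow)
			rt->max_underflow = (uint32_t)-dt;
	} else {
		rt->total += (uint32_t)dt;
	}
	rt->last = now;
}

int main(void)
{
	struct runtime rt = {0};

	runtime_sample(&rt, 100);
	runtime_sample(&rt, 250);
	assert(rt.num_underflow == 0 && rt.total == 250);

	runtime_sample(&rt, 200);	/* a backwards step is an underflow */
	assert(rt.num_underflow == 1 && rt.max_underflow == 50);
	return 0;
}
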
+ */ + + for_each_engine(engine, gt, id) { + err = __live_pphwsp_runtime(engine); if (err) break; } @@ -4076,7 +5557,6 @@ static int live_gpr_clear(void *arg) if (igt_flush_test(gt->i915)) err = -EIO; - i915_vma_unpin_and_release(&scratch, 0); return err; } @@ -4086,7 +5566,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915) SUBTEST(live_lrc_layout), SUBTEST(live_lrc_fixed), SUBTEST(live_lrc_state), - SUBTEST(live_gpr_clear), + SUBTEST(live_lrc_gpr), + SUBTEST(live_lrc_isolation), + SUBTEST(live_lrc_timestamp), + SUBTEST(live_lrc_garbage), + SUBTEST(live_pphwsp_runtime), }; if (!HAS_LOGICAL_RING_CONTEXTS(i915)) diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index de1f83100fb6..8831ffee2061 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -12,7 +12,8 @@ #include "selftests/igt_spinner.h" struct live_mocs { - struct drm_i915_mocs_table table; + struct drm_i915_mocs_table mocs; + struct drm_i915_mocs_table l3cc; struct i915_vma *scratch; void *vaddr; }; @@ -70,11 +71,22 @@ static struct i915_vma *create_scratch(struct intel_gt *gt) static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) { + struct drm_i915_mocs_table table; + unsigned int flags; int err; - if (!get_mocs_settings(gt->i915, &arg->table)) + memset(arg, 0, sizeof(*arg)); + + flags = get_mocs_settings(gt->i915, &table); + if (!flags) return -EINVAL; + if (flags & HAS_RENDER_L3CC) + arg->l3cc = table; + + if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS)) + arg->mocs = table; + arg->scratch = create_scratch(gt); if (IS_ERR(arg->scratch)) return PTR_ERR(arg->scratch); @@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg, /* Read the mocs tables back using SRM */ offset = i915_ggtt_offset(vma); if (!err) - err = read_mocs_table(rq, &arg->table, &offset); + err = read_mocs_table(rq, &arg->mocs, &offset); if (!err && ce->engine->class == RENDER_CLASS) - err = read_l3cc_table(rq, &arg->table, &offset); + err = read_l3cc_table(rq, &arg->l3cc, &offset); offset -= i915_ggtt_offset(vma); GEM_BUG_ON(offset > PAGE_SIZE); @@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg, /* Compare the results against the expected tables */ vaddr = arg->vaddr; if (!err) - err = check_mocs_table(ce->engine, &arg->table, &vaddr); + err = check_mocs_table(ce->engine, &arg->mocs, &vaddr); if (!err && ce->engine->class == RENDER_CLASS) - err = check_l3cc_table(ce->engine, &arg->table, &vaddr); + err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 8cc55a0e9e06..95b165faeba7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -12,6 +12,21 @@ #include "selftests/i915_random.h" +static u64 rc6_residency(struct intel_rc6 *rc6) +{ + u64 result; + + /* XXX VLV_GT_MEDIA_RC6? 
*/ + + result = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + if (HAS_RC6p(rc6_to_i915(rc6))) + result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6p); + if (HAS_RC6pp(rc6_to_i915(rc6))) + result += intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6pp); + + return result; +} + int live_rc6_manual(void *arg) { struct intel_gt *gt = arg; @@ -38,9 +53,9 @@ int live_rc6_manual(void *arg) __intel_rc6_disable(rc6); msleep(1); /* wakeup is not immediate, takes about 100us on icl */ - res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + res[0] = rc6_residency(rc6); msleep(250); - res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + res[1] = rc6_residency(rc6); if ((res[1] - res[0]) >> 10) { pr_err("RC6 residency increased by %lldus while disabled for 250ms!\n", (res[1] - res[0]) >> 10); @@ -51,14 +66,15 @@ int live_rc6_manual(void *arg) /* Manually enter RC6 */ intel_rc6_park(rc6); - res[0] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + res[0] = rc6_residency(rc6); msleep(100); - res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6); + res[1] = rc6_residency(rc6); if (res[1] == res[0]) { - pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n", + pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n", intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE), - intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL)); + intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL), + res[0]); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 6ad6aca315f6..35406ecdf0b2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg) if (!intel_has_reset_engine(gt)) return 0; - if (USES_GUC_SUBMISSION(gt->i915)) + if (intel_uc_uses_guc_submission(&gt->uc)) return 0; intel_gt_pm_get(gt); diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c new file mode 100644 index 000000000000..9995faadd7e8 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "intel_engine_pm.h" +#include "selftests/igt_flush_test.h" + +static struct i915_vma *create_wally(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *cs; + int err; + + obj = i915_gem_object_create_internal(engine->i915, 4096); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, engine->gt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + err = i915_vma_sync(vma); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + cs = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cs)) { + i915_gem_object_put(obj); + return ERR_CAST(cs); + } + + if (INTEL_GEN(engine->i915) >= 6) { + *cs++ = MI_STORE_DWORD_IMM_GEN4; + *cs++ = 0; + } else if (INTEL_GEN(engine->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + } + *cs++ = vma->node.start + 4000; + *cs++ = STACK_MAGIC; + + *cs++ = MI_BATCH_BUFFER_END; + i915_gem_object_unpin_map(obj); + + vma->private = intel_context_create(engine); /* dummy residuals */ + if (IS_ERR(vma->private)) { + vma = ERR_CAST(vma->private); +
i915_gem_object_put(obj); + } + + return vma; +} + +static int context_sync(struct intel_context *ce) +{ + struct i915_request *rq; + int err = 0; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + + return err; +} + +static int new_context_sync(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = context_sync(ce); + intel_context_put(ce); + + return err; +} + +static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result) +{ + int pass; + int err; + + for (pass = 0; pass < 2; pass++) { + WRITE_ONCE(*result, 0); + err = context_sync(engine->kernel_context); + if (err || READ_ONCE(*result)) { + if (!err) { + pr_err("pass[%d] wa_bb emitted for the kernel context\n", + pass); + err = -EINVAL; + } + return err; + } + + WRITE_ONCE(*result, 0); + err = new_context_sync(engine); + if (READ_ONCE(*result) != STACK_MAGIC) { + if (!err) { + pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n", + pass); + err = -EINVAL; + } + return err; + } + + WRITE_ONCE(*result, 0); + err = new_context_sync(engine); + if (READ_ONCE(*result) != STACK_MAGIC) { + if (!err) { + pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n", + pass); + err = -EINVAL; + } + return err; + } + } + + return 0; +} + +static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result) +{ + struct intel_context *ce; + int err, i; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + for (i = 0; i < 2; i++) { + WRITE_ONCE(*result, 0); + err = context_sync(ce); + if (err) + break; + } + intel_context_put(ce); + if (err) + return err; + + if (READ_ONCE(*result)) { + pr_err("wa_bb emitted between the same user context\n"); + return -EINVAL; + } + + return 0; +} + +static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result) +{ + struct intel_context *ce; + int err, i; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + for (i = 0; i < 2; i++) { + WRITE_ONCE(*result, 0); + err = context_sync(ce); + if (err) + break; + + err = context_sync(engine->kernel_context); + if (err) + break; + } + intel_context_put(ce); + if (err) + return err; + + if (READ_ONCE(*result)) { + pr_err("wa_bb emitted between the same user context [with intervening kernel]\n"); + return -EINVAL; + } + + return 0; +} + +static int __live_ctx_switch_wa(struct intel_engine_cs *engine) +{ + struct i915_vma *bb; + u32 *result; + int err; + + bb = create_wally(engine); + if (IS_ERR(bb)) + return PTR_ERR(bb); + + result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC); + if (IS_ERR(result)) { + intel_context_put(bb->private); + i915_vma_unpin_and_release(&bb, 0); + return PTR_ERR(result); + } + result += 1000; + + engine->wa_ctx.vma = bb; + + err = mixed_contexts_sync(engine, result); + if (err) + goto out; + + err = double_context_sync_00(engine, result); + if (err) + goto out; + + err = kernel_context_sync_00(engine, result); + if (err) + goto out; + +out: + intel_context_put(engine->wa_ctx.vma->private); + i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP); + return err; +} + +static int live_ctx_switch_wa(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * Exercise the inter-context wa batch. 
+	 *
+	 * Between each user context we run a wa batch, and since it may
+	 * have implications for user visible state, we have to check that
+	 * we do actually execute it.
+	 *
+	 * The trick we use is to replace the normal wa batch with a custom
+	 * one that writes to a marker within it, and we can then look for
+	 * that marker to confirm the batch was run when we expect it, and,
+	 * equally important, that it wasn't run when we don't!
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_vma *saved_wa;
+		int err;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		if (IS_GEN_RANGE(gt->i915, 4, 5))
+			continue; /* MI_STORE_DWORD is privileged! */
+
+		saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+		intel_engine_pm_get(engine);
+		err = __live_ctx_switch_wa(engine);
+		intel_engine_pm_put(engine);
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+
+		engine->wa_ctx.vma = saved_wa;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_ctx_switch_wa),
+	};
+
+	if (HAS_EXECLISTS(i915))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc22fb4..c2578a0f2f14 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,6 +6,8 @@
 
 #include <linux/prime_numbers.h>
 
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_requests.h"
@@ -604,7 +606,6 @@ static int live_hwsp_alternate(void *arg)
 
 		tl = checked_intel_timeline_create(gt);
 		if (IS_ERR(tl)) {
-			intel_engine_pm_put(engine);
 			err = PTR_ERR(tl);
 			goto out;
 		}
@@ -750,6 +751,189 @@ out_free:
 	return err;
 }
 
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+				     unsigned long *saved)
+{
+	*saved = engine->props.heartbeat_interval_ms;
+	engine->props.heartbeat_interval_ms = 0;
+
+	intel_engine_pm_get(engine);
+	intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+				    unsigned long saved)
+{
+	intel_engine_pm_put(engine);
+
+	engine->props.heartbeat_interval_ms = saved;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Run the host for long enough, and even the kernel context will
+	 * see a seqno rollover.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce = engine->kernel_context;
+		struct intel_timeline *tl = ce->timeline;
+		struct i915_request *rq[3] = {};
+		unsigned long heartbeat;
+		int i;
+
+		engine_heartbeat_disable(engine, &heartbeat);
+		if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+			err = -EIO;
+			goto out;
+		}
+
+		GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
+		tl->seqno = 0;
+		timeline_rollback(tl);
+		timeline_rollback(tl);
+		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			struct i915_request *this;
+
+			this = i915_request_create(ce);
+			if (IS_ERR(this)) {
+				err = PTR_ERR(this);
+				goto out;
+			}
+
+			pr_debug("%s: create fence.seqno:%d\n",
+				 engine->name,
+				 lower_32_bits(this->fence.seqno));
+
+			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+			rq[i] = i915_request_get(this);
+			i915_request_add(this);
+		}
+
+		/* We expected a wrap!
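+		 * (the timeline was rolled back twice before the requests were
+		 * created, so the third seqno must not be greater than the first)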
+		 */
+		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+			pr_err("Wait for timeline wrap timed out!\n");
+			err = -EIO;
+			goto out;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			if (!i915_request_completed(rq[i])) {
+				pr_err("Pre-wrap request not completed!\n");
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+out:
+		for (i = 0; i < ARRAY_SIZE(rq); i++)
+			i915_request_put(rq[i]);
+		engine_heartbeat_enable(engine, heartbeat);
+		if (err)
+			break;
+	}
+
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/*
+	 * Simulate a long running user context, and force the seqno wrap
+	 * on the user's timeline.
+	 */
+
+	for_each_engine(engine, gt, id) {
+		struct i915_request *rq[3] = {};
+		struct intel_timeline *tl;
+		struct intel_context *ce;
+		int i;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
+
+		err = intel_context_alloc_state(ce);
+		if (err)
+			goto out;
+
+		tl = ce->timeline;
+		if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+			goto out;
+
+		timeline_rollback(tl);
+		timeline_rollback(tl);
+		WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			struct i915_request *this;
+
+			this = intel_context_create_request(ce);
+			if (IS_ERR(this)) {
+				err = PTR_ERR(this);
+				goto out;
+			}
+
+			pr_debug("%s: create fence.seqno:%d\n",
+				 engine->name,
+				 lower_32_bits(this->fence.seqno));
+
+			GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+			rq[i] = i915_request_get(this);
+			i915_request_add(this);
+		}
+
+		/* We expected a wrap! */
+		GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+		if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+			pr_err("Wait for timeline wrap timed out!\n");
+			err = -EIO;
+			goto out;
+		}
+
+		for (i = 0; i < ARRAY_SIZE(rq); i++) {
+			if (!i915_request_completed(rq[i])) {
+				pr_err("Pre-wrap request not completed!\n");
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+out:
+		for (i = 0; i < ARRAY_SIZE(rq); i++)
+			i915_request_put(rq[i]);
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	return err;
+}
+
 static int live_hwsp_recycle(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -827,6 +1011,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_hwsp_engine),
 		SUBTEST(live_hwsp_alternate),
 		SUBTEST(live_hwsp_wrap),
+		SUBTEST(live_hwsp_rollover_kernel),
+		SUBTEST(live_hwsp_rollover_user),
 	};
 
 	if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ac1921854cbf..5ed323254ee1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -583,6 +583,15 @@ static int check_dirty_whitelist(struct intel_context *ce)
 		if (err)
 			goto err_request;
 
+		i915_vma_lock(scratch);
+		err = i915_request_await_object(rq, scratch->obj, true);
+		if (err == 0)
+			err = i915_vma_move_to_active(scratch, rq,
+						      EXEC_OBJECT_WRITE);
+		i915_vma_unlock(scratch);
+		if (err)
+			goto err_request;
+
 		err = engine->emit_bb_start(rq,
 					    batch->node.start, PAGE_SIZE,
 					    0);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ *
Copyright © 2019 Intel Corporation + */ + +#include <linux/kobject.h> +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "intel_engine.h" +#include "intel_engine_heartbeat.h" +#include "sysfs_engines.h" + +struct kobj_engine { + struct kobject base; + struct intel_engine_cs *engine; +}; + +static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj) +{ + return container_of(kobj, struct kobj_engine, base)->engine; +} + +static ssize_t +name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name); +} + +static struct kobj_attribute name_attr = +__ATTR(name, 0444, name_show, NULL); + +static ssize_t +class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class); +} + +static struct kobj_attribute class_attr = +__ATTR(class, 0444, class_show, NULL); + +static ssize_t +inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance); +} + +static struct kobj_attribute inst_attr = +__ATTR(instance, 0444, inst_show, NULL); + +static ssize_t +mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base); +} + +static struct kobj_attribute mmio_attr = +__ATTR(mmio_base, 0444, mmio_show, NULL); + +static const char * const vcs_caps[] = { + [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc", + [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc", +}; + +static const char * const vecs_caps[] = { + [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc", +}; + +static ssize_t repr_trim(char *buf, ssize_t len) +{ + /* Trim off the trailing space and replace with a newline */ + if (len > PAGE_SIZE) + len = PAGE_SIZE; + if (len > 0) + buf[len - 1] = '\n'; + + return len; +} + +static ssize_t +__caps_show(struct intel_engine_cs *engine, + u32 caps, char *buf, bool show_unknown) +{ + const char * const *repr; + int count, n; + ssize_t len; + + BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities)); + + switch (engine->class) { + case VIDEO_DECODE_CLASS: + repr = vcs_caps; + count = ARRAY_SIZE(vcs_caps); + break; + + case VIDEO_ENHANCEMENT_CLASS: + repr = vecs_caps; + count = ARRAY_SIZE(vecs_caps); + break; + + default: + repr = NULL; + count = 0; + break; + } + GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps))); + + len = 0; + for_each_set_bit(n, + (unsigned long *)&caps, + show_unknown ? 
BITS_PER_TYPE(typeof(caps)) : count) {
+		if (n >= count || !repr[n]) {
+			if (GEM_WARN_ON(show_unknown))
+				len += snprintf(buf + len, PAGE_SIZE - len,
+						"[%x] ", n);
+		} else {
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%s ", repr[n]);
+		}
+		if (GEM_WARN_ON(len >= PAGE_SIZE))
+			break;
+	}
+	return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+	       const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * When waiting for a request, if it is currently being executed
+	 * on the GPU, we busywait for a short while before sleeping. The
+	 * premise is that most requests are short, and if it is already
+	 * executing then there is a good chance that it will complete
+	 * before we can setup the interrupt handler and go to sleep.
+	 * We try to offset the cost of going to sleep, by first spinning
+	 * on the request -- if it completed in less time than it would take
+	 * to go to sleep, process the interrupt and return back to the client,
+	 * then we have saved the client some latency, albeit at the cost
+	 * of spinning on an expensive CPU core.
+	 *
+	 * While we try to avoid waiting at all for a request that is unlikely
+	 * to complete, deciding how long it is worth spinning for is an
+	 * arbitrary decision: trading off power vs latency.
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_nsecs(2))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+	return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * Execlists uses a scheduling quantum (a timeslice) to alternate
+	 * execution between ready-to-run contexts of equal priority. This
+	 * ensures that all users (though only if they are of equal importance)
+	 * have the opportunity to run and prevents livelocks where contexts
+	 * may have implicit ordering due to userspace semaphores.
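+	 *
+	 * e.g. with a 1ms timeslice, two runnable contexts of equal
+	 * priority take turns on the engine at millisecond intervals.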
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+	if (execlists_active(&engine->execlists))
+		set_timer_ms(&engine->execlists.timer, duration);
+
+	return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+	   const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long duration;
+	int err;
+
+	/*
+	 * Allowing ourselves to sleep before a GPU reset after disabling
+	 * submission, even for a few milliseconds, gives an innocent context
+	 * the opportunity to clear the GPU before the reset occurs. However,
+	 * how long to sleep depends on the typical non-preemptible duration
+	 * (a similar problem to determining the ideal preempt-reset timeout
+	 * or even the heartbeat interval).
+	 */
+
+	err = kstrtoull(buf, 0, &duration);
+	if (err)
+		return err;
+
+	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+	return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long timeout;
+	int err;
+
+	/*
+	 * After initialising a preemption request, we give the current
+	 * resident a small amount of time to vacate the GPU. The preemption
+	 * request is for a higher priority context and should be immediate to
+	 * maintain high quality of service (and avoid priority inversion).
+	 * However, the preemption granularity of the GPU can be quite coarse
+	 * and so we need a compromise.
+	 */
+
+	err = kstrtoull(buf, 0, &timeout);
+	if (err)
+		return err;
+
+	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+	if (READ_ONCE(engine->execlists.pending[0]))
+		set_timer_ms(&engine->execlists.preempt, timeout);
+
+	return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+		     char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long long delay;
+	int err;
+
+	/*
+	 * We monitor the health of the system via periodic heartbeat pulses.
+	 * The pulses also provide the opportunity to perform garbage
+	 * collection.
+	 * However, we interpret an incomplete pulse (a missed heartbeat) as
+	 * an indication that the system is no longer responsive, i.e. hung,
+	 * and perform an engine or full GPU reset. Given that the
+	 * preemption granularity can be very coarse on a system, the optimal
+	 * value for any workload is unknowable!
+	 */
+
+	err = kstrtoull(buf, 0, &delay);
+	if (err)
+		return err;
+
+	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+		return -EINVAL;
+
+	err = intel_engine_set_heartbeat(engine, delay);
+	if (err)
+		return err;
+
+	return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+	.release = kobj_engine_release,
+	.sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+	struct kobj_engine *ke;
+
+	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+	if (!ke)
+		return NULL;
+
+	kobject_init(&ke->base, &kobj_engine_type);
+	ke->engine = engine;
+
+	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+		kobject_put(&ke->base);
+		return NULL;
+	}
+
+	/* xfer ownership to sysfs tree */
+	return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+	static const struct attribute *files[] = {
+		&name_attr.attr,
+		&class_attr.attr,
+		&inst_attr.attr,
+		&mmio_attr.attr,
+		&caps_attr.attr,
+		&all_caps_attr.attr,
+		&max_spin_attr.attr,
+		&stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+		&heartbeat_interval_attr.attr,
+#endif
+		NULL
+	};
+
+	struct device *kdev = i915->drm.primary->kdev;
+	struct intel_engine_cs *engine;
+	struct kobject *dir;
+
+	dir = kobject_create_and_add("engine", &kdev->kobj);
+	if (!dir)
+		return;
+
+	for_each_uabi_engine(engine, i915) {
+		struct kobject *kobj;
+
+		kobj = kobj_engine(dir, engine);
+		if (!kobj)
+			goto err_engine;
+
+		if (sysfs_create_files(kobj, files))
+			goto err_object;
+
+		if (intel_engine_has_timeslices(engine) &&
+		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+			goto err_engine;
+
+		if (intel_engine_has_preempt_reset(engine) &&
+		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+			goto err_engine;
+
+		if (0) {
+err_object:
+			kobject_put(kobj);
+err_engine:
+			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+				engine->name);
+			break;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5d00a3b2d914..819f09ef51fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
 {
 	u32 flags = 0;
 
-	if (!intel_guc_is_submission_supported(guc))
+	if
(!intel_guc_submission_is_used(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) { u32 flags = 0; - if (intel_guc_is_submission_supported(guc)) { + if (intel_guc_submission_is_used(guc)) { u32 ctxnum, base; base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); @@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc) ret = intel_uc_fw_init(&guc->fw); if (ret) - goto err_fetch; + goto out; ret = intel_guc_log_create(&guc->log); if (ret) @@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_ads; - if (intel_guc_is_submission_supported(guc)) { + if (intel_guc_submission_is_used(guc)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc) /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(gt->ggtt); + intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE); + return 0; err_ct: @@ -374,9 +376,8 @@ err_log: intel_guc_log_destroy(&guc->log); err_fw: intel_uc_fw_fini(&guc->fw); -err_fetch: - intel_uc_fw_cleanup_fetch(&guc->fw); - DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret); +out: + i915_probe_error(gt->i915, "failed with %d\n", ret); return ret; } @@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - if (!intel_uc_fw_is_available(&guc->fw)) + if (!intel_uc_fw_is_loadable(&guc->fw)) return; i915_ggtt_disable_guc(gt->ggtt); - if (intel_guc_is_submission_supported(guc)) + if (intel_guc_submission_is_used(guc)) intel_guc_submission_fini(guc); intel_guc_ct_fini(&guc->ct); @@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); intel_uc_fw_fini(&guc->fw); - intel_uc_fw_cleanup_fetch(&guc->fw); - - intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED); } /* @@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc) * If GuC communication is enabled but submission is not supported, * we do not need to suspend the GuC. */ - if (!intel_guc_submission_is_enabled(guc)) + if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc)) return 0; /* @@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc) * we do not need to resume the GuC but we do need to enable the * GuC communication on resume (above). 
*/ - if (!intel_guc_submission_is_enabled(guc)) + if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc)) return 0; return intel_guc_send(guc, action, ARRAY_SIZE(action)); @@ -678,8 +676,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) if (IS_ERR(vma)) goto err; - flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - ret = i915_vma_pin(vma, 0, 0, flags); + flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + ret = i915_ggtt_pin(vma, 0, flags); if (ret) { vma = ERR_PTR(ret); goto err; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 910d49590068..4594ccbeaa34 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -39,7 +39,7 @@ struct intel_guc { void (*disable)(struct intel_guc *guc); } interrupts; - bool submission_supported; + bool submission_selected; struct i915_vma *ads_vma; struct __guc_ads_blob *ads_blob; @@ -143,29 +143,36 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc) return intel_uc_fw_is_supported(&guc->fw); } -static inline bool intel_guc_is_enabled(struct intel_guc *guc) +static inline bool intel_guc_is_wanted(struct intel_guc *guc) { return intel_uc_fw_is_enabled(&guc->fw); } -static inline bool intel_guc_is_running(struct intel_guc *guc) +static inline bool intel_guc_is_used(struct intel_guc *guc) +{ + GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED); + return intel_uc_fw_is_available(&guc->fw); +} + +static inline bool intel_guc_is_fw_running(struct intel_guc *guc) { return intel_uc_fw_is_running(&guc->fw); } +static inline bool intel_guc_is_ready(struct intel_guc *guc) +{ + return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct); +} + static inline int intel_guc_sanitize(struct intel_guc *guc) { intel_uc_fw_sanitize(&guc->fw); + intel_guc_ct_sanitize(&guc->ct); guc->mmio_msg = 0; return 0; } -static inline bool intel_guc_is_submission_supported(struct intel_guc *guc) -{ - return guc->submission_supported; -} - static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) { spin_lock_irq(&guc->irq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index c6f971a049f9..11742fca0e9e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -5,11 +5,15 @@ #include "i915_drv.h" #include "intel_guc_ct.h" +#include "gt/intel_gt.h" +#define CT_ERROR(_ct, _fmt, ...) \ + DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) #ifdef CONFIG_DRM_I915_DEBUG_GUC -#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__) +#define CT_DEBUG(_ct, _fmt, ...) \ + DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__) #else -#define CT_DEBUG_DRIVER(...) do { } while (0) +#define CT_DEBUG(...) 
do { } while (0) #endif struct ct_request { @@ -48,6 +52,21 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) return container_of(ct, struct intel_guc, ct); } +static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct) +{ + return guc_to_gt(ct_to_guc(ct)); +} + +static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct) +{ + return ct_to_gt(ct)->i915; +} + +static inline struct device *ct_to_dev(struct intel_guc_ct *ct) +{ + return ct_to_i915(ct)->drm.dev; +} + static inline const char *guc_ct_buffer_type_to_str(u32 type) { switch (type) { @@ -63,7 +82,6 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type) static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, u32 cmds_addr, u32 size) { - CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size); memset(desc, 0, sizeof(*desc)); desc->addr = cmds_addr; desc->size = size; @@ -72,8 +90,6 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) { - CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", - desc, desc->head, desc->tail); desc->head = 0; desc->tail = 0; desc->is_in_error = 0; @@ -89,31 +105,40 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, sizeof(struct guc_ct_buffer_desc), type }; - int err; /* Can't use generic send(), CT registration must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); - if (err) - DRM_ERROR("CT: register %s buffer failed; err=%d\n", - guc_ct_buffer_type_to_str(type), err); + return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); +} + +static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type) +{ + int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type); + + if (unlikely(err)) + CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n", + guc_ct_buffer_type_to_str(type), err); return err; } -static int guc_action_deregister_ct_buffer(struct intel_guc *guc, - u32 type) +static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type) { u32 action[] = { INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, CTB_OWNER_HOST, type }; - int err; /* Can't use generic send(), CT deregistration must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); - if (err) - DRM_ERROR("CT: deregister %s buffer failed; err=%d\n", - guc_ct_buffer_type_to_str(type), err); + return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); +} + +static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type) +{ + int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type); + + if (unlikely(err)) + CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n", + guc_ct_buffer_type_to_str(type), err); return err; } @@ -157,13 +182,12 @@ int intel_guc_ct_init(struct intel_guc_ct *ct) */ err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob); - if (err) { - DRM_ERROR("CT: channel allocation failed; err=%d\n", err); + if (unlikely(err)) { + CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err); return err; } - CT_DEBUG_DRIVER("CT: vma base=%#x\n", - intel_guc_ggtt_offset(guc, ct->vma)); + CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma)); /* store pointers to desc and cmds */ for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) { @@ -197,7 +221,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct) int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); - u32 base; + 
u32 base, cmds, size;
 	int err;
 	int i;
@@ -212,23 +236,23 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	 */
 	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
 		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
-		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
-					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
-					PAGE_SIZE/4);
+		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+		size = PAGE_SIZE / 4;
+		CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
+		guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
 	}
 
-	/* register buffers, starting wirh RECV buffer
-	 * descriptors are in first half of the blob
+	/*
+	 * Register both CT buffers starting with RECV buffer.
+	 * Descriptors are in first half of the blob.
 	 */
-	err = guc_action_register_ct_buffer(guc,
-					    base + PAGE_SIZE/4 * CTB_RECV,
-					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	if (unlikely(err))
 		goto err_out;
 
-	err = guc_action_register_ct_buffer(guc,
-					    base + PAGE_SIZE/4 * CTB_SEND,
-					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
+	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
 	if (unlikely(err))
 		goto err_deregister;
 
@@ -237,10 +261,9 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 	return 0;
 
 err_deregister:
-	guc_action_deregister_ct_buffer(guc,
-					INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
 err_out:
-	DRM_ERROR("CT: can't open channel; err=%d\n", err);
+	CT_ERROR(ct, "Failed to open CT channel (err=%d)\n", err);
 	return err;
 }
 
@@ -256,18 +279,16 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
 
 	ct->enabled = false;
 
-	if (intel_guc_is_running(guc)) {
-		guc_action_deregister_ct_buffer(guc,
-						INTEL_GUC_CT_BUFFER_TYPE_SEND);
-		guc_action_deregister_ct_buffer(guc,
-						INTEL_GUC_CT_BUFFER_TYPE_RECV);
+	if (intel_guc_is_fw_running(guc)) {
+		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
+		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
 	}
 }
 
 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
 {
 	/* For now it's trivial */
-	return ++ct->requests.next_fence;
+	return ++ct->requests.last_fence;
 }
 
 /**
@@ -288,25 +309,33 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
  * ^-----------------len-------------------^
  */
-static int ctb_write(struct intel_guc_ct_buffer *ctb,
-		     const u32 *action,
-		     u32 len /* in dwords */,
-		     u32 fence,
-		     bool want_response)
+static int ct_write(struct intel_guc_ct *ct,
+		    const u32 *action,
+		    u32 len /* in dwords */,
+		    u32 fence,
+		    bool want_response)
 {
+	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
 	struct guc_ct_buffer_desc *desc = ctb->desc;
-	u32 head = desc->head / 4;	/* in dwords */
-	u32 tail = desc->tail / 4;	/* in dwords */
-	u32 size = desc->size / 4;	/* in dwords */
-	u32 used;			/* in dwords */
+	u32 head = desc->head;
+	u32 tail = desc->tail;
+	u32 size = desc->size;
+	u32 used;
 	u32 header;
 	u32 *cmds = ctb->cmds;
 	unsigned int i;
 
-	GEM_BUG_ON(desc->size % 4);
-	GEM_BUG_ON(desc->head % 4);
-	GEM_BUG_ON(desc->tail % 4);
-	GEM_BUG_ON(tail >= size);
+	if (unlikely(desc->is_in_error))
+		return -EPIPE;
+
+	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+		     (tail | head) >= size))
+		goto corrupted;
+
+	/* later calculations will be done in dwords */
+	head /= 4;
+	tail /= 4;
+	size /= 4;
 
 	/*
 	 * tail == head condition indicates empty. GuC FW does not support
@@ -332,9 +361,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
 		 (want_response ?
GUC_CT_MSG_SEND_STATUS : 0) | (action[0] << GUC_CT_MSG_ACTION_SHIFT); - CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n", - 4, &header, 4, &fence, - 4 * (len - 1), &action[1]); + CT_DEBUG(ct, "writing %*ph %*ph %*ph\n", + 4, &header, 4, &fence, 4 * (len - 1), &action[1]); cmds[tail] = header; tail = (tail + 1) % size; @@ -346,12 +374,17 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb, cmds[tail] = action[i]; tail = (tail + 1) % size; } + GEM_BUG_ON(tail > size); /* now update desc tail (back in bytes) */ desc->tail = tail * 4; - GEM_BUG_ON(desc->tail > desc->size); - return 0; + +corrupted: + CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n", + desc->addr, desc->head, desc->tail, desc->size); + desc->is_in_error = 1; + return -EPIPE; } /** @@ -469,7 +502,7 @@ static int ct_send(struct intel_guc_ct *ct, list_add_tail(&request.link, &ct->requests.pending); spin_unlock_irqrestore(&ct->requests.lock, flags); - err = ctb_write(ctb, action, len, fence, !!response_buf); + err = ct_write(ct, action, len, fence, !!response_buf); if (unlikely(err)) goto unlink; @@ -526,11 +559,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, ret = ct_send(ct, action, len, response_buf, response_buf_size, &status); if (unlikely(ret < 0)) { - DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", - action[0], ret, status); + CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n", + action[0], ret, status); } else if (unlikely(ret)) { - CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n", - action[0], ret, ret); + CT_DEBUG(ct, "send action %#x returned %d (%#x)\n", + action[0], ret, ret); } mutex_unlock(&guc->send_mutex); @@ -552,22 +585,29 @@ static inline bool ct_header_is_response(u32 header) return !!(header & GUC_CT_MSG_IS_RESPONSE); } -static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) +static int ct_read(struct intel_guc_ct *ct, u32 *data) { + struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV]; struct guc_ct_buffer_desc *desc = ctb->desc; - u32 head = desc->head / 4; /* in dwords */ - u32 tail = desc->tail / 4; /* in dwords */ - u32 size = desc->size / 4; /* in dwords */ + u32 head = desc->head; + u32 tail = desc->tail; + u32 size = desc->size; u32 *cmds = ctb->cmds; - s32 available; /* in dwords */ + s32 available; unsigned int len; unsigned int i; - GEM_BUG_ON(desc->size % 4); - GEM_BUG_ON(desc->head % 4); - GEM_BUG_ON(desc->tail % 4); - GEM_BUG_ON(tail >= size); - GEM_BUG_ON(head >= size); + if (unlikely(desc->is_in_error)) + return -EPIPE; + + if (unlikely(!IS_ALIGNED(head | tail | size, 4) || + (tail | head) >= size)) + goto corrupted; + + /* later calculations will be done in dwords */ + head /= 4; + tail /= 4; + size /= 4; /* tail == head condition indicates empty */ available = tail - head; @@ -577,7 +617,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) /* beware of buffer wrap case */ if (unlikely(available < 0)) available += size; - CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail); + CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail); GEM_BUG_ON(available < 0); data[0] = cmds[head]; @@ -586,23 +626,29 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) /* message len with header */ len = ct_header_get_len(data[0]) + 1; if (unlikely(len > (u32)available)) { - DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n", - 4, data, - 4 * (head + available - 1 > size ? - size - head : available - 1), &cmds[head], - 4 * (head + available - 1 > size ? 
- available - 1 - size + head : 0), &cmds[0]); - return -EPROTO; + CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n", + 4, data, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); + goto corrupted; } for (i = 1; i < len; i++) { data[i] = cmds[head]; head = (head + 1) % size; } - CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data); + CT_DEBUG(ct, "received %*ph\n", 4 * len, data); desc->head = head * 4; return 0; + +corrupted: + CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n", + desc->addr, desc->head, desc->tail, desc->size); + desc->is_in_error = 1; + return -EPIPE; } /** @@ -627,7 +673,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) { u32 header = msg[0]; u32 len = ct_header_get_len(header); - u32 msglen = len + 1; /* total message length including header */ + u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ u32 fence; u32 status; u32 datalen; @@ -639,7 +685,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) /* Response payload shall at least include fence and status */ if (unlikely(len < 2)) { - DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); return -EPROTO; } @@ -649,22 +695,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) /* Format of the status follows RESPONSE message */ if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { - DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg); return -EPROTO; } - CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); + CT_DEBUG(ct, "response fence %u status %#x\n", fence, status); spin_lock(&ct->requests.lock); list_for_each_entry(req, &ct->requests.pending, link) { if (unlikely(fence != req->fence)) { - CT_DEBUG_DRIVER("CT: request %u awaits response\n", - req->fence); + CT_DEBUG(ct, "request %u awaits response\n", + req->fence); continue; } if (unlikely(datalen > req->response_len)) { - DRM_ERROR("CT: response %u too long %*ph\n", - req->fence, 4 * msglen, msg); + CT_ERROR(ct, "Response for %u is too long %*ph\n", + req->fence, msgsize, msg); datalen = 0; } if (datalen) @@ -677,7 +723,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) spin_unlock(&ct->requests.lock); if (!found) - DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); + CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg); return 0; } @@ -687,7 +733,7 @@ static void ct_process_request(struct intel_guc_ct *ct, struct intel_guc *guc = ct_to_guc(ct); int ret; - CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); + CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: @@ -698,8 +744,8 @@ static void ct_process_request(struct intel_guc_ct *ct, default: fail_unexpected: - DRM_ERROR("CT: unexpected request %x %*ph\n", - action, 4 * len, payload); + CT_ERROR(ct, "Unexpected request %x %*ph\n", + action, 4 * len, payload); break; } } @@ -767,18 +813,18 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) { u32 header = msg[0]; u32 len = ct_header_get_len(header); - u32 msglen = len + 1; /* total message length including header */ + u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */ struct ct_incoming_request *request; unsigned long flags; 
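 	/* Incoming requests are copied out of the CT buffer and queued for deferred processing */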
GEM_BUG_ON(ct_header_is_response(header)); - request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC); + request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC); if (unlikely(!request)) { - DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg); + CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg); return 0; /* XXX: -ENOMEM ? */ } - memcpy(request->msg, msg, 4 * msglen); + memcpy(request->msg, msg, msgsize); spin_lock_irqsave(&ct->requests.lock, flags); list_add_tail(&request->link, &ct->requests.incoming); @@ -794,7 +840,6 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) */ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) { - struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV]; u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ int err = 0; @@ -804,7 +849,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) } do { - err = ctb_read(ctb, msg); + err = ct_read(ct, msg); if (err) break; @@ -813,10 +858,4 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct) else err = ct_handle_request(ct, msg); } while (!err); - - if (GEM_WARN_ON(err == -EPROTO)) { - DRM_ERROR("CT: corrupted message detected!\n"); - ctb->desc->is_in_error = 1; - } } - diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h index 3e7fe237cfa5..494a51a5200f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -49,7 +49,7 @@ struct intel_guc_ct { struct intel_guc_ct_buffer ctbs[2]; struct { - u32 next_fence; /* fence to be used with next request to send */ + u32 last_fence; /* last fence used to send request */ spinlock_t lock; /* protects pending requests list */ struct list_head pending; /* requests waiting for response */ @@ -65,6 +65,11 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct); int intel_guc_ct_enable(struct intel_guc_ct *ct); void intel_guc_ct_disable(struct intel_guc_ct *ct); +static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct) +{ + ct->enabled = false; +} + static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct) { return ct->enabled; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 9e42324fdecd..aa6d56e25a10 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -217,7 +217,7 @@ static void guc_wq_item_append(struct intel_guc *guc, static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(rq->context->lrc_desc); + u32 ctx_desc = rq->context->lrc.ccid; u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); guc_wq_item_append(guc, engine->guc_id, ctx_desc, @@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine) /* Mark all executing requests as skipped. 
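 	 * (i915_request_set_error_once() will not overwrite an error
 	 * already recorded on the fence)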
*/ list_for_each_entry(rq, &engine->active.requests, sched.link) { - if (!i915_request_signaled(rq)) - dma_fence_set_error(&rq->fence, -EIO); - + i915_request_set_error_once(rq, -EIO); i915_request_mark_complete(rq); } @@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc) guc_proc_desc_fini(guc); } -static bool __guc_submission_support(struct intel_guc *guc) +static bool __guc_submission_selected(struct intel_guc *guc) { - /* XXX: GuC submission is unavailable for now */ - return false; - - if (!intel_guc_is_supported(guc)) + if (!intel_guc_submission_is_supported(guc)) return false; return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; @@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc) void intel_guc_submission_init_early(struct intel_guc *guc) { - guc->submission_supported = __guc_submission_support(guc); + guc->submission_selected = __guc_submission_selected(guc); } bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index e402a2932592..4cf9d3e50263 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -8,7 +8,8 @@ #include <linux/types.h> -struct intel_guc; +#include "intel_guc.h" + struct intel_engine_cs; void intel_guc_submission_init_early(struct intel_guc *guc); @@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc); void intel_guc_preempt_work_destroy(struct intel_guc *guc); bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine); +static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) +{ + /* XXX: GuC submission is unavailable for now */ + return false; +} + +static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc) +{ + return guc->submission_selected; +} + +static inline bool intel_guc_submission_is_used(struct intel_guc *guc) +{ + return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc); +} + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 32a069841c14..a74b65694512 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc) if (err) goto out_fini; + intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE); + return 0; out_fini: intel_uc_fw_fini(&huc->fw); out: - intel_uc_fw_cleanup_fetch(&huc->fw); - DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err); + i915_probe_error(i915, "failed with %d\n", err); return err; } void intel_huc_fini(struct intel_huc *huc) { - if (!intel_uc_fw_is_available(&huc->fw)) + if (!intel_uc_fw_is_loadable(&huc->fw)) return; intel_huc_rsa_data_destroy(huc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 644c059fe01d..a40b9cfc6c22 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc) return intel_uc_fw_is_supported(&huc->fw); } -static inline bool intel_huc_is_enabled(struct intel_huc *huc) +static inline bool intel_huc_is_wanted(struct intel_huc *huc) { return intel_uc_fw_is_enabled(&huc->fw); } +static inline bool intel_huc_is_used(struct intel_huc *huc) +{ + GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED); + return 
intel_uc_fw_is_available(&huc->fw); +} + static inline bool intel_huc_is_authenticated(struct intel_huc *huc) { return intel_uc_fw_is_running(&huc->fw); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index eee193bf2cc4..9cdf4cbe691c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc) struct drm_i915_private *i915 = gt->i915; intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, - intel_uc_uses_guc(uc), + intel_uc_wants_guc(uc), INTEL_INFO(i915)->platform, INTEL_REVID(i915)); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 64934a876a50..a4cbe06e06bd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc) DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "enable_guc=%d (guc:%s submission:%s huc:%s)\n", i915_modparams.enable_guc, - yesno(intel_uc_uses_guc(uc)), - yesno(intel_uc_uses_guc_submission(uc)), - yesno(intel_uc_uses_huc(uc))); + yesno(intel_uc_wants_guc(uc)), + yesno(intel_uc_wants_guc_submission(uc)), + yesno(intel_uc_wants_huc(uc))); if (i915_modparams.enable_guc == -1) return; if (i915_modparams.enable_guc == 0) { - GEM_BUG_ON(intel_uc_uses_guc(uc)); - GEM_BUG_ON(intel_uc_uses_guc_submission(uc)); - GEM_BUG_ON(intel_uc_uses_huc(uc)); + GEM_BUG_ON(intel_uc_wants_guc(uc)); + GEM_BUG_ON(intel_uc_wants_guc_submission(uc)); + GEM_BUG_ON(intel_uc_wants_huc(uc)); return; } @@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc) __confirm_options(uc); - if (intel_uc_uses_guc(uc)) + if (intel_uc_wants_guc(uc)) uc->ops = &uc_ops_on; else uc->ops = &uc_ops_off; @@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc) { int err; - GEM_BUG_ON(!intel_uc_uses_guc(uc)); + GEM_BUG_ON(!intel_uc_wants_guc(uc)); err = intel_uc_fw_fetch(&uc->guc.fw); if (err) return; - if (intel_uc_uses_huc(uc)) + if (intel_uc_wants_huc(uc)) intel_uc_fw_fetch(&uc->huc.fw); } @@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc) intel_uc_fw_cleanup_fetch(&uc->guc.fw); } -static void __uc_init(struct intel_uc *uc) +static int __uc_init(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; int ret; - GEM_BUG_ON(!intel_uc_uses_guc(uc)); + GEM_BUG_ON(!intel_uc_wants_guc(uc)); + + if (!intel_uc_uses_guc(uc)) + return 0; + + if (i915_inject_probe_failure(uc_to_gt(uc)->i915)) + return -ENOMEM; /* XXX: GuC submission is unavailable for now */ - GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); + GEM_BUG_ON(intel_uc_uses_guc_submission(uc)); ret = intel_guc_init(guc); - if (ret) { - intel_uc_fw_cleanup_fetch(&huc->fw); - return; + if (ret) + return ret; + + if (intel_uc_uses_huc(uc)) { + ret = intel_huc_init(huc); + if (ret) + goto out_guc; } - if (intel_uc_uses_huc(uc)) - intel_huc_init(huc); + return 0; + +out_guc: + intel_guc_fini(guc); + return ret; } static void __uc_fini(struct intel_uc *uc) @@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc) int ret, attempts; GEM_BUG_ON(!intel_uc_supports_guc(uc)); - GEM_BUG_ON(!intel_uc_uses_guc(uc)); + GEM_BUG_ON(!intel_uc_wants_guc(uc)); - if (!intel_uc_fw_is_available(&guc->fw)) { + if (!intel_uc_fw_is_loadable(&guc->fw)) { ret = __uc_check_hw(uc) || intel_uc_fw_is_overridden(&guc->fw) || - intel_uc_supports_guc_submission(uc) ? + intel_uc_wants_guc_submission(uc) ? 
intel_uc_fw_status_to_error(guc->fw.status) : 0; goto err_out; } @@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc) if (ret) goto err_communication; - if (intel_uc_supports_guc_submission(uc)) + if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_enable(guc); dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path, guc->fw.major_ver_found, guc->fw.minor_ver_found, "submission", - enableddisabled(intel_uc_supports_guc_submission(uc))); + enableddisabled(intel_uc_uses_guc_submission(uc))); if (intel_uc_uses_huc(uc)) { dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", @@ -505,10 +518,10 @@ static void __uc_fini_hw(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_fw_running(guc)) return; - if (intel_uc_supports_guc_submission(uc)) + if (intel_uc_uses_guc_submission(uc)) intel_guc_submission_disable(guc); if (guc_communication_enabled(guc)) @@ -527,7 +540,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_ready(guc)) return; guc_disable_communication(guc); @@ -539,7 +552,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; int err; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_ready(guc)) return; err = intel_guc_suspend(guc); @@ -554,7 +567,7 @@ void intel_uc_suspend(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; intel_wakeref_t wakeref; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_ready(guc)) return; with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref) @@ -566,7 +579,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication) struct intel_guc *guc = &uc->guc; int err; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_fw_running(guc)) return 0; /* Make sure we enable communication if and only if it's disabled */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 49c913524686..5ae7b50b7dc1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -7,6 +7,7 @@ #define _INTEL_UC_H_ #include "intel_guc.h" +#include "intel_guc_submission.h" #include "intel_huc.h" #include "i915_params.h" @@ -16,7 +17,7 @@ struct intel_uc_ops { int (*sanitize)(struct intel_uc *uc); void (*init_fw)(struct intel_uc *uc); void (*fini_fw)(struct intel_uc *uc); - void (*init)(struct intel_uc *uc); + int (*init)(struct intel_uc *uc); void (*fini)(struct intel_uc *uc); int (*init_hw)(struct intel_uc *uc); void (*fini_hw)(struct intel_uc *uc); @@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc); int intel_uc_resume(struct intel_uc *uc); int intel_uc_runtime_resume(struct intel_uc *uc); -static inline bool intel_uc_supports_guc(struct intel_uc *uc) -{ - return intel_guc_is_supported(&uc->guc); -} - -static inline bool intel_uc_uses_guc(struct intel_uc *uc) -{ - return intel_guc_is_enabled(&uc->guc); -} +/* + * We need to know as early as possible if we're going to use GuC or not to + * take the correct setup paths. Additionally, once we've started loading the + * GuC, it is unsafe to keep executing without it because some parts of the HW, + * a subset of which is not cleaned on GT reset, will start expecting the GuC FW + * to be running. 
+ * To solve both these requirements, we commit to using the microcontrollers if + * the relevant modparam is set and the blobs are found on the system. At this + * stage, the only thing that can stop us from attempting to load the blobs on + * the HW and use them is a fundamental issue (e.g. no memory for our + * structures); if we hit such a problem during driver load we're broken even + * without GuC, so there is no point in trying to fall back. + * + * Given the above, we can be in one of 4 states, with the last one implying + * we're committed to using the microcontroller: + * - Not supported: not available in HW and/or firmware not defined. + * - Supported: available in HW and firmware defined. + * - Wanted: supported + enabled in modparam. + * - In use: wanted + firmware found on the system and successfully fetched. + */ -static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) -{ - return intel_guc_is_submission_supported(&uc->guc); +#define __uc_state_checker(x, func, state, required) \ +static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \ +{ \ + return intel_##func##_is_##required(&uc->x); \ } -static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc) -{ - return intel_guc_is_submission_supported(&uc->guc); -} +#define uc_state_checkers(x, func) \ +__uc_state_checker(x, func, supports, supported) \ +__uc_state_checker(x, func, wants, wanted) \ +__uc_state_checker(x, func, uses, used) -static inline bool intel_uc_supports_huc(struct intel_uc *uc) -{ - return intel_uc_supports_guc(uc); -} +uc_state_checkers(guc, guc); +uc_state_checkers(huc, huc); +uc_state_checkers(guc, guc_submission); -static inline bool intel_uc_uses_huc(struct intel_uc *uc) -{ - return intel_huc_is_enabled(&uc->huc); -} +#undef uc_state_checkers +#undef __uc_state_checker #define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \ @@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \ intel_uc_ops_function(sanitize, sanitize, int, 0); intel_uc_ops_function(fetch_firmwares, init_fw, void, ); intel_uc_ops_function(cleanup_firmwares, fini_fw, void, ); -intel_uc_ops_function(init, init, void, ); +intel_uc_ops_function(init, init, int, 0); intel_uc_ops_function(fini, fini, void, ); intel_uc_ops_function(init_hw, init_hw, int, 0); intel_uc_ops_function(fini_hw, fini_hw, void, ); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 8ee0a0c7f447..18c755203688 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * features. 
*/ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \ fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ @@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw) err = i915_inject_probe_error(i915, -ENXIO); if (err) - return err; + goto fail; __force_fw_fetch_failures(uc_fw, -EINVAL); __force_fw_fetch_failures(uc_fw, -ESTALE); @@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags) if (err) return err; - if (!intel_uc_fw_is_available(uc_fw)) + if (!intel_uc_fw_is_loadable(uc_fw)) return -ENOEXEC; /* Call custom loader */ @@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw) void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) { - intel_uc_fw_cleanup_fetch(uc_fw); + if (i915_gem_object_has_pinned_pages(uc_fw->obj)) + i915_gem_object_unpin_pages(uc_fw->obj); + + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE); } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 1f30543d0d2d..888ff0de0244 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -29,8 +29,11 @@ struct intel_gt; * | | SELECTED | * +------------+- / | \ -+ * | | MISSING <--/ | \--> ERROR | - * | fetch | | | - * | | /------> AVAILABLE <---<-----------\ | + * | fetch | V | + * | | AVAILABLE | + * +------------+- | -+ + * | init | V | + * | | /------> LOADABLE <----<-----------\ | * +------------+- \ / \ \ \ -+ * | | FAIL <--< \--> TRANSFERRED \ | * | upload | \ / \ / | @@ -46,6 +49,7 @@ enum intel_uc_fw_status { INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */ INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */ + INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */ INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ @@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) return "ERROR"; case INTEL_UC_FIRMWARE_AVAILABLE: return "AVAILABLE"; + case INTEL_UC_FIRMWARE_LOADABLE: + return "LOADABLE"; case INTEL_UC_FIRMWARE_FAIL: return "FAIL"; case INTEL_UC_FIRMWARE_TRANSFERRED: @@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status) case INTEL_UC_FIRMWARE_SELECTED: return -ESTALE; case INTEL_UC_FIRMWARE_AVAILABLE: + case INTEL_UC_FIRMWARE_LOADABLE: case INTEL_UC_FIRMWARE_TRANSFERRED: case INTEL_UC_FIRMWARE_RUNNING: return 0; @@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw) return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE; } +static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE; +} + static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) { return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED; @@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { if (intel_uc_fw_is_loaded(uc_fw)) - 
intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE); + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE); } static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 771420453f82..8b13f091cee2 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -41,7 +41,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_gt *gt = gvt->gt; unsigned int flags; u64 start, end, size; struct drm_mm_node *node; @@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) flags = PIN_MAPPABLE; } - mutex_lock(&dev_priv->ggtt.vm.mutex); - mmio_hw_access_pre(dev_priv); - ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, + mutex_lock(&gt->ggtt->vm.mutex); + mmio_hw_access_pre(gt); + ret = i915_gem_gtt_insert(&gt->ggtt->vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); - mmio_hw_access_post(dev_priv); - mutex_unlock(&dev_priv->ggtt.vm.mutex); + mmio_hw_access_post(gt); + mutex_unlock(&gt->ggtt->vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", high_gm ? "high" : "low"); @@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) static int alloc_vgpu_gm(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_gt *gt = gvt->gt; int ret; ret = alloc_gm(vgpu, false); @@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu) return 0; out_free_aperture: - mutex_lock(&dev_priv->ggtt.vm.mutex); + mutex_lock(&gt->ggtt->vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); - mutex_unlock(&dev_priv->ggtt.vm.mutex); + mutex_unlock(&gt->ggtt->vm.mutex); return ret; } static void free_vgpu_gm(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gt *gt = gvt->gt; - mutex_lock(&dev_priv->ggtt.vm.mutex); + mutex_lock(&gt->ggtt->vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); drm_mm_remove_node(&vgpu->gm.high_gm_node); - mutex_unlock(&dev_priv->ggtt.vm.mutex); + mutex_unlock(&gt->ggtt->vm.mutex); } /** @@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, u32 fence, u64 value) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->gt->i915; + struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; i915_reg_t fence_reg_lo, fence_reg_hi; - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(uncore->rpm); - if (WARN_ON(fence >= vgpu_fence_sz(vgpu))) + if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu))) return; reg = vgpu->fence.regs[fence]; - if (WARN_ON(!reg)) + if (drm_WARN_ON(&i915->drm, !reg)) return; fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); - I915_WRITE(fence_reg_lo, 0); - POSTING_READ(fence_reg_lo); + intel_uncore_write(uncore, fence_reg_lo, 0); + intel_uncore_posting_read(uncore, fence_reg_lo); - I915_WRITE(fence_reg_hi, upper_32_bits(value)); - I915_WRITE(fence_reg_lo, lower_32_bits(value)); - POSTING_READ(fence_reg_lo); + intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value)); + intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value)); + intel_uncore_posting_read(uncore, fence_reg_lo); }
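The fence hunk just above also shows the MMIO conversion applied throughout this series: the implicit-dev_priv I915_WRITE()/POSTING_READ() macros become explicit intel_uncore_write()/intel_uncore_posting_read() calls on the GT's uncore. A minimal sketch of the 64-bit fence update idiom, lifted from intel_vgpu_write_fence() above (the helper name is hypothetical, not part of the patch):

/*
 * Zero the low dword first so the fence is never sampled with a
 * half-written value, then write high before low; the posting reads
 * flush each write to the hardware before the next step.
 */
static void vgpu_fence_write64(struct intel_uncore *uncore,
			       i915_reg_t lo, i915_reg_t hi, u64 value)
{
	intel_uncore_write(uncore, lo, 0);
	intel_uncore_posting_read(uncore, lo);

	intel_uncore_write(uncore, hi, upper_32_bits(value));
	intel_uncore_write(uncore, lo, lower_32_bits(value));
	intel_uncore_posting_read(uncore, lo);
}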
static void _clear_vgpu_fence(struct intel_vgpu *vgpu) @@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu) static void free_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; + intel_wakeref_t wakeref; u32 i; - if (WARN_ON(!vgpu_fence_sz(vgpu))) + if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu))) return; - intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get(uncore->rpm); - mutex_lock(&dev_priv->ggtt.vm.mutex); + mutex_lock(&gvt->gt->ggtt->vm.mutex); _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } - mutex_unlock(&dev_priv->ggtt.vm.mutex); + mutex_unlock(&gvt->gt->ggtt->vm.mutex); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_runtime_pm_put(uncore->rpm, wakeref); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; - struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct intel_uncore *uncore = gvt->gt->uncore; struct i915_fence_reg *reg; + intel_wakeref_t wakeref; int i; - intel_runtime_pm_get(rpm); + wakeref = intel_runtime_pm_get(uncore->rpm); /* Request fences from host */ - mutex_lock(&dev_priv->ggtt.vm.mutex); + mutex_lock(&gvt->gt->ggtt->vm.mutex); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { - reg = i915_reserve_fence(&dev_priv->ggtt); + reg = i915_reserve_fence(gvt->gt->ggtt); if (IS_ERR(reg)) goto out_free_fence; @@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); - mutex_unlock(&dev_priv->ggtt.vm.mutex); - intel_runtime_pm_put_unchecked(rpm); + mutex_unlock(&gvt->gt->ggtt->vm.mutex); + intel_runtime_pm_put(uncore->rpm, wakeref); return 0; + out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); /* Return fences to host, if fail */ @@ -220,8 +224,8 @@ out_free_fence: i915_unreserve_fence(reg); vgpu->fence.regs[i] = NULL; } - mutex_unlock(&dev_priv->ggtt.vm.mutex); - intel_runtime_pm_put_unchecked(rpm); + mutex_unlock(&gvt->gt->ggtt->vm.mutex); + intel_runtime_pm_put_unchecked(uncore->rpm); return -ENOSPC; } @@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) */ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_gvt *gvt = vgpu->gvt; + intel_wakeref_t wakeref; - intel_runtime_pm_get(&dev_priv->runtime_pm); - _clear_vgpu_fence(vgpu); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref) + _clear_vgpu_fence(vgpu); } /** diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 19cf1bbe059d..072725a448db 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - if (WARN_ON(bytes > 4)) + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + if (drm_WARN_ON(&i915->drm, bytes > 4)) return -EINVAL; - if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size)) + if (drm_WARN_ON(&i915->drm, + offset + bytes > vgpu->gvt->device_info.cfg_space_size)) return 
-EINVAL; memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes); @@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int ret; - if (WARN_ON(bytes > 4)) + if (drm_WARN_ON(&i915->drm, bytes > 4)) return -EINVAL; - if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size)) + if (drm_WARN_ON(&i915->drm, + offset + bytes > vgpu->gvt->device_info.cfg_space_size)) return -EINVAL; /* First check if it's PCI_COMMAND */ if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) { - if (WARN_ON(bytes > 2)) + if (drm_WARN_ON(&i915->drm, bytes > 2)) return -EINVAL; return emulate_pci_command_write(vgpu, offset, p_data, bytes); } switch (rounddown(offset, 4)) { case PCI_ROM_ADDRESS: - if (WARN_ON(!IS_ALIGNED(offset, 4))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes); case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5: - if (WARN_ON(!IS_ALIGNED(offset, 4))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; return emulate_pci_bar_write(vgpu, offset, p_data, bytes); case INTEL_GVT_PCI_SWSCI: - if (WARN_ON(!IS_ALIGNED(offset, 4))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data); if (ret) @@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, break; case INTEL_GVT_PCI_OPREGION: - if (WARN_ON(!IS_ALIGNED(offset, 4))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4))) return -EINVAL; ret = intel_vgpu_opregion_base_write_handler(vgpu, *(u32 *)p_data); @@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size = - pci_resource_len(gvt->dev_priv->drm.pdev, 0); + pci_resource_len(gvt->gt->i915->drm.pdev, 0); vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size = - pci_resource_len(gvt->dev_priv->drm.pdev, 2); + pci_resource_len(gvt->gt->i915->drm.pdev, 2); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 21a176cd8acc..a3cc080a46c6 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -164,6 +164,7 @@ struct decode_info { #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01) #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02) #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04) +#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03) #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B) @@ -462,7 +463,7 @@ enum { struct parser_exec_state { struct intel_vgpu *vgpu; - int ring_id; + const struct intel_engine_cs *engine; int buf_type; @@ -635,39 +636,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { }, }; -static inline u32 get_opcode(u32 cmd, int ring_id) +static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine) { const struct decode_info *d_info; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; + d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; if (d_info == NULL) return INVALID_OP; return cmd >> (32 - d_info->op_len); } -static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, - unsigned int opcode, 
int ring_id) +static inline const struct cmd_info * +find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode, + const struct intel_engine_cs *engine) { struct cmd_entry *e; hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { - if (opcode == e->info->opcode && e->info->rings & BIT(ring_id)) + if (opcode == e->info->opcode && + e->info->rings & engine->mask) return e->info; } return NULL; } -static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt, - u32 cmd, int ring_id) +static inline const struct cmd_info * +get_cmd_info(struct intel_gvt *gvt, u32 cmd, + const struct intel_engine_cs *engine) { u32 opcode; - opcode = get_opcode(cmd, ring_id); + opcode = get_opcode(cmd, engine); if (opcode == INVALID_OP) return NULL; - return find_cmd_entry(gvt, opcode, ring_id); + return find_cmd_entry(gvt, opcode, engine); } static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low) @@ -675,12 +679,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low) return (cmd >> low) & ((1U << (hi - low + 1)) - 1); } -static inline void print_opcode(u32 cmd, int ring_id) +static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine) { const struct decode_info *d_info; int i; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; + d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)]; if (d_info == NULL) return; @@ -709,10 +713,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s) int cnt = 0; int i; - gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" - " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, - s->ring_id, s->ring_start, s->ring_start + s->ring_size, - s->ring_head, s->ring_tail); + gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)" + " ring_head(%08lx) ring_tail(%08lx)\n", + s->vgpu->id, s->engine->name, + s->ring_start, s->ring_start + s->ring_size, + s->ring_head, s->ring_tail); gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", s->buf_type == RING_BUFFER_INSTRUCTION ? 
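The cmd_parser changes here all follow one refactor: parser_exec_state now carries a const struct intel_engine_cs * instead of an int ring_id, so per-ring tests use the engine's precomputed mask rather than rebuilding it with BIT(). The find_cmd_entry() hunk above reduces to this before/after sketch (for a single physical engine, engine->mask is BIT(engine->id)):

/* before: an integer ring id, expanded to a bit at every use */
if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
	return e->info;

/* after: the engine already knows its own mask */
if (opcode == e->info->opcode && e->info->rings & engine->mask)
	return e->info;

The same substitution recurs below in batch_buffer_needs_scan(), scan_workload() and the vgpu->resetting_eng checks, and it also lets the error messages print engine->name instead of a bare ring number.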
@@ -729,7 +734,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s) s->ip_va, cmd_val(s, 0), cmd_val(s, 1), cmd_val(s, 2), cmd_val(s, 3)); - print_opcode(cmd_val(s, 0), s->ring_id); + print_opcode(cmd_val(s, 0), s->engine); s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12); @@ -840,7 +845,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s, unsigned int data; u32 ring_base; u32 nopid; - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; if (!strcmp(cmd, "lri")) data = cmd_val(s, index + 1); @@ -850,7 +854,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s, return -EINVAL; } - ring_base = dev_priv->engine[s->ring_id]->mmio_base; + ring_base = s->engine->mmio_base; nopid = i915_mmio_reg_offset(RING_NOPID(ring_base)); if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) && @@ -926,9 +930,9 @@ static int cmd_reg_handler(struct parser_exec_state *s, * update reg values in it into vregs, so LRIs in workload with * inhibit context will restore with correct values */ - if (IS_GEN(gvt->dev_priv, 9) && - intel_gvt_mmio_is_in_ctx(gvt, offset) && - !strncmp(cmd, "lri", 3)) { + if (IS_GEN(s->engine->i915, 9) && + intel_gvt_mmio_is_in_ctx(gvt, offset) && + !strncmp(cmd, "lri", 3)) { intel_gvt_hypervisor_read_gpa(s->vgpu, s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4); /* check inhibit context */ @@ -964,23 +968,10 @@ static int cmd_handler_lri(struct parser_exec_state *s) { int i, ret = 0; int cmd_len = cmd_length(s); - struct intel_gvt *gvt = s->vgpu->gvt; - u32 valid_len = CMD_LEN(1); - - /* - * Official intel docs are somewhat sloppy , check the definition of - * MI_LOAD_REGISTER_IMM. - */ - #define MAX_VALID_LEN 127 - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { - gvt_err("len is not valid: len=%u valid_len=%u\n", - cmd_len, valid_len); - return -EFAULT; - } for (i = 1; i < cmd_len; i += 2) { - if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { - if (s->ring_id == BCS0 && + if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { + if (s->engine->id == BCS0 && cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) ret |= 0; else @@ -1001,9 +992,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s) int cmd_len = cmd_length(s); for (i = 1; i < cmd_len; i += 2) { - if (IS_BROADWELL(s->vgpu->gvt->dev_priv)) + if (IS_BROADWELL(s->engine->i915)) ret |= ((cmd_reg_inhibit(s, i) || - (cmd_reg_inhibit(s, i + 1)))) ? + (cmd_reg_inhibit(s, i + 1)))) ? -EBADRQC : 0; if (ret) break; @@ -1029,7 +1020,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s) int cmd_len = cmd_length(s); for (i = 1; i < cmd_len;) { - if (IS_BROADWELL(gvt->dev_priv)) + if (IS_BROADWELL(s->engine->i915)) ret |= (cmd_reg_inhibit(s, i)) ? 
-EBADRQC : 0; if (ret) break; @@ -1141,7 +1132,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) if (ret) return ret; if (index_mode) { - hws_pga = s->vgpu->hws_pga[s->ring_id]; + hws_pga = s->vgpu->hws_pga[s->engine->id]; gma = hws_pga + gma; patch_value(s, cmd_ptr(s, 2), gma); val = cmd_val(s, 1) & (~(1 << 21)); @@ -1155,15 +1146,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) return ret; if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY) - set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify, - s->workload->pending_events); + set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify, + s->workload->pending_events); return 0; } static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { - set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, - s->workload->pending_events); + set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt, + s->workload->pending_events); patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } @@ -1213,7 +1204,7 @@ struct plane_code_mapping { static int gen8_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = s->engine->i915; struct plane_code_mapping gen8_plane_code[] = { [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE}, [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE}, @@ -1230,7 +1221,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, dword2 = cmd_val(s, 2); v = (dword0 & GENMASK(21, 19)) >> 19; - if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code))) + if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code))) return -EBADRQC; info->pipe = gen8_plane_code[v].pipe; @@ -1250,7 +1241,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, info->stride_reg = SPRSTRIDE(info->pipe); info->surf_reg = SPRSURF(info->pipe); } else { - WARN_ON(1); + drm_WARN_ON(&dev_priv->drm, 1); return -EBADRQC; } return 0; @@ -1259,7 +1250,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s, static int skl_decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = s->engine->i915; struct intel_vgpu *vgpu = s->vgpu; u32 dword0 = cmd_val(s, 0); u32 dword1 = cmd_val(s, 1); @@ -1318,13 +1309,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, static int gen8_check_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; u32 stride, tile; if (!info->async_flip) return 0; - if (INTEL_GEN(dev_priv) >= 9) { + if (INTEL_GEN(s->engine->i915) >= 9) { stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1347,7 +1337,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = s->engine->i915; struct intel_vgpu *vgpu = s->vgpu; set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), @@ -1378,11 +1368,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip( static int decode_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = 
s->vgpu->gvt->dev_priv; - - if (IS_BROADWELL(dev_priv)) + if (IS_BROADWELL(s->engine->i915)) return gen8_decode_mi_display_flip(s, info); - if (INTEL_GEN(dev_priv) >= 9) + if (INTEL_GEN(s->engine->i915) >= 9) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1667,7 +1655,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) if (ret) return ret; if (index_mode) { - hws_pga = s->vgpu->hws_pga[s->ring_id]; + hws_pga = s->vgpu->hws_pga[s->engine->id]; gma = hws_pga + gma; patch_value(s, cmd_ptr(s, 1), gma); val = cmd_val(s, 0) & (~(1 << 21)); @@ -1676,8 +1664,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s) } /* Check notify bit */ if ((cmd_val(s, 0) & (1 << 8))) - set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw, - s->workload->pending_events); + set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw, + s->workload->pending_events); return ret; } @@ -1725,12 +1713,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, static int batch_buffer_needs_scan(struct parser_exec_state *s) { /* Decide privilege based on address space */ - if (cmd_val(s, 0) & (1 << 8) && - !(s->vgpu->scan_nonprivbb & (1 << s->ring_id))) + if (cmd_val(s, 0) & BIT(8) && + !(s->vgpu->scan_nonprivbb & s->engine->mask)) return 0; + return 1; } +static const char *repr_addr_type(unsigned int type) +{ + return type == PPGTT_BUFFER ? "ppgtt" : "ggtt"; +} + static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size, unsigned long *bb_end_cmd_offset) @@ -1753,24 +1747,24 @@ static int find_bb_size(struct parser_exec_state *s, return -EFAULT; cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", - cmd, get_opcode(cmd, s->ring_id), - (s->buf_addr_type == PPGTT_BUFFER) ? - "ppgtt" : "ggtt", s->ring_id, s->workload); + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", + cmd, get_opcode(cmd, s->engine), + repr_addr_type(s->buf_addr_type), + s->engine->name, s->workload); return -EBADRQC; } do { if (copy_gma_to_hva(s->vgpu, mm, - gma, gma + 4, &cmd) < 0) + gma, gma + 4, &cmd) < 0) return -EFAULT; - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", - cmd, get_opcode(cmd, s->ring_id), - (s->buf_addr_type == PPGTT_BUFFER) ? - "ppgtt" : "ggtt", s->ring_id, s->workload); + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", + cmd, get_opcode(cmd, s->engine), + repr_addr_type(s->buf_addr_type), + s->engine->name, s->workload); return -EBADRQC; } @@ -1799,12 +1793,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va) u32 cmd = *(u32 *)va; const struct cmd_info *info; - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", - cmd, get_opcode(cmd, s->ring_id), - (s->buf_addr_type == PPGTT_BUFFER) ? 
- "ppgtt" : "ggtt", s->ring_id, s->workload); + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", + cmd, get_opcode(cmd, s->engine), + repr_addr_type(s->buf_addr_type), + s->engine->name, s->workload); return -EBADRQC; } @@ -1857,7 +1851,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) if (bb->ppgtt) start_offset = gma & ~I915_GTT_PAGE_MASK; - bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv, + bb->obj = i915_gem_object_create_shmem(s->engine->i915, round_up(bb_size + start_offset, PAGE_SIZE)); if (IS_ERR(bb->obj)) { @@ -2480,6 +2474,9 @@ static const struct cmd_info cmd_info[] = { {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(1), 8, NULL}, + {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, + F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, + {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, @@ -2666,25 +2663,25 @@ static int cmd_parser_exec(struct parser_exec_state *s) if (cmd == MI_NOOP) info = &cmd_info[mi_noop_index]; else - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + info = get_cmd_info(s->vgpu->gvt, cmd, s->engine); if (info == NULL) { - gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", - cmd, get_opcode(cmd, s->ring_id), - (s->buf_addr_type == PPGTT_BUFFER) ? - "ppgtt" : "ggtt", s->ring_id, s->workload); + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n", + cmd, get_opcode(cmd, s->engine), + repr_addr_type(s->buf_addr_type), + s->engine->name, s->workload); return -EBADRQC; } s->info = info; - trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va, + trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va, cmd_length(s), s->buf_type, s->buf_addr_type, s->workload, info->name); if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { ret = gvt_check_valid_cmd_length(cmd_length(s), - info->valid_len); + info->valid_len); if (ret) return ret; } @@ -2781,7 +2778,7 @@ static int scan_workload(struct intel_vgpu_workload *workload) s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; s.vgpu = workload->vgpu; - s.ring_id = workload->ring_id; + s.engine = workload->engine; s.ring_start = workload->rb_start; s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); s.ring_head = gma_head; @@ -2790,8 +2787,7 @@ static int scan_workload(struct intel_vgpu_workload *workload) s.workload = workload; s.is_ctx_wa = false; - if ((bypass_scan_mask & (1 << workload->ring_id)) || - gma_head == gma_tail) + if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail) return 0; ret = ip_gma_set(&s, gma_head); @@ -2830,7 +2826,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; s.vgpu = workload->vgpu; - s.ring_id = workload->ring_id; + s.engine = workload->engine; s.ring_start = wa_ctx->indirect_ctx.guest_gma; s.ring_size = ring_size; s.ring_head = gma_head; @@ -2855,7 +2851,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) struct intel_vgpu_submission *s = &vgpu->submission; unsigned long gma_head, gma_tail, gma_top, guest_rb_size; void *shadow_ring_buffer_va; - int ring_id = workload->ring_id; int ret; guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl); @@ -2868,21 +2863,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) gma_tail = workload->rb_start + workload->rb_tail; gma_top 
= workload->rb_start + guest_rb_size; - if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) { + if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) { void *p; /* realloc the new ring buffer if needed */ - p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len, - GFP_KERNEL); + p = krealloc(s->ring_scan_buffer[workload->engine->id], + workload->rb_len, GFP_KERNEL); if (!p) { gvt_vgpu_err("fail to re-alloc ring scan buffer\n"); return -ENOMEM; } - s->ring_scan_buffer[ring_id] = p; - s->ring_scan_buffer_size[ring_id] = workload->rb_len; + s->ring_scan_buffer[workload->engine->id] = p; + s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len; } - shadow_ring_buffer_va = s->ring_scan_buffer[ring_id]; + shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id]; /* get shadow ring buffer va */ workload->shadow_ring_buffer_va = shadow_ring_buffer_va; @@ -2940,7 +2935,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) int ret = 0; void *map; - obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv, + obj = i915_gem_object_create_shmem(workload->engine->i915, roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE)); if (IS_ERR(obj)) @@ -3029,30 +3024,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return 0; } -static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, - unsigned int opcode, unsigned long rings) -{ - const struct cmd_info *info = NULL; - unsigned int ring; - - for_each_set_bit(ring, &rings, I915_NUM_ENGINES) { - info = find_cmd_entry(gvt, opcode, ring); - if (info) - break; - } - return info; -} - static int init_cmd_table(struct intel_gvt *gvt) { + unsigned int gen_type = intel_gvt_get_device_type(gvt); int i; - struct cmd_entry *e; - const struct cmd_info *info; - unsigned int gen_type; - - gen_type = intel_gvt_get_device_type(gvt); for (i = 0; i < ARRAY_SIZE(cmd_info); i++) { + struct cmd_entry *e; + if (!(cmd_info[i].devices & gen_type)) continue; @@ -3061,23 +3040,16 @@ static int init_cmd_table(struct intel_gvt *gvt) return -ENOMEM; e->info = &cmd_info[i]; - info = find_cmd_entry_any_ring(gvt, - e->info->opcode, e->info->rings); - if (info) { - gvt_err("%s %s duplicated\n", e->info->name, - info->name); - kfree(e); - return -EEXIST; - } if (cmd_info[i].opcode == OP_MI_NOOP) mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n", - e->info->name, e->info->opcode, e->info->flag, - e->info->devices, e->info->rings); + e->info->name, e->info->opcode, e->info->flag, + e->info->devices, e->info->rings); } + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 285f6011a537..ec47d4114554 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv, static inline int mmio_diff_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *i915 = gvt->dev_priv; struct mmio_diff_param *param = data; struct diff_mmio *node; u32 preg, vreg; - preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset)); + preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset)); vreg = vgpu_vreg(param->vgpu, offset); if (preg != vreg) { @@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused) mutex_lock(&gvt->lock); spin_lock_bh(&gvt->scheduler.mmio_context_lock); - mmio_hw_access_pre(gvt->dev_priv); + 
mmio_hw_access_pre(gvt->gt); /* Recognize all the diff mmios to list. */ intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param); - mmio_hw_access_post(gvt->dev_priv); + mmio_hw_access_post(gvt->gt); spin_unlock_bh(&gvt->scheduler.mmio_context_lock); mutex_unlock(&gvt->lock); @@ -128,6 +127,7 @@ static int vgpu_scan_nonprivbb_get(void *data, u64 *val) { struct intel_vgpu *vgpu = (struct intel_vgpu *)data; + *val = vgpu->scan_nonprivbb; return 0; } @@ -142,42 +142,7 @@ static int vgpu_scan_nonprivbb_set(void *data, u64 val) { struct intel_vgpu *vgpu = (struct intel_vgpu *)data; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - enum intel_engine_id id; - char buf[128], *s; - int len; - - val &= (1 << I915_NUM_ENGINES) - 1; - - if (vgpu->scan_nonprivbb == val) - return 0; - - if (!val) - goto done; - - len = sprintf(buf, - "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:", - vgpu->id); - - s = buf + len; - - for (id = 0; id < I915_NUM_ENGINES; id++) { - struct intel_engine_cs *engine; - - engine = dev_priv->engine[id]; - if (engine && (val & (1 << id))) { - len = snprintf(s, 4, "%d, ", engine->id); - s += len; - } else - val &= ~(1 << id); - } - - if (val) - sprintf(s, "low performance expected."); - - pr_warn("%s\n", buf); -done: vgpu->scan_nonprivbb = val; return 0; } @@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_init(struct intel_gvt *gvt) { - struct drm_minor *minor = gvt->dev_priv->drm.primary; + struct drm_minor *minor = gvt->gt->i915->drm.primary; gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index a62bdf9be682..a1696e9ce4b6 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu) static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) return 0; @@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES)) + if (drm_WARN_ON(&dev_priv->drm, + pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) @@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; int pipe; if (IS_BROXTON(dev_priv)) { @@ -207,20 +208,47 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SKL_FUSE_PG_DIST_STATUS(SKL_PG0) | SKL_FUSE_PG_DIST_STATUS(SKL_PG1) | SKL_FUSE_PG_DIST_STATUS(SKL_PG2); - vgpu_vreg_t(vgpu, LCPLL1_CTL) |= - LCPLL_PLL_ENABLE | - LCPLL_PLL_LOCK; - vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; - + /* + * Only one pipe is enabled in the current vGPU display, and PIPE_A is + * tied to TRANSCODER_A in HW, so it is safe to assume PIPE_A and + * TRANSCODER_A can be enabled. PORT_x depends on the input of + * setup_virtual_dp_monitor; we can bind DPLL0 to any PORT_x, + * so we fix it to DPLL0 here.
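The golden M/N values programmed in the rest of this hunk can be sanity-checked against the link parameters stated in the comment. For the data M/N, with a 154000 kHz pixel clock, 24 bpp, 4 lanes and a 1.62 GHz link rate (162000 kHz link clock), data_m/data_n = (154000 * 24) / (162000 * 4 * 8) ≈ 0.71296, which matches the 0x5b425e / 0x800000 pair written below; likewise 63 << TU_SIZE_SHIFT encodes the usual TU size of 64, since the field stores size minus one. A throwaway userspace check of that arithmetic, not kernel code:

#include <stdio.h>

int main(void)
{
	const double pixel_khz = 154000.0, bpp = 24.0, lanes = 4.0;
	const double link_khz = 162000.0;	/* 1.62 GHz DP link rate */

	/* data M/N: payload bits per pixel clock vs raw link capacity */
	double expected = (pixel_khz * bpp) / (link_khz * lanes * 8.0);
	double programmed = (double)0x5b425e / (double)0x800000;

	printf("expected %.5f programmed %.5f\n", expected, programmed);
	return 0;
}

The TODO in the comment below (deriving the DP link symbol clock and stream clock M/N instead of hard-coding them) is left unresolved by this patch.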
+ * Setup DPLL0: DP link clk 1620 MHz, non SSC, DP Mode + */ + vgpu_vreg_t(vgpu, DPLL_CTRL1) = + DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, DPLL_CTRL1) |= + DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0); + vgpu_vreg_t(vgpu, LCPLL1_CTL) = + LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK; + vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0); + /* + * Golden M/N are calculated based on: + * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID), + * DP link clk 1620 MHz and non-constant_n. + * TODO: calculate DP link symbol clk and stream clk m/n. + */ + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; + vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; + vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; + vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B); vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -235,12 +263,18 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -255,12 +289,18 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { + vgpu_vreg_t(vgpu, DPLL_CTRL2) &= + ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D); + vgpu_vreg_t(vgpu, DPLL_CTRL2) |= + DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D); vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -319,9 +359,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, int type, unsigned int resolution) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); - 
if (WARN_ON(resolution >= GVT_EDID_NUM)) + if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM)) return -EINVAL; port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); @@ -389,7 +430,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_vgpu_irq *irq = &vgpu->irq; int vblank_event[] = { [PIPE_A] = PIPE_A_VBLANK, @@ -421,7 +462,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu) int pipe; mutex_lock(&vgpu->vgpu_lock); - for_each_pipe(vgpu->gvt->dev_priv, pipe) + for_each_pipe(vgpu->gvt->gt->i915, pipe) emulate_vblank_on_pipe(vgpu, pipe); mutex_unlock(&vgpu->vgpu_lock); } @@ -454,11 +495,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) */ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; /* TODO: add more platforms support */ - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv)) { + if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915)) { if (connected) { vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; @@ -484,7 +525,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected) */ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) @@ -506,7 +547,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) */ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; intel_vgpu_init_i2c_edid(vgpu); diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index ae139f0877ae..37fc460414a8 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -67,11 +67,11 @@ static int vgpu_gem_get_pages( u32 page_num; fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info; - if (WARN_ON(!fb_info)) + if (drm_WARN_ON(&dev_priv->drm, !fb_info)) return -ENODEV; vgpu = fb_info->obj->vgpu; - if (WARN_ON(!vgpu)) + if (drm_WARN_ON(&dev_priv->drm, !vgpu)) return -ENODEV; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args) { - struct drm_device *dev = &vgpu->gvt->dev_priv->drm; + struct drm_device *dev = &vgpu->gvt->gt->i915->drm; struct vfio_device_gfx_plane_info *gfx_plane_info = args; struct intel_vgpu_dmabuf_obj *dmabuf_obj; struct intel_vgpu_fb_info fb_info; @@ -523,7 +523,7 @@ out: /* To associate an exposed dmabuf with the dmabuf_obj */ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id) { - struct drm_device *dev = &vgpu->gvt->dev_priv->drm; + struct drm_device *dev = &vgpu->gvt->gt->i915->drm; struct intel_vgpu_dmabuf_obj *dmabuf_obj; struct drm_i915_gem_object *obj; struct dma_buf *dmabuf; diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 1fe6124918f1..190651df5db1 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -135,7 +135,7 @@ static void 
reset_gmbus_controller(struct intel_vgpu *vgpu) static int gmbus0_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int port, pin_select; memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); @@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (pin_select == 0) return 0; - if (IS_BROXTON(dev_priv)) + if (IS_BROXTON(i915)) port = bxt_get_port_from_gmbus0(pin_select); - else if (IS_COFFEELAKE(dev_priv)) + else if (IS_COFFEELAKE(i915)) port = cnp_get_port_from_gmbus0(pin_select); else port = get_port_from_gmbus0(pin_select); - if (WARN_ON(port < 0)) + if (drm_WARN_ON(&i915->drm, port < 0)) return 0; vgpu->display.i2c_edid.state = I2C_GMBUS; @@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - WARN_ON(1); + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + drm_WARN_ON(&i915->drm, 1); return 0; } @@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - if (WARN_ON(bytes > 8 && (offset & (bytes - 1)))) + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1)))) return -EINVAL; if (offset == i915_mmio_reg_offset(PCH_GMBUS2)) @@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu, int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - if (WARN_ON(bytes > 8 && (offset & (bytes - 1)))) + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1)))) return -EINVAL; if (offset == i915_mmio_reg_offset(PCH_GMBUS0)) @@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid; int msg_length, ret_msg_size; int msg, addr, ctrl, op; @@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, * support the gfx driver to do EDID access. 
*/ } else { - if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ)) + if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ)) return; - if (WARN_ON(msg_length != 4)) + if (drm_WARN_ON(&i915->drm, msg_length != 4)) return; if (i2c_edid->edid_available && i2c_edid->slave_selected) { unsigned char val = edid_get_byte(vgpu); diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index d6e7a1189bad..dd25c3024370 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -39,8 +39,7 @@ #define _EL_OFFSET_STATUS_BUF 0x370 #define _EL_OFFSET_STATUS_PTR 0x3A0 -#define execlist_ring_mmio(gvt, ring_id, offset) \ - (gvt->dev_priv->engine[ring_id]->mmio_base + (offset)) +#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset)) #define valid_context(ctx) ((ctx)->valid) #define same_context(a, b) (((a)->context_id == (b)->context_id) && \ @@ -54,12 +53,12 @@ static int context_switch_events[] = { [VECS0] = VECS_AS_CONTEXT_SWITCH, }; -static int ring_id_to_context_switch_event(unsigned int ring_id) +static int to_context_switch_event(const struct intel_engine_cs *engine) { - if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events))) + if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; - return context_switch_events[ring_id]; + return context_switch_events[engine->id]; } static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist) @@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist) struct execlist_ctx_descriptor_format *desc = execlist->running_context; struct intel_vgpu *vgpu = execlist->vgpu; struct execlist_status_format status; - int ring_id = execlist->ring_id; - u32 status_reg = execlist_ring_mmio(vgpu->gvt, - ring_id, _EL_OFFSET_STATUS); + u32 status_reg = + execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS); status.ldw = vgpu_vreg(vgpu, status_reg); status.udw = vgpu_vreg(vgpu, status_reg + 4); @@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist) } static void emulate_csb_update(struct intel_vgpu_execlist *execlist, - struct execlist_context_status_format *status, - bool trigger_interrupt_later) + struct execlist_context_status_format *status, + bool trigger_interrupt_later) { struct intel_vgpu *vgpu = execlist->vgpu; - int ring_id = execlist->ring_id; struct execlist_context_status_pointer_format ctx_status_ptr; u32 write_pointer; u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset; unsigned long hwsp_gpa; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, - _EL_OFFSET_STATUS_PTR); - ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id, - _EL_OFFSET_STATUS_BUF); + ctx_status_ptr_reg = + execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR); + ctx_status_buf_reg = + execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); @@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist, /* Update the CSB and CSB write pointer in HWSP */ hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - vgpu->hws_pga[ring_id]); + vgpu->hws_pga[execlist->engine->id]); if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) { intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + - write_pointer * 8, - status, 8); + hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8, + status, 8); intel_gvt_hypervisor_write_gpa(vgpu, - hwsp_gpa + - 
intel_hws_csb_write_index(dev_priv) * 4, - &write_pointer, 4); + hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4, + &write_pointer, 4); } gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n", - vgpu->id, write_pointer, offset, status->ldw, status->udw); + vgpu->id, write_pointer, offset, status->ldw, status->udw); if (trigger_interrupt_later) return; intel_vgpu_trigger_virtual_event(vgpu, - ring_id_to_context_switch_event(execlist->ring_id)); + to_context_switch_event(execlist->engine)); } static int emulate_execlist_ctx_schedule_out( @@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot( struct intel_vgpu_execlist *execlist) { struct intel_vgpu *vgpu = execlist->vgpu; - int ring_id = execlist->ring_id; - u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id, - _EL_OFFSET_STATUS); + u32 status_reg = + execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS); struct execlist_status_format status; status.ldw = vgpu_vreg(vgpu, status_reg); @@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; struct execlist_ctx_descriptor_format ctx[2]; - int ring_id = workload->ring_id; int ret; if (!workload->emulate_schedule_in) @@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0); ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1); - ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx); + ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id], + ctx); if (ret) { gvt_vgpu_err("fail to emulate execlist schedule in\n"); return ret; @@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload) static int complete_execlist_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - int ring_id = workload->ring_id; struct intel_vgpu_submission *s = &vgpu->submission; - struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; + struct intel_vgpu_execlist *execlist = + &s->execlist[workload->engine->id]; struct intel_vgpu_workload *next_workload; - struct list_head *next = workload_q_head(vgpu, ring_id)->next; + struct list_head *next = workload_q_head(vgpu, workload->engine)->next; bool lite_restore = false; int ret = 0; - gvt_dbg_el("complete workload %p status %d\n", workload, - workload->status); + gvt_dbg_el("complete workload %p status %d\n", + workload, workload->status); - if (workload->status || (vgpu->resetting_eng & BIT(ring_id))) + if (workload->status || vgpu->resetting_eng & workload->engine->mask) goto out; - if (!list_empty(workload_q_head(vgpu, ring_id))) { + if (!list_empty(workload_q_head(vgpu, workload->engine))) { struct execlist_ctx_descriptor_format *this_desc, *next_desc; next_workload = container_of(next, @@ -436,14 +429,15 @@ out: return ret; } -static int submit_context(struct intel_vgpu *vgpu, int ring_id, - struct execlist_ctx_descriptor_format *desc, - bool emulate_schedule_in) +static int submit_context(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine, + struct execlist_ctx_descriptor_format *desc, + bool emulate_schedule_in) { struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_workload *workload = NULL; - workload = intel_vgpu_create_workload(vgpu, ring_id, desc); + workload = intel_vgpu_create_workload(vgpu, engine, desc); if (IS_ERR(workload)) 
return PTR_ERR(workload); @@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, workload->emulate_schedule_in = emulate_schedule_in; if (emulate_schedule_in) - workload->elsp_dwords = s->execlist[ring_id].elsp_dwords; + workload->elsp_dwords = s->execlist[engine->id].elsp_dwords; gvt_dbg_el("workload %p emulate schedule_in %d\n", workload, - emulate_schedule_in); + emulate_schedule_in); intel_vgpu_queue_workload(workload); return 0; } -int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) +int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine) { struct intel_vgpu_submission *s = &vgpu->submission; - struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; + struct intel_vgpu_execlist *execlist = &s->execlist[engine->id]; struct execlist_ctx_descriptor_format *desc[2]; int i, ret; @@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) for (i = 0; i < ARRAY_SIZE(desc); i++) { if (!desc[i]->valid) continue; - ret = submit_context(vgpu, ring_id, desc[i], i == 0); + ret = submit_context(vgpu, engine, desc[i], i == 0); if (ret) { gvt_vgpu_err("failed to submit desc %d\n", i); return ret; @@ -504,22 +499,22 @@ inv_desc: return -EINVAL; } -static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) +static void init_vgpu_execlist(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine) { struct intel_vgpu_submission *s = &vgpu->submission; - struct intel_vgpu_execlist *execlist = &s->execlist[ring_id]; + struct intel_vgpu_execlist *execlist = &s->execlist[engine->id]; struct execlist_context_status_pointer_format ctx_status_ptr; u32 ctx_status_ptr_reg; memset(execlist, 0, sizeof(*execlist)); execlist->vgpu = vgpu; - execlist->ring_id = ring_id; + execlist->engine = engine; execlist->slot[0].index = 0; execlist->slot[1].index = 1; - ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id, - _EL_OFFSET_STATUS_PTR); + ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); ctx_status_ptr.read_ptr = 0; ctx_status_ptr.write_ptr = 0x7; @@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) static void clean_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; struct intel_vgpu_submission *s = &vgpu->submission; intel_engine_mask_t tmp; @@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu, static void reset_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; intel_engine_mask_t tmp; for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) - init_vgpu_execlist(vgpu, engine->id); + init_vgpu_execlist(vgpu, engine); } static int init_execlist(struct intel_vgpu *vgpu, diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 5c0c1fd30c83..d62cd14605a3 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -170,16 +170,17 @@ struct intel_vgpu_execlist { struct intel_vgpu_execlist_slot *running_slot; struct intel_vgpu_execlist_slot *pending_slot; struct execlist_ctx_descriptor_format *running_context; - int ring_id; struct intel_vgpu 
*vgpu; struct intel_vgpu_elsp_dwords elsp_dwords; + const struct intel_engine_cs *engine; }; void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu); int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); -int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id); +int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine); void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask); diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 8bb292b01271..0889ad8291b0 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha, static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 tiled, int stride_mask, int bpp) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; @@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu) int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, struct intel_vgpu_primary_plane_format *plane) { + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 val, fmt; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int pipe; pipe = get_active_pipe(vgpu); @@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode) int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, struct intel_vgpu_cursor_plane_format *plane) { + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 val, mode, index; u32 alpha_plane, alpha_force; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int pipe; pipe = get_active_pipe(vgpu); diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index b0c1fda32977..990a181094e3 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = { static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) { - struct drm_i915_private *i915 = gvt->dev_priv; - - *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore, + *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset)); return 0; } @@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data) static int expose_firmware_sysfs(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->dev_priv->drm.pdev; + struct pci_dev *pdev = gvt->gt->i915->drm.pdev; struct gvt_firmware_header *h; void *firmware; void *p; @@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) static void clean_firmware_sysfs(struct intel_gvt *gvt) { - struct pci_dev *pdev = gvt->dev_priv->drm.pdev; + struct pci_dev *pdev = gvt->gt->i915->drm.pdev; device_remove_bin_file(&pdev->dev, &firmware_attr); vfree(firmware_attr.private); @@ -153,8 +151,7 @@ static int verify_firmware(struct intel_gvt *gvt, const struct firmware *fw) { struct intel_gvt_device_info *info = &gvt->device_info; - struct drm_i915_private *dev_priv = gvt->dev_priv; - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = gvt->gt->i915->drm.pdev; struct gvt_firmware_header *h; unsigned long id, crc32_start; const void *mem; @@ -208,8 +205,7 @@ invalid_firmware: int intel_gvt_load_firmware(struct intel_gvt *gvt) { 
struct intel_gvt_device_info *info = &gvt->device_info; - struct drm_i915_private *dev_priv = gvt->dev_priv; - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = gvt->gt->i915->drm.pdev; struct intel_gvt_firmware *firmware = &gvt->firmware; struct gvt_firmware_header *h; const struct firmware *fw; @@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) gvt_dbg_core("request hw state firmware %s...\n", path); - ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev); + ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev); kfree(path); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 4a4828074cb7..2a4b23f8aa74 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) /* translate a guest gmadr to host gmadr */ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) { - if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr), - "invalid guest gmadr %llx\n", g_addr)) + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr), + "invalid guest gmadr %llx\n", g_addr)) return -EACCES; if (vgpu_gmadr_is_aperture(vgpu, g_addr)) @@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr) /* translate a host gmadr to guest gmadr */ int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr) { - if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr), - "invalid host gmadr %llx\n", h_addr)) + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + + if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), + "invalid host gmadr %llx\n", h_addr)) return -EACCES; if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) @@ -275,24 +279,23 @@ static inline int get_pse_type(int type) return gtt_type_table[type].pse_entry_type; } -static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) +static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index) { - void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; + void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; return readq(addr); } -static void ggtt_invalidate(struct drm_i915_private *dev_priv) +static void ggtt_invalidate(struct intel_gt *gt) { - mmio_hw_access_pre(dev_priv); - I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); - mmio_hw_access_post(dev_priv); + mmio_hw_access_pre(gt); + intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + mmio_hw_access_post(gt); } -static void write_pte64(struct drm_i915_private *dev_priv, - unsigned long index, u64 pte) +static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte) { - void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; + void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; writeq(pte, addr); } @@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt, if (WARN_ON(ret)) return ret; } else if (!pt) { - e->val64 = read_pte64(vgpu->gvt->dev_priv, index); + e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index); } else { e->val64 = *((u64 *)pt + index); } @@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt, if (WARN_ON(ret)) return ret; } else if (!pt) { - write_pte64(vgpu->gvt->dev_priv, index, e->val64); + write_pte64(vgpu->gvt->gt->ggtt, index, e->val64); } else { *((u64 *)pt + index) = e->val64; } @@ -734,7 +737,7 @@ static int detach_oos_page(struct 
intel_vgpu *vgpu, static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) { - struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev; trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); @@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { - struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev; struct intel_vgpu_ppgtt_spt *spt = NULL; dma_addr_t daddr; int ret; @@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *e) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; enum intel_gvt_gtt_type cur_pt_type; @@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, if (!gtt_type_is_pt(cur_pt_type) || !gtt_type_is_pt(cur_pt_type + 1)) { - WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type); + drm_WARN(&i915->drm, 1, + "Invalid page table type, cur_pt_type is: %d\n", + cur_pt_type); return -EINVAL; } @@ -1044,7 +1050,7 @@ fail: static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) { u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & @@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; - if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M)) + if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) return 0; pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); @@ -2314,7 +2320,7 @@ out: ggtt_invalidate_pte(vgpu, &e); ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); - ggtt_invalidate(gvt->dev_priv); + ggtt_invalidate(gvt->gt); return 0; } @@ -2347,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, static int alloc_scratch_pages(struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_gtt *gtt = &vgpu->gtt; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = I915_GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; int i; - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; dma_addr_t daddr; - if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) + if (drm_WARN_ON(&i915->drm, + type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); @@ -2410,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, static int release_scratch_page_tree(struct intel_vgpu *vgpu) { int i; - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; dma_addr_t daddr; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { @@ -2682,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; void *page; - struct device *dev = &gvt->dev_priv->drm.pdev->dev; + struct device *dev = 
&gvt->gt->i915->drm.pdev->dev; dma_addr_t daddr; gvt_dbg_core("init gtt\n"); @@ -2731,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { - struct device *dev = &gvt->dev_priv->drm.pdev->dev; + struct device *dev = &gvt->gt->i915->drm.pdev->dev; dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << I915_GTT_PAGE_SHIFT); @@ -2779,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry old_entry; @@ -2809,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); } - ggtt_invalidate(dev_priv); + ggtt_invalidate(gvt->gt); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 8f37eefa0a02..9e1787867894 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -35,6 +35,7 @@ #include <linux/kthread.h> #include "i915_drv.h" +#include "intel_gvt.h" #include "gvt.h" #include <linux/vfio.h> #include <linux/mdev.h> @@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = { static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, const char *name) { + const char *driver_name = + dev_driver_string(&gvt->gt->i915->drm.pdev->dev); int i; - struct intel_vgpu_type *t; - const char *driver_name = dev_driver_string( - &gvt->dev_priv->drm.pdev->dev); + name += strlen(driver_name) + 1; for (i = 0; i < gvt->num_types; i++) { - t = &gvt->types[i]; - if (!strncmp(t->name, name + strlen(driver_name) + 1, - sizeof(t->name))) + struct intel_vgpu_type *t = &gvt->types[i]; + + if (!strncmp(t->name, name, sizeof(t->name))) return t; } @@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = { [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL, }; -static bool intel_get_gvt_attrs(struct attribute ***type_attrs, - struct attribute_group ***intel_vgpu_type_groups) +static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups) { - *type_attrs = gvt_type_attrs; *intel_vgpu_type_groups = gvt_vgpu_type_groups; return true; } @@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = { static void init_device_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct pci_dev *pdev = gvt->dev_priv->drm.pdev; + struct pci_dev *pdev = gvt->gt->i915->drm.pdev; info->max_support_vgpus = 8; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; @@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt) /** * intel_gvt_clean_device - clean a GVT device - * @dev_priv: i915 private + * @i915: i915 private * * This function is called at the driver unloading stage, to free the * resources owned by a GVT device. 
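Worth pausing on the intel_gvt_find_vgpu_type() rewrite above: mdev presents a type's kobject name as "<driver-name>-<type-name>", so the driver prefix can be stripped once, before the loop, instead of being re-derived inside every strncmp(). Reassembled from the + lines, the lookup is now simply:

    name += strlen(driver_name) + 1;    /* the +1 skips the separator */

    for (i = 0; i < gvt->num_types; i++) {
        struct intel_vgpu_type *t = &gvt->types[i];

        if (!strncmp(t->name, name, sizeof(t->name)))
            return t;
    }

    return NULL;

Reading the +1 as skipping a '-' separator follows mdev's sysfs naming convention; the patch itself does not spell that out.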
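One behavioural note before the intel_gvt_clean_device() body just below: the function now opens with fetch_and_zero(&i915->gvt), the i915 idiom that latches the pointer into a local and clears the field in one step, so nothing can reach a half-torn-down GVT instance through i915->gvt while cleanup runs. A sketch of the idiom:

    struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

    if (drm_WARN_ON(&i915->drm, !gvt))
        return;

    /* ... teardown using the latched gvt ... */

    kfree(gvt);    /* i915->gvt is already NULL here */

Note that the sketch frees the latched local. As the hunk below stands, the final kfree(i915->gvt) runs after the field has been zeroed, so it frees NULL and the struct intel_gvt leaks; that reading follows from the lines as shown here and is an editorial observation, not something the patch addresses.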
* */ -void intel_gvt_clean_device(struct drm_i915_private *dev_priv) +void intel_gvt_clean_device(struct drm_i915_private *i915) { - struct intel_gvt *gvt = to_gvt(dev_priv); + struct intel_gvt *gvt = fetch_and_zero(&i915->gvt); - if (WARN_ON(!gvt)) + if (drm_WARN_ON(&i915->drm, !gvt)) return; intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); @@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) intel_gvt_clean_mmio_info(gvt); idr_destroy(&gvt->vgpu_idr); - kfree(dev_priv->gvt); - dev_priv->gvt = NULL; + kfree(i915->gvt); } /** * intel_gvt_init_device - initialize a GVT device - * @dev_priv: drm i915 private data + * @i915: drm i915 private data * * This function is called at the initialization stage, to initialize * necessary GVT components. @@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) * Zero on success, negative error code if failed. * */ -int intel_gvt_init_device(struct drm_i915_private *dev_priv) +int intel_gvt_init_device(struct drm_i915_private *i915) { struct intel_gvt *gvt; struct intel_vgpu *vgpu; int ret; - if (WARN_ON(dev_priv->gvt)) + if (drm_WARN_ON(&i915->drm, i915->gvt)) return -EEXIST; gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL); @@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); mutex_init(&gvt->sched_lock); - gvt->dev_priv = dev_priv; + gvt->gt = &i915->gt; + i915->gvt = gvt; init_device_info(gvt); @@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) intel_gvt_debugfs_init(gvt); gvt_dbg_core("gvt device initialization is done\n"); - dev_priv->gvt = gvt; - intel_gvt_host.dev = &dev_priv->drm.pdev->dev; + intel_gvt_host.dev = &i915->drm.pdev->dev; intel_gvt_host.initialized = true; return 0; @@ -404,6 +402,7 @@ out_clean_mmio_info: out_clean_idr: idr_destroy(&gvt->vgpu_idr); kfree(gvt); + i915->gvt = NULL; return ret; } diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index b47c6acaf9c0..58c2c7932e3f 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -196,41 +196,21 @@ struct intel_vgpu { struct dentry *debugfs; -#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) - struct { - struct mdev_device *mdev; - struct vfio_region *region; - int num_regions; - struct eventfd_ctx *intx_trigger; - struct eventfd_ctx *msi_trigger; - - /* - * Two caches are used to avoid mapping duplicated pages (eg. - * scratch pages). This help to reduce dma setup overhead. - */ - struct rb_root gfn_cache; - struct rb_root dma_addr_cache; - unsigned long nr_cache_entries; - struct mutex cache_lock; - - struct notifier_block iommu_notifier; - struct notifier_block group_notifier; - struct kvm *kvm; - struct work_struct release_work; - atomic_t released; - struct vfio_device *vfio_device; - } vdev; -#endif + /* Hypervisor-specific device state. 
*/ + void *vdev; struct list_head dmabuf_obj_list_head; struct mutex dmabuf_lock; struct idr object_idr; - struct completion vblank_done; - u32 scan_nonprivbb; }; +static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu) +{ + return vgpu->vdev; +} + /* validating GM healthy status*/ #define vgpu_is_vm_unhealthy(ret_val) \ (((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT)) @@ -306,7 +286,7 @@ struct intel_gvt { /* scheduler scope lock, protect gvt and vgpu schedule related data */ struct mutex sched_lock; - struct drm_i915_private *dev_priv; + struct intel_gt *gt; struct idr vgpu_idr; /* vGPU IDR pool */ struct intel_gvt_device_info device_info; @@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) #define HOST_FENCE 4 +#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt) + /* Aperture/GM space definitions for GVT device */ -#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) -#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) +#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end +#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start -#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total) -#define gvt_ggtt_sz(gvt) \ - ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3) -#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) +#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total +#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3) +#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) #define gvt_aperture_gmadr_base(gvt) (0) #define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \ @@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \ + gvt_hidden_sz(gvt) - 1) -#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences) +#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences) /* Aperture/GM space definitions for vGPU */ #define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start) @@ -570,8 +551,7 @@ struct intel_gvt_ops { void (*vgpu_deactivate)(struct intel_vgpu *); struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt, const char *name); - bool (*get_gvt_attrs)(struct attribute ***type_attrs, - struct attribute_group ***intel_vgpu_type_groups); + bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups); int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *); int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int); int (*write_protect_handler)(struct intel_vgpu *, u64, void *, @@ -586,14 +566,14 @@ enum { GVT_FAILSAFE_GUEST_ERR, }; -static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) +static inline void mmio_hw_access_pre(struct intel_gt *gt) { - intel_runtime_pm_get(&dev_priv->runtime_pm); + intel_runtime_pm_get(gt->uncore->rpm); } -static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) +static inline void mmio_hw_access_post(struct intel_gt *gt) { - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_runtime_pm_put_unchecked(gt->uncore->rpm); } /** diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 6d28d72e6c7e..2faf50e1b051 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -49,15 +49,17 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) { - if (IS_BROADWELL(gvt->dev_priv)) + struct drm_i915_private *i915 = gvt->gt->i915; + + if 
(IS_BROADWELL(i915)) return D_BDW; - else if (IS_SKYLAKE(gvt->dev_priv)) + else if (IS_SKYLAKE(i915)) return D_SKL; - else if (IS_KABYLAKE(gvt->dev_priv)) + else if (IS_KABYLAKE(i915)) return D_KBL; - else if (IS_BROXTON(gvt->dev_priv)) + else if (IS_BROXTON(i915)) return D_BXT; - else if (IS_COFFEELAKE(gvt->dev_priv)) + else if (IS_COFFEELAKE(i915)) return D_CFL; return 0; @@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt, } /** - * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id + * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine * @gvt: a GVT device * @offset: register offset * * Returns: - * Ring ID on success, negative error code if failed. + * The engine containing the offset within its mmio page. */ -int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, - unsigned int offset) +const struct intel_engine_cs * +intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset) { - enum intel_engine_id id; struct intel_engine_cs *engine; + enum intel_engine_id id; offset &= ~GENMASK(11, 0); - for_each_engine(engine, gvt->dev_priv, id) { + for_each_engine(engine, gvt->gt, id) if (engine->mmio_base == offset) - return id; - } - return -ENODEV; + return engine; + + return NULL; } #define offset_to_fence_num(offset) \ @@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu, { u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; - if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) { + if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) { if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); else if (!ips) @@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off, static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_gvt *gvt = vgpu->gvt; unsigned int fence_num = offset_to_fence_num(off); int ret; @@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off, return ret; write_vreg(vgpu, off, p_data, bytes); - mmio_hw_access_pre(dev_priv); + mmio_hw_access_pre(gvt->gt); intel_vgpu_write_fence(vgpu, fence_num, vgpu_vreg64(vgpu, fence_num_to_offset(fence_num))); - mmio_hw_access_post(dev_priv); + mmio_hw_access_post(gvt->gt); return 0; } @@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, old = vgpu_vreg(vgpu, offset); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); - if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) { + if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; } - engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; + engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ @@ -460,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -/* ascendingly sorted */ +/* sorted in ascending order */ static i915_reg_t force_nonpriv_white_list[] = { + _MMIO(0xd80), GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) - PS_INVOCATION_COUNT,//_MMIO(0x2348) + CL_PRIMITIVES_COUNT, //_MMIO(0x2340) + PS_INVOCATION_COUNT, //_MMIO(0x2348) + PS_DEPTH_COUNT, 
//_MMIO(0x2350) GEN8_CS_CHICKEN1,//_MMIO(0x2580) _MMIO(0x2690), _MMIO(0x2694), @@ -489,10 +494,11 @@ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0xe18c), _MMIO(0xe48c), _MMIO(0xe5f4), + _MMIO(0x64844), }; /* a simple bsearch */ -static inline bool in_whitelist(unsigned int reg) +static inline bool in_whitelist(u32 reg) { int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); i915_reg_t *array = force_nonpriv_white_list; @@ -514,26 +520,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2); - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); - u32 ring_base; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - int ret = -EINVAL; - - if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) { - gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n", - vgpu->id, ring_id, offset, bytes); - return ret; - } + const struct intel_engine_cs *engine = + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); - ring_base = dev_priv->engine[ring_id]->mmio_base; + if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) { + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", + vgpu->id, offset, bytes); + return -EINVAL; + } - if (in_whitelist(reg_nonpriv) || - reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) { - ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, - bytes); - } else + if (!in_whitelist(reg_nonpriv) && + reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) { gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n", - vgpu->id, *(u32 *)p_data, offset); + vgpu->id, reg_nonpriv, offset); + } else + intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); return 0; } @@ -756,7 +757,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu, static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; u32 pipe = DSPSURF_TO_PIPE(offset); int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY); @@ -797,7 +798,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; enum pipe pipe = REG_50080_TO_PIPE(offset); enum plane_id plane = REG_50080_TO_PLANE(offset); int event = SKL_FLIP_EVENT(pipe, plane); @@ -821,7 +822,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu, static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; enum intel_gvt_event_type event; if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) @@ -836,7 +837,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) event = AUX_CHANNEL_D; else { - WARN_ON(true); + drm_WARN_ON(&dev_priv->drm, true); return -EINVAL; } @@ -924,11 +925,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9) + if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ 
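(Returning to in_whitelist(), whose first lines appear in the context above: the hunk only retypes the parameter to u32, but this function is the reason the table must stay sorted. The "simple bsearch" it implements, with the loop body reconstructed from the visible declarations and so to be treated as illustrative:

    static inline bool in_whitelist(u32 reg)
    {
        int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
        i915_reg_t *array = force_nonpriv_white_list;

        while (left < right) {
            int mid = (left + right) / 2;

            if (reg > i915_mmio_reg_offset(array[mid]))
                left = mid + 1;
            else if (reg < i915_mmio_reg_offset(array[mid]))
                right = mid;
            else
                return true;
        }

        return false;
    }

This is also why the list comment was tightened to "sorted in ascending order" and why the new entries, _MMIO(0xd80), CL_PRIMITIVES_COUNT, PS_DEPTH_COUNT and _MMIO(0x64844), are slotted into offset order rather than appended.)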
return 0; - } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && + } else if (IS_BROADWELL(vgpu->gvt->gt->i915) && offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) { /* write to the data registers */ return 0; @@ -1244,8 +1245,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; + struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj; char *env[3] = {NULL, NULL, NULL}; char vmid_str[20]; char display_ready_str[20]; @@ -1306,13 +1306,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int pf_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 val = *(u32 *)p_data; if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { - WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n", - vgpu->id); + drm_WARN_ONCE(&i915->drm, true, + "VM(%d): guest is trying to scaling a plane\n", + vgpu->id); return 0; } @@ -1360,13 +1362,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu, static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 mode; write_vreg(vgpu, offset, p_data, bytes); mode = vgpu_vreg(vgpu, offset); if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) { - WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n", + drm_WARN_ONCE(&i915->drm, 1, + "VM(%d): iGVT-g doesn't support GuC\n", vgpu->id); return 0; } @@ -1377,10 +1381,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset, static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; u32 trtte = *(u32 *)p_data; if ((trtte & 1) && (trtte & (1 << 1)) == 0) { - WARN(1, "VM(%d): Use physical address for TRTT!\n", + drm_WARN(&i915->drm, 1, + "VM(%d): Use physical address for TRTT!\n", vgpu->id); return -EINVAL; } @@ -1427,9 +1433,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: - if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv) - || IS_COFFEELAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->gt->i915) || + IS_KABYLAKE(vgpu->gvt->gt->i915) || + IS_COFFEELAKE(vgpu->gvt->gt->i915)) { /** * "Read memory latency" command on gen9. * Below memory latency values are read @@ -1439,7 +1445,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; - } else if (IS_BROXTON(vgpu->gvt->dev_priv)) { + } else if (IS_BROXTON(vgpu->gvt->gt->i915)) { /** * "Read memory latency" command on gen9. 
* Below memory latency values are read @@ -1452,9 +1458,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, } break; case SKL_PCODE_CDCLK_CONTROL: - if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv) - || IS_COFFEELAKE(vgpu->gvt->dev_priv)) + if (IS_SKYLAKE(vgpu->gvt->gt->i915) || + IS_KABYLAKE(vgpu->gvt->gt->i915) || + IS_COFFEELAKE(vgpu->gvt->gt->i915)) *data0 = SKL_CDCLK_READY_FOR_CHANGE; break; case GEN6_PCODE_READ_RC6VIDS: @@ -1478,24 +1484,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 value = *(u32 *)p_data; - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); + const struct intel_engine_cs *engine = + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", offset, value); return -EINVAL; } + /* * Need to emulate all the HWSP register write to ensure host can * update the VM CSB status correctly. Here listed registers can * support BDW, SKL or other platforms with same HWSP registers. */ - if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) { + if (unlikely(!engine)) { gvt_vgpu_err("access unknown hardware status page register:0x%x\n", offset); return -EINVAL; } - vgpu->hws_pga[ring_id] = value; + vgpu->hws_pga[engine->id] = value; gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n", vgpu->id, value, offset); @@ -1507,7 +1515,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, { u32 v = *(u32 *)p_data; - if (IS_BROXTON(vgpu->gvt->dev_priv)) + if (IS_BROXTON(vgpu->gvt->gt->i915)) v &= (1 << 31) | (1 << 29); else v &= (1 << 31) | (1 << 29) | (1 << 9) | @@ -1654,26 +1662,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_i915_private *dev_priv = gvt->dev_priv; - int ring_id; - u32 ring_base; + const struct intel_engine_cs *engine = + intel_gvt_render_mmio_to_engine(gvt, offset); - ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset); /** * Read HW reg in following case * a. the offset isn't a ring mmio * b. the offset's ring is running on hw. * c. 
the offset is ring time stamp mmio */ - if (ring_id >= 0) - ring_base = dev_priv->engine[ring_id]->mmio_base; - - if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] || - offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) || - offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) { - mmio_hw_access_pre(dev_priv); - vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); - mmio_hw_access_post(dev_priv); + + if (!engine || + vgpu == gvt->scheduler.engine_owner[engine->id] || + offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) || + offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) { + mmio_hw_access_pre(gvt->gt); + vgpu_vreg(vgpu, offset) = + intel_uncore_read(gvt->gt->uncore, _MMIO(offset)); + mmio_hw_access_post(gvt->gt); } return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); @@ -1682,22 +1688,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu, static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; int ret = 0; - if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) + if (drm_WARN_ON(&i915->drm, !engine)) return -EINVAL; - execlist = &vgpu->submission.execlist[ring_id]; + execlist = &vgpu->submission.execlist[engine->id]; execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { - ret = intel_vgpu_submit_execlist(vgpu, ring_id); + ret = intel_vgpu_submit_execlist(vgpu, engine); if(ret) - gvt_vgpu_err("fail submit workload on ring %d\n", - ring_id); + gvt_vgpu_err("fail submit workload on ring %s\n", + engine->name); } ++execlist->elsp_dwords.index; @@ -1709,12 +1716,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data = *(u32 *)p_data; - int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); + const struct intel_engine_cs *engine = + intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); bool enable_execlist; int ret; (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1); - if (IS_COFFEELAKE(vgpu->gvt->dev_priv)) + if (IS_COFFEELAKE(vgpu->gvt->gt->i915)) (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2); write_vreg(vgpu, offset, p_data, bytes); @@ -1723,7 +1731,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } - if (IS_COFFEELAKE(vgpu->gvt->dev_priv) && + if (IS_COFFEELAKE(vgpu->gvt->gt->i915) && data & _MASKED_BIT_ENABLE(2)) { enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); return 0; @@ -1743,16 +1751,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); - gvt_dbg_core("EXECLIST %s on ring %d\n", - (enable_execlist ? "enabling" : "disabling"), - ring_id); + gvt_dbg_core("EXECLIST %s on ring %s\n", + (enable_execlist ? 
"enabling" : "disabling"), + engine->name); if (!enable_execlist) return 0; ret = intel_vgpu_select_submission_ops(vgpu, - BIT(ring_id), - INTEL_VGPU_EXECLIST_SUBMISSION); + engine->mask, + INTEL_VGPU_EXECLIST_SUBMISSION); if (ret) return ret; @@ -1876,7 +1884,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, static int init_generic_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, @@ -2415,9 +2423,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL); - MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL); - MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL); - MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL); + MMIO_D(WM_LINETIME(PIPE_A), D_ALL); + MMIO_D(WM_LINETIME(PIPE_B), D_ALL); + MMIO_D(WM_LINETIME(PIPE_C), D_ALL); MMIO_D(SPLL_CTL, D_ALL); MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL); MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL); @@ -2693,7 +2701,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) static int init_bdw_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); @@ -2882,7 +2890,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) static int init_skl_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); @@ -2902,7 +2910,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write); - MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); + MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write); MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); @@ -3131,7 +3139,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) static int init_bxt_mmio_info(struct intel_gvt *gvt) { - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *dev_priv = gvt->gt->i915; int ret; MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); @@ -3367,7 +3375,7 @@ static struct gvt_mmio_block mmio_blocks[] = { int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) { struct intel_gvt_device_info *info = &gvt->device_info; - struct drm_i915_private *dev_priv = gvt->dev_priv; + struct drm_i915_private *i915 = gvt->gt->i915; int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute); int ret; @@ -3379,20 +3387,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) if (ret) goto err; - if (IS_BROADWELL(dev_priv)) { + if (IS_BROADWELL(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; - } else if (IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv) - || IS_COFFEELAKE(dev_priv)) { + } else if (IS_SKYLAKE(i915) || + IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; ret = init_skl_mmio_info(gvt); if (ret) goto err; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_BROXTON(i915)) { ret = init_bdw_mmio_info(gvt); if (ret) goto err; @@ -3541,13 +3549,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int 
offset, void *pdata, unsigned int bytes, bool is_read) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; - if (WARN_ON(bytes > 8)) + if (drm_WARN_ON(&i915->drm, bytes > 8)) return -EINVAL; /* diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 11accd3e1023..540017fed908 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *i915 = gvt->gt->i915; struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_info *info; u32 ier = *(u32 *)p_data; @@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, vgpu_vreg(vgpu, reg) = ier; info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); - if (WARN_ON(!info)) + if (drm_WARN_ON(&i915->drm, !info)) return -EINVAL; if (info->has_upstream_irq) @@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt, iir_to_regbase(reg)); u32 iir = *(u32 *)p_data; @@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg), (vgpu_vreg(vgpu, reg) ^ iir)); - if (WARN_ON(!info)) + if (drm_WARN_ON(&i915->drm, !info)) return -EINVAL; vgpu_vreg(vgpu, reg) &= ~iir; @@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = { static void update_upstream_irq(struct intel_vgpu *vgpu, struct intel_gvt_irq_info *info) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt_irq *irq = &vgpu->gvt->irq; struct intel_gvt_irq_map *map = irq->irq_map; struct intel_gvt_irq_info *up_irq_info = NULL; @@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu, if (!up_irq_info) up_irq_info = irq->info[map->up_irq_group]; else - WARN_ON(up_irq_info != irq->info[map->up_irq_group]); + drm_WARN_ON(&i915->drm, up_irq_info != + irq->info[map->up_irq_group]); bit = map->up_irq_bit; @@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu, clear_bits |= (1 << bit); } - if (WARN_ON(!up_irq_info)) + if (drm_WARN_ON(&i915->drm, !up_irq_info)) return; if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { @@ -536,7 +540,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); - if (HAS_ENGINE(gvt->dev_priv, VCS1)) { + if (HAS_ENGINE(gvt->gt->i915, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, @@ -568,7 +572,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH); - if (IS_BROADWELL(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->gt->i915)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH); @@ -581,7 +585,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, 
PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (INTEL_GEN(gvt->dev_priv) >= 9) { + } else if (INTEL_GEN(gvt->gt->i915) >= 9) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = { void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, enum intel_gvt_event_type event) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq *irq = &gvt->irq; gvt_event_virt_handler_t handler; struct intel_gvt_irq_ops *ops = gvt->irq.ops; handler = get_event_virt_handler(irq, event); - WARN_ON(!handler); + drm_WARN_ON(&i915->drm, !handler); handler(irq, event, vgpu); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 3259a1fa69e1..eee530453aa6 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -108,6 +108,37 @@ struct gvt_dma { struct kref ref; }; +struct kvmgt_vdev { + struct intel_vgpu *vgpu; + struct mdev_device *mdev; + struct vfio_region *region; + int num_regions; + struct eventfd_ctx *intx_trigger; + struct eventfd_ctx *msi_trigger; + + /* + * Two caches are used to avoid mapping duplicated pages (eg. + * scratch pages). This help to reduce dma setup overhead. + */ + struct rb_root gfn_cache; + struct rb_root dma_addr_cache; + unsigned long nr_cache_entries; + struct mutex cache_lock; + + struct notifier_block iommu_notifier; + struct notifier_block group_notifier; + struct kvm *kvm; + struct work_struct release_work; + atomic_t released; + struct vfio_device *vfio_device; + struct vfio_group *vfio_group; +}; + +static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu) +{ + return intel_vgpu_vdev(vgpu); +} + static inline bool handle_valid(unsigned long handle) { return !!(handle & ~0xff); @@ -120,6 +151,8 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); int total_pages; int npage; int ret; @@ -129,8 +162,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1); - WARN_ON(ret != 1); + ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1); + drm_WARN_ON(&i915->drm, ret != 1); } } @@ -138,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, struct page **page) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long base_pfn = 0; int total_pages; int npage; @@ -152,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); + ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", cur_gfn, ret); @@ -187,7 +221,7 @@ err: static int 
gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t *dma_addr, unsigned long size) { - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; struct page *page = NULL; int ret; @@ -210,7 +244,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) { - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev; dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); gvt_unpin_guest_page(vgpu, gfn, size); @@ -219,7 +253,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, dma_addr_t dma_addr) { - struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node; + struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -237,7 +271,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) { - struct rb_node *node = vgpu->vdev.gfn_cache.rb_node; + struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node; struct gvt_dma *itr; while (node) { @@ -258,6 +292,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL); if (!new) @@ -270,7 +305,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. */ - link = &vgpu->vdev.gfn_cache.rb_node; + link = &vdev->gfn_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, gfn_node); @@ -281,11 +316,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->gfn_node, parent, link); - rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache); + rb_insert_color(&new->gfn_node, &vdev->gfn_cache); /* dma_addr_cache maps dma addr to struct gvt_dma. 
*/ parent = NULL; - link = &vgpu->vdev.dma_addr_cache.rb_node; + link = &vdev->dma_addr_cache.rb_node; while (*link) { parent = *link; itr = rb_entry(parent, struct gvt_dma, dma_addr_node); @@ -296,46 +331,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, link = &parent->rb_right; } rb_link_node(&new->dma_addr_node, parent, link); - rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache); + rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache); - vgpu->vdev.nr_cache_entries++; + vdev->nr_cache_entries++; return 0; } static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu, struct gvt_dma *entry) { - rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache); - rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache); + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); + + rb_erase(&entry->gfn_node, &vdev->gfn_cache); + rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache); kfree(entry); - vgpu->vdev.nr_cache_entries--; + vdev->nr_cache_entries--; } static void gvt_cache_destroy(struct intel_vgpu *vgpu) { struct gvt_dma *dma; struct rb_node *node = NULL; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); for (;;) { - mutex_lock(&vgpu->vdev.cache_lock); - node = rb_first(&vgpu->vdev.gfn_cache); + mutex_lock(&vdev->cache_lock); + node = rb_first(&vdev->gfn_cache); if (!node) { - mutex_unlock(&vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); break; } dma = rb_entry(node, struct gvt_dma, gfn_node); gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); - mutex_unlock(&vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); } } static void gvt_cache_init(struct intel_vgpu *vgpu) { - vgpu->vdev.gfn_cache = RB_ROOT; - vgpu->vdev.dma_addr_cache = RB_ROOT; - vgpu->vdev.nr_cache_entries = 0; - mutex_init(&vgpu->vdev.cache_lock); + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); + + vdev->gfn_cache = RB_ROOT; + vdev->dma_addr_cache = RB_ROOT; + vdev->nr_cache_entries = 0; + mutex_init(&vdev->cache_lock); } static void kvmgt_protect_table_init(struct kvmgt_guest_info *info) @@ -409,16 +449,18 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf, size_t count, loff_t *ppos, bool iswrite) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; - void *base = vgpu->vdev.region[i].data; + void *base = vdev->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; - if (pos >= vgpu->vdev.region[i].size || iswrite) { + + if (pos >= vdev->region[i].size || iswrite) { gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n"); return -EINVAL; } - count = min(count, (size_t)(vgpu->vdev.region[i].size - pos)); + count = min(count, (size_t)(vdev->region[i].size - pos)); memcpy(buf, base + pos, count); return count; @@ -512,7 +554,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf, unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS; struct vfio_edid_region *region = - (struct vfio_edid_region *)vgpu->vdev.region[i].data; + (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data; loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; if (pos < region->vfio_edid_regs.edid_offset) { @@ -544,32 +586,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu, const struct intel_vgpu_regops *ops, size_t size, u32 flags, void *data) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct vfio_region *region; - region = 
krealloc(vgpu->vdev.region, - (vgpu->vdev.num_regions + 1) * sizeof(*region), + region = krealloc(vdev->region, + (vdev->num_regions + 1) * sizeof(*region), GFP_KERNEL); if (!region) return -ENOMEM; - vgpu->vdev.region = region; - vgpu->vdev.region[vgpu->vdev.num_regions].type = type; - vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype; - vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops; - vgpu->vdev.region[vgpu->vdev.num_regions].size = size; - vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags; - vgpu->vdev.region[vgpu->vdev.num_regions].data = data; - vgpu->vdev.num_regions++; + vdev->region = region; + vdev->region[vdev->num_regions].type = type; + vdev->region[vdev->num_regions].subtype = subtype; + vdev->region[vdev->num_regions].ops = ops; + vdev->region[vdev->num_regions].size = size; + vdev->region[vdev->num_regions].flags = flags; + vdev->region[vdev->num_regions].data = data; + vdev->num_regions++; return 0; } static int kvmgt_get_vfio_device(void *p_vgpu) { struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - vgpu->vdev.vfio_device = vfio_device_get_from_dev( - mdev_dev(vgpu->vdev.mdev)); - if (!vgpu->vdev.vfio_device) { + vdev->vfio_device = vfio_device_get_from_dev( + mdev_dev(vdev->mdev)); + if (!vdev->vfio_device) { gvt_vgpu_err("failed to get vfio device\n"); return -ENODEV; } @@ -637,10 +681,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num) static void kvmgt_put_vfio_device(void *vgpu) { - if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device)) + struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu); + + if (WARN_ON(!vdev->vfio_device)) return; - vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device); + vfio_device_put(vdev->vfio_device); } static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) @@ -669,9 +715,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) goto out; } - INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); + INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work); - vgpu->vdev.mdev = mdev; + kvmgt_vdev(vgpu)->mdev = mdev; mdev_set_drvdata(mdev, vgpu); gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", @@ -696,9 +742,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev) static int intel_vgpu_iommu_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct intel_vgpu *vgpu = container_of(nb, - struct intel_vgpu, - vdev.iommu_notifier); + struct kvmgt_vdev *vdev = container_of(nb, + struct kvmgt_vdev, + iommu_notifier); + struct intel_vgpu *vgpu = vdev->vgpu; if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { struct vfio_iommu_type1_dma_unmap *unmap = data; @@ -708,7 +755,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, iov_pfn = unmap->iova >> PAGE_SHIFT; end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE; - mutex_lock(&vgpu->vdev.cache_lock); + mutex_lock(&vdev->cache_lock); for (; iov_pfn < end_iov_pfn; iov_pfn++) { entry = __gvt_cache_find_gfn(vgpu, iov_pfn); if (!entry) @@ -718,7 +765,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, entry->size); __gvt_cache_remove_entry(vgpu, entry); } - mutex_unlock(&vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); } return NOTIFY_OK; @@ -727,16 +774,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, static int intel_vgpu_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { - struct intel_vgpu *vgpu = container_of(nb, - 
struct intel_vgpu, - vdev.group_notifier); + struct kvmgt_vdev *vdev = container_of(nb, + struct kvmgt_vdev, + group_notifier); /* the only action we care about */ if (action == VFIO_GROUP_NOTIFY_SET_KVM) { - vgpu->vdev.kvm = data; + vdev->kvm = data; if (!data) - schedule_work(&vgpu->vdev.release_work); + schedule_work(&vdev->release_work); } return NOTIFY_OK; @@ -745,15 +792,17 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb, static int intel_vgpu_open(struct mdev_device *mdev) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long events; int ret; + struct vfio_group *vfio_group; - vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; - vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier; + vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; + vdev->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, - &vgpu->vdev.iommu_notifier); + &vdev->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", ret); @@ -762,13 +811,21 @@ static int intel_vgpu_open(struct mdev_device *mdev) events = VFIO_GROUP_NOTIFY_SET_KVM; ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, - &vgpu->vdev.group_notifier); + &vdev->group_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", ret); goto undo_iommu; } + vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev)); + if (IS_ERR_OR_NULL(vfio_group)) { + ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group); + gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); + goto undo_register; + } + vdev->vfio_group = vfio_group; + /* Take a module reference as mdev core doesn't take * a reference for vendor driver. 
*/ @@ -781,51 +838,60 @@ static int intel_vgpu_open(struct mdev_device *mdev) intel_gvt_ops->vgpu_activate(vgpu); - atomic_set(&vgpu->vdev.released, 0); + atomic_set(&vdev->released, 0); return ret; undo_group: + vfio_group_put_external_user(vdev->vfio_group); + vdev->vfio_group = NULL; + +undo_register: vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, - &vgpu->vdev.group_notifier); + &vdev->group_notifier); undo_iommu: vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, - &vgpu->vdev.iommu_notifier); + &vdev->iommu_notifier); out: return ret; } static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); struct eventfd_ctx *trigger; - trigger = vgpu->vdev.msi_trigger; + trigger = vdev->msi_trigger; if (trigger) { eventfd_ctx_put(trigger); - vgpu->vdev.msi_trigger = NULL; + vdev->msi_trigger = NULL; } } static void __intel_vgpu_release(struct intel_vgpu *vgpu) { + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct kvmgt_guest_info *info; int ret; if (!handle_valid(vgpu->handle)) return; - if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) + if (atomic_cmpxchg(&vdev->released, 0, 1)) return; intel_gvt_ops->vgpu_release(vgpu); - ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, - &vgpu->vdev.iommu_notifier); - WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); + ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY, + &vdev->iommu_notifier); + drm_WARN(&i915->drm, ret, + "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY, - &vgpu->vdev.group_notifier); - WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret); + ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY, + &vdev->group_notifier); + drm_WARN(&i915->drm, ret, + "vfio_unregister_notifier for group failed: %d\n", ret); /* dereference module reference taken at open */ module_put(THIS_MODULE); @@ -834,8 +900,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) kvmgt_guest_exit(info); intel_vgpu_release_msi_eventfd_ctx(vgpu); + vfio_group_put_external_user(vdev->vfio_group); - vgpu->vdev.kvm = NULL; + vdev->kvm = NULL; vgpu->handle = 0; } @@ -848,10 +915,10 @@ static void intel_vgpu_release(struct mdev_device *mdev) static void intel_vgpu_release_work(struct work_struct *work) { - struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu, - vdev.release_work); + struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev, + release_work); - __intel_vgpu_release(vgpu); + __intel_vgpu_release(vdev->vgpu); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -913,7 +980,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off, return -EINVAL; } - aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap, + aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap, ALIGN_DOWN(off, PAGE_SIZE), count + offset_in_page(off)); if (!aperture_va) @@ -933,12 +1000,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, size_t count, loff_t *ppos, bool is_write) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; int ret = -EINVAL; - if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) { + if (index >= VFIO_PCI_NUM_REGIONS + 
vdev->num_regions) { gvt_vgpu_err("invalid index: %u\n", index); return -EINVAL; } @@ -967,11 +1035,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, case VFIO_PCI_ROM_REGION_INDEX: break; default: - if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) + if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) return -EINVAL; index -= VFIO_PCI_NUM_REGIONS; - return vgpu->vdev.region[index].ops->rw(vgpu, buf, count, + return vdev->region[index].ops->rw(vgpu, buf, count, ppos, is_write); } @@ -1224,7 +1292,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, gvt_vgpu_err("eventfd_ctx_fdget failed\n"); return PTR_ERR(trigger); } - vgpu->vdev.msi_trigger = trigger; + kvmgt_vdev(vgpu)->msi_trigger = trigger; } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count) intel_vgpu_release_msi_eventfd_ctx(vgpu); @@ -1276,6 +1344,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, unsigned long arg) { struct intel_vgpu *vgpu = mdev_get_drvdata(mdev); + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); unsigned long minsz; gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd); @@ -1294,7 +1363,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, info.flags = VFIO_DEVICE_FLAGS_PCI; info.flags |= VFIO_DEVICE_FLAGS_RESET; info.num_regions = VFIO_PCI_NUM_REGIONS + - vgpu->vdev.num_regions; + vdev->num_regions; info.num_irqs = VFIO_PCI_NUM_IRQS; return copy_to_user((void __user *)arg, &info, minsz) ? @@ -1385,22 +1454,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + - vgpu->vdev.num_regions) + vdev->num_regions) return -EINVAL; info.index = array_index_nospec(info.index, VFIO_PCI_NUM_REGIONS + - vgpu->vdev.num_regions); + vdev->num_regions); i = info.index - VFIO_PCI_NUM_REGIONS; info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); - info.size = vgpu->vdev.region[i].size; - info.flags = vgpu->vdev.region[i].flags; + info.size = vdev->region[i].size; + info.flags = vdev->region[i].flags; - cap_type.type = vgpu->vdev.region[i].type; - cap_type.subtype = vgpu->vdev.region[i].subtype; + cap_type.type = vdev->region[i].type; + cap_type.subtype = vdev->region[i].subtype; ret = vfio_info_add_capability(&caps, &cap_type.header, @@ -1597,12 +1666,10 @@ static struct mdev_parent_ops intel_vgpu_ops = { static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops) { - struct attribute **kvm_type_attrs; struct attribute_group **kvm_vgpu_type_groups; intel_gvt_ops = ops; - if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs, - &kvm_vgpu_type_groups)) + if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups)) return -EFAULT; intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups; @@ -1742,13 +1809,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; + struct kvmgt_vdev *vdev; struct kvm *kvm; vgpu = mdev_get_drvdata(mdev); if (handle_valid(vgpu->handle)) return -EEXIST; - kvm = vgpu->vdev.kvm; + vdev = kvmgt_vdev(vgpu); + kvm = vdev->kvm; if (!kvm || kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); return -ESRCH; @@ -1769,8 +1838,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev) kvmgt_protect_table_init(info); gvt_cache_init(vgpu); - init_completion(&vgpu->vblank_done); - info->track_node.track_write = kvmgt_page_track_write; info->track_node.track_flush_slot = kvmgt_page_track_flush_slot; kvm_page_track_register_notifier(kvm, 
&info->track_node); @@ -1778,7 +1845,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) info->debugfs_cache_entries = debugfs_create_ulong( "kvmgt_nr_cache_entries", 0444, vgpu->debugfs, - &vgpu->vdev.nr_cache_entries); + &vdev->nr_cache_entries); return 0; } @@ -1795,9 +1862,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) return true; } -static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle) +static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle) { - /* nothing to do here */ + struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + + vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL); + + if (!vgpu->vdev) + return -ENOMEM; + + kvmgt_vdev(vgpu)->vgpu = vgpu; + return 0; } @@ -1805,29 +1880,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu) { int i; struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu; + struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu); - if (!vgpu->vdev.region) + if (!vdev->region) return; - for (i = 0; i < vgpu->vdev.num_regions; i++) - if (vgpu->vdev.region[i].ops->release) - vgpu->vdev.region[i].ops->release(vgpu, - &vgpu->vdev.region[i]); - vgpu->vdev.num_regions = 0; - kfree(vgpu->vdev.region); - vgpu->vdev.region = NULL; + for (i = 0; i < vdev->num_regions; i++) + if (vdev->region[i].ops->release) + vdev->region[i].ops->release(vgpu, + &vdev->region[i]); + vdev->num_regions = 0; + kfree(vdev->region); + vdev->region = NULL; + + kfree(vdev); } static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; + struct kvmgt_vdev *vdev; if (!handle_valid(handle)) return -ESRCH; info = (struct kvmgt_guest_info *)handle; vgpu = info->vgpu; + vdev = kvmgt_vdev(vgpu); /* * When guest is poweroff, msi_trigger is set to NULL, but vgpu's @@ -1838,10 +1918,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) * enabled by guest. so if msi_trigger is null, success is still * returned and don't inject interrupt into guest. 
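 * (eventfd_signal() returns the count it added, so the check below for a
 * return value of 1 confirms the MSI event reached the guest's eventfd.)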
*/ - if (vgpu->vdev.msi_trigger == NULL) + if (vdev->msi_trigger == NULL) return 0; - if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1) + if (eventfd_signal(vdev->msi_trigger, 1) == 1) return 0; return -EFAULT; @@ -1867,26 +1947,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; + struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret; if (!handle_valid(handle)) return -EINVAL; - info = (struct kvmgt_guest_info *)handle; - vgpu = info->vgpu; + vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vdev = kvmgt_vdev(vgpu); - mutex_lock(&info->vgpu->vdev.cache_lock); + mutex_lock(&vdev->cache_lock); - entry = __gvt_cache_find_gfn(info->vgpu, gfn); + entry = __gvt_cache_find_gfn(vgpu, gfn); if (!entry) { ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else if (entry->size != size) { @@ -1898,7 +1978,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, if (ret) goto err_unlock; - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else { @@ -1906,19 +1986,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, *dma_addr = entry->dma_addr; } - mutex_unlock(&info->vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); return 0; err_unmap: gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: - mutex_unlock(&info->vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); return ret; } static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) { struct kvmgt_guest_info *info; + struct kvmgt_vdev *vdev; struct gvt_dma *entry; int ret = 0; @@ -1926,14 +2007,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr) return -ENODEV; info = (struct kvmgt_guest_info *)handle; + vdev = kvmgt_vdev(info->vgpu); - mutex_lock(&info->vgpu->vdev.cache_lock); + mutex_lock(&vdev->cache_lock); entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); if (entry) kref_get(&entry->ref); else ret = -ENOMEM; - mutex_unlock(&info->vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); return ret; } @@ -1949,52 +2031,35 @@ static void __gvt_dma_release(struct kref *ref) static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr) { - struct kvmgt_guest_info *info; + struct intel_vgpu *vgpu; + struct kvmgt_vdev *vdev; struct gvt_dma *entry; if (!handle_valid(handle)) return; - info = (struct kvmgt_guest_info *)handle; + vgpu = ((struct kvmgt_guest_info *)handle)->vgpu; + vdev = kvmgt_vdev(vgpu); - mutex_lock(&info->vgpu->vdev.cache_lock); - entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr); + mutex_lock(&vdev->cache_lock); + entry = __gvt_cache_find_dma_addr(vgpu, dma_addr); if (entry) kref_put(&entry->ref, __gvt_dma_release); - mutex_unlock(&info->vgpu->vdev.cache_lock); + mutex_unlock(&vdev->cache_lock); } static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, void *buf, unsigned long len, bool write) { struct kvmgt_guest_info *info; - struct kvm *kvm; - int idx, ret; - bool kthread = current->mm == NULL; if (!handle_valid(handle)) return -ESRCH; info = (struct kvmgt_guest_info *)handle; - kvm = info->kvm; - - 
if (kthread) { - if (!mmget_not_zero(kvm->mm)) - return -EFAULT; - use_mm(kvm->mm); - } - idx = srcu_read_lock(&kvm->srcu); - ret = write ? kvm_write_guest(kvm, gpa, buf, len) : - kvm_read_guest(kvm, gpa, buf, len); - srcu_read_unlock(&kvm->srcu, idx); - - if (kthread) { - unuse_mm(kvm->mm); - mmput(kvm->mm); - } - - return ret; + return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group, + gpa, buf, len, write); } static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index a55178884d67..291993615af9 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *i915 = gvt->gt->i915; unsigned int offset = 0; int ret = -EINVAL; @@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); - if (WARN_ON(bytes > 8)) + if (drm_WARN_ON(&i915->drm, bytes > 8)) goto err; if (reg_is_gtt(gvt, offset)) { - if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) && + !IS_ALIGNED(offset, 8))) goto err; - if (WARN_ON(bytes != 4 && bytes != 8)) + if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8)) goto err; - if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1))) + if (drm_WARN_ON(&i915->drm, + !reg_is_gtt(gvt, offset + bytes - 1))) goto err; ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset, @@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, goto out; } - if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { + if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes); goto out; } - if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) + if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1))) goto err; if (!intel_gvt_mmio_is_unalign(gvt, offset)) { - if (WARN_ON(!IS_ALIGNED(offset, bytes))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes))) goto err; } @@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *i915 = gvt->gt->i915; unsigned int offset = 0; int ret = -EINVAL; @@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); - if (WARN_ON(bytes > 8)) + if (drm_WARN_ON(&i915->drm, bytes > 8)) goto err; if (reg_is_gtt(gvt, offset)) { - if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8))) + if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) && + !IS_ALIGNED(offset, 8))) goto err; - if (WARN_ON(bytes != 4 && bytes != 8)) + if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8)) goto err; - if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1))) + if (drm_WARN_ON(&i915->drm, + !reg_is_gtt(gvt, offset + bytes - 1))) goto err; ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset, @@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa, goto out; } - if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) { + if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) { ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); goto out; } @@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) /* set the bit 
0:2(Core C-State ) to C0 */ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; - if (IS_BROXTON(vgpu->gvt->dev_priv)) { + if (IS_BROXTON(vgpu->gvt->gt->i915)) { vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 2e68f4b02c94..cc4812648bf4 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -69,8 +69,8 @@ struct intel_gvt_mmio_info { struct hlist_node node; }; -int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt, - unsigned int reg); +const struct intel_engine_cs * +intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg); unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt); bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index aaf15916d29a..2ccaf78f96e8 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = { [VECS0] = 0xcb00, }; -static void load_render_mocs(struct drm_i915_private *dev_priv) +static void load_render_mocs(const struct intel_engine_cs *engine) { - struct intel_gvt *gvt = dev_priv->gvt; - i915_reg_t offset; + struct intel_gvt *gvt = engine->i915->gvt; + struct intel_uncore *uncore = engine->uncore; u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; + i915_reg_t offset; int ring_id, i; /* Platform doesn't have mocs mmios. */ @@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) return; for (ring_id = 0; ring_id < cnt; ring_id++) { - if (!HAS_ENGINE(dev_priv, ring_id)) + if (!HAS_ENGINE(engine->i915, ring_id)) continue; + offset.reg = regs[ring_id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { gen9_render_mocs.control_table[ring_id][i] = - I915_READ_FW(offset); + intel_uncore_read_fw(uncore, offset); offset.reg += 4; } } @@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { gen9_render_mocs.l3cc_table[i] = - I915_READ_FW(offset); + intel_uncore_read_fw(uncore, offset); offset.reg += 4; } gen9_render_mocs.initialized = true; @@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu, *cs++ = MI_LOAD_REGISTER_IMM(count); for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { - if (mmio->ring_id != ring_id || - !mmio->in_context) + if (mmio->id != ring_id || !mmio->in_context) continue; *cs++ = i915_mmio_reg_offset(mmio->reg); - *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | - (mmio->mask << 16); + *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16); gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n", *(cs-2), *(cs-1), vgpu->id, ring_id); } @@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = { [VECS0] = 0x4270, }; -static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) +static void handle_tlb_pending_event(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_uncore *uncore = engine->uncore; struct intel_vgpu_submission *s = &vgpu->submission; u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; u32 cnt = 
vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; @@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) if (!regs) return; - if (WARN_ON(ring_id >= cnt)) + if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt)) return; - if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending)) + if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending)) return; - reg = _MMIO(regs[ring_id]); + reg = _MMIO(regs[engine->id]); /* WaForceWakeRenderDuringMmioTLBInvalidate:skl * we need to put a forcewake when invalidating RCS TLB caches, @@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) + if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(uncore, fw); intel_uncore_write_fw(uncore, reg, 0x1); - if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50)) - gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); + if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50)) + gvt_vgpu_err("timeout in invalidate ring %s tlb\n", + engine->name); else vgpu_vreg_t(vgpu, reg) = 0; intel_uncore_forcewake_put(uncore, fw); - gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); + gvt_dbg_core("invalidate TLB for ring %s\n", engine->name); } static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, - int ring_id) + const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv; - i915_reg_t offset, l3_offset; - u32 old_v, new_v; - u32 regs[] = { [RCS0] = 0xc800, [VCS0] = 0xc900, @@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, [BCS0] = 0xcc00, [VECS0] = 0xcb00, }; + struct intel_uncore *uncore = engine->uncore; + i915_reg_t offset, l3_offset; + u32 old_v, new_v; int i; - dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) + if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs))) return; - if (ring_id == RCS0 && IS_GEN(dev_priv, 9)) + if (engine->id == RCS0 && IS_GEN(engine->i915, 9)) return; if (!pre && !gen9_render_mocs.initialized) - load_render_mocs(dev_priv); + load_render_mocs(engine); - offset.reg = regs[ring_id]; + offset.reg = regs[engine->id]; for (i = 0; i < GEN9_MOCS_SIZE; i++) { if (pre) old_v = vgpu_vreg_t(pre, offset); else - old_v = gen9_render_mocs.control_table[ring_id][i]; + old_v = gen9_render_mocs.control_table[engine->id][i]; if (next) new_v = vgpu_vreg_t(next, offset); else - new_v = gen9_render_mocs.control_table[ring_id][i]; + new_v = gen9_render_mocs.control_table[engine->id][i]; if (old_v != new_v) - I915_WRITE_FW(offset, new_v); + intel_uncore_write_fw(uncore, offset, new_v); offset.reg += 4; } - if (ring_id == RCS0) { + if (engine->id == RCS0) { l3_offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { if (pre) @@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, new_v = gen9_render_mocs.l3cc_table[i]; if (old_v != new_v) - I915_WRITE_FW(l3_offset, new_v); + intel_uncore_write_fw(uncore, l3_offset, new_v); l3_offset.reg += 4; } @@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce) /* Switch ring mmio values (context). 
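 * (Save the outgoing owner's engine registers into its vregs, then load
 * the incoming owner's values; on gen9+ the MOCS tables are switched
 * first.)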
*/ static void switch_mmio(struct intel_vgpu *pre, struct intel_vgpu *next, - int ring_id) + const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv; + struct intel_uncore *uncore = engine->uncore; struct intel_vgpu_submission *s; struct engine_mmio *mmio; u32 old_v, new_v; - dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - if (INTEL_GEN(dev_priv) >= 9) - switch_mocs(pre, next, ring_id); + if (INTEL_GEN(engine->i915) >= 9) + switch_mocs(pre, next, engine); - for (mmio = dev_priv->gvt->engine_mmio_list.mmio; + for (mmio = engine->i915->gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { - if (mmio->ring_id != ring_id) + if (mmio->id != engine->id) continue; /* * No need to do save or restore of the mmio which is in context * state image on gen9, it's initialized by lri command and * save or restore with context together. */ - if (IS_GEN(dev_priv, 9) && mmio->in_context) + if (IS_GEN(engine->i915, 9) && mmio->in_context) continue; // save if (pre) { - vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg); + vgpu_vreg_t(pre, mmio->reg) = + intel_uncore_read_fw(uncore, mmio->reg); if (mmio->mask) vgpu_vreg_t(pre, mmio->reg) &= - ~(mmio->mask << 16); + ~(mmio->mask << 16); old_v = vgpu_vreg_t(pre, mmio->reg); - } else - old_v = mmio->value = I915_READ_FW(mmio->reg); + } else { + old_v = mmio->value = + intel_uncore_read_fw(uncore, mmio->reg); + } // restore if (next) { @@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. */ if (mmio->in_context && - !is_inhibit_context(s->shadow[ring_id])) + !is_inhibit_context(s->shadow[engine->id])) continue; if (mmio->mask) new_v = vgpu_vreg_t(next, mmio->reg) | - (mmio->mask << 16); + (mmio->mask << 16); else new_v = vgpu_vreg_t(next, mmio->reg); } else { @@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre, new_v = mmio->value; } - I915_WRITE_FW(mmio->reg, new_v); + intel_uncore_write_fw(uncore, mmio->reg, new_v); trace_render_mmio(pre ? pre->id : 0, next ? next->id : 0, @@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre, if (next) - handle_tlb_pending_event(next, ring_id); + handle_tlb_pending_event(next, engine); } /** * intel_gvt_switch_mmio - switch mmio context of a specific engine * @pre: the last vGPU that owned the engine * @next: the vGPU to switch to - * @ring_id: specify the engine + * @engine: the engine * * If pre is null, the host owns the engine. If next is null, * we are switching to a host workload. */ void intel_gvt_switch_mmio(struct intel_vgpu *pre, - struct intel_vgpu *next, int ring_id) + struct intel_vgpu *next, + const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv; - - if (WARN_ON(!pre && !next)) + if (WARN(!pre && !next, "switch ring %s from host to HOST\n", + engine->name)) return; - gvt_dbg_render("switch ring %d from %s to %s\n", ring_id, + gvt_dbg_render("switch ring %s from %s to %s\n", engine->name, pre ? "vGPU" : "host", next ? "vGPU" : "HOST"); - dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - /** * We are using the raw mmio access wrapper to improve the * performance of batch mmio read/write, so we need to * handle forcewake manually. 
*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - switch_mmio(pre, next, ring_id); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); + switch_mmio(pre, next, engine); + intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); } /** @@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; - if (INTEL_GEN(gvt->dev_priv) >= 9) { + if (INTEL_GEN(gvt->gt->i915) >= 9) { gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); @@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) for (mmio = gvt->engine_mmio_list.mmio; i915_mmio_reg_valid(mmio->reg); mmio++) { if (mmio->in_context) { - gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++; + gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg); } } diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index f7eaa442403f..970704b18f23 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -37,7 +37,7 @@ #define __GVT_RENDER_H__ struct engine_mmio { - int ring_id; + enum intel_engine_id id; i915_reg_t reg; u32 mask; bool in_context; @@ -45,7 +45,8 @@ struct engine_mmio { }; void intel_gvt_switch_mmio(struct intel_vgpu *pre, - struct intel_vgpu *next, int ring_id); + struct intel_vgpu *next, + const struct intel_engine_cs *engine); void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 2369d4a9af94..036b74fe9298 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) enum intel_engine_id i; struct intel_engine_cs *engine; - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (!list_empty(workload_q_head(vgpu, i))) + for_each_engine(engine, vgpu->gvt->gt, i) { + if (!list_empty(workload_q_head(vgpu, engine))) return true; } @@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) scheduler->need_reschedule = true; /* still have uncompleted workload? 
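 * (Bail out if any engine still has a current workload in flight; the
 * vGPU switch is attempted again once those workloads complete.)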
*/ - for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) + for_each_engine(engine, gvt->gt, i) { + if (scheduler->current_workload[engine->id]) return; } @@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) scheduler->need_reschedule = false; /* wake up workload dispatch thread */ - for_each_engine(engine, gvt->dev_priv, i) - wake_up(&scheduler->waitq[i]); + for_each_engine(engine, gvt->gt, i) + wake_up(&scheduler->waitq[engine->id]); } static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) @@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; - int ring_id; struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; if (!vgpu_data->active) return; @@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); - for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { - if (scheduler->engine_owner[ring_id] == vgpu) { - intel_gvt_switch_mmio(vgpu, NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; + for_each_engine(engine, vgpu->gvt->gt, id) { + if (scheduler->engine_owner[engine->id] == vgpu) { + intel_gvt_switch_mmio(vgpu, NULL, engine); + scheduler->engine_owner[engine->id] = NULL; } } spin_unlock_bh(&scheduler->mmio_context_lock); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 685d1e04a5ff..e92ed96c9b23 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload) static void sr_oa_regs(struct intel_vgpu_workload *workload, u32 *reg_state, bool save) { - struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915; u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset; u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset; int i = 0; @@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, i915_mmio_reg_offset(EU_PERF_CNTL6), }; - if (workload->ring_id != RCS0) + if (workload->engine->id != RCS0) return; if (save) { @@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; - int ring_id = workload->ring_id; struct drm_i915_gem_object *ctx_obj = workload->req->context->state->obj; struct execlist_ring_context *shadow_ring_context; @@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG_MASKED(ctx_ctrl); COPY_REG(ctx_timestamp); - if (ring_id == RCS0) { + if (workload->engine->id == RCS0) { COPY_REG(bb_per_ctx_ptr); COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); @@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val)) return 0; - gvt_dbg_sched("ring id %d workload lrca %x", ring_id, - workload->ctx_desc.lrca); - - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + gvt_dbg_sched("ring %s workload lrca %x", + workload->engine->name, + workload->ctx_desc.lrca); + context_page_num = 
workload->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) + if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0) context_page_num = 19; i = 2; @@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq) return intel_context_force_single_submission(rq->context); } -static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) +static void save_ring_hw_state(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - u32 ring_base = dev_priv->engine[ring_id]->mmio_base; + struct intel_uncore *uncore = engine->uncore; i915_reg_t reg; - reg = RING_INSTDONE(ring_base); - vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); - reg = RING_ACTHD(ring_base); - vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); - reg = RING_ACTHD_UDW(ring_base); - vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); + reg = RING_INSTDONE(engine->mmio_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = + intel_uncore_read(uncore, reg); + + reg = RING_ACTHD(engine->mmio_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = + intel_uncore_read(uncore, reg); + + reg = RING_ACTHD_UDW(engine->mmio_base); + vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = + intel_uncore_read(uncore, reg); } static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) { - struct i915_request *req = data; + struct i915_request *rq = data; struct intel_gvt *gvt = container_of(nb, struct intel_gvt, - shadow_ctx_notifier_block[req->engine->id]); + shadow_ctx_notifier_block[rq->engine->id]); struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - enum intel_engine_id ring_id = req->engine->id; + enum intel_engine_id ring_id = rq->engine->id; struct intel_vgpu_workload *workload; unsigned long flags; - if (!is_gvt_request(req)) { + if (!is_gvt_request(rq)) { spin_lock_irqsave(&scheduler->mmio_context_lock, flags); if (action == INTEL_CONTEXT_SCHEDULE_IN && scheduler->engine_owner[ring_id]) { /* Switch ring from vGPU to host. */ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - NULL, ring_id); + NULL, rq->engine); scheduler->engine_owner[ring_id] = NULL; } spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); @@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb, if (workload->vgpu != scheduler->engine_owner[ring_id]) { /* Switch ring from host to vGPU or vGPU to vGPU. 
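 * (engine_owner records whose mmio state is live on this ring, so the
 * switch below runs only when ownership actually changes.)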
*/ intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - workload->vgpu, ring_id); + workload->vgpu, rq->engine); scheduler->engine_owner[ring_id] = workload->vgpu; } else gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", @@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb, atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: - save_ring_hw_state(workload->vgpu, ring_id); + save_ring_hw_state(workload->vgpu, rq->engine); atomic_set(&workload->shadow_ctx_active, 0); break; case INTEL_CONTEXT_SCHEDULE_PREEMPTED: - save_ring_hw_state(workload->vgpu, ring_id); + save_ring_hw_state(workload->vgpu, rq->engine); break; default: WARN_ON(1); @@ -286,17 +290,17 @@ static void shadow_context_descriptor_update(struct intel_context *ce, struct intel_vgpu_workload *workload) { - u64 desc = ce->lrc_desc; + u64 desc = ce->lrc.desc; /* * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ - desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); - desc |= workload->ctx_desc.addressing_mode << + desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT); + desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - ce->lrc_desc = desc; + ce->lrc.desc = desc; } static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) @@ -375,7 +379,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { struct i915_page_directory * const pd = i915_pd_entry(ppgtt->pd, i); - + /* skip now, as the current i915 ppgtt alloc won't allocate a + * top-level pdp for a non-4-level table; this won't impact the + * shadow ppgtt. */ + if (!pd) + break; px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } @@ -391,7 +399,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) if (workload->req) return 0; - rq = i915_request_create(s->shadow[workload->ring_id]); + rq = i915_request_create(s->shadow[workload->engine->id]); if (IS_ERR(rq)) { gvt_vgpu_err("fail to allocate gem request\n"); return PTR_ERR(rq); @@ -420,15 +428,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) if (workload->shadow) return 0; - if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) - shadow_context_descriptor_update(s->shadow[workload->ring_id], + if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated)) + shadow_context_descriptor_update(s->shadow[workload->engine->id], workload); ret = intel_gvt_scan_and_shadow_ringbuffer(workload); if (ret) return ret; - if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { + if (workload->engine->id == RCS0 && + workload->wa_ctx.indirect_ctx.size) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; @@ -436,6 +445,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) workload->shadow = true; return 0; + err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); return ret; @@ -567,12 +577,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) { - struct intel_vgpu *vgpu = workload->vgpu; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - u32 ring_base; - - ring_base = dev_priv->engine[workload->ring_id]->mmio_base; - vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start; + vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) = + workload->rb_start; 
} static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) @@ -608,7 +614,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; - int ring = workload->ring_id; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); @@ -625,7 +630,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload) update_shadow_pdps(workload); - set_context_ppgtt_from_shadow(workload, s->shadow[ring]); + set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]); ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { @@ -677,11 +682,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct i915_request *rq; - int ring_id = workload->ring_id; int ret; - gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", - ring_id, workload); + gvt_dbg_sched("ring %s prepare to dispatch workload %p\n", + workload->engine->name, workload); mutex_lock(&vgpu->vgpu_lock); @@ -710,8 +714,8 @@ out: } if (!IS_ERR_OR_NULL(workload->req)) { - gvt_dbg_sched("ring id %d submit workload to i915 %p\n", - ring_id, workload->req); + gvt_dbg_sched("ring %s submit workload to i915 %p\n", + workload->engine->name, workload->req); i915_request_add(workload->req); workload->dispatched = true; } @@ -722,8 +726,8 @@ err_req: return ret; } -static struct intel_vgpu_workload *pick_next_workload( - struct intel_gvt *gvt, int ring_id) +static struct intel_vgpu_workload * +pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; @@ -735,27 +739,27 @@ static struct intel_vgpu_workload *pick_next_workload( * bail out */ if (!scheduler->current_vgpu) { - gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); + gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name); goto out; } if (scheduler->need_reschedule) { - gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); + gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name); goto out; } if (!scheduler->current_vgpu->active || - list_empty(workload_q_head(scheduler->current_vgpu, ring_id)) + list_empty(workload_q_head(scheduler->current_vgpu, engine))) goto out; /* * still have current workload, maybe the workload dispatcher * failed to submit it for some reason, resubmit it. */ - if (scheduler->current_workload[ring_id]) { - workload = scheduler->current_workload[ring_id]; - gvt_dbg_sched("ring id %d still have current workload %p\n", - ring_id, workload); + if (scheduler->current_workload[engine->id]) { + workload = scheduler->current_workload[engine->id]; + gvt_dbg_sched("ring %s still have current workload %p\n", + engine->name, workload); goto out; } @@ -765,13 +769,14 @@ static struct intel_vgpu_workload *pick_next_workload( * will wait until the current workload is finished when trying to * schedule out a vgpu. 
*/ - scheduler->current_workload[ring_id] = container_of( - workload_q_head(scheduler->current_vgpu, ring_id)->next, - struct intel_vgpu_workload, list); + scheduler->current_workload[engine->id] = + list_first_entry(workload_q_head(scheduler->current_vgpu, + engine), + struct intel_vgpu_workload, list); - workload = scheduler->current_workload[ring_id]; + workload = scheduler->current_workload[engine->id]; - gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload); + gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload); atomic_inc(&workload->vgpu->submission.running_workload_num); out: @@ -783,14 +788,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload) { struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; - struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_gem_object *ctx_obj = rq->context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *src; unsigned long context_gpa, context_page_num; int i; - struct drm_i915_private *dev_priv = gvt->dev_priv; u32 ring_base; u32 head, tail; u16 wrap_count; @@ -811,14 +814,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload) head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail; - ring_base = dev_priv->engine[workload->ring_id]->mmio_base; + ring_base = rq->engine->mmio_base; vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail; vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head; context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) + if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0) context_page_num = 19; i = 2; @@ -869,7 +872,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask) { struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; struct intel_engine_cs *engine; struct intel_vgpu_workload *pos, *n; intel_engine_mask_t tmp; @@ -966,54 +969,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) mutex_unlock(&vgpu->vgpu_lock); } -struct workload_thread_param { - struct intel_gvt *gvt; - int ring_id; -}; - -static int workload_thread(void *priv) +static int workload_thread(void *arg) { - struct workload_thread_param *p = (struct workload_thread_param *)priv; - struct intel_gvt *gvt = p->gvt; - int ring_id = p->ring_id; + struct intel_engine_cs *engine = arg; + const bool need_force_wake = INTEL_GEN(engine->i915) >= 9; + struct intel_gvt *gvt = engine->i915->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; - bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); - struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; - - kfree(p); - gvt_dbg_core("workload thread for ring %d started\n", ring_id); + gvt_dbg_core("workload thread for ring %s started\n", engine->name); while (!kthread_should_stop()) { - add_wait_queue(&scheduler->waitq[ring_id], &wait); + intel_wakeref_t wakeref; + + add_wait_queue(&scheduler->waitq[engine->id], &wait); do { - workload = pick_next_workload(gvt, ring_id); + workload = pick_next_workload(gvt, engine); if (workload) break; wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } while (!kthread_should_stop()); - 
remove_wait_queue(&scheduler->waitq[ring_id], &wait); + remove_wait_queue(&scheduler->waitq[engine->id], &wait); if (!workload) break; - gvt_dbg_sched("ring id %d next workload %p vgpu %d\n", - workload->ring_id, workload, - workload->vgpu->id); + gvt_dbg_sched("ring %s next workload %p vgpu %d\n", + engine->name, workload, + workload->vgpu->id); - intel_runtime_pm_get(rpm); + wakeref = intel_runtime_pm_get(engine->uncore->rpm); - gvt_dbg_sched("ring id %d will dispatch workload %p\n", - workload->ring_id, workload); + gvt_dbg_sched("ring %s will dispatch workload %p\n", + engine->name, workload); if (need_force_wake) - intel_uncore_forcewake_get(&gvt->dev_priv->uncore, - FORCEWAKE_ALL); + intel_uncore_forcewake_get(engine->uncore, + FORCEWAKE_ALL); /* * Update the vReg of the vGPU which submitted this * workload. The vGPU may use these registers for checking @@ -1030,21 +1026,21 @@ static int workload_thread(void *priv) goto complete; } - gvt_dbg_sched("ring id %d wait workload %p\n", - workload->ring_id, workload); + gvt_dbg_sched("ring %s wait workload %p\n", + engine->name, workload); i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); complete: gvt_dbg_sched("will complete workload %p, status: %d\n", - workload, workload->status); + workload, workload->status); - complete_current_workload(gvt, ring_id); + complete_current_workload(gvt, engine->id); if (need_force_wake) - intel_uncore_forcewake_put(&gvt->dev_priv->uncore, - FORCEWAKE_ALL); + intel_uncore_forcewake_put(engine->uncore, + FORCEWAKE_ALL); - intel_runtime_pm_put_unchecked(rpm); + intel_runtime_pm_put(engine->uncore->rpm, wakeref); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } @@ -1073,7 +1069,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) gvt_dbg_core("clean workload scheduler\n"); - for_each_engine(engine, gvt->dev_priv, i) { + for_each_engine(engine, gvt->gt, i) { atomic_notifier_chain_unregister( &engine->context_status_notifier, &gvt->shadow_ctx_notifier_block[i]); @@ -1084,7 +1080,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct workload_thread_param *param = NULL; struct intel_engine_cs *engine; enum intel_engine_id i; int ret; @@ -1093,20 +1088,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) init_waitqueue_head(&scheduler->workload_complete_wq); - for_each_engine(engine, gvt->dev_priv, i) { + for_each_engine(engine, gvt->gt, i) { init_waitqueue_head(&scheduler->waitq[i]); - param = kzalloc(sizeof(*param), GFP_KERNEL); - if (!param) { - ret = -ENOMEM; - goto err; - } - - param->gvt = gvt; - param->ring_id = i; - - scheduler->thread[i] = kthread_run(workload_thread, param, - "gvt workload %d", i); + scheduler->thread[i] = kthread_run(workload_thread, engine, + "gvt:%s", engine->name); if (IS_ERR(scheduler->thread[i])) { gvt_err("fail to create workload thread\n"); ret = PTR_ERR(scheduler->thread[i]); @@ -1118,11 +1104,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) atomic_notifier_chain_register(&engine->context_status_notifier, &gvt->shadow_ctx_notifier_block[i]); } + return 0; + err: intel_gvt_clean_workload_scheduler(gvt); - kfree(param); - param = NULL; return ret; } @@ -1160,7 +1146,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); i915_context_ppgtt_root_restore(s, 
i915_vm_to_ppgtt(s->shadow[0]->vm)); - for_each_engine(engine, vgpu->gvt->dev_priv, id) + for_each_engine(engine, vgpu->gvt->gt, id) intel_context_unpin(s->shadow[id]); kmem_cache_destroy(s->workloads); @@ -1217,7 +1203,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s, */ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) { - struct drm_i915_private *i915 = vgpu->gvt->dev_priv; + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; struct i915_ppgtt *ppgtt; @@ -1230,7 +1216,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) i915_context_ppgtt_root_save(s, ppgtt); - for_each_engine(engine, i915, i) { + for_each_engine(engine, vgpu->gvt->gt, i) { struct intel_context *ce; INIT_LIST_HEAD(&s->workload_q_head[i]); @@ -1246,7 +1232,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ce->vm = i915_vm_get(&ppgtt->vm); intel_context_set_single_submission(ce); - if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */ + /* Max ring buffer size */ + if (!intel_uc_wants_guc_submission(&engine->gt->uc)) { const unsigned int ring_size = 512 * SZ_4K; ce->ring = __intel_context_ring_size(ring_size); @@ -1282,7 +1269,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) out_shadow_ctx: i915_context_ppgtt_root_restore(s, ppgtt); - for_each_engine(engine, i915, i) { + for_each_engine(engine, vgpu->gvt->gt, i) { if (IS_ERR(s->shadow[i])) break; @@ -1309,6 +1296,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask, unsigned int interface) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_submission *s = &vgpu->submission; const struct intel_vgpu_submission_ops *ops[] = { [INTEL_VGPU_EXECLIST_SUBMISSION] = @@ -1316,10 +1304,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu, }; int ret; - if (WARN_ON(interface >= ARRAY_SIZE(ops))) + if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops))) return -EINVAL; - if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES)) + if (drm_WARN_ON(&i915->drm, + interface == 0 && engine_mask != ALL_ENGINES)) return -EINVAL; if (s->active) @@ -1441,7 +1430,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) /** * intel_vgpu_create_workload - create a vGPU workload * @vgpu: a vGPU - * @ring_id: ring index + * @engine: the engine * @desc: a guest context descriptor * * This function is called when creating a vGPU workload. 
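The hunks above and below complete this file's move from integer ring ids to passing struct intel_engine_cs pointers: per-engine state is indexed through engine->id, and debug messages use engine->name. As an aside, here is a minimal, self-contained C sketch of that indexing pattern; struct engine, struct scheduler, workload_q_head() and pick_next_workload() below are simplified stand-ins invented for illustration, not the driver's real types or API.

#include <stdio.h>

enum engine_id { RCS0, VCS0, BCS0, NUM_ENGINES };

struct engine {
	enum engine_id id;	/* index into per-engine arrays */
	const char *name;	/* used for debug messages */
};

struct workload {
	const struct engine *engine;	/* instead of an int ring_id */
	struct workload *next;
};

struct scheduler {
	/* one FIFO of pending workloads per engine */
	struct workload *queue[NUM_ENGINES];
};

/* Queue lookup is keyed by the engine object itself, not a bare int. */
static struct workload **workload_q_head(struct scheduler *s,
					 const struct engine *e)
{
	return &s->queue[e->id];
}

static struct workload *pick_next_workload(struct scheduler *s,
					   const struct engine *e)
{
	return *workload_q_head(s, e);	/* head of the per-engine FIFO */
}

int main(void)
{
	static const struct engine rcs = { RCS0, "rcs0" };
	struct workload w = { &rcs, NULL };
	struct scheduler sched = { { NULL } };

	*workload_q_head(&sched, &rcs) = &w;
	printf("next workload on %s: %p\n",
	       rcs.name, (void *)pick_next_workload(&sched, &rcs));
	return 0;
}

Passing the engine keeps id and name consistent at every use site, which is why the per-engine kthreads above can simply take the engine pointer as their argument.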
@@ -1452,14 +1441,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload) * */ struct intel_vgpu_workload * -intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, +intel_vgpu_create_workload(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine, struct execlist_ctx_descriptor_format *desc) { struct intel_vgpu_submission *s = &vgpu->submission; - struct list_head *q = workload_q_head(vgpu, ring_id); + struct list_head *q = workload_q_head(vgpu, engine); struct intel_vgpu_workload *last_workload = NULL; struct intel_vgpu_workload *workload = NULL; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; u64 ring_context_gpa; u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx; u32 guest_head; @@ -1486,10 +1475,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, list_for_each_entry_reverse(last_workload, q, list) { if (same_context(&last_workload->ctx_desc, desc)) { - gvt_dbg_el("ring id %d cur workload == last\n", - ring_id); + gvt_dbg_el("ring %s cur workload == last\n", + engine->name); gvt_dbg_el("ctx head %x real head %lx\n", head, - last_workload->rb_tail); + last_workload->rb_tail); /* * cannot use guest context head pointer here, * as it might not be updated at this time @@ -1499,7 +1488,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, } } - gvt_dbg_el("ring id %d begin a new workload\n", ring_id); + gvt_dbg_el("ring %s begin a new workload\n", engine->name); /* record some ring buffer register values for scan and shadow */ intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + @@ -1519,7 +1508,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, if (IS_ERR(workload)) return workload; - workload->ring_id = ring_id; + workload->engine = engine; workload->ctx_desc = *desc; workload->ring_context_gpa = ring_context_gpa; workload->rb_head = head; @@ -1528,7 +1517,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->rb_start = start; workload->rb_ctl = ctl; - if (ring_id == RCS0) { + if (engine->id == RCS0) { intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + @@ -1566,8 +1555,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, } } - gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", - workload, ring_id, head, tail, start, ctl); + gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n", + workload, engine->name, head, tail, start, ctl); ret = prepare_mm(workload); if (ret) { @@ -1578,10 +1567,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, /* Only scan and shadow the first workload in the queue * as there is only one pre-allocated buf-obj for shadow. 
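 * (workload->shadow, set in intel_gvt_scan_and_shadow_workload(), keeps
 * a workload from being shadowed twice; later queue entries are shadowed
 * when they are dispatched.)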
*/ - if (list_empty(workload_q_head(vgpu, ring_id))) { - intel_runtime_pm_get(&dev_priv->runtime_pm); - ret = intel_gvt_scan_and_shadow_workload(workload); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + if (list_empty(q)) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref) + ret = intel_gvt_scan_and_shadow_workload(workload); } if (ret) { @@ -1601,7 +1591,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload) { list_add_tail(&workload->list, - workload_q_head(workload->vgpu, workload->ring_id)); + workload_q_head(workload->vgpu, workload->engine)); intel_gvt_kick_schedule(workload->vgpu->gvt); - wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]); + wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index c50d14a9ce85..bf7fc0ca4cb1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx { struct intel_vgpu_workload { struct intel_vgpu *vgpu; - int ring_id; + const struct intel_engine_cs *engine; struct i915_request *req; /* if this workload has been dispatched to i915? */ bool dispatched; @@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb { bool ppgtt; }; -#define workload_q_head(vgpu, ring_id) \ - (&(vgpu->submission.workload_q_head[ring_id])) +#define workload_q_head(vgpu, e) \ + (&(vgpu)->submission.workload_q_head[(e)->id]) void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload); @@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops; struct intel_vgpu_workload * -intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, +intel_vgpu_create_workload(struct intel_vgpu *vgpu, + const struct intel_engine_cs *engine, struct execlist_ctx_descriptor_format *desc); void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload); diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 345c2aa3b491..1d5ff88078bd 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -37,6 +37,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) { + struct drm_i915_private *i915 = vgpu->gvt->gt->i915; /* setup the ballooning information */ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; @@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu)); gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu)); - WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); + drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } #define VGPU_MAX_WEIGHT 16 @@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, high_avail / vgpu_types[i].high_mm); - if (IS_GEN(gvt->dev_priv, 8)) + if (IS_GEN(gvt->gt->i915, 8)) sprintf(gvt->types[i].name, "GVTg_V4_%s", - vgpu_types[i].name); - else if (IS_GEN(gvt->dev_priv, 9)) + vgpu_types[i].name); + else if (IS_GEN(gvt->gt->i915, 9)) sprintf(gvt->types[i].name, "GVTg_V5_%s", - vgpu_types[i].name); + vgpu_types[i].name); gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", i, gvt->types[i].name, @@ -271,8 +272,9 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) void 
intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *i915 = gvt->gt->i915; - WARN(vgpu->active, "vGPU is still active!\n"); + drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n"); /* * remove idr first so later clean can judge if need to stop @@ -432,9 +434,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - /*TODO: add more platforms support */ - if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index b0a499753526..c4048628188a 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -7,6 +7,7 @@ #include <linux/debugobjects.h> #include "gt/intel_context.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_pm.h" #include "gt/intel_ring.h" @@ -390,13 +391,23 @@ out: return err; } -void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) +struct dma_fence * +i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) { + struct dma_fence *prev; + /* We expect the caller to manage the exclusive timeline ordering */ GEM_BUG_ON(i915_active_is_idle(ref)); - if (!__i915_active_fence_set(&ref->excl, f)) + rcu_read_lock(); + prev = __i915_active_fence_set(&ref->excl, f); + if (prev) + prev = dma_fence_get_rcu(prev); + else atomic_inc(&ref->count); + rcu_read_unlock(); + + return prev; } bool i915_active_acquire_if_busy(struct i915_active *ref) @@ -442,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active) { struct dma_fence *fence; + if (unlikely(is_barrier(active))) + return; + fence = i915_active_fence_get(active); if (!fence) return; @@ -450,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active) dma_fence_put(fence); } -int i915_active_wait(struct i915_active *ref) +static int flush_barrier(struct active_node *it) { - struct active_node *it, *n; - int err = 0; + struct intel_engine_cs *engine; - might_sleep(); + if (likely(!is_barrier(&it->base))) + return 0; - if (!i915_active_acquire_if_busy(ref)) + engine = __barrier_to_engine(it); + smp_rmb(); /* serialise with add_active_barriers */ + if (!is_barrier(&it->base)) return 0; - /* Flush lazy signals */ + return intel_engine_flush_barriers(engine); +} + +static int flush_lazy_signals(struct i915_active *ref) +{ + struct active_node *it, *n; + int err = 0; + enable_signaling(&ref->excl); rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - if (is_barrier(&it->base)) /* unconnected idle barrier */ - continue; + err = flush_barrier(it); /* unconnected idle barrier? 
*/ + if (err) + break; enable_signaling(&it->base); } - /* Any fence added after the wait begins will not be auto-signaled */ + return err; +} + +int i915_active_wait(struct i915_active *ref) +{ + int err; + + might_sleep(); + + if (!i915_active_acquire_if_busy(ref)) + return 0; + + /* Any fence added after the wait begins will not be auto-signaled */ + err = flush_lazy_signals(ref); i915_active_release(ref); if (err) return err; @@ -481,25 +518,81 @@ int i915_active_wait(struct i915_active *ref) return 0; } -int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) +static int __await_active(struct i915_active_fence *active, + int (*fn)(void *arg, struct dma_fence *fence), + void *arg) +{ + struct dma_fence *fence; + + if (is_barrier(active)) /* XXX flush the barrier? */ + return 0; + + fence = i915_active_fence_get(active); + if (fence) { + int err; + + err = fn(arg, fence); + dma_fence_put(fence); + if (err < 0) + return err; + } + + return 0; +} + +static int await_active(struct i915_active *ref, + unsigned int flags, + int (*fn)(void *arg, struct dma_fence *fence), + void *arg) { int err = 0; + /* We must always wait for the exclusive fence! */ if (rcu_access_pointer(ref->excl.fence)) { - struct dma_fence *fence; - - rcu_read_lock(); - fence = dma_fence_get_rcu_safe(&ref->excl.fence); - rcu_read_unlock(); - if (fence) { - err = i915_request_await_dma_fence(rq, fence); - dma_fence_put(fence); + err = __await_active(&ref->excl, fn, arg); + if (err) + return err; + } + + if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) { + struct active_node *it, *n; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + err = __await_active(&it->base, fn, arg); + if (err) + break; } + i915_active_release(ref); + if (err) + return err; } - /* In the future we may choose to await on all fences */ + return 0; +} - return err; +static int rq_await_fence(void *arg, struct dma_fence *fence) +{ + return i915_request_await_dma_fence(arg, fence); +} + +int i915_request_await_active(struct i915_request *rq, + struct i915_active *ref, + unsigned int flags) +{ + return await_active(ref, flags, rq_await_fence, rq); +} + +static int sw_await_fence(void *arg, struct dma_fence *fence) +{ + return i915_sw_fence_await_dma_fence(arg, fence, 0, + GFP_NOWAIT | __GFP_NOWARN); +} + +int i915_sw_fence_await_active(struct i915_sw_fence *fence, + struct i915_active *ref, + unsigned int flags) +{ + return await_active(ref, flags, sw_await_fence, fence); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) @@ -623,6 +716,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, * We can then use the preallocated nodes in * i915_active_acquire_barrier() */ + GEM_BUG_ON(!mask); for_each_engine_masked(engine, gt, mask, tmp) { u64 idx = engine->kernel_context->timeline->fence_context; struct llist_node *prev = first; @@ -812,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active, __list_del_entry(&active->cb.node); spin_unlock(prev->lock); /* serialise with prev->cb_list */ } - GEM_BUG_ON(rcu_access_pointer(active->fence) != fence); list_add_tail(&active->cb.node, &fence->cb_list); spin_unlock_irqrestore(fence->lock, flags); diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 51e1e854ca55..b3282ae7913c 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -173,7 +173,8 @@ i915_active_add_request(struct i915_active *ref, struct i915_request *rq) return i915_active_ref(ref, 
i915_request_timeline(rq), &rq->fence); } -void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f); +struct dma_fence * +i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f); static inline bool i915_active_has_exclusive(struct i915_active *ref) { @@ -182,7 +183,13 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref) int i915_active_wait(struct i915_active *ref); -int i915_request_await_active(struct i915_request *rq, struct i915_active *ref); +int i915_sw_fence_await_active(struct i915_sw_fence *fence, + struct i915_active *ref, + unsigned int flags); +int i915_request_await_active(struct i915_request *rq, + struct i915_active *ref, + unsigned int flags); +#define I915_ACTIVE_AWAIT_ALL BIT(0) int i915_active_acquire(struct i915_active *ref); bool i915_active_acquire_if_busy(struct i915_active *ref); diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index 66883af64ca1..20babbdb297d 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order) return block; out_free: - __i915_buddy_free(mm, block); + if (i != order) + __i915_buddy_free(mm, block); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a0e437aa65b7..189b573d02be 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -803,10 +803,11 @@ static bool validate_cmds_sorted(const struct intel_engine_cs *engine, u32 curr = desc->cmd.value & desc->cmd.mask; if (curr < previous) { - DRM_ERROR("CMD: %s [%d] command table not sorted: " - "table=%d entry=%d cmd=0x%08X prev=0x%08X\n", - engine->name, engine->id, - i, j, curr, previous); + drm_err(&engine->i915->drm, + "CMD: %s [%d] command table not sorted: " + "table=%d entry=%d cmd=0x%08X prev=0x%08X\n", + engine->name, engine->id, + i, j, curr, previous); ret = false; } @@ -829,10 +830,11 @@ static bool check_sorted(const struct intel_engine_cs *engine, u32 curr = i915_mmio_reg_offset(reg_table[i].addr); if (curr < previous) { - DRM_ERROR("CMD: %s [%d] register table not sorted: " - "entry=%d reg=0x%08X prev=0x%08X\n", - engine->name, engine->id, - i, curr, previous); + drm_err(&engine->i915->drm, + "CMD: %s [%d] register table not sorted: " + "entry=%d reg=0x%08X prev=0x%08X\n", + engine->name, engine->id, + i, curr, previous); ret = false; } @@ -1010,18 +1012,21 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) } if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) { - DRM_ERROR("%s: command descriptions are not sorted\n", - engine->name); + drm_err(&engine->i915->drm, + "%s: command descriptions are not sorted\n", + engine->name); return; } if (!validate_regs_sorted(engine)) { - DRM_ERROR("%s: registers are not sorted\n", engine->name); + drm_err(&engine->i915->drm, + "%s: registers are not sorted\n", engine->name); return; } ret = init_hash_table(engine, cmd_tables, cmd_table_count); if (ret) { - DRM_ERROR("%s: initialised failed!\n", engine->name); + drm_err(&engine->i915->drm, + "%s: initialisation failed!\n", engine->name); fini_hash_table(engine); return; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d5a9b8a964c2..6ca797128aa1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,14 +30,6 @@ #include <linux/sort.h> #include <drm/drm_debugfs.h> -#include 
<drm/drm_fourcc.h> - -#include "display/intel_display_types.h" -#include "display/intel_dp.h" -#include "display/intel_fbc.h" -#include "display/intel_hdcp.h" -#include "display/intel_hdmi.h" -#include "display/intel_psr.h" #include "gem/i915_gem_context.h" #include "gt/intel_gt_pm.h" @@ -48,9 +40,9 @@ #include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" +#include "i915_debugfs_params.h" #include "i915_irq.h" #include "i915_trace.h" -#include "intel_csr.h" #include "intel_pm.h" #include "intel_sideband.h" @@ -127,8 +119,8 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len) } } -static void -describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) +void +i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct intel_engine_cs *engine; @@ -673,7 +665,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) if (!vma) seq_puts(m, "unused"); else - describe_obj(m, vma->obj); + i915_debugfs_describe_obj(m, vma->obj); seq_putc(m, '\n'); } rcu_read_unlock(); @@ -1004,367 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return ret; } -static int ilk_drpc_info(struct seq_file *m) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_uncore *uncore = &i915->uncore; - u32 rgvmodectl, rstdbyctl; - u16 crstandvid; - - rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); - rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL); - crstandvid = intel_uncore_read16(uncore, CRSTANDVID); - - seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); - seq_printf(m, "Boost freq: %d\n", - (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> - MEMMODE_BOOST_FREQ_SHIFT); - seq_printf(m, "HW control enabled: %s\n", - yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); - seq_printf(m, "SW control enabled: %s\n", - yesno(rgvmodectl & MEMMODE_SWMODE_EN)); - seq_printf(m, "Gated voltage change: %s\n", - yesno(rgvmodectl & MEMMODE_RCLK_GATE)); - seq_printf(m, "Starting frequency: P%d\n", - (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); - seq_printf(m, "Max P-state: P%d\n", - (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); - seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); - seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); - seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); - seq_printf(m, "Render standby enabled: %s\n", - yesno(!(rstdbyctl & RCX_SW_EXIT))); - seq_puts(m, "Current RS state: "); - switch (rstdbyctl & RSX_STATUS_MASK) { - case RSX_STATUS_ON: - seq_puts(m, "on\n"); - break; - case RSX_STATUS_RC1: - seq_puts(m, "RC1\n"); - break; - case RSX_STATUS_RC1E: - seq_puts(m, "RC1E\n"); - break; - case RSX_STATUS_RS1: - seq_puts(m, "RS1\n"); - break; - case RSX_STATUS_RS2: - seq_puts(m, "RS2 (RC6)\n"); - break; - case RSX_STATUS_RS3: - seq_puts(m, "RC3 (RC6+)\n"); - break; - default: - seq_puts(m, "unknown\n"); - break; - } - - return 0; -} - -static int i915_forcewake_domains(struct seq_file *m, void *data) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_uncore *uncore = &i915->uncore; - struct intel_uncore_forcewake_domain *fw_domain; - unsigned int tmp; - - seq_printf(m, "user.bypass_count = %u\n", - uncore->user_forcewake_count); - - for_each_fw_domain(fw_domain, uncore, tmp) - seq_printf(m, "%s.wake_count = %u\n", - intel_uncore_forcewake_domain_to_str(fw_domain->id), - READ_ONCE(fw_domain->wake_count)); - - return 0; -} - -static void 
print_rc6_res(struct seq_file *m, - const char *title, - const i915_reg_t reg) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - intel_wakeref_t wakeref; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - seq_printf(m, "%s %u (%llu us)\n", title, - intel_uncore_read(&i915->uncore, reg), - intel_rc6_residency_us(&i915->gt.rc6, reg)); -} - -static int vlv_drpc_info(struct seq_file *m) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - u32 rcctl1, pw_status; - - pw_status = I915_READ(VLV_GTLC_PW_STATUS); - rcctl1 = I915_READ(GEN6_RC_CONTROL); - - seq_printf(m, "RC6 Enabled: %s\n", - yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | - GEN6_RC_CTL_EI_MODE(1)))); - seq_printf(m, "Render Power Well: %s\n", - (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); - seq_printf(m, "Media Power Well: %s\n", - (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); - - print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); - print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); - - return i915_forcewake_domains(m, NULL); -} - -static int gen6_drpc_info(struct seq_file *m) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - u32 gt_core_status, rcctl1, rc6vids = 0; - u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; - - gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); - trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); - - rcctl1 = I915_READ(GEN6_RC_CONTROL); - if (INTEL_GEN(dev_priv) >= 9) { - gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); - gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); - } - - if (INTEL_GEN(dev_priv) <= 7) - sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); - - seq_printf(m, "RC1e Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); - seq_printf(m, "RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); - if (INTEL_GEN(dev_priv) >= 9) { - seq_printf(m, "Render Well Gating Enabled: %s\n", - yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); - seq_printf(m, "Media Well Gating Enabled: %s\n", - yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); - } - seq_printf(m, "Deep RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); - seq_printf(m, "Deepest RC6 Enabled: %s\n", - yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); - seq_puts(m, "Current RC state: "); - switch (gt_core_status & GEN6_RCn_MASK) { - case GEN6_RC0: - if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) - seq_puts(m, "Core Power Down\n"); - else - seq_puts(m, "on\n"); - break; - case GEN6_RC3: - seq_puts(m, "RC3\n"); - break; - case GEN6_RC6: - seq_puts(m, "RC6\n"); - break; - case GEN6_RC7: - seq_puts(m, "RC7\n"); - break; - default: - seq_puts(m, "Unknown\n"); - break; - } - - seq_printf(m, "Core Power Down: %s\n", - yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); - if (INTEL_GEN(dev_priv) >= 9) { - seq_printf(m, "Render Power Well: %s\n", - (gen9_powergate_status & - GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); - seq_printf(m, "Media Power Well: %s\n", - (gen9_powergate_status & - GEN9_PWRGT_MEDIA_STATUS_MASK) ? 
"Up" : "Down"); - } - - /* Not exactly sure what this is */ - print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", - GEN6_GT_GFX_RC6_LOCKED); - print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); - print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); - print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); - - if (INTEL_GEN(dev_priv) <= 7) { - seq_printf(m, "RC6 voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); - seq_printf(m, "RC6+ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); - seq_printf(m, "RC6++ voltage: %dmV\n", - GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); - } - - return i915_forcewake_domains(m, NULL); -} - -static int i915_drpc_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - int err = -ENODEV; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = vlv_drpc_info(m); - else if (INTEL_GEN(dev_priv) >= 6) - err = gen6_drpc_info(m); - else - err = ilk_drpc_info(m); - } - - return err; -} - -static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - - seq_printf(m, "FB tracking busy bits: 0x%08x\n", - dev_priv->fb_tracking.busy_bits); - - seq_printf(m, "FB tracking flip bits: 0x%08x\n", - dev_priv->fb_tracking.flip_bits); - - return 0; -} - -static int i915_fbc_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_fbc *fbc = &dev_priv->fbc; - intel_wakeref_t wakeref; - - if (!HAS_FBC(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&fbc->lock); - - if (intel_fbc_is_active(dev_priv)) - seq_puts(m, "FBC enabled\n"); - else - seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - - if (intel_fbc_is_active(dev_priv)) { - u32 mask; - - if (INTEL_GEN(dev_priv) >= 8) - mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; - else if (INTEL_GEN(dev_priv) >= 7) - mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; - else if (INTEL_GEN(dev_priv) >= 5) - mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; - else if (IS_G4X(dev_priv)) - mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK; - else - mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING | - FBC_STAT_COMPRESSED); - - seq_printf(m, "Compressing: %s\n", yesno(mask)); - } - - mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_fbc_false_color_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - *val = dev_priv->fbc.false_color; - - return 0; -} - -static int i915_fbc_false_color_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - u32 reg; - - if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - mutex_lock(&dev_priv->fbc.lock); - - reg = I915_READ(ILK_DPFC_CONTROL); - dev_priv->fbc.false_color = val; - - I915_WRITE(ILK_DPFC_CONTROL, val ? 
- (reg | FBC_CTL_FALSE_COLOR) : - (reg & ~FBC_CTL_FALSE_COLOR)); - - mutex_unlock(&dev_priv->fbc.lock); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, - i915_fbc_false_color_get, i915_fbc_false_color_set, - "%llu\n"); - -static int i915_ips_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - - if (!HAS_IPS(dev_priv)) - return -ENODEV; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - seq_printf(m, "Enabled by kernel parameter: %s\n", - yesno(i915_modparams.enable_ips)); - - if (INTEL_GEN(dev_priv) >= 8) { - seq_puts(m, "Currently: unknown\n"); - } else { - if (I915_READ(IPS_CTL) & IPS_ENABLE) - seq_puts(m, "Currently: enabled\n"); - else - seq_puts(m, "Currently: disabled\n"); - } - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_sr_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - bool sr_enabled = false; - - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - - if (INTEL_GEN(dev_priv) >= 9) - /* no global SR status; inspect per-plane WM */; - else if (HAS_PCH_SPLIT(dev_priv)) - sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; - else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || - IS_I945G(dev_priv) || IS_I945GM(dev_priv)) - sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; - else if (IS_I915GM(dev_priv)) - sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; - else if (IS_PINEVIEW(dev_priv)) - sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); - - seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); - - return 0; -} - static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1406,70 +1037,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) return 0; } -static int i915_opregion(struct seq_file *m, void *unused) -{ - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; - - if (opregion->header) - seq_write(m, opregion->header, OPREGION_SIZE); - - return 0; -} - -static int i915_vbt(struct seq_file *m, void *unused) -{ - struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; - - if (opregion->vbt) - seq_write(m, opregion->vbt, opregion->vbt_size); - - return 0; -} - -static int i915_gem_framebuffer_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_framebuffer *fbdev_fb = NULL; - struct drm_framebuffer *drm_fb; - -#ifdef CONFIG_DRM_FBDEV_EMULATION - if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { - fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); - - seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fbdev_fb->base.width, - fbdev_fb->base.height, - fbdev_fb->base.format->depth, - fbdev_fb->base.format->cpp[0] * 8, - fbdev_fb->base.modifier, - drm_framebuffer_read_refcount(&fbdev_fb->base)); - describe_obj(m, intel_fb_obj(&fbdev_fb->base)); - seq_putc(m, '\n'); - } -#endif - - mutex_lock(&dev->mode_config.fb_lock); - drm_for_each_fb(drm_fb, dev) { - struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); - if (fb == 
fbdev_fb) - continue; - - seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", - fb->base.width, - fb->base.height, - fb->base.format->depth, - fb->base.format->cpp[0] * 8, - fb->base.modifier, - drm_framebuffer_read_refcount(&fb->base)); - describe_obj(m, intel_fb_obj(&fb->base)); - seq_putc(m, '\n'); - } - mutex_unlock(&dev->mode_config.fb_lock); - - return 0; -} - static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) { seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)", @@ -1515,7 +1082,7 @@ static int i915_context_status(struct seq_file *m, void *unused) if (intel_context_pin_if_active(ce)) { seq_printf(m, "%s: ", ce->engine->name); if (ce->state) - describe_obj(m, ce->state->obj); + i915_debugfs_describe_obj(m, ce->state->obj); describe_ctx_ring(m, ce->ring); seq_putc(m, '\n'); intel_context_unpin(ce); @@ -1752,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type) return ""; } -static void i915_guc_log_info(struct seq_file *m, - struct drm_i915_private *dev_priv) +static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log) { - struct intel_guc_log *log = &dev_priv->gt.uc.guc.log; enum guc_log_buffer_type type; if (!intel_guc_log_relay_created(log)) { @@ -1779,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m, static int i915_guc_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct intel_uc *uc = &dev_priv->gt.uc; - if (!USES_GUC(dev_priv)) + if (!intel_uc_uses_guc(uc)) return -ENODEV; - i915_guc_log_info(m, dev_priv); + i915_guc_log_info(m, &uc->guc.log); /* Add more as required ... */ @@ -1793,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data) static int i915_guc_stage_pool(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->gt.uc.guc; - struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; + struct intel_uc *uc = &dev_priv->gt.uc; + struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr; int index; - if (!USES_GUC_SUBMISSION(dev_priv)) + if (!intel_uc_uses_guc_submission(uc)) return -ENODEV; for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { @@ -1884,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) static int i915_guc_log_level_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; + struct intel_uc *uc = &dev_priv->gt.uc; - if (!USES_GUC(dev_priv)) + if (!intel_uc_uses_guc(uc)) return -ENODEV; - *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log); + *val = intel_guc_log_get_level(&uc->guc.log); return 0; } @@ -1896,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val) static int i915_guc_log_level_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; + struct intel_uc *uc = &dev_priv->gt.uc; - if (!USES_GUC(dev_priv)) + if (!intel_uc_uses_guc(uc)) return -ENODEV; - return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val); + return intel_guc_log_set_level(&uc->guc.log, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, @@ -1913,7 +1481,7 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file) struct intel_guc *guc = &i915->gt.uc.guc; struct intel_guc_log *log = &guc->log; - if (!intel_guc_is_running(guc)) + if (!intel_guc_is_ready(guc)) return -ENODEV; file->private_data = log; @@ -1963,253 +1531,6 @@ static const struct file_operations i915_guc_log_relay_fops = 
{ .release = i915_guc_log_relay_release, }; -static int i915_psr_sink_status_show(struct seq_file *m, void *data) -{ - u8 val; - static const char * const sink_status[] = { - "inactive", - "transition to active, capture and display", - "active, display from RFB", - "active, capture and display on sink device timings", - "transition to inactive, capture and display, timing re-sync", - "reserved", - "reserved", - "sink internal error", - }; - struct drm_connector *connector = m->private; - struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_dp *intel_dp = - enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); - int ret; - - if (!CAN_PSR(dev_priv)) { - seq_puts(m, "PSR Unsupported\n"); - return -ENODEV; - } - - if (connector->status != connector_status_connected) - return -ENODEV; - - ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); - - if (ret == 1) { - const char *str = "unknown"; - - val &= DP_PSR_SINK_STATE_MASK; - if (val < ARRAY_SIZE(sink_status)) - str = sink_status[val]; - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); - } else { - return ret; - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); - -static void -psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) -{ - u32 val, status_val; - const char *status = "unknown"; - - if (dev_priv->psr.psr2_enabled) { - static const char * const live_status[] = { - "IDLE", - "CAPTURE", - "CAPTURE_FS", - "SLEEP", - "BUFON_FW", - "ML_UP", - "SU_STANDBY", - "FAST_SLEEP", - "DEEP_SLEEP", - "BUF_ON", - "TG_ON" - }; - val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder)); - status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; - if (status_val < ARRAY_SIZE(live_status)) - status = live_status[status_val]; - } else { - static const char * const live_status[] = { - "IDLE", - "SRDONACK", - "SRDENT", - "BUFOFF", - "BUFON", - "AUXACK", - "SRDOFFACK", - "SRDENT_ON", - }; - val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder)); - status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> - EDP_PSR_STATUS_STATE_SHIFT; - if (status_val < ARRAY_SIZE(live_status)) - status = live_status[status_val]; - } - - seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); -} - -static int i915_edp_psr_status(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_psr *psr = &dev_priv->psr; - intel_wakeref_t wakeref; - const char *status; - bool enabled; - u32 val; - - if (!HAS_PSR(dev_priv)) - return -ENODEV; - - seq_printf(m, "Sink support: %s", yesno(psr->sink_support)); - if (psr->dp) - seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]); - seq_puts(m, "\n"); - - if (!psr->sink_support) - return 0; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&psr->lock); - - if (psr->enabled) - status = psr->psr2_enabled ? 
"PSR2 enabled" : "PSR1 enabled"; - else - status = "disabled"; - seq_printf(m, "PSR mode: %s\n", status); - - if (!psr->enabled) { - seq_printf(m, "PSR sink not reliable: %s\n", - yesno(psr->sink_not_reliable)); - - goto unlock; - } - - if (psr->psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); - enabled = val & EDP_PSR2_ENABLE; - } else { - val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); - enabled = val & EDP_PSR_ENABLE; - } - seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", - enableddisabled(enabled), val); - psr_source_status(dev_priv, m); - seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", - psr->busy_frontbuffer_bits); - - /* - * SKL+ Perf counter is reset to 0 everytime DC state is entered - */ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); - val &= EDP_PSR_PERF_CNT_MASK; - seq_printf(m, "Performance counter: %u\n", val); - } - - if (psr->debug & I915_PSR_DEBUG_IRQ) { - seq_printf(m, "Last attempted entry at: %lld\n", - psr->last_entry_attempt); - seq_printf(m, "Last exit at: %lld\n", psr->last_exit); - } - - if (psr->psr2_enabled) { - u32 su_frames_val[3]; - int frame; - - /* - * Reading all 3 registers before hand to minimize crossing a - * frame boundary between register reads - */ - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { - val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder, - frame)); - su_frames_val[frame / 3] = val; - } - - seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); - - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { - u32 su_blocks; - - su_blocks = su_frames_val[frame / 3] & - PSR2_SU_STATUS_MASK(frame); - su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); - seq_printf(m, "%d\t%d\n", frame, su_blocks); - } - } - -unlock: - mutex_unlock(&psr->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int -i915_edp_psr_debug_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - intel_wakeref_t wakeref; - int ret; - - if (!CAN_PSR(dev_priv)) - return -ENODEV; - - DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - ret = intel_psr_debug_set(dev_priv, val); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return ret; -} - -static int -i915_edp_psr_debug_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - - if (!CAN_PSR(dev_priv)) - return -ENODEV; - - *val = READ_ONCE(dev_priv->psr.debug); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, - i915_edp_psr_debug_get, i915_edp_psr_debug_set, - "%llu\n"); - -static int i915_energy_uJ(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - unsigned long long power; - intel_wakeref_t wakeref; - u32 units; - - if (INTEL_GEN(dev_priv) < 6) - return -ENODEV; - - if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) - return -ENODEV; - - units = (power & 0x1f00) >> 8; - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - power = I915_READ(MCH_SECP_NRG_STTS); - - power = (1000000 * power) >> units; /* convert to uJ */ - seq_printf(m, "%llu", power); - - return 0; -} - static int i915_runtime_pm_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -2243,452 +1564,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) return 0; } -static int i915_power_domain_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private 
*dev_priv = node_to_i915(m->private); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - mutex_lock(&power_domains->lock); - - seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; - enum intel_display_power_domain power_domain; - - power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->desc->name, - power_well->count); - - for_each_power_domain(power_domain, power_well->desc->domains) - seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(power_domain), - power_domains->domain_use_count[power_domain]); - } - - mutex_unlock(&power_domains->lock); - - return 0; -} - -static int i915_dmc_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - intel_wakeref_t wakeref; - struct intel_csr *csr; - i915_reg_t dc5_reg, dc6_reg = {}; - - if (!HAS_CSR(dev_priv)) - return -ENODEV; - - csr = &dev_priv->csr; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); - seq_printf(m, "path: %s\n", csr->fw_path); - - if (!csr->dmc_payload) - goto out; - - seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), - CSR_VERSION_MINOR(csr->version)); - - if (INTEL_GEN(dev_priv) >= 12) { - dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; - dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; - /* - * NOTE: DMC_DEBUG3 is a general purpose reg. - * According to B.Specs:49196 DMC f/w reuses DC5/6 counter - * reg for DC3CO debugging and validation, - * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. - */ - seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3)); - } else { - dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : - SKL_CSR_DC3_DC5_COUNT; - if (!IS_GEN9_LP(dev_priv)) - dc6_reg = SKL_CSR_DC5_DC6_COUNT; - } - - seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg)); - if (dc6_reg.reg) - seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg)); - -out: - seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); - seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); - seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static void intel_seq_print_mode(struct seq_file *m, int tabs, - const struct drm_display_mode *mode) -{ - int i; - - for (i = 0; i < tabs; i++) - seq_putc(m, '\t'); - - seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); -} - -static void intel_encoder_info(struct seq_file *m, - struct intel_crtc *crtc, - struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_connector_list_iter conn_iter; - struct drm_connector *connector; - - seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n", - encoder->base.base.id, encoder->base.name); - - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - const struct drm_connector_state *conn_state = - connector->state; - - if (conn_state->best_encoder != &encoder->base) - continue; - - seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - } - drm_connector_list_iter_end(&conn_iter); -} - -static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) -{ - const struct drm_display_mode *mode = panel->fixed_mode; - - seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); -} - -static void intel_hdcp_info(struct seq_file *m, - struct intel_connector *intel_connector) -{ - bool hdcp_cap, hdcp2_cap; - - hdcp_cap = intel_hdcp_capable(intel_connector); - hdcp2_cap = intel_hdcp2_capable(intel_connector); - - if (hdcp_cap) - seq_puts(m, "HDCP1.4 "); - if (hdcp2_cap) - seq_puts(m, "HDCP2.2 "); - - if (!hdcp_cap && !hdcp2_cap) - seq_puts(m, "None"); - - seq_puts(m, "\n"); -} - -static void intel_dp_info(struct seq_file *m, - struct intel_connector *intel_connector) -{ - struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); - - seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); - seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); - if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) - intel_panel_info(m, &intel_connector->panel); - - drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, - &intel_dp->aux); - if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } -} - -static void intel_dp_mst_info(struct seq_file *m, - struct intel_connector *intel_connector) -{ - struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_dp_mst_encoder *intel_mst = - enc_to_mst(intel_encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; - bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, - intel_connector->port); - - seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); -} - -static void intel_hdmi_info(struct seq_file *m, - struct intel_connector *intel_connector) -{ - struct intel_encoder *intel_encoder = intel_connector->encoder; - struct intel_hdmi *intel_hdmi = 
enc_to_intel_hdmi(intel_encoder); - - seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); - if (intel_connector->hdcp.shim) { - seq_puts(m, "\tHDCP version: "); - intel_hdcp_info(m, intel_connector); - } -} - -static void intel_lvds_info(struct seq_file *m, - struct intel_connector *intel_connector) -{ - intel_panel_info(m, &intel_connector->panel); -} - -static void intel_connector_info(struct seq_file *m, - struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - const struct drm_connector_state *conn_state = connector->state; - struct intel_encoder *encoder = - to_intel_encoder(conn_state->best_encoder); - const struct drm_display_mode *mode; - - seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n", - connector->base.id, connector->name, - drm_get_connector_status_name(connector->status)); - - if (connector->status == connector_status_disconnected) - return; - - seq_printf(m, "\tphysical dimensions: %dx%dmm\n", - connector->display_info.width_mm, - connector->display_info.height_mm); - seq_printf(m, "\tsubpixel order: %s\n", - drm_get_subpixel_order_name(connector->display_info.subpixel_order)); - seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); - - if (!encoder) - return; - - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_eDP: - if (encoder->type == INTEL_OUTPUT_DP_MST) - intel_dp_mst_info(m, intel_connector); - else - intel_dp_info(m, intel_connector); - break; - case DRM_MODE_CONNECTOR_LVDS: - if (encoder->type == INTEL_OUTPUT_LVDS) - intel_lvds_info(m, intel_connector); - break; - case DRM_MODE_CONNECTOR_HDMIA: - if (encoder->type == INTEL_OUTPUT_HDMI || - encoder->type == INTEL_OUTPUT_DDI) - intel_hdmi_info(m, intel_connector); - break; - default: - break; - } - - seq_printf(m, "\tmodes:\n"); - list_for_each_entry(mode, &connector->modes, head) - intel_seq_print_mode(m, 2, mode); -} - -static const char *plane_type(enum drm_plane_type type) -{ - switch (type) { - case DRM_PLANE_TYPE_OVERLAY: - return "OVL"; - case DRM_PLANE_TYPE_PRIMARY: - return "PRI"; - case DRM_PLANE_TYPE_CURSOR: - return "CUR"; - /* - * Deliberately omitting default: to generate compiler warnings - * when a new drm_plane_type gets added. - */ - } - - return "unknown"; -} - -static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation) -{ - /* - * According to doc only one DRM_MODE_ROTATE_ is allowed but this - * will print them all to visualize if the values are misused - */ - snprintf(buf, bufsize, - "%s%s%s%s%s%s(0x%08x)", - (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", - (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", - (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", - (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", - (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", - (rotation & DRM_MODE_REFLECT_Y) ? 
"FLIPY " : "", - rotation); -} - -static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane) -{ - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - const struct drm_framebuffer *fb = plane_state->uapi.fb; - struct drm_format_name_buf format_name; - struct drm_rect src, dst; - char rot_str[48]; - - src = drm_plane_state_src(&plane_state->uapi); - dst = drm_plane_state_dest(&plane_state->uapi); - - if (fb) - drm_get_format_name(fb->format->format, &format_name); - - plane_rotation(rot_str, sizeof(rot_str), - plane_state->uapi.rotation); - - seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", - fb ? fb->base.id : 0, fb ? format_name.str : "n/a", - fb ? fb->width : 0, fb ? fb->height : 0, - DRM_RECT_FP_ARG(&src), - DRM_RECT_ARG(&dst), - rot_str); -} - -static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane) -{ - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_format_name_buf format_name; - char rot_str[48]; - - if (!fb) - return; - - drm_get_format_name(fb->format->format, &format_name); - - plane_rotation(rot_str, sizeof(rot_str), - plane_state->hw.rotation); - - seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n", - fb->base.id, format_name.str, - fb->width, fb->height, - yesno(plane_state->uapi.visible), - DRM_RECT_FP_ARG(&plane_state->uapi.src), - DRM_RECT_ARG(&plane_state->uapi.dst), - rot_str); -} - -static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_plane *plane; - - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { - seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n", - plane->base.base.id, plane->base.name, - plane_type(plane->base.type)); - intel_plane_uapi_info(m, plane); - intel_plane_hw_info(m, plane); - } -} - -static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) -{ - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - int num_scalers = crtc->num_scalers; - int i; - - /* Not all platformas have a scaler */ - if (num_scalers) { - seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", - num_scalers, - crtc_state->scaler_state.scaler_users, - crtc_state->scaler_state.scaler_id); - - for (i = 0; i < num_scalers; i++) { - const struct intel_scaler *sc = - &crtc_state->scaler_state.scalers[i]; - - seq_printf(m, ", scalers[%d]: use=%s, mode=%x", - i, yesno(sc->in_use), sc->mode); - } - seq_puts(m, "\n"); - } else { - seq_puts(m, "\tNo scalers available on this platform\n"); - } -} - -static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_encoder *encoder; - - seq_printf(m, "[CRTC:%d:%s]:\n", - crtc->base.base.id, crtc->base.name); - - seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n", - yesno(crtc_state->uapi.enable), - yesno(crtc_state->uapi.active), - DRM_MODE_ARG(&crtc_state->uapi.mode)); - - if (crtc_state->hw.enable) { - seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n", - yesno(crtc_state->hw.active), - DRM_MODE_ARG(&crtc_state->hw.adjusted_mode)); - - seq_printf(m, "\tpipe src 
size=%dx%d, dither=%s, bpp=%d\n", - crtc_state->pipe_src_w, crtc_state->pipe_src_h, - yesno(crtc_state->dither), crtc_state->pipe_bpp); - - intel_scaler_info(m, crtc); - } - - for_each_intel_encoder_mask(&dev_priv->drm, encoder, - crtc_state->uapi.encoder_mask) - intel_encoder_info(m, crtc, encoder); - - intel_plane_info(m, crtc); - - seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", - yesno(!crtc->cpu_fifo_underrun_disabled), - yesno(!crtc->pch_fifo_underrun_disabled)); -} - -static int i915_display_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - intel_wakeref_t wakeref; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - drm_modeset_lock_all(dev); - - seq_printf(m, "CRTC info\n"); - seq_printf(m, "---------\n"); - for_each_intel_crtc(dev, crtc) - intel_crtc_info(m, crtc); - - seq_printf(m, "\n"); - seq_printf(m, "Connector info\n"); - seq_printf(m, "--------------\n"); - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) - intel_connector_info(m, connector); - drm_connector_list_iter_end(&conn_iter); - - drm_modeset_unlock_all(dev); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - static int i915_engine_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -2733,55 +1608,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused) return 0; } -static int i915_shared_dplls_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - int i; - - drm_modeset_lock_all(dev); - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - - seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name, - pll->info->id); - seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", - pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); - seq_printf(m, " tracked hardware state:\n"); - seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); - seq_printf(m, " dpll_md: 0x%08x\n", - pll->state.hw_state.dpll_md); - seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); - seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); - seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); - seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0); - seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1); - seq_printf(m, " mg_refclkin_ctl: 0x%08x\n", - pll->state.hw_state.mg_refclkin_ctl); - seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n", - pll->state.hw_state.mg_clktop2_coreclkctl1); - seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n", - pll->state.hw_state.mg_clktop2_hsclkctl); - seq_printf(m, " mg_pll_div0: 0x%08x\n", - pll->state.hw_state.mg_pll_div0); - seq_printf(m, " mg_pll_div1: 0x%08x\n", - pll->state.hw_state.mg_pll_div1); - seq_printf(m, " mg_pll_lf: 0x%08x\n", - pll->state.hw_state.mg_pll_lf); - seq_printf(m, " mg_pll_frac_lock: 0x%08x\n", - pll->state.hw_state.mg_pll_frac_lock); - seq_printf(m, " mg_pll_ssc: 0x%08x\n", - pll->state.hw_state.mg_pll_ssc); - seq_printf(m, " mg_pll_bias: 0x%08x\n", - pll->state.hw_state.mg_pll_bias); - seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n", - pll->state.hw_state.mg_pll_tdc_coldst_bias); - } - drm_modeset_unlock_all(dev); - - return 
0; -} - static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -2802,7 +1628,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) for (wa = wal->list; count--; wa++) seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", i915_mmio_reg_offset(wa->reg), - wa->val, wa->mask); + wa->set, wa->clr); seq_printf(m, "\n"); } @@ -2810,646 +1636,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused) return 0; } -static int i915_ipc_status_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - - seq_printf(m, "Isochronous Priority Control: %s\n", - yesno(dev_priv->ipc_enabled)); - return 0; -} - -static int i915_ipc_status_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (!HAS_IPC(dev_priv)) - return -ENODEV; - - return single_open(file, i915_ipc_status_show, dev_priv); -} - -static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - intel_wakeref_t wakeref; - bool enable; - int ret; - - ret = kstrtobool_from_user(ubuf, len, &enable); - if (ret < 0) - return ret; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - if (!dev_priv->ipc_enabled && enable) - DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); - dev_priv->wm.distrust_bios_wm = true; - dev_priv->ipc_enabled = enable; - intel_enable_ipc(dev_priv); - } - - return len; -} - -static const struct file_operations i915_ipc_status_fops = { - .owner = THIS_MODULE, - .open = i915_ipc_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_ipc_status_write -}; - -static int i915_ddb_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct skl_ddb_entry *entry; - struct intel_crtc *crtc; - - if (INTEL_GEN(dev_priv) < 9) - return -ENODEV; - - drm_modeset_lock_all(dev); - - seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - - seq_printf(m, "Pipe %c\n", pipe_name(pipe)); - - for_each_plane_id_on_crtc(crtc, plane_id) { - entry = &crtc_state->wm.skl.plane_ddb_y[plane_id]; - seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1, - entry->start, entry->end, - skl_ddb_entry_size(entry)); - } - - entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; - seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, - entry->end, skl_ddb_entry_size(entry)); - } - - drm_modeset_unlock_all(dev); - - return 0; -} - -static void drrs_status_per_crtc(struct seq_file *m, - struct drm_device *dev, - struct intel_crtc *intel_crtc) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_drrs *drrs = &dev_priv->drrs; - int vrefresh = 0; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->state->crtc != &intel_crtc->base) - continue; - - seq_printf(m, "%s:\n", connector->name); - } - drm_connector_list_iter_end(&conn_iter); - - if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) - seq_puts(m, "\tVBT: DRRS_type: Static"); - 
else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) - seq_puts(m, "\tVBT: DRRS_type: Seamless"); - else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) - seq_puts(m, "\tVBT: DRRS_type: None"); - else - seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); - - seq_puts(m, "\n\n"); - - if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { - struct intel_panel *panel; - - mutex_lock(&drrs->mutex); - /* DRRS Supported */ - seq_puts(m, "\tDRRS Supported: Yes\n"); - - /* disable_drrs() will make drrs->dp NULL */ - if (!drrs->dp) { - seq_puts(m, "Idleness DRRS: Disabled\n"); - if (dev_priv->psr.enabled) - seq_puts(m, - "\tAs PSR is enabled, DRRS is not enabled\n"); - mutex_unlock(&drrs->mutex); - return; - } - - panel = &drrs->dp->attached_connector->panel; - seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", - drrs->busy_frontbuffer_bits); - - seq_puts(m, "\n\t\t"); - if (drrs->refresh_rate_type == DRRS_HIGH_RR) { - seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); - vrefresh = panel->fixed_mode->vrefresh; - } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { - seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); - vrefresh = panel->downclock_mode->vrefresh; - } else { - seq_printf(m, "DRRS_State: Unknown(%d)\n", - drrs->refresh_rate_type); - mutex_unlock(&drrs->mutex); - return; - } - seq_printf(m, "\t\tVrefresh: %d", vrefresh); - - seq_puts(m, "\n\t\t"); - mutex_unlock(&drrs->mutex); - } else { - /* DRRS not supported. Print the VBT parameter*/ - seq_puts(m, "\tDRRS Supported : No"); - } - seq_puts(m, "\n"); -} - -static int i915_drrs_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *intel_crtc; - int active_crtc_cnt = 0; - - drm_modeset_lock_all(dev); - for_each_intel_crtc(dev, intel_crtc) { - if (intel_crtc->base.state->active) { - active_crtc_cnt++; - seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); - - drrs_status_per_crtc(m, dev, intel_crtc); - } - } - drm_modeset_unlock_all(dev); - - if (!active_crtc_cnt) - seq_puts(m, "No active crtc found\n"); - - return 0; -} - -static int i915_dp_mst_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_encoder *intel_encoder; - struct intel_digital_port *intel_dig_port; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) - continue; - - intel_encoder = intel_attached_encoder(to_intel_connector(connector)); - if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - intel_dig_port = enc_to_dig_port(intel_encoder); - if (!intel_dig_port->dp.can_mst) - continue; - - seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); - drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} - -static ssize_t i915_displayport_test_active_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - char *input_buffer; - int status = 0; - struct drm_device *dev; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - int val = 0; - - dev = ((struct seq_file *)file->private_data)->private; - - if (len == 0) - 
return 0; - - input_buffer = memdup_user_nul(ubuf, len); - if (IS_ERR(input_buffer)) - return PTR_ERR(input_buffer); - - DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - status = kstrtoint(input_buffer, 10, &val); - if (status < 0) - break; - DRM_DEBUG_DRIVER("Got %d for test active\n", val); - /* To prevent erroneous activation of the compliance - * testing code, only accept an actual value of 1 here - */ - if (val == 1) - intel_dp->compliance.test_active = true; - else - intel_dp->compliance.test_active = false; - } - } - drm_connector_list_iter_end(&conn_iter); - kfree(input_buffer); - if (status < 0) - return status; - - *offp += len; - return len; -} - -static int i915_displayport_test_active_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_device *dev = &dev_priv->drm; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_active) - seq_puts(m, "1"); - else - seq_puts(m, "0"); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} - -static int i915_displayport_test_active_open(struct inode *inode, - struct file *file) -{ - return single_open(file, i915_displayport_test_active_show, - inode->i_private); -} - -static const struct file_operations i915_displayport_test_active_fops = { - .owner = THIS_MODULE, - .open = i915_displayport_test_active_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_displayport_test_active_write -}; - -static int i915_displayport_test_data_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_device *dev = &dev_priv->drm; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->compliance.test_type == - DP_TEST_LINK_EDID_READ) - seq_printf(m, "%lx", - intel_dp->compliance.test_data.edid); - else if (intel_dp->compliance.test_type == - DP_TEST_LINK_VIDEO_PATTERN) { - seq_printf(m, "hdisplay: %d\n", - intel_dp->compliance.test_data.hdisplay); - seq_printf(m, "vdisplay: %d\n", - 
intel_dp->compliance.test_data.vdisplay); - seq_printf(m, "bpc: %u\n", - intel_dp->compliance.test_data.bpc); - } - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data); - -static int i915_displayport_test_type_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_device *dev = &dev_priv->drm; - struct drm_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp; - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - - if (connector->connector_type != - DRM_MODE_CONNECTOR_DisplayPort) - continue; - - encoder = to_intel_encoder(connector->encoder); - if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) - continue; - - if (encoder && connector->status == connector_status_connected) { - intel_dp = enc_to_intel_dp(encoder); - seq_printf(m, "%02lx", intel_dp->compliance.test_type); - } else - seq_puts(m, "0"); - } - drm_connector_list_iter_end(&conn_iter); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); - -static void wm_latency_show(struct seq_file *m, const u16 wm[8]) -{ - struct drm_i915_private *dev_priv = m->private; - struct drm_device *dev = &dev_priv->drm; - int level; - int num_levels; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - drm_modeset_lock_all(dev); - - for (level = 0; level < num_levels; level++) { - unsigned int latency = wm[level]; - - /* - * - WM1+ latency values in 0.5us units - * - latencies are in us on gen9/vlv/chv - */ - if (INTEL_GEN(dev_priv) >= 9 || - IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_G4X(dev_priv)) - latency *= 10; - else if (level > 0) - latency *= 5; - - seq_printf(m, "WM%d %u (%u.%u usec)\n", - level, wm[level], latency / 10, latency % 10); - } - - drm_modeset_unlock_all(dev); -} - -static int pri_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.pri_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int spr_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.spr_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int cur_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.cur_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int pri_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - return -ENODEV; - - return single_open(file, pri_wm_latency_show, dev_priv); -} - -static int spr_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, spr_wm_latency_show, 
dev_priv); -} - -static int cur_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, cur_wm_latency_show, dev_priv); -} - -static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp, u16 wm[8]) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct drm_device *dev = &dev_priv->drm; - u16 new[8] = { 0 }; - int num_levels; - int level; - int ret; - char tmp[32]; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - if (len >= sizeof(tmp)) - return -EINVAL; - - if (copy_from_user(tmp, ubuf, len)) - return -EFAULT; - - tmp[len] = '\0'; - - ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", - &new[0], &new[1], &new[2], &new[3], - &new[4], &new[5], &new[6], &new[7]); - if (ret != num_levels) - return -EINVAL; - - drm_modeset_lock_all(dev); - - for (level = 0; level < num_levels; level++) - wm[level] = new[level]; - - drm_modeset_unlock_all(dev); - - return len; -} - - -static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.pri_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.spr_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (INTEL_GEN(dev_priv) >= 9) - latencies = dev_priv->wm.skl_latency; - else - latencies = dev_priv->wm.cur_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static const struct file_operations i915_pri_wm_latency_fops = { - .owner = THIS_MODULE, - .open = pri_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = pri_wm_latency_write -}; - -static const struct file_operations i915_spr_wm_latency_fops = { - .owner = THIS_MODULE, - .open = spr_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = spr_wm_latency_write -}; - -static const struct file_operations i915_cur_wm_latency_fops = { - .owner = THIS_MODULE, - .open = cur_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = cur_wm_latency_write -}; - static int i915_wedged_get(void *data, u64 *val) { @@ -3641,7 +1827,8 @@ i915_cache_sharing_set(void *data, u64 val) if (val > 3) return -EINVAL; - DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); + drm_dbg(&dev_priv->drm, + "Manually setting uncore sharing to %llu\n", val); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 snpcr; 
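
The hunk above is part of the tree-wide conversion from the global DRM_ERROR()/DRM_DEBUG_DRIVER() macros to the device-aware drm_err()/drm_dbg() helpers, which prefix each message with the emitting drm_device so that output on multi-GPU systems can be attributed. A minimal sketch of the idiom, assuming a caller that already holds a struct drm_i915_private pointer; the helper name set_sharing_level() is hypothetical, while drm_err()/drm_dbg(), the &i915->drm first argument, and the val > 3 validity check mirror what the real hunks in this series use:

	/* Sketch only: set_sharing_level() is a made-up helper; the
	 * drm_err()/drm_dbg() calls and &i915->drm argument follow the
	 * pattern applied throughout this diff.
	 */
	#include <drm/drm_print.h>
	#include "i915_drv.h"

	static void set_sharing_level(struct drm_i915_private *i915, u64 val)
	{
		if (val > 3) {
			/* tied to the owning device, unlike the old DRM_ERROR() */
			drm_err(&i915->drm, "invalid cache sharing level %llu\n", val);
			return;
		}
		drm_dbg(&i915->drm, "setting cache sharing level to %llu\n", val);
	}
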
@@ -3947,292 +2134,6 @@ static const struct file_operations i915_forcewake_fops = { .release = i915_forcewake_release, }; -static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; - - /* Synchronize with everything first in case there's been an HPD - * storm, but we haven't finished handling it in the kernel yet - */ - intel_synchronize_irq(dev_priv); - flush_work(&dev_priv->hotplug.dig_port_work); - flush_delayed_work(&dev_priv->hotplug.hotplug_work); - - seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); - seq_printf(m, "Detected: %s\n", - yesno(delayed_work_pending(&hotplug->reenable_work))); - - return 0; -} - -static ssize_t i915_hpd_storm_ctl_write(struct file *file, - const char __user *ubuf, size_t len, - loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; - unsigned int new_threshold; - int i; - char *newline; - char tmp[16]; - - if (len >= sizeof(tmp)) - return -EINVAL; - - if (copy_from_user(tmp, ubuf, len)) - return -EFAULT; - - tmp[len] = '\0'; - - /* Strip newline, if any */ - newline = strchr(tmp, '\n'); - if (newline) - *newline = '\0'; - - if (strcmp(tmp, "reset") == 0) - new_threshold = HPD_STORM_DEFAULT_THRESHOLD; - else if (kstrtouint(tmp, 10, &new_threshold) != 0) - return -EINVAL; - - if (new_threshold > 0) - DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n", - new_threshold); - else - DRM_DEBUG_KMS("Disabling HPD storm detection\n"); - - spin_lock_irq(&dev_priv->irq_lock); - hotplug->hpd_storm_threshold = new_threshold; - /* Reset the HPD storm stats so we don't accidentally trigger a storm */ - for_each_hpd_pin(i) - hotplug->stats[i].count = 0; - spin_unlock_irq(&dev_priv->irq_lock); - - /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); - - return len; -} - -static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) -{ - return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); -} - -static const struct file_operations i915_hpd_storm_ctl_fops = { - .owner = THIS_MODULE, - .open = i915_hpd_storm_ctl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_hpd_storm_ctl_write -}; - -static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - - seq_printf(m, "Enabled: %s\n", - yesno(dev_priv->hotplug.hpd_short_storm_enabled)); - - return 0; -} - -static int -i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) -{ - return single_open(file, i915_hpd_short_storm_ctl_show, - inode->i_private); -} - -static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct i915_hotplug *hotplug = &dev_priv->hotplug; - char *newline; - char tmp[16]; - int i; - bool new_state; - - if (len >= sizeof(tmp)) - return -EINVAL; - - if (copy_from_user(tmp, ubuf, len)) - return -EFAULT; - - tmp[len] = '\0'; - - /* Strip newline, if any */ - newline = strchr(tmp, '\n'); - if (newline) - *newline = '\0'; - - /* Reset to the "default" state for this system */ - if (strcmp(tmp, "reset") == 0) - new_state = !HAS_DP_MST(dev_priv); - else if (kstrtobool(tmp, &new_state) != 0) - return 
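The HPD storm control above accepts either the literal "reset" or a decimal threshold, stripping one trailing newline first. A userspace restatement of that parse, with strtoul() standing in for kstrtouint(); the default of 50 is this era's HPD_STORM_DEFAULT_THRESHOLD quoted from memory, so treat it as illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HPD_STORM_DEFAULT_THRESHOLD 50  /* driver default, from memory */

static int parse_hpd_threshold(char *tmp, unsigned int *out)
{
        char *end, *newline = strchr(tmp, '\n');
        unsigned long val;

        if (newline)
                *newline = '\0';        /* strip newline, as the driver does */

        if (strcmp(tmp, "reset") == 0) {
                *out = HPD_STORM_DEFAULT_THRESHOLD;
                return 0;
        }

        val = strtoul(tmp, &end, 10);   /* kstrtouint() stand-in */
        if (end == tmp || *end != '\0')
                return -1;

        *out = val;
        return 0;
}

int main(void)
{
        char buf[] = "reset\n";
        unsigned int threshold;

        if (parse_hpd_threshold(buf, &threshold) == 0)
                printf("threshold = %u\n", threshold);
        return 0;
}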
-EINVAL; - - DRM_DEBUG_KMS("%sabling HPD short storm detection\n", - new_state ? "En" : "Dis"); - - spin_lock_irq(&dev_priv->irq_lock); - hotplug->hpd_short_storm_enabled = new_state; - /* Reset the HPD storm stats so we don't accidentally trigger a storm */ - for_each_hpd_pin(i) - hotplug->stats[i].count = 0; - spin_unlock_irq(&dev_priv->irq_lock); - - /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->hotplug.reenable_work); - - return len; -} - -static const struct file_operations i915_hpd_short_storm_ctl_fops = { - .owner = THIS_MODULE, - .open = i915_hpd_short_storm_ctl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_hpd_short_storm_ctl_write, -}; - -static int i915_drrs_ctl_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc; - - if (INTEL_GEN(dev_priv) < 7) - return -ENODEV; - - for_each_intel_crtc(dev, crtc) { - struct drm_connector_list_iter conn_iter; - struct intel_crtc_state *crtc_state; - struct drm_connector *connector; - struct drm_crtc_commit *commit; - int ret; - - ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); - if (ret) - return ret; - - crtc_state = to_intel_crtc_state(crtc->base.state); - - if (!crtc_state->hw.active || - !crtc_state->has_drrs) - goto out; - - commit = crtc_state->uapi.commit; - if (commit) { - ret = wait_for_completion_interruptible(&commit->hw_done); - if (ret) - goto out; - } - - drm_connector_list_iter_begin(dev, &conn_iter); - drm_for_each_connector_iter(connector, &conn_iter) { - struct intel_encoder *encoder; - struct intel_dp *intel_dp; - - if (!(crtc_state->uapi.connector_mask & - drm_connector_mask(connector))) - continue; - - encoder = intel_attached_encoder(to_intel_connector(connector)); - if (encoder->type != INTEL_OUTPUT_EDP) - continue; - - DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n", - val ? 
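i915_drrs_ctl_set() above and the FIFO-underrun reset just below share one KMS idiom: take the per-CRTC lock, let any in-flight commit signal hw_done, and only then act on the state. Reduced to its skeleton (a kernel-style sketch using the driver's own identifiers, not buildable outside the tree):

for_each_intel_crtc(dev, crtc) {
        struct drm_crtc_commit *commit;
        int ret;

        ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
        if (ret)
                return ret;

        /* wait for a pending commit to reach the hardware first */
        commit = to_intel_crtc_state(crtc->base.state)->uapi.commit;
        if (commit)
                ret = wait_for_completion_interruptible(&commit->hw_done);

        if (!ret) {
                /* ... inspect or poke the now-stable CRTC state ... */
        }

        drm_modeset_unlock(&crtc->base.mutex);
        if (ret)
                return ret;
}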
"en" : "dis", val); - - intel_dp = enc_to_intel_dp(encoder); - if (val) - intel_edp_drrs_enable(intel_dp, - crtc_state); - else - intel_edp_drrs_disable(intel_dp, - crtc_state); - } - drm_connector_list_iter_end(&conn_iter); - -out: - drm_modeset_unlock(&crtc->base.mutex); - if (ret) - return ret; - } - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n"); - -static ssize_t -i915_fifo_underrun_reset_write(struct file *filp, - const char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - struct drm_i915_private *dev_priv = filp->private_data; - struct intel_crtc *intel_crtc; - struct drm_device *dev = &dev_priv->drm; - int ret; - bool reset; - - ret = kstrtobool_from_user(ubuf, cnt, &reset); - if (ret) - return ret; - - if (!reset) - return cnt; - - for_each_intel_crtc(dev, intel_crtc) { - struct drm_crtc_commit *commit; - struct intel_crtc_state *crtc_state; - - ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex); - if (ret) - return ret; - - crtc_state = to_intel_crtc_state(intel_crtc->base.state); - commit = crtc_state->uapi.commit; - if (commit) { - ret = wait_for_completion_interruptible(&commit->hw_done); - if (!ret) - ret = wait_for_completion_interruptible(&commit->flip_done); - } - - if (!ret && crtc_state->hw.active) { - DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n", - pipe_name(intel_crtc->pipe)); - - intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state); - } - - drm_modeset_unlock(&intel_crtc->base.mutex); - - if (ret) - return ret; - } - - ret = intel_fbc_reset_underrun(dev_priv); - if (ret) - return ret; - - return cnt; -} - -static const struct file_operations i915_fifo_underrun_reset_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .write = i915_fifo_underrun_reset_write, - .llseek = default_llseek, -}; - static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, @@ -4245,34 +2146,16 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_drpc_info", i915_drpc_info, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, - {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, - {"i915_fbc_status", i915_fbc_status, 0}, - {"i915_ips_status", i915_ips_status, 0}, - {"i915_sr_status", i915_sr_status, 0}, - {"i915_opregion", i915_opregion, 0}, - {"i915_vbt", i915_vbt, 0}, - {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, {"i915_context_status", i915_context_status, 0}, - {"i915_forcewake_domains", i915_forcewake_domains, 0}, {"i915_swizzle_info", i915_swizzle_info, 0}, {"i915_llc", i915_llc, 0}, - {"i915_edp_psr_status", i915_edp_psr_status, 0}, - {"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, - {"i915_power_domain_info", i915_power_domain_info, 0}, - {"i915_dmc_info", i915_dmc_info, 0}, - {"i915_display_info", i915_display_info, 0}, {"i915_engine_info", i915_engine_info, 0}, {"i915_rcs_topology", i915_rcs_topology, 0}, {"i915_shrinker_info", i915_shrinker_info, 0}, - {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, - {"i915_dp_mst_info", i915_dp_mst_info, 0}, {"i915_wa_registers", i915_wa_registers, 0}, - {"i915_ddb_info", i915_ddb_info, 0}, {"i915_sseu_status", i915_sseu_status, 0}, - {"i915_drrs_status", i915_drrs_status, 0}, {"i915_rps_boost_info", i915_rps_boost_info, 0}, }; #define 
I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) @@ -4289,21 +2172,8 @@ static const struct i915_debugfs_files { {"i915_error_state", &i915_error_state_fops}, {"i915_gpu_info", &i915_gpu_info_fops}, #endif - {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, - {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, - {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, - {"i915_fbc_false_color", &i915_fbc_false_color_fops}, - {"i915_dp_test_data", &i915_displayport_test_data_fops}, - {"i915_dp_test_type", &i915_displayport_test_type_fops}, - {"i915_dp_test_active", &i915_displayport_test_active_fops}, {"i915_guc_log_level", &i915_guc_log_level_fops}, {"i915_guc_log_relay", &i915_guc_log_relay_fops}, - {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, - {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, - {"i915_ipc_status", &i915_ipc_status_fops}, - {"i915_drrs_ctl", &i915_drrs_ctl_fops}, - {"i915_edp_psr_debug", &i915_edp_psr_debug_fops} }; int i915_debugfs_register(struct drm_i915_private *dev_priv) @@ -4311,9 +2181,10 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) struct drm_minor *minor = dev_priv->drm.primary; int i; + i915_debugfs_params(dev_priv); + debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, to_i915(minor->dev), &i915_forcewake_fops); - for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR, @@ -4326,254 +2197,3 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) I915_DEBUGFS_ENTRIES, minor->debugfs_root, minor); } - -struct dpcd_block { - /* DPCD dump start address. */ - unsigned int offset; - /* DPCD dump end address, inclusive. If unset, .size will be used. */ - unsigned int end; - /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ - size_t size; - /* Only valid for eDP. */ - bool edp; -}; - -static const struct dpcd_block i915_dpcd_debug[] = { - { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, - { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, - { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, - { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, - { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, - { .offset = DP_SET_POWER }, - { .offset = DP_EDP_DPCD_REV }, - { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, - { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, - { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, -}; - -static int i915_dpcd_show(struct seq_file *m, void *data) -{ - struct drm_connector *connector = m->private; - struct intel_dp *intel_dp = - enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); - u8 buf[16]; - ssize_t err; - int i; - - if (connector->status != connector_status_connected) - return -ENODEV; - - for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { - const struct dpcd_block *b = &i915_dpcd_debug[i]; - size_t size = b->end ? 
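Both registration mechanisms trimmed here are plain debugfs plumbing: drm_info_list entries become read-only seq_file nodes via drm_debugfs_create_files(), and the fops table is walked with debugfs_create_file(). A kernel-style sketch of the fops loop, with a one-entry table standing in for the full list:

static const struct i915_debugfs_files {
        const char *name;
        const struct file_operations *fops;
} files[] = {
        {"i915_error_state", &i915_error_state_fops},
};

for (i = 0; i < ARRAY_SIZE(files); i++)
        debugfs_create_file(files[i].name, S_IRUGO | S_IWUSR,
                            minor->debugfs_root,
                            to_i915(minor->dev), files[i].fops);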
b->end - b->offset + 1 : (b->size ?: 1); - - if (b->edp && - connector->connector_type != DRM_MODE_CONNECTOR_eDP) - continue; - - /* low tech for now */ - if (WARN_ON(size > sizeof(buf))) - continue; - - err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); - if (err < 0) - seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err); - else - seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf); - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_dpcd); - -static int i915_panel_show(struct seq_file *m, void *data) -{ - struct drm_connector *connector = m->private; - struct intel_dp *intel_dp = - enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); - - if (connector->status != connector_status_connected) - return -ENODEV; - - seq_printf(m, "Panel power up delay: %d\n", - intel_dp->panel_power_up_delay); - seq_printf(m, "Panel power down delay: %d\n", - intel_dp->panel_power_down_delay); - seq_printf(m, "Backlight on delay: %d\n", - intel_dp->backlight_on_delay); - seq_printf(m, "Backlight off delay: %d\n", - intel_dp->backlight_off_delay); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_panel); - -static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) -{ - struct drm_connector *connector = m->private; - struct intel_connector *intel_connector = to_intel_connector(connector); - - if (connector->status != connector_status_connected) - return -ENODEV; - - /* HDCP is supported by connector */ - if (!intel_connector->hdcp.shim) - return -EINVAL; - - seq_printf(m, "%s:%d HDCP version: ", connector->name, - connector->base.id); - intel_hdcp_info(m, intel_connector); - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); - -static int i915_dsc_fec_support_show(struct seq_file *m, void *data) -{ - struct drm_connector *connector = m->private; - struct drm_device *dev = connector->dev; - struct drm_crtc *crtc; - struct intel_dp *intel_dp; - struct drm_modeset_acquire_ctx ctx; - struct intel_crtc_state *crtc_state = NULL; - int ret = 0; - bool try_again = false; - - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - - do { - try_again = false; - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, - &ctx); - if (ret) { - if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) { - try_again = true; - continue; - } - break; - } - crtc = connector->state->crtc; - if (connector->status != connector_status_connected || !crtc) { - ret = -ENODEV; - break; - } - ret = drm_modeset_lock(&crtc->mutex, &ctx); - if (ret == -EDEADLK) { - ret = drm_modeset_backoff(&ctx); - if (!ret) { - try_again = true; - continue; - } - break; - } else if (ret) { - break; - } - intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector))); - crtc_state = to_intel_crtc_state(crtc->state); - seq_printf(m, "DSC_Enabled: %s\n", - yesno(crtc_state->dsc.compression_enable)); - seq_printf(m, "DSC_Sink_Support: %s\n", - yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); - seq_printf(m, "Force_DSC_Enable: %s\n", - yesno(intel_dp->force_dsc_en)); - if (!intel_dp_is_edp(intel_dp)) - seq_printf(m, "FEC_Sink_Support: %s\n", - yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable))); - } while (try_again); - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - return ret; -} - -static ssize_t i915_dsc_fec_support_write(struct file *file, - const char __user *ubuf, - size_t len, loff_t *offp) -{ - bool dsc_enable = false; - int ret; - struct drm_connector *connector = - ((struct seq_file *)file->private_data)->private; - struct 
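The dump-size expression completed just above packs three conventions into one line: an inclusive .end, an explicit .size, and a one-byte default via the GNU ?: operator. A runnable restatement using the first entries of the i915_dpcd_debug[] table (DP_RECEIVER_CAP_SIZE is 15 in this era's DP headers):

#include <stddef.h>
#include <stdio.h>

struct dpcd_block {
        unsigned int offset;
        unsigned int end;       /* inclusive; 0 means "use .size" */
        size_t size;            /* 0 means "one byte" */
};

static size_t dpcd_block_size(const struct dpcd_block *b)
{
        return b->end ? b->end - b->offset + 1 : (b->size ?: 1);
}

int main(void)
{
        struct dpcd_block rev = { .offset = 0x000, .size = 15 };
        struct dpcd_block psr = { .offset = 0x070, .end = 0x071 };
        struct dpcd_block pwr = { .offset = 0x600 };

        /* prints "15 2 1" */
        printf("%zu %zu %zu\n", dpcd_block_size(&rev),
               dpcd_block_size(&psr), dpcd_block_size(&pwr));
        return 0;
}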
intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - - if (len == 0) - return 0; - - DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n", - len); - - ret = kstrtobool_from_user(ubuf, len, &dsc_enable); - if (ret < 0) - return ret; - - DRM_DEBUG_DRIVER("Got %s for DSC Enable\n", - (dsc_enable) ? "true" : "false"); - intel_dp->force_dsc_en = dsc_enable; - - *offp += len; - return len; -} - -static int i915_dsc_fec_support_open(struct inode *inode, - struct file *file) -{ - return single_open(file, i915_dsc_fec_support_show, - inode->i_private); -} - -static const struct file_operations i915_dsc_fec_support_fops = { - .owner = THIS_MODULE, - .open = i915_dsc_fec_support_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = i915_dsc_fec_support_write -}; - -/** - * i915_debugfs_connector_add - add i915 specific connector debugfs files - * @connector: pointer to a registered drm_connector - * - * Cleanup will be done by drm_connector_unregister() through a call to - * drm_debugfs_connector_remove(). - * - * Returns 0 on success, negative error codes on error. - */ -int i915_debugfs_connector_add(struct drm_connector *connector) -{ - struct dentry *root = connector->debugfs_entry; - struct drm_i915_private *dev_priv = to_i915(connector->dev); - - /* The connector must have been registered beforehands. */ - if (!root) - return -ENODEV; - - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector->connector_type == DRM_MODE_CONNECTOR_eDP) - debugfs_create_file("i915_dpcd", S_IRUGO, root, - connector, &i915_dpcd_fops); - - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - debugfs_create_file("i915_panel_timings", S_IRUGO, root, - connector, &i915_panel_fops); - debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, - connector, &i915_psr_sink_status_fops); - } - - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { - debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, - connector, &i915_hdcp_sink_capability_fops); - } - - if (INTEL_GEN(dev_priv) >= 10 && - (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || - connector->connector_type == DRM_MODE_CONNECTOR_eDP)) - debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root, - connector, &i915_dsc_fec_support_fops); - - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h index c0cd22eb916d..6da39c76ab5e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.h +++ b/drivers/gpu/drm/i915/i915_debugfs.h @@ -6,15 +6,17 @@ #ifndef __I915_DEBUGFS_H__ #define __I915_DEBUGFS_H__ -struct drm_i915_private; struct drm_connector; +struct drm_i915_gem_object; +struct drm_i915_private; +struct seq_file; #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); -int i915_debugfs_connector_add(struct drm_connector *connector); +void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj); #else static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; } -static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; } +static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {} #endif #endif /* __I915_DEBUGFS_H__ */ diff --git 
a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c new file mode 100644 index 000000000000..62b2c5f0495d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_debugfs_params.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/kernel.h> + +#include "i915_debugfs_params.h" +#include "i915_drv.h" +#include "i915_params.h" + +/* int param */ +static int i915_param_int_show(struct seq_file *m, void *data) +{ + int *value = m->private; + + seq_printf(m, "%d\n", *value); + + return 0; +} + +static int i915_param_int_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_param_int_show, inode->i_private); +} + +static ssize_t i915_param_int_write(struct file *file, + const char __user *ubuf, size_t len, + loff_t *offp) +{ + struct seq_file *m = file->private_data; + int *value = m->private; + int ret; + + ret = kstrtoint_from_user(ubuf, len, 0, value); + if (ret) { + /* support boolean values too */ + bool b; + + ret = kstrtobool_from_user(ubuf, len, &b); + if (!ret) + *value = b; + } + + return ret ?: len; +} + +static const struct file_operations i915_param_int_fops = { + .owner = THIS_MODULE, + .open = i915_param_int_open, + .read = seq_read, + .write = i915_param_int_write, + .llseek = default_llseek, + .release = single_release, +}; + +static const struct file_operations i915_param_int_fops_ro = { + .owner = THIS_MODULE, + .open = i915_param_int_open, + .read = seq_read, + .llseek = default_llseek, + .release = single_release, +}; + +/* unsigned int param */ +static int i915_param_uint_show(struct seq_file *m, void *data) +{ + unsigned int *value = m->private; + + seq_printf(m, "%u\n", *value); + + return 0; +} + +static int i915_param_uint_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_param_uint_show, inode->i_private); +} + +static ssize_t i915_param_uint_write(struct file *file, + const char __user *ubuf, size_t len, + loff_t *offp) +{ + struct seq_file *m = file->private_data; + unsigned int *value = m->private; + int ret; + + ret = kstrtouint_from_user(ubuf, len, 0, value); + if (ret) { + /* support boolean values too */ + bool b; + + ret = kstrtobool_from_user(ubuf, len, &b); + if (!ret) + *value = b; + } + + return ret ?: len; +} + +static const struct file_operations i915_param_uint_fops = { + .owner = THIS_MODULE, + .open = i915_param_uint_open, + .read = seq_read, + .write = i915_param_uint_write, + .llseek = default_llseek, + .release = single_release, +}; + +static const struct file_operations i915_param_uint_fops_ro = { + .owner = THIS_MODULE, + .open = i915_param_uint_open, + .read = seq_read, + .llseek = default_llseek, + .release = single_release, +}; + +/* char * param */ +static int i915_param_charp_show(struct seq_file *m, void *data) +{ + const char **s = m->private; + + seq_printf(m, "%s\n", *s); + + return 0; +} + +static int i915_param_charp_open(struct inode *inode, struct file *file) +{ + return single_open(file, i915_param_charp_show, inode->i_private); +} + +static ssize_t i915_param_charp_write(struct file *file, + const char __user *ubuf, size_t len, + loff_t *offp) +{ + struct seq_file *m = file->private_data; + char **s = m->private; + char *new, *old; + + /* FIXME: remove locking after params aren't the module params */ + kernel_param_lock(THIS_MODULE); + + old = *s; + new = strndup_user(ubuf, PAGE_SIZE); + if (IS_ERR(new)) { + len = PTR_ERR(new); + goto out; + } + + *s = new; + + kfree(old); +out: + 
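The int and uint write handlers in this new file try a numeric parse first and only then fall back to kstrtobool(), so both "echo 2" and "echo yes" work on the same node. A userspace analogue of that fallback, with strtol() and a hand-rolled subset of kstrtobool():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_int_or_bool(const char *s, int *value)
{
        char *end;
        long v = strtol(s, &end, 0);    /* base 0, like kstrtoint() */

        if (end != s && *end == '\0') {
                *value = (int)v;
                return 0;
        }

        /* subset of what kstrtobool() accepts */
        if (!strcmp(s, "y") || !strcmp(s, "yes") || !strcmp(s, "on")) {
                *value = 1;
                return 0;
        }
        if (!strcmp(s, "n") || !strcmp(s, "no") || !strcmp(s, "off")) {
                *value = 0;
                return 0;
        }

        return -1;
}

int main(void)
{
        int v;

        if (parse_int_or_bool("yes", &v) == 0)
                printf("%d\n", v);      /* prints 1 */
        return 0;
}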
kernel_param_unlock(THIS_MODULE); + + return len; +} + +static const struct file_operations i915_param_charp_fops = { + .owner = THIS_MODULE, + .open = i915_param_charp_open, + .read = seq_read, + .write = i915_param_charp_write, + .llseek = default_llseek, + .release = single_release, +}; + +static const struct file_operations i915_param_charp_fops_ro = { + .owner = THIS_MODULE, + .open = i915_param_charp_open, + .read = seq_read, + .llseek = default_llseek, + .release = single_release, +}; + +#define RO(mode) (((mode) & 0222) == 0) + +static struct dentry * +i915_debugfs_create_int(const char *name, umode_t mode, + struct dentry *parent, int *value) +{ + return debugfs_create_file_unsafe(name, mode, parent, value, + RO(mode) ? &i915_param_int_fops_ro : + &i915_param_int_fops); +} + +static struct dentry * +i915_debugfs_create_uint(const char *name, umode_t mode, + struct dentry *parent, unsigned int *value) +{ + return debugfs_create_file_unsafe(name, mode, parent, value, + RO(mode) ? &i915_param_uint_fops_ro : + &i915_param_uint_fops); +} + +static struct dentry * +i915_debugfs_create_charp(const char *name, umode_t mode, + struct dentry *parent, char **value) +{ + return debugfs_create_file(name, mode, parent, value, + RO(mode) ? &i915_param_charp_fops_ro : + &i915_param_charp_fops); +} + +static __always_inline void +_i915_param_create_file(struct dentry *parent, const char *name, + const char *type, int mode, void *value) +{ + if (!mode) + return; + + if (!__builtin_strcmp(type, "bool")) + debugfs_create_bool(name, mode, parent, value); + else if (!__builtin_strcmp(type, "int")) + i915_debugfs_create_int(name, mode, parent, value); + else if (!__builtin_strcmp(type, "unsigned int")) + i915_debugfs_create_uint(name, mode, parent, value); + else if (!__builtin_strcmp(type, "unsigned long")) + debugfs_create_ulong(name, mode, parent, value); + else if (!__builtin_strcmp(type, "char *")) + i915_debugfs_create_charp(name, mode, parent, value); + else + WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n", + type, name); +} + +/* add a subdirectory with files for each i915 param */ +struct dentry *i915_debugfs_params(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + struct i915_params *params = &i915_modparams; + struct dentry *dir; + + dir = debugfs_create_dir("i915_params", minor->debugfs_root); + if (IS_ERR(dir)) + return dir; + + /* + * Note: We could create files for params needing special handling + * here. Set mode in params to 0 to skip the generic create file, or + * just let the generic create file fail silently with -EEXIST. + */ + +#define REGISTER(T, x, unused, mode, ...) 
_i915_param_create_file(dir, #x, #T, mode, ¶ms->x); + I915_PARAMS_FOR_EACH(REGISTER); +#undef REGISTER + + return dir; +} diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.h b/drivers/gpu/drm/i915/i915_debugfs_params.h new file mode 100644 index 000000000000..66567076546b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_debugfs_params.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_DEBUGFS_PARAMS__ +#define __I915_DEBUGFS_PARAMS__ + +struct dentry; +struct drm_i915_private; + +struct dentry *i915_debugfs_params(struct drm_i915_private *i915); + +#endif /* __I915_DEBUGFS_PARAMS__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8410330ce4f0..81a4621853db 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -44,12 +44,13 @@ #include <drm/drm_ioctl.h> #include <drm/drm_irq.h> #include <drm/drm_probe_helper.h> -#include <drm/i915_drm.h> #include "display/intel_acpi.h" #include "display/intel_audio.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" +#include "display/intel_csr.h" +#include "display/intel_display_debugfs.h" #include "display/intel_display_types.h" #include "display/intel_dp.h" #include "display/intel_fbdev.h" @@ -69,6 +70,7 @@ #include "i915_debugfs.h" #include "i915_drv.h" +#include "i915_ioc32.h" #include "i915_irq.h" #include "i915_memcpy.h" #include "i915_perf.h" @@ -78,74 +80,14 @@ #include "i915_sysfs.h" #include "i915_trace.h" #include "i915_vgpu.h" -#include "intel_csr.h" +#include "intel_dram.h" +#include "intel_gvt.h" #include "intel_memory_region.h" #include "intel_pm.h" +#include "vlv_suspend.h" static struct drm_driver driver; -struct vlv_s0ix_state { - /* GAM */ - u32 wr_watermark; - u32 gfx_prio_ctrl; - u32 arb_mode; - u32 gfx_pend_tlb0; - u32 gfx_pend_tlb1; - u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; - u32 media_max_req_count; - u32 gfx_max_req_count; - u32 render_hwsp; - u32 ecochk; - u32 bsd_hwsp; - u32 blt_hwsp; - u32 tlb_rd_addr; - - /* MBC */ - u32 g3dctl; - u32 gsckgctl; - u32 mbctl; - - /* GCP */ - u32 ucgctl1; - u32 ucgctl3; - u32 rcgctl1; - u32 rcgctl2; - u32 rstctl; - u32 misccpctl; - - /* GPM */ - u32 gfxpause; - u32 rpdeuhwtc; - u32 rpdeuc; - u32 ecobus; - u32 pwrdwnupctl; - u32 rp_down_timeout; - u32 rp_deucsw; - u32 rcubmabdtmr; - u32 rcedata; - u32 spare2gh; - - /* Display 1 CZ domain */ - u32 gt_imr; - u32 gt_ier; - u32 pm_imr; - u32 pm_ier; - u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; - - /* GT SA CZ domain */ - u32 tilectl; - u32 gt_fifoctl; - u32 gtlc_wake_ctrl; - u32 gtlc_survive; - u32 pmwgicz; - - /* Display 2 CZ domain */ - u32 gu_ctl0; - u32 gu_ctl1; - u32 pcbr; - u32 clock_gate_dis2; -}; - static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { int domain = pci_domain_nr(dev_priv->drm.pdev->bus); @@ -153,7 +95,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) dev_priv->bridge_dev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0)); if (!dev_priv->bridge_dev) { - DRM_ERROR("bridge device not found\n"); + drm_err(&dev_priv->drm, "bridge device not found\n"); return -1; } return 0; @@ -190,7 +132,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv) 0, pcibios_align_resource, dev_priv->bridge_dev); if (ret) { - DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret); dev_priv->mch_res.start = 0; return ret; } @@ -273,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv) 
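Two compact tricks close out the new params file: RO(mode) treats a mode with no write bits (0222) as read-only and selects the _ro fops, and REGISTER() is an X-macro that I915_PARAMS_FOR_EACH() expands once per parameter. A runnable miniature of both; the two-row parameter list here is invented for illustration, not the driver's real one:

#include <stdio.h>

#define RO(mode) (((mode) & 0222) == 0)

/* X-macro: the list expands its argument once per (type, name, default,
 * mode) row */
#define EXAMPLE_PARAMS_FOR_EACH(param) \
        param(int, enable_guc, 0, 0400) \
        param(unsigned int, reset, 3, 0600)

int main(void)
{
#define REGISTER(T, x, unused, mode) \
        printf("%s: %s, %s\n", #x, #T, RO(mode) ? "ro" : "rw");
        EXAMPLE_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
        return 0;
}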
release_resource(&dev_priv->mch_res); } -static int i915_driver_modeset_probe(struct drm_i915_private *i915) +/* part #1: call before irq install */ +static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915) { int ret; @@ -293,25 +236,32 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915) if (ret) goto out; - intel_register_dsm_handler(); + intel_power_domains_init_hw(i915, false); + + intel_csr_ucode_init(i915); - ret = i915_switcheroo_register(i915); + ret = intel_modeset_init_noirq(i915); if (ret) goto cleanup_vga_client; - intel_power_domains_init_hw(i915, false); + return 0; - intel_csr_ucode_init(i915); +cleanup_vga_client: + intel_vga_unregister(i915); +out: + return ret; +} - ret = intel_irq_install(i915); - if (ret) - goto cleanup_csr; +/* part #2: call after irq install */ +static int i915_driver_modeset_probe(struct drm_i915_private *i915) +{ + int ret; /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. */ ret = intel_modeset_init(i915); if (ret) - goto cleanup_irq; + goto out; ret = i915_gem_init(i915); if (ret) @@ -340,29 +290,27 @@ cleanup_gem: i915_gem_driver_remove(i915); i915_gem_driver_release(i915); cleanup_modeset: + /* FIXME */ intel_modeset_driver_remove(i915); -cleanup_irq: intel_irq_uninstall(i915); -cleanup_csr: - intel_csr_ucode_fini(i915); - intel_power_domains_driver_remove(i915); - i915_switcheroo_unregister(i915); -cleanup_vga_client: - intel_vga_unregister(i915); + intel_modeset_driver_remove_noirq(i915); out: return ret; } +/* part #1: call before irq uninstall */ static void i915_driver_modeset_remove(struct drm_i915_private *i915) { intel_modeset_driver_remove(i915); +} - intel_irq_uninstall(i915); +/* part #2: call after irq uninstall */ +static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915) +{ + intel_modeset_driver_remove_noirq(i915); intel_bios_driver_remove(i915); - i915_switcheroo_unregister(i915); - intel_vga_unregister(i915); intel_csr_ucode_fini(i915); @@ -412,7 +360,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv) out_free_wq: destroy_workqueue(dev_priv->wq); out_err: - DRM_ERROR("Failed to allocate workqueues.\n"); + drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n"); return -ENOMEM; } @@ -441,37 +389,15 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0); pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST); pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0); + pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2); if (pre) { - DRM_ERROR("This is a pre-production stepping. " + drm_err(&dev_priv->drm, "This is a pre-production stepping. 
" "It may not be fully functional.\n"); add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK); } } -static int vlv_alloc_s0ix_state(struct drm_i915_private *i915) -{ - if (!IS_VALLEYVIEW(i915)) - return 0; - - /* we write all the values in the struct, so no need to zero it out */ - i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state), - GFP_KERNEL); - if (!i915->vlv_s0ix_state) - return -ENOMEM; - - return 0; -} - -static void vlv_free_s0ix_state(struct drm_i915_private *i915) -{ - if (!i915->vlv_s0ix_state) - return; - - kfree(i915->vlv_s0ix_state); - i915->vlv_s0ix_state = NULL; -} - static void sanitize_gpu(struct drm_i915_private *i915) { if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) @@ -505,8 +431,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->backlight_lock); mutex_init(&dev_priv->sb_lock); - pm_qos_add_request(&dev_priv->sb_qos, - PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); @@ -520,7 +445,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) if (ret < 0) return ret; - ret = vlv_alloc_s0ix_state(dev_priv); + ret = vlv_suspend_init(dev_priv); if (ret < 0) goto err_workqueues; @@ -542,7 +467,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); intel_init_audio_hooks(dev_priv); - intel_display_crc_init(dev_priv); intel_detect_preproduction_hw(dev_priv); @@ -551,7 +475,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) err_gem: i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release(&dev_priv->gt); - vlv_free_s0ix_state(dev_priv); + vlv_suspend_cleanup(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); return ret; @@ -568,10 +492,10 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) intel_power_domains_cleanup(dev_priv); i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release(&dev_priv->gt); - vlv_free_s0ix_state(dev_priv); + vlv_suspend_cleanup(dev_priv); i915_workqueues_cleanup(dev_priv); - pm_qos_remove_request(&dev_priv->sb_qos); + cpu_latency_qos_remove_request(&dev_priv->sb_qos); mutex_destroy(&dev_priv->sb_lock); } @@ -641,487 +565,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) intel_gvt_sanitize_options(dev_priv); } -#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type - -static const char *intel_dram_type_str(enum intel_dram_type type) -{ - static const char * const str[] = { - DRAM_TYPE_STR(UNKNOWN), - DRAM_TYPE_STR(DDR3), - DRAM_TYPE_STR(DDR4), - DRAM_TYPE_STR(LPDDR3), - DRAM_TYPE_STR(LPDDR4), - }; - - if (type >= ARRAY_SIZE(str)) - type = INTEL_DRAM_UNKNOWN; - - return str[type]; -} - -#undef DRAM_TYPE_STR - -static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) -{ - return dimm->ranks * 64 / (dimm->width ?: 1); -} - -/* Returns total GB for the whole DIMM */ -static int skl_get_dimm_size(u16 val) -{ - return val & SKL_DRAM_SIZE_MASK; -} - -static int skl_get_dimm_width(u16 val) -{ - if (skl_get_dimm_size(val) == 0) - return 0; - - switch (val & SKL_DRAM_WIDTH_MASK) { - case SKL_DRAM_WIDTH_X8: - case SKL_DRAM_WIDTH_X16: - case SKL_DRAM_WIDTH_X32: - val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; - return 8 << val; - default: - MISSING_CASE(val); - return 0; - } -} - -static int skl_get_dimm_ranks(u16 val) -{ - if 
(skl_get_dimm_size(val) == 0) - return 0; - - val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; - - return val + 1; -} - -/* Returns total GB for the whole DIMM */ -static int cnl_get_dimm_size(u16 val) -{ - return (val & CNL_DRAM_SIZE_MASK) / 2; -} - -static int cnl_get_dimm_width(u16 val) -{ - if (cnl_get_dimm_size(val) == 0) - return 0; - - switch (val & CNL_DRAM_WIDTH_MASK) { - case CNL_DRAM_WIDTH_X8: - case CNL_DRAM_WIDTH_X16: - case CNL_DRAM_WIDTH_X32: - val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; - return 8 << val; - default: - MISSING_CASE(val); - return 0; - } -} - -static int cnl_get_dimm_ranks(u16 val) -{ - if (cnl_get_dimm_size(val) == 0) - return 0; - - val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; - - return val + 1; -} - -static bool -skl_is_16gb_dimm(const struct dram_dimm_info *dimm) -{ - /* Convert total GB to Gb per DRAM device */ - return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; -} - -static void -skl_dram_get_dimm_info(struct drm_i915_private *dev_priv, - struct dram_dimm_info *dimm, - int channel, char dimm_name, u16 val) -{ - if (INTEL_GEN(dev_priv) >= 10) { - dimm->size = cnl_get_dimm_size(val); - dimm->width = cnl_get_dimm_width(val); - dimm->ranks = cnl_get_dimm_ranks(val); - } else { - dimm->size = skl_get_dimm_size(val); - dimm->width = skl_get_dimm_width(val); - dimm->ranks = skl_get_dimm_ranks(val); - } - - DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", - channel, dimm_name, dimm->size, dimm->width, dimm->ranks, - yesno(skl_is_16gb_dimm(dimm))); -} - -static int -skl_dram_get_channel_info(struct drm_i915_private *dev_priv, - struct dram_channel_info *ch, - int channel, u32 val) -{ - skl_dram_get_dimm_info(dev_priv, &ch->dimm_l, - channel, 'L', val & 0xffff); - skl_dram_get_dimm_info(dev_priv, &ch->dimm_s, - channel, 'S', val >> 16); - - if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { - DRM_DEBUG_KMS("CH%u not populated\n", channel); - return -EINVAL; - } - - if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2) - ch->ranks = 2; - else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) - ch->ranks = 2; - else - ch->ranks = 1; - - ch->is_16gb_dimm = - skl_is_16gb_dimm(&ch->dimm_l) || - skl_is_16gb_dimm(&ch->dimm_s); - - DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n", - channel, ch->ranks, yesno(ch->is_16gb_dimm)); - - return 0; -} - -static bool -intel_is_dram_symmetric(const struct dram_channel_info *ch0, - const struct dram_channel_info *ch1) -{ - return !memcmp(ch0, ch1, sizeof(*ch0)) && - (ch0->dimm_s.size == 0 || - !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); -} - -static int -skl_dram_get_channels_info(struct drm_i915_private *dev_priv) -{ - struct dram_info *dram_info = &dev_priv->dram_info; - struct dram_channel_info ch0 = {}, ch1 = {}; - u32 val; - int ret; - - val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val); - if (ret == 0) - dram_info->num_channels++; - - val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val); - if (ret == 0) - dram_info->num_channels++; - - if (dram_info->num_channels == 0) { - DRM_INFO("Number of memory channels is zero\n"); - return -EINVAL; - } - - /* - * If any of the channel is single rank channel, worst case output - * will be same as if single rank memory, so consider single rank - * memory. 
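The SKL and CNL DIMM decoders being moved to intel_dram.c share one encoding for device width: a two-bit field selecting X8/X16/X32 as 8 << field. A runnable check of that decode; the mask and shift here are placeholders, the driver's SKL_DRAM_WIDTH_* macros define the real positions:

#include <stdio.h>

/* placeholder field position; see SKL_DRAM_WIDTH_* for the real layout */
#define WIDTH_MASK      (0x3 << 8)
#define WIDTH_SHIFT     8

static int dimm_width(unsigned int val)
{
        return 8 << ((val & WIDTH_MASK) >> WIDTH_SHIFT);
}

int main(void)
{
        /* field values 0, 1, 2 decode to X8, X16, X32 */
        printf("%d %d %d\n", dimm_width(0u << 8), dimm_width(1u << 8),
               dimm_width(2u << 8));
        return 0;
}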
- */ - if (ch0.ranks == 1 || ch1.ranks == 1) - dram_info->ranks = 1; - else - dram_info->ranks = max(ch0.ranks, ch1.ranks); - - if (dram_info->ranks == 0) { - DRM_INFO("couldn't get memory rank information\n"); - return -EINVAL; - } - - dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; - - dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); - - DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n", - yesno(dram_info->symmetric_memory)); - return 0; -} - -static enum intel_dram_type -skl_get_dram_type(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); - - switch (val & SKL_DRAM_DDR_TYPE_MASK) { - case SKL_DRAM_DDR_TYPE_DDR3: - return INTEL_DRAM_DDR3; - case SKL_DRAM_DDR_TYPE_DDR4: - return INTEL_DRAM_DDR4; - case SKL_DRAM_DDR_TYPE_LPDDR3: - return INTEL_DRAM_LPDDR3; - case SKL_DRAM_DDR_TYPE_LPDDR4: - return INTEL_DRAM_LPDDR4; - default: - MISSING_CASE(val); - return INTEL_DRAM_UNKNOWN; - } -} - -static int -skl_get_dram_info(struct drm_i915_private *dev_priv) -{ - struct dram_info *dram_info = &dev_priv->dram_info; - u32 mem_freq_khz, val; - int ret; - - dram_info->type = skl_get_dram_type(dev_priv); - DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type)); - - ret = skl_dram_get_channels_info(dev_priv); - if (ret) - return ret; - - val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); - mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * - SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); - - dram_info->bandwidth_kbps = dram_info->num_channels * - mem_freq_khz * 8; - - if (dram_info->bandwidth_kbps == 0) { - DRM_INFO("Couldn't get system memory bandwidth\n"); - return -EINVAL; - } - - dram_info->valid = true; - return 0; -} - -/* Returns Gb per DRAM device */ -static int bxt_get_dimm_size(u32 val) -{ - switch (val & BXT_DRAM_SIZE_MASK) { - case BXT_DRAM_SIZE_4GBIT: - return 4; - case BXT_DRAM_SIZE_6GBIT: - return 6; - case BXT_DRAM_SIZE_8GBIT: - return 8; - case BXT_DRAM_SIZE_12GBIT: - return 12; - case BXT_DRAM_SIZE_16GBIT: - return 16; - default: - MISSING_CASE(val); - return 0; - } -} - -static int bxt_get_dimm_width(u32 val) -{ - if (!bxt_get_dimm_size(val)) - return 0; - - val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; - - return 8 << val; -} - -static int bxt_get_dimm_ranks(u32 val) -{ - if (!bxt_get_dimm_size(val)) - return 0; - - switch (val & BXT_DRAM_RANK_MASK) { - case BXT_DRAM_RANK_SINGLE: - return 1; - case BXT_DRAM_RANK_DUAL: - return 2; - default: - MISSING_CASE(val); - return 0; - } -} - -static enum intel_dram_type bxt_get_dimm_type(u32 val) -{ - if (!bxt_get_dimm_size(val)) - return INTEL_DRAM_UNKNOWN; - - switch (val & BXT_DRAM_TYPE_MASK) { - case BXT_DRAM_TYPE_DDR3: - return INTEL_DRAM_DDR3; - case BXT_DRAM_TYPE_LPDDR3: - return INTEL_DRAM_LPDDR3; - case BXT_DRAM_TYPE_DDR4: - return INTEL_DRAM_DDR4; - case BXT_DRAM_TYPE_LPDDR4: - return INTEL_DRAM_LPDDR4; - default: - MISSING_CASE(val); - return INTEL_DRAM_UNKNOWN; - } -} - -static void bxt_get_dimm_info(struct dram_dimm_info *dimm, - u32 val) -{ - dimm->width = bxt_get_dimm_width(val); - dimm->ranks = bxt_get_dimm_ranks(val); - - /* - * Size in register is Gb per DRAM device. Convert to total - * GB to match the way we report this for non-LP platforms. 
- */ - dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; -} - -static int -bxt_get_dram_info(struct drm_i915_private *dev_priv) -{ - struct dram_info *dram_info = &dev_priv->dram_info; - u32 dram_channels; - u32 mem_freq_khz, val; - u8 num_active_channels; - int i; - - val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0); - mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * - BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); - - dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; - num_active_channels = hweight32(dram_channels); - - /* Each active bit represents 4-byte channel */ - dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4); - - if (dram_info->bandwidth_kbps == 0) { - DRM_INFO("Couldn't get system memory bandwidth\n"); - return -EINVAL; - } - - /* - * Now read each DUNIT8/9/10/11 to check the rank of each dimms. - */ - for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { - struct dram_dimm_info dimm; - enum intel_dram_type type; - - val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); - if (val == 0xFFFFFFFF) - continue; - - dram_info->num_channels++; - - bxt_get_dimm_info(&dimm, val); - type = bxt_get_dimm_type(val); - - WARN_ON(type != INTEL_DRAM_UNKNOWN && - dram_info->type != INTEL_DRAM_UNKNOWN && - dram_info->type != type); - - DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", - i - BXT_D_CR_DRP0_DUNIT_START, - dimm.size, dimm.width, dimm.ranks, - intel_dram_type_str(type)); - - /* - * If any of the channel is single rank channel, - * worst case output will be same as if single rank - * memory, so consider single rank memory. - */ - if (dram_info->ranks == 0) - dram_info->ranks = dimm.ranks; - else if (dimm.ranks == 1) - dram_info->ranks = 1; - - if (type != INTEL_DRAM_UNKNOWN) - dram_info->type = type; - } - - if (dram_info->type == INTEL_DRAM_UNKNOWN || - dram_info->ranks == 0) { - DRM_INFO("couldn't get memory information\n"); - return -EINVAL; - } - - dram_info->valid = true; - return 0; -} - -static void -intel_get_dram_info(struct drm_i915_private *dev_priv) -{ - struct dram_info *dram_info = &dev_priv->dram_info; - int ret; - - /* - * Assume 16Gb DIMMs are present until proven otherwise. - * This is only used for the level 0 watermark latency - * w/a which does not apply to bxt/glk. 
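intel_dimm_num_devices() and the conversion finished just above encode a simple identity: a 64-bit channel built from xN devices holds ranks * 64 / width devices, and total GB is Gb-per-device times device count over eight. The arithmetic, runnable:

#include <stdio.h>

struct dimm { int size, width, ranks; };        /* size in GB, as in the driver */

static int num_devices(const struct dimm *d)
{
        return d->ranks * 64 / (d->width ?: 1); /* GNU ?: guards width 0 */
}

int main(void)
{
        /* illustrative: single-rank x16 DIMM of 8Gbit devices */
        struct dimm d = { .width = 16, .ranks = 1 };
        int gbit_per_device = 8;

        d.size = gbit_per_device * num_devices(&d) / 8; /* bxt conversion */
        printf("%d devices, %d GB total\n", num_devices(&d), d.size);
        return 0;
}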
- */ - dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); - - if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv)) - return; - - if (IS_GEN9_LP(dev_priv)) - ret = bxt_get_dram_info(dev_priv); - else - ret = skl_get_dram_info(dev_priv); - if (ret) - return; - - DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n", - dram_info->bandwidth_kbps, - dram_info->num_channels); - - DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n", - dram_info->ranks, yesno(dram_info->is_16gb_dimm)); -} - -static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap) -{ - static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; - static const u8 sets[4] = { 1, 1, 2, 2 }; - - return EDRAM_NUM_BANKS(cap) * - ways[EDRAM_WAYS_IDX(cap)] * - sets[EDRAM_SETS_IDX(cap)]; -} - -static void edram_detect(struct drm_i915_private *dev_priv) -{ - u32 edram_cap = 0; - - if (!(IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv) || - INTEL_GEN(dev_priv) >= 9)) - return; - - edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP); - - /* NB: We can't write IDICR yet because we don't have gt funcs set up */ - - if (!(edram_cap & EDRAM_ENABLED)) - return; - - /* - * The needed capability bits for size calculation are not there with - * pre gen9 so return 128MB always. - */ - if (INTEL_GEN(dev_priv) < 9) - dev_priv->edram_size_mb = 128; - else - dev_priv->edram_size_mb = - gen9_edram_size_mb(dev_priv, edram_cap); - - dev_info(dev_priv->drm.dev, - "Found %uMB of eDRAM\n", dev_priv->edram_size_mb); -} - /** * i915_driver_hw_probe - setup state requiring device access * @dev_priv: device private @@ -1165,7 +608,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) intel_sanitize_options(dev_priv); /* needs to be done before ggtt probe */ - edram_detect(dev_priv); + intel_dram_edram_detect(dev_priv); i915_perf_init(dev_priv); @@ -1189,7 +632,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) ret = i915_ggtt_enable_hw(dev_priv); if (ret) { - DRM_ERROR("failed to enable GGTT\n"); + drm_err(&dev_priv->drm, "failed to enable GGTT\n"); goto err_mem_regions; } @@ -1205,7 +648,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (IS_GEN(dev_priv, 2)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); if (ret) { - DRM_ERROR("failed to set DMA mask\n"); + drm_err(&dev_priv->drm, "failed to set DMA mask\n"); goto err_mem_regions; } @@ -1223,14 +666,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { - DRM_ERROR("failed to set DMA mask\n"); + drm_err(&dev_priv->drm, "failed to set DMA mask\n"); goto err_mem_regions; } } - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); intel_gt_init_workarounds(dev_priv); @@ -1255,7 +697,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) - DRM_DEBUG_DRIVER("can't enable MSI"); + drm_dbg(&dev_priv->drm, "can't enable MSI"); } ret = intel_gvt_init(dev_priv); @@ -1267,7 +709,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) * Fill the dram structure to get the system raw bandwidth and * dram info. This will be used for memory latency calculation. 
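gen9_edram_size_mb() above is a pure lookup: banks times ways times sets, all decoded from HSW_EDRAM_CAP. A runnable copy with the bitfield extraction spelled out; the shift positions are assumptions recalled from the driver's EDRAM_* macros, so treat them as illustrative:

#include <stdio.h>

/* assumed HSW_EDRAM_CAP layout, per the driver's EDRAM_* macros */
#define EDRAM_NUM_BANKS(cap)    (((cap) >> 1) & 0xf)
#define EDRAM_WAYS_IDX(cap)     (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap)     (((cap) >> 8) & 0x3)

static unsigned int gen9_edram_size_mb(unsigned int cap)
{
        static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        static const unsigned char sets[4] = { 1, 1, 2, 2 };

        return EDRAM_NUM_BANKS(cap) * ways[EDRAM_WAYS_IDX(cap)] *
               sets[EDRAM_SETS_IDX(cap)];
}

int main(void)
{
        /* 8 banks, ways index 1 (8 ways), sets index 2 (2 sets): 128 MB */
        unsigned int cap = (8u << 1) | (1u << 5) | (2u << 8);

        printf("%u MB\n", gen9_edram_size_mb(cap));
        return 0;
}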
*/ - intel_get_dram_info(dev_priv); + intel_dram_detect(dev_priv); intel_bw_init_hw(dev_priv); @@ -1276,7 +718,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); err_mem_regions: intel_memory_regions_driver_release(dev_priv); err_ggtt: @@ -1299,7 +741,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) if (pdev->msi_enabled) pci_disable_msi(pdev); - pm_qos_remove_request(&dev_priv->pm_qos); + cpu_latency_qos_remove_request(&dev_priv->pm_qos); } /** @@ -1316,22 +758,19 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); - /* - * Notify a valid surface after modesetting, - * when running inside a VM. - */ - if (intel_vgpu_active(dev_priv)) - I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); + intel_vgpu_register(dev_priv); /* Reveal our presence to userspace */ if (drm_dev_register(dev, 0) == 0) { i915_debugfs_register(dev_priv); + intel_display_debugfs_register(dev_priv); i915_setup_sysfs(dev_priv); /* Depends on sysfs having been initialized */ i915_perf_register(dev_priv); } else - DRM_ERROR("Failed to register driver for userspace access!\n"); + drm_err(&dev_priv->drm, + "Failed to register driver for userspace access!\n"); if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { /* Must be done after probing outputs */ @@ -1361,6 +800,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) intel_power_domains_enable(dev_priv); intel_runtime_pm_enable(&dev_priv->runtime_pm); + + intel_register_dsm_handler(); + + if (i915_switcheroo_register(dev_priv)) + drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); } /** @@ -1369,6 +813,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) */ static void i915_driver_unregister(struct drm_i915_private *dev_priv) { + i915_switcheroo_unregister(dev_priv); + + intel_unregister_dsm_handler(); + intel_runtime_pm_disable(&dev_priv->runtime_pm); intel_power_domains_disable(dev_priv); @@ -1413,11 +861,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) - DRM_INFO("DRM_I915_DEBUG enabled\n"); + drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n"); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); + drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n"); if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) - DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n"); + drm_info(&dev_priv->drm, + "DRM_I915_DEBUG_RUNTIME_PM enabled\n"); } static struct drm_i915_private * @@ -1439,8 +888,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return ERR_PTR(err); } - i915->drm.dev_private = i915; - i915->drm.pdev = pdev; pci_set_drvdata(pdev, i915); @@ -1480,16 +927,16 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; - struct drm_i915_private *dev_priv; + struct drm_i915_private *i915; int ret; - dev_priv = i915_driver_create(pdev, ent); - if (IS_ERR(dev_priv)) - return PTR_ERR(dev_priv); + i915 = i915_driver_create(pdev, ent); + if (IS_ERR(i915)) + return PTR_ERR(i915); /* Disable nuclear pageflip by default on pre-ILK */ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) - 
dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; + i915->drm.driver_features &= ~DRIVER_ATOMIC; /* * Check if we support fake LMEM -- for now we only unleash this for @@ -1497,13 +944,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { - if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 && + if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 && i915_modparams.fake_lmem_start) { - mkwrite_device_info(dev_priv)->memory_regions = + mkwrite_device_info(i915)->memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN; - mkwrite_device_info(dev_priv)->is_dgfx = true; - GEM_BUG_ON(!HAS_LMEM(dev_priv)); - GEM_BUG_ON(!IS_DGFX(dev_priv)); + mkwrite_device_info(i915)->is_dgfx = true; + GEM_BUG_ON(!HAS_LMEM(i915)); + GEM_BUG_ON(!IS_DGFX(i915)); } } #endif @@ -1512,48 +959,60 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_fini; - ret = i915_driver_early_probe(dev_priv); + ret = i915_driver_early_probe(i915); if (ret < 0) goto out_pci_disable; - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + disable_rpm_wakeref_asserts(&i915->runtime_pm); - i915_detect_vgpu(dev_priv); + intel_vgpu_detect(i915); - ret = i915_driver_mmio_probe(dev_priv); + ret = i915_driver_mmio_probe(i915); if (ret < 0) goto out_runtime_pm_put; - ret = i915_driver_hw_probe(dev_priv); + ret = i915_driver_hw_probe(i915); if (ret < 0) goto out_cleanup_mmio; - ret = i915_driver_modeset_probe(dev_priv); + ret = i915_driver_modeset_probe_noirq(i915); if (ret < 0) goto out_cleanup_hw; - i915_driver_register(dev_priv); + ret = intel_irq_install(i915); + if (ret) + goto out_cleanup_modeset; - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + ret = i915_driver_modeset_probe(i915); + if (ret < 0) + goto out_cleanup_irq; + + i915_driver_register(i915); + + enable_rpm_wakeref_asserts(&i915->runtime_pm); - i915_welcome_messages(dev_priv); + i915_welcome_messages(i915); return 0; +out_cleanup_irq: + intel_irq_uninstall(i915); +out_cleanup_modeset: + /* FIXME */ out_cleanup_hw: - i915_driver_hw_remove(dev_priv); - intel_memory_regions_driver_release(dev_priv); - i915_ggtt_driver_release(dev_priv); + i915_driver_hw_remove(i915); + intel_memory_regions_driver_release(i915); + i915_ggtt_driver_release(i915); out_cleanup_mmio: - i915_driver_mmio_release(dev_priv); + i915_driver_mmio_release(i915); out_runtime_pm_put: - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - i915_driver_late_release(dev_priv); + enable_rpm_wakeref_asserts(&i915->runtime_pm); + i915_driver_late_release(i915); out_pci_disable: pci_disable_device(pdev); out_fini: - i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret); - i915_driver_destroy(dev_priv); + i915_probe_error(i915, "Device initialization failed (%d)\n", ret); + i915_driver_destroy(i915); return ret; } @@ -1563,13 +1022,6 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_driver_unregister(i915); - /* - * After unregistering the device to prevent any new users, cancel - * all in-flight requests so that we can quickly unbind the active - * resources. 
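The reshuffled probe keeps the kernel's stacked-goto discipline: each acquisition gets a matching error label, and failures unwind in strict reverse order (the "/* FIXME */" label above marks where the modeset unwind is still incomplete). The shape, as a runnable miniature with stub steps:

#include <stdio.h>

static int step(const char *name, int fail)
{
        printf("do %s\n", name);
        return fail ? -1 : 0;
}

static void undo(const char *name)
{
        printf("undo %s\n", name);
}

static int probe(void)
{
        int ret;

        ret = step("hw", 0);
        if (ret)
                goto out;
        ret = step("modeset_noirq", 0);
        if (ret)
                goto cleanup_hw;
        ret = step("irq", 1);           /* simulate a failure here */
        if (ret)
                goto cleanup_modeset;

        return 0;

cleanup_modeset:
        undo("modeset_noirq");
cleanup_hw:
        undo("hw");
out:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}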
- */ - intel_gt_set_wedged(&i915->gt); - /* Flush any external code that still may be under the RCU lock */ synchronize_rcu(); @@ -1581,6 +1033,10 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_driver_modeset_remove(i915); + intel_irq_uninstall(i915); + + i915_driver_modeset_remove_noirq(i915); + i915_reset_error_state(i915); i915_gem_driver_remove(i915); @@ -1667,10 +1123,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) drm_modeset_unlock_all(dev); } -static int vlv_resume_prepare(struct drm_i915_private *dev_priv, - bool rpm_resume); -static int vlv_suspend_complete(struct drm_i915_private *dev_priv); - static bool suspend_to_idle(struct drm_i915_private *dev_priv) { #if IS_ENABLED(CONFIG_ACPI_SLEEP) @@ -1722,7 +1174,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_hw(dev_priv); - i915_gem_suspend_gtt_mappings(dev_priv); + i915_ggtt_suspend(&dev_priv->ggtt); i915_save_state(dev_priv); @@ -1757,7 +1209,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret = 0; + int ret; disable_rpm_wakeref_asserts(rpm); @@ -1770,11 +1222,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) intel_display_power_suspend_late(dev_priv); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret = vlv_suspend_complete(dev_priv); - + ret = vlv_suspend_complete(dev_priv); if (ret) { - DRM_ERROR("Suspend complete failed: %d\n", ret); + drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret); intel_power_domains_resume(dev_priv); goto out; @@ -1808,8 +1258,8 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) { int error; - if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && - state.event != PM_EVENT_FREEZE)) + if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND && + state.event != PM_EVENT_FREEZE)) return -EINVAL; if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) @@ -1833,9 +1283,9 @@ static int i915_drm_resume(struct drm_device *dev) ret = i915_ggtt_enable_hw(dev_priv); if (ret) - DRM_ERROR("failed to re-enable GGTT\n"); + drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); - i915_gem_restore_gtt_mappings(dev_priv); + i915_ggtt_resume(&dev_priv->ggtt); i915_gem_restore_fences(&dev_priv->ggtt); intel_csr_ucode_resume(dev_priv); @@ -1922,7 +1372,8 @@ static int i915_drm_resume_early(struct drm_device *dev) */ ret = pci_set_power_state(pdev, PCI_D0); if (ret) { - DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret); + drm_err(&dev_priv->drm, + "failed to set PCI D0 power state (%d)\n", ret); return ret; } @@ -1946,11 +1397,10 @@ static int i915_drm_resume_early(struct drm_device *dev) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret = vlv_resume_prepare(dev_priv, false); + ret = vlv_resume_prepare(dev_priv, false); if (ret) - DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", - ret); + drm_err(&dev_priv->drm, + "Resume prepare failed: %d, continuing anyway\n", ret); intel_uncore_resume_early(&dev_priv->uncore); @@ -2117,391 +1567,16 @@ static int i915_pm_restore(struct device *kdev) return i915_pm_resume(kdev); } -/* - * Save all Gunit registers that may be lost after a D3 and a subsequent - * S0i[R123] transition. 
The list of registers needing a save/restore is - * defined in the VLV2_S0IXRegs document. This document marks all Gunit - * registers in the following way: - * - Driver: saved/restored by the driver - * - Punit : saved/restored by the Punit firmware - * - No, w/o marking: no need to save/restore, since the register is R/O or - * used internally by the HW in a way that doesn't depend - * on keeping the content across a suspend/resume. - * - Debug : used for debugging - * - * We save/restore all registers marked with 'Driver', with the following - * exceptions: - * - Registers out of use, including also registers marked with 'Debug'. - * These have no effect on the driver's operation, so we don't save/restore - * them to reduce the overhead. - * - Registers that are fully set up by an initialization function called from - * the resume path. For example many clock gating and RPS/RC6 registers. - * - Registers that provide the right functionality with their reset defaults. - * - * TODO: Except for registers that, based on the above 3 criteria, can be safely - * ignored, we save/restore all others, practically treating the HW context as - * a black-box for the driver. Further investigation is needed to reduce the - * saved/restored registers even further, by following the same 3 criteria. - */ -static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) -{ - struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state; - int i; - - if (!s) - return; - - /* GAM 0x4000-0x4770 */ - s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); - s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); - s->arb_mode = I915_READ(ARB_MODE); - s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); - s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); - - for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) - s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); - - s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); - s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); - - s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); - s->ecochk = I915_READ(GAM_ECOCHK); - s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); - s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); - - s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); - - /* MBC 0x9024-0x91D0, 0x8500 */ - s->g3dctl = I915_READ(VLV_G3DCTL); - s->gsckgctl = I915_READ(VLV_GSCKGCTL); - s->mbctl = I915_READ(GEN6_MBCTL); - - /* GCP 0x9400-0x9424, 0x8100-0x810C */ - s->ucgctl1 = I915_READ(GEN6_UCGCTL1); - s->ucgctl3 = I915_READ(GEN6_UCGCTL3); - s->rcgctl1 = I915_READ(GEN6_RCGCTL1); - s->rcgctl2 = I915_READ(GEN6_RCGCTL2); - s->rstctl = I915_READ(GEN6_RSTCTL); - s->misccpctl = I915_READ(GEN7_MISCCPCTL); - - /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ - s->gfxpause = I915_READ(GEN6_GFXPAUSE); - s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); - s->rpdeuc = I915_READ(GEN6_RPDEUC); - s->ecobus = I915_READ(ECOBUS); - s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); - s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); - s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); - s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); - s->rcedata = I915_READ(VLV_RCEDATA); - s->spare2gh = I915_READ(VLV_SPAREG2H); - - /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ - s->gt_imr = I915_READ(GTIMR); - s->gt_ier = I915_READ(GTIER); - s->pm_imr = I915_READ(GEN6_PMIMR); - s->pm_ier = I915_READ(GEN6_PMIER); - - for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) - s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); - - /* GT SA CZ domain, 0x100000-0x138124 */ - s->tilectl = I915_READ(TILECTL); - s->gt_fifoctl = I915_READ(GTFIFOCTL); - s->gtlc_wake_ctrl =
I915_READ(VLV_GTLC_WAKE_CTRL); - s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - s->pmwgicz = I915_READ(VLV_PMWGICZ); - - /* Gunit-Display CZ domain, 0x182028-0x1821CF */ - s->gu_ctl0 = I915_READ(VLV_GU_CTL0); - s->gu_ctl1 = I915_READ(VLV_GU_CTL1); - s->pcbr = I915_READ(VLV_PCBR); - s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); - - /* - * Not saving any of: - * DFT, 0x9800-0x9EC0 - * SARB, 0xB000-0xB1FC - * GAC, 0x5208-0x524C, 0x14000-0x14C000 - * PCI CFG - */ -} - -static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) -{ - struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state; - u32 val; - int i; - - if (!s) - return; - - /* GAM 0x4000-0x4770 */ - I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); - I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); - I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); - I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); - I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); - - for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) - I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); - - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); - I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); - - I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); - I915_WRITE(GAM_ECOCHK, s->ecochk); - I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); - I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); - - I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); - - /* MBC 0x9024-0x91D0, 0x8500 */ - I915_WRITE(VLV_G3DCTL, s->g3dctl); - I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); - I915_WRITE(GEN6_MBCTL, s->mbctl); - - /* GCP 0x9400-0x9424, 0x8100-0x810C */ - I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); - I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); - I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); - I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); - I915_WRITE(GEN6_RSTCTL, s->rstctl); - I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); - - /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ - I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); - I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); - I915_WRITE(GEN6_RPDEUC, s->rpdeuc); - I915_WRITE(ECOBUS, s->ecobus); - I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout); - I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); - I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); - I915_WRITE(VLV_RCEDATA, s->rcedata); - I915_WRITE(VLV_SPAREG2H, s->spare2gh); - - /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ - I915_WRITE(GTIMR, s->gt_imr); - I915_WRITE(GTIER, s->gt_ier); - I915_WRITE(GEN6_PMIMR, s->pm_imr); - I915_WRITE(GEN6_PMIER, s->pm_ier); - - for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) - I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); - - /* GT SA CZ domain, 0x100000-0x138124 */ - I915_WRITE(TILECTL, s->tilectl); - I915_WRITE(GTFIFOCTL, s->gt_fifoctl); - /* - * Preserve the GT allow wake and GFX force clock bit, they are not to - * be restored, as they are used to control the s0ix suspend/resume - * sequence by the caller.
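The code that follows implements this with a read-modify-write: the live ALLOWWAKEREQ and GFX_CLK_FORCE_ON bits keep their current values while every other bit comes from the snapshot. The pattern in isolation (a sketch, not an i915 helper):

/*
 * Keep the bits selected by live_mask from the current register value;
 * take everything else from the saved snapshot.
 */
static u32 masked_restore(u32 current_val, u32 saved_val, u32 live_mask)
{
	return (current_val & live_mask) | (saved_val & ~live_mask);
}

/* e.g. masked_restore(val, s->gtlc_wake_ctrl, VLV_GTLC_ALLOWWAKEREQ) */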
- */ - val = I915_READ(VLV_GTLC_WAKE_CTRL); - val &= VLV_GTLC_ALLOWWAKEREQ; - val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; - I915_WRITE(VLV_GTLC_WAKE_CTRL, val); - - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - val &= VLV_GFX_CLK_FORCE_ON_BIT; - val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; - I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); - - I915_WRITE(VLV_PMWGICZ, s->pmwgicz); - - /* Gunit-Display CZ domain, 0x182028-0x1821CF */ - I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); - I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); - I915_WRITE(VLV_PCBR, s->pcbr); - I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); -} - -static int vlv_wait_for_pw_status(struct drm_i915_private *i915, - u32 mask, u32 val) -{ - i915_reg_t reg = VLV_GTLC_PW_STATUS; - u32 reg_value; - int ret; - - /* The HW does not like us polling for PW_STATUS frequently, so - * use the sleeping loop rather than risk the busy spin within - * intel_wait_for_register(). - * - * Transitioning between RC6 states should be at most 2ms (see - * valleyview_enable_rps) so use a 3ms timeout. - */ - ret = wait_for(((reg_value = - intel_uncore_read_notrace(&i915->uncore, reg)) & mask) - == val, 3); - - /* just trace the final value */ - trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true); - - return ret; -} - -int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) -{ - u32 val; - int err; - - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - val &= ~VLV_GFX_CLK_FORCE_ON_BIT; - if (force_on) - val |= VLV_GFX_CLK_FORCE_ON_BIT; - I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); - - if (!force_on) - return 0; - - err = intel_wait_for_register(&dev_priv->uncore, - VLV_GTLC_SURVIVABILITY_REG, - VLV_GFX_CLK_STATUS_BIT, - VLV_GFX_CLK_STATUS_BIT, - 20); - if (err) - DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", - I915_READ(VLV_GTLC_SURVIVABILITY_REG)); - - return err; -} - -static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) -{ - u32 mask; - u32 val; - int err; - - val = I915_READ(VLV_GTLC_WAKE_CTRL); - val &= ~VLV_GTLC_ALLOWWAKEREQ; - if (allow) - val |= VLV_GTLC_ALLOWWAKEREQ; - I915_WRITE(VLV_GTLC_WAKE_CTRL, val); - POSTING_READ(VLV_GTLC_WAKE_CTRL); - - mask = VLV_GTLC_ALLOWWAKEACK; - val = allow ? mask : 0; - - err = vlv_wait_for_pw_status(dev_priv, mask, val); - if (err) - DRM_ERROR("timeout disabling GT waking\n"); - - return err; -} - -static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, - bool wait_for_on) -{ - u32 mask; - u32 val; - - mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; - val = wait_for_on ? mask : 0; - - /* - * RC6 transitioning can be delayed up to 2 msec (see - * valleyview_enable_rps), use 3 msec for safety. - * - * This can fail to turn off the rc6 if the GPU is stuck after a failed - * reset and we are trying to force the machine to sleep. - */ - if (vlv_wait_for_pw_status(dev_priv, mask, val)) - DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n", - onoff(wait_for_on)); -} - -static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) -{ - if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) - return; - - DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); - I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); -} - -static int vlv_suspend_complete(struct drm_i915_private *dev_priv) -{ - u32 mask; - int err; - - /* - * Bspec defines the following GT well on flags as debug only, so - * don't treat them as hard failures. 
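Earlier in this hunk, vlv_wait_for_pw_status() makes a point of sleeping between polls because the hardware dislikes frequent PW_STATUS reads. The same loop in generic form -- a sketch only: the callback, context pointer and 100-200 us sleep window are assumptions, and i915 itself reuses the wait_for() macro with a 3 ms budget:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

static int sleeping_poll(u32 (*read_status)(void *ctx), void *ctx,
			 u32 mask, u32 val, unsigned int timeout_ms)
{
	const ktime_t end = ktime_add_ms(ktime_get(), timeout_ms);

	for (;;) {
		if ((read_status(ctx) & mask) == val)
			return 0;
		if (ktime_after(ktime_get(), end))	/* one last look after expiry */
			return (read_status(ctx) & mask) == val ? 0 : -ETIMEDOUT;
		usleep_range(100, 200);	/* sleep rather than busy-spin */
	}
}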
- */ - vlv_wait_for_gt_wells(dev_priv, false); - - mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; - WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); - - vlv_check_no_gt_access(dev_priv); - - err = vlv_force_gfx_clock(dev_priv, true); - if (err) - goto err1; - - err = vlv_allow_gt_wake(dev_priv, false); - if (err) - goto err2; - - vlv_save_gunit_s0ix_state(dev_priv); - - err = vlv_force_gfx_clock(dev_priv, false); - if (err) - goto err2; - - return 0; - -err2: - /* For safety always re-enable waking and disable gfx clock forcing */ - vlv_allow_gt_wake(dev_priv, true); -err1: - vlv_force_gfx_clock(dev_priv, false); - - return err; -} - -static int vlv_resume_prepare(struct drm_i915_private *dev_priv, - bool rpm_resume) -{ - int err; - int ret; - - /* - * If any of the steps fail just try to continue, that's the best we - * can do at this point. Return the first error code (which will also - * leave RPM permanently disabled). - */ - ret = vlv_force_gfx_clock(dev_priv, true); - - vlv_restore_gunit_s0ix_state(dev_priv); - - err = vlv_allow_gt_wake(dev_priv, true); - if (!ret) - ret = err; - - err = vlv_force_gfx_clock(dev_priv, false); - if (!ret) - ret = err; - - vlv_check_no_gt_access(dev_priv); - - if (rpm_resume) - intel_init_clock_gating(dev_priv); - - return ret; -} - static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret = 0; + int ret; - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; - DRM_DEBUG_KMS("Suspending device\n"); + drm_dbg_kms(&dev_priv->drm, "Suspending device\n"); disable_rpm_wakeref_asserts(rpm); @@ -2519,11 +1594,10 @@ static int intel_runtime_suspend(struct device *kdev) intel_display_power_suspend(dev_priv); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret = vlv_suspend_complete(dev_priv); - + ret = vlv_suspend_complete(dev_priv); if (ret) { - DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); + drm_err(&dev_priv->drm, + "Runtime suspend failed, disabling it (%d)\n", ret); intel_uncore_runtime_resume(&dev_priv->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -2541,7 +1615,8 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_driver_release(rpm); if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) - DRM_ERROR("Unclaimed access detected prior to suspending\n"); + drm_err(&dev_priv->drm, + "Unclaimed access detected prior to suspending\n"); rpm->suspended = true; @@ -2573,7 +1648,7 @@ static int intel_runtime_suspend(struct device *kdev) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); - DRM_DEBUG_KMS("Device suspended\n"); + drm_dbg_kms(&dev_priv->drm, "Device suspended\n"); return 0; } @@ -2581,25 +1656,25 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - int ret = 0; + int ret; - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) + if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) return -ENODEV; - DRM_DEBUG_KMS("Resuming device\n"); + drm_dbg_kms(&dev_priv->drm, "Resuming device\n"); - WARN_ON_ONCE(atomic_read(&rpm->wakeref_count)); + drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count)); disable_rpm_wakeref_asserts(rpm); intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; if 
(intel_uncore_unclaimed_mmio(&dev_priv->uncore)) - DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); + drm_dbg(&dev_priv->drm, + "Unclaimed access during suspend, bios?\n"); intel_display_power_resume(dev_priv); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret = vlv_resume_prepare(dev_priv, true); + ret = vlv_resume_prepare(dev_priv, true); intel_uncore_runtime_resume(&dev_priv->uncore); @@ -2625,9 +1700,10 @@ static int intel_runtime_resume(struct device *kdev) enable_rpm_wakeref_asserts(rpm); if (ret) - DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); + drm_err(&dev_priv->drm, + "Runtime resume failed, disabling it (%d)\n", ret); else - DRM_DEBUG_KMS("Device resumed\n"); + drm_dbg_kms(&dev_priv->drm, "Device resumed\n"); return ret; } @@ -2675,12 +1751,12 @@ const struct dev_pm_ops i915_pm_ops = { static const struct file_operations i915_driver_fops = { .owner = THIS_MODULE, .open = drm_open, - .release = drm_release, + .release = drm_release_noglobal, .unlocked_ioctl = drm_ioctl, .mmap = i915_gem_mmap, .poll = drm_poll, .read = drm_read, - .compat_ioctl = i915_compat_ioctl, + .compat_ioctl = i915_ioc32_compat_ioctl, .llseek = noop_llseek, }; @@ -2772,9 +1848,6 @@ static struct drm_driver driver = { .gem_prime_export = i915_gem_prime_export, .gem_prime_import = i915_gem_prime_import, - .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, - .get_scanout_position = i915_get_crtc_scanoutpos, - .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_dumb_mmap_offset, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 810e3ccd56ec..62b901ffabf9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -59,7 +59,6 @@ #include <drm/drm_connector.h> #include <drm/i915_mei_hdcp_interface.h> -#include "i915_fixed.h" #include "i915_params.h" #include "i915_reg.h" #include "i915_utils.h" @@ -70,6 +69,7 @@ #include "display/intel_dpll_mgr.h" #include "display/intel_dsb.h" #include "display/intel_frontbuffer.h" +#include "display/intel_global_state.h" #include "display/intel_gmbus.h" #include "display/intel_opregion.h" @@ -104,18 +104,23 @@ #include "intel_region_lmem.h" -#include "intel_gvt.h" - /* General customization: */ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200114" -#define DRIVER_TIMESTAMP 1579001978 +#define DRIVER_DATE "20200313" +#define DRIVER_TIMESTAMP 1584144591 struct drm_i915_gem_object; +/* + * The code assumes that the hpd_pins below have consecutive values and + * starting with HPD_PORT_A, the HPD pin associated with any port can be + * retrieved by adding the corresponding port (or phy) enum value to + * HPD_PORT_A in most cases. 
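The rule in this comment reduces the port-to-pin mapping to enum arithmetic. As a sketch (the helper name is hypothetical; only the relation comes from the comment above):

static enum hpd_pin phy_to_hpd_pin(enum phy phy)
{
	/* HPD_PORT_A, HPD_PORT_B, ... mirror PHY_A, PHY_B, ... */
	return HPD_PORT_A + (phy - PHY_A);
}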
For example: + * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A + */ enum hpd_pin { HPD_NONE = 0, HPD_TV = HPD_NONE, /* TV is known to be unreliable */ @@ -203,9 +208,7 @@ struct drm_i915_file_private { } mm; struct xarray context_xa; - - struct idr vm_idr; - struct mutex vm_idr_lock; /* guards vm_idr */ + struct xarray vm_xa; unsigned int bsd_engine; @@ -255,18 +258,19 @@ struct sdvo_device_mapping { struct intel_connector; struct intel_encoder; struct intel_atomic_state; -struct intel_crtc_state; +struct intel_cdclk_config; +struct intel_cdclk_state; +struct intel_cdclk_vals; struct intel_initial_plane_config; struct intel_crtc; struct intel_limit; struct dpll; -struct intel_cdclk_state; struct drm_i915_display_funcs { void (*get_cdclk)(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state); + struct intel_cdclk_config *cdclk_config); void (*set_cdclk)(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, + const struct intel_cdclk_config *cdclk_config, enum pipe pipe); int (*get_fifo_size)(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane); @@ -280,7 +284,7 @@ struct drm_i915_display_funcs { struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); - int (*modeset_calc_cdclk)(struct intel_atomic_state *state); + int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); u8 (*calc_voltage_level)(int cdclk); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. */ @@ -504,7 +508,7 @@ struct i915_psr { u16 su_x_granularity; bool dc3co_enabled; u32 dc3co_exit_delay; - struct delayed_work idle_work; + struct delayed_work dc3co_work; bool force_mode_changed; }; @@ -732,19 +736,10 @@ enum intel_ddb_partitioning { INTEL_DDB_PART_5_6, /* IVB+ */ }; -struct intel_wm_level { - bool enable; - u32 pri_val; - u32 spr_val; - u32 cur_val; - u32 fbc_val; -}; - struct ilk_wm_values { u32 wm_pipe[3]; u32 wm_lp[3]; u32 wm_lp_spr[3]; - u32 wm_linetime[3]; bool enable_fbc_wm; enum intel_ddb_partitioning partitioning; }; @@ -799,65 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, return false; } -struct skl_ddb_allocation { - u8 enabled_slices; /* GEN11 has configurable 2 slices */ -}; - -struct skl_ddb_values { - unsigned dirty_pipes; - struct skl_ddb_allocation ddb; -}; - -struct skl_wm_level { - u16 min_ddb_alloc; - u16 plane_res_b; - u8 plane_res_l; - bool plane_en; - bool ignore_lines; -}; - -/* Stores plane specific WM parameters */ -struct skl_wm_params { - bool x_tiled, y_tiled; - bool rc_surface; - bool is_planar; - u32 width; - u8 cpp; - u32 plane_pixel_rate; - u32 y_min_scanlines; - u32 plane_bytes_per_line; - uint_fixed_16_16_t plane_blocks_per_line; - uint_fixed_16_16_t y_tile_minimum; - u32 linetime_us; - u32 dbuf_block_size; -}; - -enum intel_pipe_crc_source { - INTEL_PIPE_CRC_SOURCE_NONE, - INTEL_PIPE_CRC_SOURCE_PLANE1, - INTEL_PIPE_CRC_SOURCE_PLANE2, - INTEL_PIPE_CRC_SOURCE_PLANE3, - INTEL_PIPE_CRC_SOURCE_PLANE4, - INTEL_PIPE_CRC_SOURCE_PLANE5, - INTEL_PIPE_CRC_SOURCE_PLANE6, - INTEL_PIPE_CRC_SOURCE_PLANE7, - INTEL_PIPE_CRC_SOURCE_PIPE, - /* TV/DP on pre-gen5/vlv can't use the pipe source. 
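For context on this removal: the intel_pipe_crc state deleted just below pairs a spinlock with a small amount of per-pipe CRC bookkeeping, and in this series it moves from dev_priv into struct intel_crtc (the i915_irq.c hunk further down reads it via &crtc->pipe_crc). A standalone sketch of the ring-buffer shape implied by INTEL_PIPE_CRC_ENTRIES_NR, with hypothetical names:

struct pipe_crc_sample {
	u32 frame;	/* frame counter at capture time */
	u32 crcs[5];	/* one value per CRC result register */
};

struct pipe_crc_ring {
	spinlock_t lock;	/* irq producer vs. debugfs consumer */
	unsigned int head, tail;
	struct pipe_crc_sample entries[128];	/* INTEL_PIPE_CRC_ENTRIES_NR */
};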
*/ - INTEL_PIPE_CRC_SOURCE_TV, - INTEL_PIPE_CRC_SOURCE_DP_B, - INTEL_PIPE_CRC_SOURCE_DP_C, - INTEL_PIPE_CRC_SOURCE_DP_D, - INTEL_PIPE_CRC_SOURCE_AUTO, - INTEL_PIPE_CRC_SOURCE_MAX, -}; - -#define INTEL_PIPE_CRC_ENTRIES_NR 128 -struct intel_pipe_crc { - spinlock_t lock; - int skipped; - enum intel_pipe_crc_source source; -}; - struct i915_frontbuffer_tracking { spinlock_t lock; @@ -875,14 +811,7 @@ struct i915_virtual_gpu { u32 caps; }; -/* used in computing the new watermarks state */ -struct intel_wm_config { - unsigned int num_pipes_active; - bool sprites_enabled; - bool sprites_scaled; -}; - -struct intel_cdclk_state { +struct intel_cdclk_config { unsigned int cdclk, vco, ref, bypass; u8 voltage_level; }; @@ -1002,33 +931,18 @@ struct drm_i915_private { unsigned int max_cdclk_freq; unsigned int max_dotclk_freq; - unsigned int rawclk_freq; unsigned int hpll_freq; unsigned int fdi_pll_freq; unsigned int czclk_freq; - /* - * For reading holding any crtc lock is sufficient, - * for writing must hold all of them. - */ struct { - /* - * The current logical cdclk state. - * See intel_atomic_state.cdclk.logical - */ - struct intel_cdclk_state logical; - /* - * The current actual cdclk state. - * See intel_atomic_state.cdclk.actual - */ - struct intel_cdclk_state actual; - /* The current hardware cdclk state */ - struct intel_cdclk_state hw; + /* The current hardware cdclk configuration */ + struct intel_cdclk_config hw; /* cdclk, divider, and ratio table from bspec */ const struct intel_cdclk_vals *table; - int force_min_cdclk; + struct intel_global_obj obj; } cdclk; /** @@ -1068,31 +982,32 @@ struct drm_i915_private { struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; -#ifdef CONFIG_DEBUG_FS - struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; -#endif + /** + * dpll and cdclk state is protected by connection_mutex + * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll. + * Must be global rather than per dpll, because on some platforms plls + * share registers. + */ + struct { + struct mutex lock; - /* dpll and cdclk state is protected by connection_mutex */ - int num_shared_dpll; - struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; - const struct intel_dpll_mgr *dpll_mgr; + int num_shared_dpll; + struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + const struct intel_dpll_mgr *mgr; - /* - * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. - * Must be global rather than per dpll, because on some platforms - * plls share registers. - */ - struct mutex dpll_lock; + struct { + int nssc; + int ssc; + } ref_clks; + } dpll; + + struct list_head global_obj_list; /* - * For reading active_pipes, min_cdclk, min_voltage_level holding - * any crtc lock is sufficient, for writing must hold all of them. + * For reading active_pipes holding any crtc lock is + * sufficient, for writing must hold all of them. 
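Spelled out as lockdep-style assertions, the active_pipes convention just described looks roughly like this (illustrative only; the helpers and the plain-mutex crtc_locks array are hypothetical, the real locks are drm_modeset_locks):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static u8 active_pipes_read(const u8 *active_pipes, struct mutex *any_crtc_lock)
{
	lockdep_assert_held(any_crtc_lock);	/* any one lock pins the value */
	return *active_pipes;
}

static void active_pipes_write(u8 *active_pipes, u8 val,
			       struct mutex *crtc_locks, int num_crtcs)
{
	int i;

	for (i = 0; i < num_crtcs; i++)
		lockdep_assert_held(&crtc_locks[i]);	/* excludes all readers */
	*active_pipes = val;
}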
*/ u8 active_pipes; - /* minimum acceptable cdclk for each pipe */ - int min_cdclk[I915_MAX_PIPES]; - /* minimum acceptable voltage level for each pipe */ - u8 min_voltage_level[I915_MAX_PIPES]; int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; @@ -1105,8 +1020,6 @@ struct drm_i915_private { struct work_struct free_work; } atomic_helper; - u16 orig_clock; - bool mchbar_need_disable; struct intel_l3_parity l3_parity; @@ -1191,7 +1104,6 @@ struct drm_i915_private { /* current hardware state */ union { struct ilk_wm_values hw; - struct skl_ddb_values skl_hw; struct vlv_wm_values vlv; struct g4x_wm_values g4x; }; @@ -1213,6 +1125,8 @@ struct drm_i915_private { bool distrust_bios_wm; } wm; + u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */ + struct dram_info { bool valid; bool is_16gb_dimm; @@ -1236,7 +1150,7 @@ struct drm_i915_private { u8 num_planes; } max_bw[6]; - struct drm_private_obj bw_obj; + struct intel_global_obj bw_obj; struct intel_runtime_pm runtime_pm; @@ -1300,16 +1214,6 @@ struct drm_i915_private { */ }; -struct dram_dimm_info { - u8 size, width, ranks; -}; - -struct dram_channel_info { - struct dram_dimm_info dimm_l, dimm_s; - u8 ranks; - bool is_16gb_dimm; -}; - static inline struct drm_i915_private *to_i915(const struct drm_device *dev) { return container_of(dev, struct drm_i915_private, drm); @@ -1580,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GLK_REVID_A0 0x0 #define GLK_REVID_A1 0x1 +#define GLK_REVID_A2 0x2 +#define GLK_REVID_B0 0x3 #define IS_GLK_REVID(dev_priv, since, until) \ (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until)) @@ -1601,6 +1507,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_ICELAKE(p) && IS_REVID(p, since, until)) #define TGL_REVID_A0 0x0 +#define TGL_REVID_B0 0x1 +#define TGL_REVID_C0 0x2 #define IS_TGL_REVID(p, since, until) \ (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) @@ -1718,10 +1626,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -/* Having GuC is not the same as using GuC */ -#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc) -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc) - #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) @@ -1767,11 +1671,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) } /* i915_drv.c */ -#ifdef CONFIG_COMPAT -long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -#else -#define i915_compat_ioctl NULL -#endif extern const struct dev_pm_ops i915_pm_ops; int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -1780,18 +1679,6 @@ void i915_driver_remove(struct drm_i915_private *i915); int i915_resume_switcheroo(struct drm_i915_private *i915); int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); -int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); - -static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) -{ - return dev_priv->gvt; -} - -static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) -{ - return dev_priv->vgpu.active; -} - int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1856,12 +1743,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); -static inline int 
__must_check -i915_mutex_lock_interruptible(struct drm_device *dev) -{ - return mutex_lock_interruptible(&dev->struct_mutex); -} - int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -2010,20 +1891,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) -/* register wait wrappers for display regs */ -#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \ - intel_wait_for_register(&(dev_priv_)->uncore, \ - (reg_), (mask_), (value_), (timeout_)) - -#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \ - u32 mask__ = (mask_); \ - intel_de_wait_for_register((dev_priv_), (reg_), \ - mask__, mask__, (timeout_)); \ -}) - -#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \ - intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_)) - /* i915_mm.c */ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, @@ -2046,10 +1913,4 @@ i915_coherent_map_type(struct drm_i915_private *i915) return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; } -static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc) -{ - return intel_guc_is_submission_supported(guc) && - intel_guc_is_running(guc); -} - #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5f6e63952821..ca5420012a22 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -26,7 +26,6 @@ */ #include <drm/drm_vma_manager.h> -#include <drm/i915_drm.h> #include <linux/dma-fence-array.h> #include <linux/kthread.h> #include <linux/dma-resv.h> @@ -941,9 +940,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - if (i915_gem_object_never_bind_ggtt(obj)) - return ERR_PTR(-ENODEV); - if (flags & PIN_MAPPABLE && (!view || view->type == I915_GGTT_VIEW_NORMAL)) { /* @@ -1009,6 +1005,12 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, if (ret) return ERR_PTR(ret); + ret = i915_vma_wait_for_bind(vma); + if (ret) { + i915_vma_unpin(vma); + return ERR_PTR(ret); + } + return vma; } @@ -1153,7 +1155,7 @@ err_unlock: /* Minimal basic recovery for KMS */ ret = i915_ggtt_enable_hw(dev_priv); - i915_gem_restore_gtt_mappings(dev_priv); + i915_ggtt_resume(&dev_priv->ggtt); i915_gem_restore_fences(&dev_priv->ggtt); intel_init_clock_gating(dev_priv); } @@ -1201,7 +1203,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) i915_gem_drain_freed_objects(dev_priv); - WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); + drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list)); } static void i915_gem_init__mm(struct drm_i915_private *i915) @@ -1229,7 +1231,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) i915_gem_drain_freed_objects(dev_priv); GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); - WARN_ON(dev_priv->mm.shrink_count); + drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count); } int i915_gem_freeze(struct drm_i915_private *dev_priv) @@ -1269,7 +1271,8 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { i915_gem_object_lock(obj); - WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); + drm_WARN_ON(&i915->drm, + 
i915_gem_object_set_to_cpu_domain(obj, true)); i915_gem_object_unlock(obj); } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 0697bedebeef..02ad1acd117c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -26,8 +26,6 @@ * */ -#include <drm/i915_drm.h> - #include "gem/i915_gem_context.h" #include "gt/intel_gt_requests.h" @@ -130,6 +128,13 @@ search_again: active = NULL; INIT_LIST_HEAD(&eviction_list); list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) { + if (vma == active) { /* now seen this vma twice */ + if (flags & PIN_NONBLOCK) + break; + + active = ERR_PTR(-EAGAIN); + } + /* * We keep this list in a rough least-recently scanned order * of active elements (inactive elements are cheap to reap). @@ -145,21 +150,12 @@ search_again: * To notice when we complete one full cycle, we record the * first active element seen, before moving it to the tail. */ - if (i915_vma_is_active(vma)) { - if (vma == active) { - if (flags & PIN_NONBLOCK) - break; - - active = ERR_PTR(-EAGAIN); - } - - if (active != ERR_PTR(-EAGAIN)) { - if (!active) - active = vma; + if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) { + if (!active) + active = vma; - list_move_tail(&vma->vm_link, &vm->bound_list); - continue; - } + list_move_tail(&vma->vm_link, &vm->bound_list); + continue; } if (mark_free(&scan, vma, flags, &eviction_list)) @@ -292,7 +288,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, GEM_BUG_ON(!drm_mm_node_allocated(node)); vma = container_of(node, typeof(*vma), node); - /* If we are using coloring to insert guard pages between + /* + * If we are using coloring to insert guard pages between * different cache domains within the address space, we have * to check whether the objects on either side of our range * abutt and conflict. If they are in conflict, then we evict @@ -309,22 +306,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, } } - if (flags & PIN_NONBLOCK && - (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) { + if (i915_vma_is_pinned(vma)) { ret = -ENOSPC; break; } - /* Overlap of objects in the same batch? */ - if (i915_vma_is_pinned(vma)) { + if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) { ret = -ENOSPC; - if (vma->exec_flags && - *vma->exec_flags & EXEC_OBJECT_PINNED) - ret = -EINVAL; break; } - /* Never show fear in the face of dragons! + /* + * Never show fear in the face of dragons! * * We cannot directly remove this node from within this * iterator and as with i915_gem_evict_something() we employ diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d9c34a23cd67..d152b648c73c 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -21,10 +21,9 @@ * IN THE SOFTWARE. 
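Stepping back to the reworked eviction scan in i915_gem_evict.c above: the loop detects one full pass over the LRU by remembering the first active vma it rotates to the tail and stopping when it meets it again. The idiom in isolation (a sketch using a plain list, not the i915 types):

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head link;
	bool active;
};

static void scan_one_cycle(struct list_head *lru)
{
	struct item *it, *next, *first_active = NULL;

	list_for_each_entry_safe(it, next, lru, link) {
		if (it == first_active)	/* seen this one twice: full cycle */
			break;

		if (it->active) {
			if (!first_active)
				first_active = it;
			/* keep rough LRU order: active items go to the tail */
			list_move_tail(&it->link, lru);
			continue;
		}

		/* it is idle here: a candidate for eviction */
	}
}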
*/ -#include <drm/i915_drm.h> - #include "i915_drv.h" #include "i915_scatterlist.h" +#include "i915_pvinfo.h" #include "i915_vgpu.h" /** @@ -237,11 +236,12 @@ static int fence_update(struct i915_fence_reg *fence, if (!i915_vma_is_map_and_fenceable(vma)) return -EINVAL; - if (WARN(!i915_gem_object_get_stride(vma->obj) || - !i915_gem_object_get_tiling(vma->obj), - "bogus fence setup with stride: 0x%x, tiling mode: %i\n", - i915_gem_object_get_stride(vma->obj), - i915_gem_object_get_tiling(vma->obj))) + if (drm_WARN(&uncore->i915->drm, + !i915_gem_object_get_stride(vma->obj) || + !i915_gem_object_get_tiling(vma->obj), + "bogus fence setup with stride: 0x%x, tiling mode: %i\n", + i915_gem_object_get_stride(vma->obj), + i915_gem_object_get_tiling(vma->obj))) return -EINVAL; ret = i915_vma_sync(vma); @@ -713,7 +713,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt) } if (dcc == 0xffffffff) { - DRM_ERROR("Couldn't read from MCHBAR. " + drm_err(&i915->drm, "Couldn't read from MCHBAR. " "Disabling tiling.\n"); swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e039eb56900f..cb43381b0d37 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -15,8 +15,6 @@ #include <asm/set_memory.h> #include <asm/smp.h> -#include <drm/i915_drm.h> - #include "display/intel_frontbuffer.h" #include "gt/intel_gt.h" #include "gt/intel_gt_requests.h" @@ -63,7 +61,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, /* XXX This does not prevent more requests being submitted! */ if (intel_gt_retire_requests_timeout(ggtt->vm.gt, -MAX_SCHEDULE_TIMEOUT)) { - DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); + drm_err(&dev_priv->drm, + "Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9e401a5fcae8..5c8e51d2ba5b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -37,6 +37,7 @@ #include <drm/drm_print.h> #include "display/intel_atomic.h" +#include "display/intel_csr.h" #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" @@ -47,7 +48,6 @@ #include "i915_gpu_error.h" #include "i915_memcpy.h" #include "i915_scatterlist.h" -#include "intel_csr.h" #define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) @@ -450,6 +450,14 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.row[slice][subslice]); + + if (INTEL_GEN(m->i915) < 12) + return; + + err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n", + ee->instdone.slice_common_extra[0]); + err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n", + ee->instdone.slice_common_extra[1]); } static void error_print_request(struct drm_i915_error_state_buf *m, @@ -473,9 +481,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct i915_gem_context_coredump *ctx) { - err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n", + const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns; + + err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", header, ctx->comm, ctx->pid, ctx->sched_attr.priority, - ctx->guilty, ctx->active); + ctx->guilty, ctx->active, + 
ctx->total_runtime * period, + mul_u32_u32(ctx->avg_runtime, period)); } static struct i915_vma_coredump * @@ -515,6 +527,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, (u32)(ee->acthd>>32), (u32)ee->acthd); err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir); err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr); + err_printf(m, " ESR: 0x%08x\n", ee->esr); error_print_instdone(m, ee); @@ -1102,6 +1115,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee) } if (INTEL_GEN(i915) >= 4) { + ee->esr = ENGINE_READ(engine, RING_ESR); ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); ee->ipeir = ENGINE_READ(engine, RING_IPEIR); ee->ipehr = ENGINE_READ(engine, RING_IPEHR); @@ -1193,8 +1207,6 @@ static void engine_record_registers(struct intel_engine_coredump *ee) static void record_request(const struct i915_request *request, struct i915_request_coredump *erq) { - const struct i915_gem_context *ctx; - erq->flags = request->fence.flags; erq->context = request->fence.context; erq->seqno = request->fence.seqno; @@ -1205,9 +1217,13 @@ static void record_request(const struct i915_request *request, erq->pid = 0; rcu_read_lock(); - ctx = rcu_dereference(request->context->gem_context); - if (ctx) - erq->pid = pid_nr(ctx->pid); + if (!intel_context_is_closed(request->context)) { + const struct i915_gem_context *ctx; + + ctx = rcu_dereference(request->context->gem_context); + if (ctx) + erq->pid = pid_nr(ctx->pid); + } rcu_read_unlock(); } @@ -1228,7 +1244,7 @@ static bool record_context(struct i915_gem_context_coredump *e, { struct i915_gem_context *ctx; struct task_struct *task; - bool capture; + bool simulated; rcu_read_lock(); ctx = rcu_dereference(rq->context->gem_context); @@ -1236,7 +1252,7 @@ static bool record_context(struct i915_gem_context_coredump *e, ctx = NULL; rcu_read_unlock(); if (!ctx) - return false; + return true; rcu_read_lock(); task = pid_task(ctx->pid, PIDTYPE_PID); @@ -1250,10 +1266,13 @@ static bool record_context(struct i915_gem_context_coredump *e, e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); - capture = i915_gem_context_no_error_capture(ctx); + e->total_runtime = rq->context->runtime.total; + e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg); + + simulated = i915_gem_context_no_error_capture(ctx); i915_gem_context_put(ctx); - return capture; + return simulated; } struct intel_engine_capture_vma { diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index e4a6afed3bbf..0d1f6c8ff355 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -75,6 +75,7 @@ struct intel_engine_coredump { u32 hws; u32 ipeir; u32 ipehr; + u32 esr; u32 bbstate; u32 instpm; u32 instps; @@ -87,6 +88,10 @@ struct intel_engine_coredump { struct i915_gem_context_coredump { char comm[TASK_COMM_LEN]; + + u64 total_runtime; + u32 avg_runtime; + pid_t pid; int active; int guilty; diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index c1007245f46d..8e45ca3d2ede 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -28,9 +28,10 @@ */ #include <linux/compat.h> -#include <drm/i915_drm.h> #include <drm/drm_ioctl.h> + #include "i915_drv.h" +#include "i915_ioc32.h" struct drm_i915_getparam32 { s32 param; @@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = { }; /** - * i915_compat_ioctl - handle the mistakes of the past + * i915_ioc32_compat_ioctl - handle the mistakes of the 
past * @filp: the file pointer * @cmd: the ioctl command (and encoded flags) * @arg: the ioctl argument (from userspace) @@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = { * Called whenever a 32-bit process running under a 64-bit kernel * performs an ioctl on /dev/dri/card<n>. */ -long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { unsigned int nr = DRM_IOCTL_NR(cmd); drm_ioctl_compat_t *fn = NULL; diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h new file mode 100644 index 000000000000..40dcd55ca213 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioc32.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __I915_IOC32_H__ +#define __I915_IOC32_H__ + +#ifdef CONFIG_COMPAT +struct file; +long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); +#else +#define i915_ioc32_compat_ioctl NULL +#endif + +#endif /* __I915_IOC32_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index afc6aad9bf8c..8a2b83807ffc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -34,7 +34,6 @@ #include <drm/drm_drv.h> #include <drm/drm_irq.h> -#include <drm/i915_drm.h> #include "display/intel_display_types.h" #include "display/intel_fifo_underrun.h" @@ -79,7 +78,7 @@ static const u32 hpd_ibx[HPD_NUM_PINS] = { [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, [HPD_PORT_B] = SDE_PORTB_HOTPLUG, [HPD_PORT_C] = SDE_PORTC_HOTPLUG, - [HPD_PORT_D] = SDE_PORTD_HOTPLUG + [HPD_PORT_D] = SDE_PORTD_HOTPLUG, }; static const u32 hpd_cpt[HPD_NUM_PINS] = { @@ -87,7 +86,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = { [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, - [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT + [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, }; static const u32 hpd_spt[HPD_NUM_PINS] = { @@ -95,7 +94,7 @@ static const u32 hpd_spt[HPD_NUM_PINS] = { [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, - [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT + [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT, }; static const u32 hpd_mask_i915[HPD_NUM_PINS] = { @@ -104,7 +103,7 @@ static const u32 hpd_mask_i915[HPD_NUM_PINS] = { [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN + [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN, }; static const u32 hpd_status_g4x[HPD_NUM_PINS] = { @@ -113,7 +112,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = { [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, }; static const u32 hpd_status_i915[HPD_NUM_PINS] = { @@ -122,21 +121,21 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = { [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS + [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS, }; /* BXT hpd list */ static const u32 hpd_bxt[HPD_NUM_PINS] = { [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, - [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC + [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC, }; static const u32 hpd_gen11[HPD_NUM_PINS] = { 
[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, - [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG + [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, }; static const u32 hpd_gen12[HPD_NUM_PINS] = { @@ -145,7 +144,7 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = { [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG, - [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG + [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG, }; static const u32 hpd_icp[HPD_NUM_PINS] = { @@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = { [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), }; +static void +intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + + drm_crtc_handle_vblank(&crtc->base); +} + void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, i915_reg_t iir, i915_reg_t ier) { @@ -208,8 +215,9 @@ static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) if (val == 0) return; - WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", - i915_mmio_reg_offset(reg), val); + drm_WARN(&uncore->i915->drm, 1, + "Interrupt register 0x%x is not zero: 0x%08x\n", + i915_mmio_reg_offset(reg), val); intel_uncore_write(uncore, reg, 0xffffffff); intel_uncore_posting_read(uncore, reg); intel_uncore_write(uncore, reg, 0xffffffff); @@ -223,8 +231,9 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore) if (val == 0) return; - WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", - i915_mmio_reg_offset(GEN2_IIR), val); + drm_WARN(&uncore->i915->drm, 1, + "Interrupt register 0x%x is not zero: 0x%08x\n", + i915_mmio_reg_offset(GEN2_IIR), val); intel_uncore_write16(uncore, GEN2_IIR, 0xffff); intel_uncore_posting_read16(uncore, GEN2_IIR); intel_uncore_write16(uncore, GEN2_IIR, 0xffff); @@ -262,7 +271,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, u32 val; lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(bits & ~mask); + drm_WARN_ON(&dev_priv->drm, bits & ~mask); val = I915_READ(PORT_HOTPLUG_EN); val &= ~mask; @@ -305,9 +314,9 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->irq_mask; @@ -336,9 +345,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; old_val = I915_READ(GEN8_DE_PORT_IMR); @@ -369,9 +378,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; new_val = dev_priv->de_irq_mask[pipe]; @@ -399,11 +408,11 @@ void 
ibx_display_interrupt_update(struct drm_i915_private *dev_priv, sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); - WARN_ON(enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); lockdep_assert_held(&dev_priv->irq_lock); - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; I915_WRITE(SDEIMR, sdeimr); @@ -425,13 +434,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, * On pipe A we don't support the PSR interrupt yet, * on pipe B and C the same bit MBZ. */ - if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, + status_mask & PIPE_A_PSR_STATUS_VLV)) return 0; /* * On pipe B and C we don't support the PSR interrupt yet, on pipe * A the same bit is for perf counters which we don't use either. */ - if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) + if (drm_WARN_ON_ONCE(&dev_priv->drm, + status_mask & PIPE_B_PSR_STATUS_VLV)) return 0; enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | @@ -443,10 +454,11 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; out: - WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || - status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", - pipe_name(pipe), enable_mask, status_mask); + drm_WARN_ONCE(&dev_priv->drm, + enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", + pipe_name(pipe), enable_mask, status_mask); return enable_mask; } @@ -457,12 +469,12 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv, i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; - WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: status_mask=0x%x\n", - pipe_name(pipe), status_mask); + drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(!intel_irqs_enabled(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) return; @@ -480,12 +492,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; - WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: status_mask=0x%x\n", - pipe_name(pipe), status_mask); + drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(!intel_irqs_enabled(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) return; @@ -624,9 +636,9 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc) * register. */ do { - high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; - low = I915_READ_FW(low_frame); - high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; + high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; + low = intel_de_read_fw(dev_priv, low_frame); + high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -683,15 +695,17 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) * pipe frame time stamp. 
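The frame-counter read earlier in this hunk (high1/low/high2) is the standard way to sample a wide counter exposed as two narrower registers: re-read the high half until it is stable around the low read. In isolation (a sketch; the read callbacks are placeholders, and the real registers pack a 24+8 bit frame count rather than a clean 32/32 split):

static u64 read_split_counter(u32 (*read_high)(void), u32 (*read_low)(void))
{
	u32 high1, high2, low;

	do {
		high1 = read_high();
		low = read_low();
		high2 = read_high();	/* did the low half wrap meanwhile? */
	} while (high1 != high2);

	return ((u64)high1 << 32) | low;
}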
The time stamp value * is sampled at every start of vertical blank. */ - scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); + scan_prev_time = intel_de_read_fw(dev_priv, + PIPE_FRMTMSTMP(crtc->pipe)); /* * The TIMESTAMP_CTR register has the current * time stamp value. */ - scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); + scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR); - scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); + scan_post_time = intel_de_read_fw(dev_priv, + PIPE_FRMTMSTMP(crtc->pipe)); } while (scan_post_time != scan_prev_time); scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, @@ -702,7 +716,10 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) return scanline; } -/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ +/* + * intel_de_read_fw(), only for fast reads of display block, no need for + * forcewake etc. + */ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -726,9 +743,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) vtotal /= 2; if (IS_GEN(dev_priv, 2)) - position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; + position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else - position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we @@ -747,7 +764,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) for (i = 0; i < 100; i++) { udelay(1); - temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; if (temp != position) { position = temp; break; @@ -762,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) +static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc, + bool in_vblank_irq, + int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { + struct drm_device *dev = _crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index)); + struct intel_crtc *crtc = to_intel_crtc(_crtc); enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; @@ -777,9 +796,10 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) || mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER; - if (WARN_ON(!mode->crtc_clock)) { - DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " - "pipe %c\n", pipe_name(pipe)); + if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) { + drm_dbg(&dev_priv->drm, + "trying to get scanoutpos for disabled " + "pipe %c\n", pipe_name(pipe)); return false; } @@ -818,7 +838,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, * We can split this into vertical and horizontal * scanout position. 
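That split is plain division by the line length, which the code after this comment relies on once vbl_start/vbl_end have been scaled to pixel counts. A minimal sketch (position is the raw pixel count since the top of the frame):

static void split_scanout_position(int position, int htotal,
				   int *vpos, int *hpos)
{
	*vpos = position / htotal;	/* complete lines scanned out */
	*hpos = position % htotal;	/* pixels into the current line */
}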
*/ - position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; /* convert to pixel counts */ vbl_start *= htotal; @@ -879,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, return true; } +bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, + ktime_t *vblank_time, bool in_vblank_irq) +{ + return drm_crtc_vblank_helper_get_vblank_timestamp_internal( + crtc, max_error, vblank_time, in_vblank_irq, + i915_get_crtc_scanoutpos); +} + int intel_get_crtc_scanline(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -918,7 +946,7 @@ static void ivb_parity_work(struct work_struct *work) mutex_lock(&dev_priv->drm.struct_mutex); /* If we've screwed up tracking, just let the interrupt fire again */ - if (WARN_ON(!dev_priv->l3_parity.which_slice)) + if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) goto out; misccpctl = I915_READ(GEN7_MISCCPCTL); @@ -929,7 +957,8 @@ static void ivb_parity_work(struct work_struct *work) i915_reg_t reg; slice--; - if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) + if (drm_WARN_ON_ONCE(&dev_priv->drm, + slice >= NUM_L3_SLICES(dev_priv))) break; dev_priv->l3_parity.which_slice &= ~(1<<slice); @@ -966,7 +995,7 @@ static void ivb_parity_work(struct work_struct *work) I915_WRITE(GEN7_MISCCPCTL, misccpctl); out: - WARN_ON(dev_priv->l3_parity.which_slice); + drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); spin_lock_irq(>->irq_lock); gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); spin_unlock_irq(>->irq_lock); @@ -1165,8 +1194,9 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, *long_mask |= BIT(pin); } - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); + drm_dbg(&dev_priv->drm, + "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } @@ -1187,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 crc2, u32 crc3, u32 crc4) { - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; trace_intel_pipe_crc(crtc, crcs); @@ -1351,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); @@ -1369,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; @@ -1393,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & 
PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; @@ -1419,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) i9xx_pipe_crc_irq_handler(dev_priv, pipe); @@ -1463,9 +1493,9 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); } - WARN_ONCE(1, - "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", - I915_READ(PORT_HOTPLUG_STAT)); + drm_WARN_ONCE(&dev_priv->drm, 1, + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", + I915_READ(PORT_HOTPLUG_STAT)); return hotplug_status; } @@ -1603,7 +1633,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) u32 master_ctl, iir; u32 pipe_stats[I915_MAX_PIPES] = {}; u32 hotplug_status = 0; - u32 gt_iir[4]; u32 ier = 0; master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; @@ -1631,7 +1660,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) ier = I915_READ(VLV_IER); I915_WRITE(VLV_IER, 0); - gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir); + gen8_gt_irq_handler(&dev_priv->gt, master_ctl); if (iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(dev_priv); @@ -1655,8 +1684,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) I915_WRITE(VLV_IER, ier); I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); - gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir); - if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -1710,8 +1737,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); - DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", - port_name(port)); + drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", + port_name(port)); } if (pch_iir & SDE_AUX_MASK) @@ -1721,25 +1748,27 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_HDCP_MASK) - DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); + drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) - DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); + drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) - DRM_ERROR("PCH poison interrupt\n"); + drm_err(&dev_priv->drm, "PCH poison interrupt\n"); - if (pch_iir & SDE_FDI_MASK) + if (pch_iir & SDE_FDI_MASK) { for_each_pipe(dev_priv, pipe) - DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", - pipe_name(pipe), - I915_READ(FDI_RX_IIR(pipe))); + drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); + } if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) - DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); + drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) - DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); + drm_dbg(&dev_priv->drm, + "PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); @@ -1754,7 +1783,7 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) enum pipe pipe; if (err_int & ERR_INT_POISON) - DRM_ERROR("Poison interrupt\n"); + 
drm_err(&dev_priv->drm, "Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) @@ -1777,7 +1806,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) enum pipe pipe; if (serr_int & SERR_INT_POISON) - DRM_ERROR("PCH poison interrupt\n"); + drm_err(&dev_priv->drm, "PCH poison interrupt\n"); for_each_pipe(dev_priv, pipe) if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) @@ -1796,8 +1825,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> SDE_AUDIO_POWER_SHIFT_CPT); - DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", - port_name(port)); + drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", + port_name(port)); } if (pch_iir & SDE_AUX_MASK_CPT) @@ -1807,16 +1836,17 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) - DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); + drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); if (pch_iir & SDE_AUDIO_CP_CHG_CPT) - DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); + drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); - if (pch_iir & SDE_FDI_MASK_CPT) + if (pch_iir & SDE_FDI_MASK_CPT) { for_each_pipe(dev_priv, pipe) - DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", - pipe_name(pipe), - I915_READ(FDI_RX_IIR(pipe))); + drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + pipe_name(pipe), + I915_READ(FDI_RX_IIR(pipe))); + } if (pch_iir & SDE_ERROR_CPT) cpt_serr_int_handler(dev_priv); @@ -1844,8 +1874,9 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; pins = hpd_icp; } else { - WARN(!HAS_PCH_ICP(dev_priv), - "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv)); + drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv), + "Unrecognized PCH type 0x%x\n", + INTEL_PCH_TYPE(dev_priv)); ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; @@ -1952,11 +1983,11 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, intel_opregion_asle_intr(dev_priv); if (de_iir & DE_POISON) - DRM_ERROR("Poison interrupt\n"); + drm_err(&dev_priv->drm, "Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe)) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); @@ -2009,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); } /* check event from PCH */ @@ -2153,7 +2184,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (pin_mask) intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); else - DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); + drm_err(&dev_priv->drm, + "Unexpected DE HPD interrupt 0x%08x\n", iir); } static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) @@ -2226,7 +2258,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (!found) - DRM_ERROR("Unexpected DE Misc interrupt\n"); + drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); } static irqreturn_t @@ -2243,7 +2275,8 @@ gen8_de_irq_handler(struct 
drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; gen8_de_misc_irq_handler(dev_priv, iir); } else { - DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); + drm_err(&dev_priv->drm, + "The master control interrupt lied (DE MISC)!\n"); } } @@ -2254,7 +2287,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; gen11_hpd_irq_handler(dev_priv, iir); } else { - DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); + drm_err(&dev_priv->drm, + "The master control interrupt lied, (DE HPD)!\n"); } } @@ -2294,10 +2328,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } if (!found) - DRM_ERROR("Unexpected DE Port interrupt\n"); + drm_err(&dev_priv->drm, + "Unexpected DE Port interrupt\n"); } else - DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); + drm_err(&dev_priv->drm, + "The master control interrupt lied (DE PORT)!\n"); } for_each_pipe(dev_priv, pipe) { @@ -2308,7 +2344,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); if (!iir) { - DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); + drm_err(&dev_priv->drm, + "The master control interrupt lied (DE PIPE)!\n"); continue; } @@ -2316,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK) - drm_handle_vblank(&dev_priv->drm, pipe); + intel_handle_vblank(dev_priv, pipe); if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -2326,9 +2363,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); if (fault_errors) - DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", - pipe_name(pipe), - fault_errors); + drm_err(&dev_priv->drm, + "Fault errors on pipe %c: 0x%08x\n", + pipe_name(pipe), + fault_errors); } if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && @@ -2354,7 +2392,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) * Like on previous PCH there seems to be something * fishy going on with forwarding PCH interrupts. */ - DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); + drm_dbg(&dev_priv->drm, + "The master control interrupt lied (SDE)!\n"); } } @@ -2384,7 +2423,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) struct drm_i915_private *dev_priv = arg; void __iomem * const regs = dev_priv->uncore.regs; u32 master_ctl; - u32 gt_iir[4]; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; @@ -2395,8 +2433,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) return IRQ_NONE; } - /* Find, clear, then process each source of interrupt */ - gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir); + /* Find, queue (onto bottom-halves), then clear each source */ + gen8_gt_irq_handler(&dev_priv->gt, master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & ~GEN8_GT_IRQS) { @@ -2407,8 +2445,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) gen8_master_intr_enable(regs); - gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir); - return IRQ_HANDLED; } @@ -2491,7 +2527,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915, return IRQ_NONE; } - /* Find, clear, then process each source of interrupt. 
*/ + /* Find, queue (onto bottom-halves), then clear each source */ gen11_gt_irq_handler(gt, master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ @@ -2686,7 +2722,7 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) if (HAS_PCH_NOP(dev_priv)) return; - WARN_ON(I915_READ(SDEIER) != 0); + drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); I915_WRITE(SDEIER, 0xffffffff); POSTING_READ(SDEIER); } @@ -2733,7 +2769,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; - WARN_ON(dev_priv->irq_mask != ~0u); + drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); dev_priv->irq_mask = ~enable_mask; @@ -3163,8 +3199,9 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, PORTB_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE; - DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", - hotplug, enabled_irqs); + drm_dbg_kms(&dev_priv->drm, + "Invert bit setting: hp_ctl:%x hp_port:%x\n", + hotplug, enabled_irqs); hotplug &= ~BXT_DDI_HPD_INVERT_MASK; /* @@ -3321,9 +3358,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; + u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; - u32 de_port_masked = GEN8_AUX_CHANNEL_A; + u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; enum pipe pipe; @@ -3331,21 +3369,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) <= 10) de_misc_masked |= GEN8_DE_MISC_GSE; - if (INTEL_GEN(dev_priv) >= 9) { - de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - if (IS_GEN9_LP(dev_priv)) - de_port_masked |= BXT_DE_PORT_GMBUS; - } else { - de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - } - - if (INTEL_GEN(dev_priv) >= 11) - de_port_masked |= ICL_AUX_CHANNEL_E; - - if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) - de_port_masked |= CNL_AUX_CHANNEL_F; + if (IS_GEN9_LP(dev_priv)) + de_port_masked |= BXT_DE_PORT_GMBUS; de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; @@ -3418,7 +3443,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) { u32 mask = SDE_GMBUS_ICP; - WARN_ON(I915_READ(SDEIER) != 0); + drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0); I915_WRITE(SDEIER, 0xffffffff); POSTING_READ(SDEIER); @@ -3547,7 +3572,8 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); if (eir_stuck) - DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); + drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", + eir_stuck); } static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, @@ -3584,7 +3610,8 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); if (eir_stuck) - DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); + drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", + eir_stuck); } static irqreturn_t i8xx_irq_handler(int irq, void *arg) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 812c47a9c2d6..25f25cd95818 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ 
-101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask); -bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode); +bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, + ktime_t *vblank_time, bool in_vblank_irq); u32 i915_get_vblank_counter(struct drm_crtc *crtc); u32 g4x_get_vblank_counter(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c index fdd550405fd3..7b3b83bd5ab8 100644 --- a/drivers/gpu/drm/i915/i915_memcpy.c +++ b/drivers/gpu/drm/i915/i915_memcpy.c @@ -35,7 +35,6 @@ static DEFINE_STATIC_KEY_FALSE(has_movntdqa); -#ifdef CONFIG_AS_MOVNTDQA static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) { kernel_fpu_begin(); @@ -93,10 +92,6 @@ static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) kernel_fpu_end(); } -#else -static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {} -static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {} -#endif /** * i915_memcpy_from_wc: perform an accelerated *aligned* read from WC diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 1dd1f3652795..add00ec1f787 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -35,7 +35,7 @@ MODULE_PARM_DESC(name, desc) struct i915_params i915_modparams __read_mostly = { -#define MEMBER(T, member, value) .member = (value), +#define MEMBER(T, member, value, ...) .member = (value), I915_PARAMS_FOR_EACH(MEMBER) #undef MEMBER }; @@ -92,9 +92,6 @@ i915_param_named_unsafe(force_probe, charp, 0400, "Force probe the driver for specified devices. " "See CONFIG_DRM_I915_FORCE_PROBE for details."); -i915_param_named_unsafe(alpha_support, bool, 0400, - "Deprecated. See i915.force_probe."); - i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); @@ -106,10 +103,6 @@ i915_param_named(fastboot, int, 0600, "(0=disabled, 1=enabled) " "Default: -1 (use per-chip default)"); -i915_param_named_unsafe(prefault_disable, bool, 0600, - "Disable page prefaulting for pread/pwrite/reloc (default:false). " - "For developers only."); - i915_param_named_unsafe(load_detect_test, bool, 0600, "Force-enable the VGA load detect code for testing (default:false). 
" "For developers only."); @@ -172,7 +165,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400, i915_param_named(enable_dpcd_backlight, int, 0600, "Enable support for DPCD backlight control" - "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)"); + "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)"); #if IS_ENABLED(CONFIG_DRM_I915_GVT) i915_param_named(enable_gvt, bool, 0400, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 31b88f297fbc..45323732f099 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -36,49 +36,49 @@ struct drm_printer; /* * Invoke param, a function-like macro, for each i915 param, with arguments: * - * param(type, name, value) + * param(type, name, value, mode) * - * type: parameter type, one of {bool, int, unsigned int, char *} + * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *} * name: name of the parameter * value: initial/default value of the parameter + * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create + * debugfs file */ #define I915_PARAMS_FOR_EACH(param) \ - param(char *, vbt_firmware, NULL) \ - param(int, modeset, -1) \ - param(int, lvds_channel_mode, 0) \ - param(int, panel_use_ssc, -1) \ - param(int, vbt_sdvo_panel_type, -1) \ - param(int, enable_dc, -1) \ - param(int, enable_fbc, -1) \ - param(int, enable_psr, -1) \ - param(int, disable_power_well, -1) \ - param(int, enable_ips, 1) \ - param(int, invert_brightness, 0) \ - param(int, enable_guc, 0) \ - param(int, guc_log_level, -1) \ - param(char *, guc_firmware_path, NULL) \ - param(char *, huc_firmware_path, NULL) \ - param(char *, dmc_firmware_path, NULL) \ - param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \ - param(int, edp_vswing, 0) \ - param(int, reset, 3) \ - param(unsigned int, inject_probe_failure, 0) \ - param(int, fastboot, -1) \ - param(int, enable_dpcd_backlight, 0) \ - param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ - param(unsigned long, fake_lmem_start, 0) \ + param(char *, vbt_firmware, NULL, 0400) \ + param(int, modeset, -1, 0400) \ + param(int, lvds_channel_mode, 0, 0400) \ + param(int, panel_use_ssc, -1, 0600) \ + param(int, vbt_sdvo_panel_type, -1, 0400) \ + param(int, enable_dc, -1, 0400) \ + param(int, enable_fbc, -1, 0600) \ + param(int, enable_psr, -1, 0600) \ + param(int, disable_power_well, -1, 0400) \ + param(int, enable_ips, 1, 0600) \ + param(int, invert_brightness, 0, 0600) \ + param(int, enable_guc, 0, 0400) \ + param(int, guc_log_level, -1, 0400) \ + param(char *, guc_firmware_path, NULL, 0400) \ + param(char *, huc_firmware_path, NULL, 0400) \ + param(char *, dmc_firmware_path, NULL, 0400) \ + param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ + param(int, edp_vswing, 0, 0400) \ + param(unsigned int, reset, 3, 0600) \ + param(unsigned int, inject_probe_failure, 0, 0600) \ + param(int, fastboot, -1, 0600) \ + param(int, enable_dpcd_backlight, -1, 0600) \ + param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ + param(unsigned long, fake_lmem_start, 0, 0400) \ /* leave bools at the end to not create holes */ \ - param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ - param(bool, enable_hangcheck, true) \ - param(bool, prefault_disable, false) \ - param(bool, load_detect_test, false) \ - param(bool, force_reset_modeset_test, false) \ - param(bool, error_capture, true) \ - param(bool, disable_display, 
false) \ - param(bool, verbose_state_checks, true) \ - param(bool, nuclear_pageflip, false) \ - param(bool, enable_dp_mst, true) \ - param(bool, enable_gvt, false) + param(bool, enable_hangcheck, true, 0600) \ + param(bool, load_detect_test, false, 0600) \ + param(bool, force_reset_modeset_test, false, 0600) \ + param(bool, error_capture, true, 0600) \ + param(bool, disable_display, false, 0400) \ + param(bool, verbose_state_checks, true, 0) \ + param(bool, nuclear_pageflip, false, 0400) \ + param(bool, enable_dp_mst, true, 0600) \ + param(bool, enable_gvt, false, 0400) #define MEMBER(T, member, ...) T member; struct i915_params { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index f631f6d21127..2c80a0194c80 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -26,6 +26,7 @@ #include <linux/vga_switcheroo.h> #include <drm/drm_drv.h> +#include <drm/i915_pciids.h> #include "display/intel_fbdev.h" @@ -615,7 +616,8 @@ static const struct intel_device_info chv_info = { .has_gt_uc = 1, \ .display.has_hdcp = 1, \ .display.has_ipc = 1, \ - .ddb_size = 896 + .ddb_size = 896, \ + .num_supported_dbuf_slices = 1 #define SKL_PLATFORM \ GEN9_FEATURES, \ @@ -650,6 +652,7 @@ static const struct intel_device_info skl_gt4_info = { #define GEN9_LP_FEATURES \ GEN(9), \ .is_lp = 1, \ + .num_supported_dbuf_slices = 1, \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ @@ -774,6 +777,7 @@ static const struct intel_device_info cnl_info = { }, \ GEN(11), \ .ddb_size = 2048, \ + .num_supported_dbuf_slices = 2, \ .has_logical_ring_elsq = 1, \ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 } @@ -819,11 +823,9 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), - .require_force_probe = 1, .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .has_rps = false, /* XXX disabled for debugging */ }; #define GEN12_DGFX_FEATURES \ @@ -928,13 +930,6 @@ static bool force_probe(u16 device_id, const char *devices) char *s, *p, *tok; bool ret; - /* FIXME: transitional */ - if (i915_modparams.alpha_support) { - DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n", - device_id); - return true; - } - if (!devices || !*devices) return false; @@ -968,7 +963,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (intel_info->require_force_probe && !force_probe(pdev->device, i915_modparams.force_probe)) { - DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n" + dev_info(&pdev->dev, + "Your graphics device %04x is not properly supported by the driver in this\n" "kernel version. 
To force driver probe anyway, use i915.force_probe=%04x\n" "module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n" "or (recommended) check for kernel updates.\n", diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 3b6b913bd27a..cf2c01f17da8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) aging_tail = hw_tail; stream->oa_buffer.aging_timestamp = now; } else { - DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n", - hw_tail); + drm_err(&stream->perf->i915->drm, + "Ignoring spurious out of range OA buffer tail pointer = %x\n", + hw_tail); } } @@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, u32 taken; int ret = 0; - if (WARN_ON(!stream->enabled)) + if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) return -EIO; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); @@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * only be incremented by multiples of the report size (notably also * all a power of two). */ - if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || - tail > OA_BUFFER_SIZE || tail % report_size, - "Inconsistent OA buffer pointers: head = %u, tail = %u\n", - head, tail)) + if (drm_WARN_ONCE(&uncore->i915->drm, + head > OA_BUFFER_SIZE || head % report_size || + tail > OA_BUFFER_SIZE || tail % report_size, + "Inconsistent OA buffer pointers: head = %u, tail = %u\n", + head, tail)) return -EIO; @@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * here would imply a driver bug that would result * in an overrun. */ - if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { - DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); + if (drm_WARN_ON(&uncore->i915->drm, + (OA_BUFFER_SIZE - head) < report_size)) { + drm_err(&uncore->i915->drm, + "Spurious OA head ptr: non-integral report offset\n"); break; } @@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream, i915_reg_t oastatus_reg; int ret; - if (WARN_ON(!stream->oa_buffer.vaddr)) + if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) return -EIO; oastatus_reg = IS_GEN(stream->perf->i915, 12) ? @@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, u32 taken; int ret = 0; - if (WARN_ON(!stream->enabled)) + if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) return -EIO; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); @@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, * only be incremented by multiples of the report size (notably also * all a power of two). */ - if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size || - tail > OA_BUFFER_SIZE || tail % report_size, - "Inconsistent OA buffer pointers: head = %u, tail = %u\n", - head, tail)) + if (drm_WARN_ONCE(&uncore->i915->drm, + head > OA_BUFFER_SIZE || head % report_size || + tail > OA_BUFFER_SIZE || tail % report_size, + "Inconsistent OA buffer pointers: head = %u, tail = %u\n", + head, tail)) return -EIO; @@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, * here would imply a driver bug that would result * in an overrun. 
*/ - if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) { - DRM_ERROR("Spurious OA head ptr: non-integral report offset\n"); + if (drm_WARN_ON(&uncore->i915->drm, + (OA_BUFFER_SIZE - head) < report_size)) { + drm_err(&uncore->i915->drm, + "Spurious OA head ptr: non-integral report offset\n"); break; } @@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream, u32 oastatus1; int ret; - if (WARN_ON(!stream->oa_buffer.vaddr)) + if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) return -EIO; oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); @@ -1303,8 +1310,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) * dropped by GuC. They won't be part of the context * ID in the OA reports, so squash those lower bits. */ - stream->specific_ctx_id = - lower_32_bits(ce->lrc_desc) >> 12; + stream->specific_ctx_id = ce->lrc.lrca >> 12; /* * GuC uses the top bit to signal proxy submission, so @@ -1319,7 +1325,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) case 12: { stream->specific_ctx_id_mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); - stream->specific_ctx_id = stream->specific_ctx_id_mask; + /* + * Pick an unused context id + * 0 - BITS_PER_LONG are used by other contexts + * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context + */ + stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); break; } @@ -1327,11 +1338,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) MISSING_CASE(INTEL_GEN(ce->engine->i915)); } - ce->tag = stream->specific_ctx_id_mask; + ce->tag = stream->specific_ctx_id; - DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", - stream->specific_ctx_id, - stream->specific_ctx_id_mask); + drm_dbg(&stream->perf->i915->drm, + "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", + stream->specific_ctx_id, + stream->specific_ctx_id_mask); return 0; } @@ -1391,8 +1403,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) /* * Unset exclusive_stream first, it will be checked while disabling * the metric set on gen8+. 
+ * + * See i915_oa_init_reg_state() and lrc_configure_all_contexts() */ - perf->exclusive_stream = NULL; + WRITE_ONCE(perf->exclusive_stream, NULL); perf->ops.disable_metric_set(stream); free_oa_buffer(stream); @@ -1575,11 +1589,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream) static int alloc_oa_buffer(struct i915_perf_stream *stream) { + struct drm_i915_private *i915 = stream->perf->i915; struct drm_i915_gem_object *bo; struct i915_vma *vma; int ret; - if (WARN_ON(stream->oa_buffer.vma)) + if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) return -ENODEV; BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); @@ -1587,7 +1602,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream) bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); if (IS_ERR(bo)) { - DRM_ERROR("Failed to allocate OA buffer\n"); + drm_err(&i915->drm, "Failed to allocate OA buffer\n"); return PTR_ERR(bo); } @@ -1669,7 +1684,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream) bo = i915_gem_object_create_internal(i915, 4096); if (IS_ERR(bo)) { - DRM_ERROR("Failed to allocate NOA wait batchbuffer\n"); + drm_err(&i915->drm, + "Failed to allocate NOA wait batchbuffer\n"); return PTR_ERR(bo); } @@ -2184,7 +2200,9 @@ static int gen8_modify_self(struct intel_context *ce, struct i915_request *rq; int err; + intel_engine_pm_get(ce->engine); rq = i915_request_create(ce); + intel_engine_pm_put(ce->engine); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -2653,7 +2671,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream) if (intel_wait_for_register(uncore, GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 50)) - DRM_ERROR("wait for OA to be disabled timed out\n"); + drm_err(&stream->perf->i915->drm, + "wait for OA to be disabled timed out\n"); } static void gen8_oa_disable(struct i915_perf_stream *stream) @@ -2664,7 +2683,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream) if (intel_wait_for_register(uncore, GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 50)) - DRM_ERROR("wait for OA to be disabled timed out\n"); + drm_err(&stream->perf->i915->drm, + "wait for OA to be disabled timed out\n"); } static void gen12_oa_disable(struct i915_perf_stream *stream) @@ -2676,7 +2696,16 @@ static void gen12_oa_disable(struct i915_perf_stream *stream) GEN12_OAG_OACONTROL, GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50)) - DRM_ERROR("wait for OA to be disabled timed out\n"); + drm_err(&stream->perf->i915->drm, + "wait for OA to be disabled timed out\n"); + + intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1); + if (intel_wait_for_register(uncore, + GEN12_OA_TLB_INV_CR, + 1, 0, + 50)) + drm_err(&stream->perf->i915->drm, + "wait for OA tlb invalidate timed out\n"); } /** @@ -2740,6 +2769,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, struct drm_i915_perf_open_param *param, struct perf_open_properties *props) { + struct drm_i915_private *i915 = stream->perf->i915; struct i915_perf *perf = stream->perf; int format_size; int ret; @@ -2796,7 +2826,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, stream->sample_size += format_size; stream->oa_buffer.format_size = format_size; - if (WARN_ON(stream->oa_buffer.format_size == 0)) + if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0)) return -EINVAL; stream->hold_preemption = props->hold_preemption; @@ -2849,7 +2879,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, goto err_oa_buf_alloc; stream->ops = &i915_oa_stream_ops; - perf->exclusive_stream = stream; + 
WRITE_ONCE(perf->exclusive_stream, stream); ret = i915_perf_stream_enable_sync(stream); if (ret) { @@ -2869,7 +2899,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return 0; err_enable: - perf->exclusive_stream = NULL; + WRITE_ONCE(perf->exclusive_stream, NULL); perf->ops.disable_metric_set(stream); free_oa_buffer(stream); @@ -2895,12 +2925,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce, { struct i915_perf_stream *stream; - /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ - if (engine->class != RENDER_CLASS) return; - stream = engine->i915->perf.exclusive_stream; + /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ + stream = READ_ONCE(engine->i915->perf.exclusive_stream); /* * For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller * is already doing that, so nothing to be done for gen12 here. @@ -2910,49 +2939,6 @@ void i915_oa_init_reg_state(const struct intel_context *ce, } /** - * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation - * @stream: An i915 perf stream - * @file: An i915 perf stream file - * @buf: destination buffer given by userspace - * @count: the number of bytes userspace wants to read - * @ppos: (inout) file seek position (unused) - * - * Besides wrapping &i915_perf_stream_ops->read this provides a common place to - * ensure that if we've successfully copied any data then reporting that takes - * precedence over any internal error status, so the data isn't lost. - * - * For example ret will be -ENOSPC whenever there is more buffered data than - * can be copied to userspace, but that's only interesting if we weren't able - * to copy some data because it implies the userspace buffer is too small to - * receive a single record (and we never split records). - * - * Another case with ret == -EFAULT is more of a grey area since it would seem - * like bad form for userspace to ask us to overrun its buffer, but the user - * knows best: - * - * http://yarchive.net/comp/linux/partial_reads_writes.html - * - * Returns: The number of bytes copied or a negative error code on failure. - */ -static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream, - struct file *file, - char __user *buf, - size_t count, - loff_t *ppos) -{ - /* Note we keep the offset (aka bytes read) separate from any - * error status so that the final check for whether we return - * the bytes read with a higher precedence than any error (see - * comment below) doesn't need to be handled/duplicated in - * stream->ops->read() implementations. - */ - size_t offset = 0; - int ret = stream->ops->read(stream, buf, count, &offset); - - return offset ?: (ret ?: -EAGAIN); -} - -/** * i915_perf_read - handles read() FOP for i915 perf stream FDs * @file: An i915 perf stream file * @buf: destination buffer given by userspace @@ -2977,7 +2963,8 @@ static ssize_t i915_perf_read(struct file *file, { struct i915_perf_stream *stream = file->private_data; struct i915_perf *perf = stream->perf; - ssize_t ret; + size_t offset = 0; + int ret; /* To ensure it's handled consistently we simply treat all reads of a * disabled stream as an error. 
In particular it might otherwise lead @@ -3000,13 +2987,12 @@ static ssize_t i915_perf_read(struct file *file, return ret; mutex_lock(&perf->lock); - ret = i915_perf_read_locked(stream, file, - buf, count, ppos); + ret = stream->ops->read(stream, buf, count, &offset); mutex_unlock(&perf->lock); - } while (ret == -EAGAIN); + } while (!offset && !ret); } else { mutex_lock(&perf->lock); - ret = i915_perf_read_locked(stream, file, buf, count, ppos); + ret = stream->ops->read(stream, buf, count, &offset); mutex_unlock(&perf->lock); } @@ -3017,15 +3003,15 @@ static ssize_t i915_perf_read(struct file *file, * and read() returning -EAGAIN. Clearing the oa.pollin state here * effectively ensures we back off until the next hrtimer callback * before reporting another EPOLLIN event. + * The exception to this is if ops->read() returned -ENOSPC which means + * that more OA data is available than could fit in the user provided + * buffer. In this case we want the next poll() call to not block. */ - if (ret >= 0 || ret == -EAGAIN) { - /* Maybe make ->pollin per-stream state if we support multiple - * concurrent streams in the future. - */ + if (ret != -ENOSPC) stream->pollin = false; - } - return ret; + /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */ + return offset ?: (ret ?: -EAGAIN); } static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index aa729d04abe2..2c062534eac1 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -448,7 +448,7 @@ static void engine_event_destroy(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - if (WARN_ON_ONCE(!engine)) + if (drm_WARN_ON_ONCE(&i915->drm, !engine)) return; if (engine_event_sample(event) == I915_SAMPLE_BUSY && @@ -584,7 +584,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) engine_event_class(event), engine_event_instance(event)); - if (WARN_ON_ONCE(!engine)) { + if (drm_WARN_ON_ONCE(&i915->drm, !engine)) { /* Do nothing */ } else if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) { @@ -1188,7 +1188,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) if (!pmu->base.event_init) return; - WARN_ON(pmu->enable); + drm_WARN_ON(&i915->drm, pmu->enable); hrtimer_cancel(&pmu->timer); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index f1d6cad0d7d5..941f0c14037c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -10,7 +10,7 @@ #include <linux/hrtimer.h> #include <linux/perf_event.h> #include <linux/spinlock_types.h> -#include <drm/i915_drm.h> +#include <uapi/drm/i915_drm.h> struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3575fd30756b..6e12000c4b6b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -34,8 +34,8 @@ * Follow the style described here for new macros, and while changing existing * macros. Do **not** mass change existing definitions just to update the style. * - * Layout - * ~~~~~~ + * File Layout + * ~~~~~~~~~~~ * * Keep helper macros near the top. For example, _PIPE() and friends. 
* @@ -693,6 +693,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OABUFFER_SIZE_8M (6 << 3) #define OABUFFER_SIZE_16M (7 << 3) +#define GEN12_OA_TLB_INV_CR _MMIO(0xceec) + /* Gen12 OAR unit */ #define GEN12_OAR_OACONTROL _MMIO(0x2960) #define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 @@ -2626,6 +2628,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPEIR_I965 _MMIO(0x2064) #define IPEHR_I965 _MMIO(0x2068) #define GEN7_SC_INSTDONE _MMIO(0x7100) +#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104) +#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108) #define GEN7_SAMPLER_INSTDONE _MMIO(0xe160) #define GEN7_ROW_INSTDONE _MMIO(0xe164) #define GEN8_MCR_SELECTOR _MMIO(0xfdc) @@ -2639,6 +2643,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) #define RING_IPEIR(base) _MMIO((base) + 0x64) #define RING_IPEHR(base) _MMIO((base) + 0x68) +#define RING_EIR(base) _MMIO((base) + 0xb0) +#define RING_EMR(base) _MMIO((base) + 0xb4) +#define RING_ESR(base) _MMIO((base) + 0xb8) /* * On GEN4, only the render ring INSTDONE exists and has a different * layout than the GEN7+ version. @@ -2860,6 +2867,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */ #define MBUS_ABOX_CTL _MMIO(0x45038) +#define MBUS_ABOX1_CTL _MMIO(0x45048) +#define MBUS_ABOX2_CTL _MMIO(0x4504C) #define MBUS_ABOX_BW_CREDIT_MASK (3 << 20) #define MBUS_ABOX_BW_CREDIT(x) ((x) << 20) #define MBUS_ABOX_B_CREDIT_MASK (0xF << 16) @@ -3085,10 +3094,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_BSD_USER_INTERRUPT (1 << 12) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ +#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11) /* bdw+ */ #define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) -#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) +#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3) #define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2) #define GT_RENDER_DEBUG_INTERRUPT (1 << 1) #define GT_RENDER_USER_INTERRUPT (1 << 0) @@ -3160,6 +3170,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_FF_THREAD_MODE _MMIO(0x20a0) #define GEN7_FF_SCHED_MASK 0x0077070 #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) +#define GEN12_FF_TESSELATION_DOP_GATE_DISABLE BIT(19) #define GEN7_FF_TS_SCHED_HS1 (0x5 << 16) #define GEN7_FF_TS_SCHED_HS0 (0x3 << 16) #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16) @@ -3277,6 +3288,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* Framebuffer compression for Ivybridge */ #define IVB_FBC_RT_BASE _MMIO(0x7020) +#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024) #define IPS_CTL _MMIO(0x43408) #define IPS_ENABLE (1 << 31) @@ -3743,8 +3755,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MCH_SSKPD_WM0_MASK 0x3f #define MCH_SSKPD_WM0_VAL 0xc -#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c) - /* Clocking configuration register */ #define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00) #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ @@ -4124,6 +4134,9 @@ enum { #define PWM2_GATING_DIS (1 << 14) #define PWM1_GATING_DIS (1 << 13) +#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538) +#define TGL_VRH_GATING_DIS REG_BIT(31) + #define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C) #define BXT_GMBUS_GATING_DIS (1 << 14) @@ -4851,16 +4864,6 @@ enum { #define 
_PP_STATUS 0x61200 #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) #define PP_ON REG_BIT(31) - -#define _PP_CONTROL_1 0xc7204 -#define _PP_CONTROL_2 0xc7304 -#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \ - _PP_CONTROL_2) -#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) -#define VDD_OVERRIDE_FORCE REG_BIT(3) -#define BACKLIGHT_ENABLE REG_BIT(2) -#define PWR_DOWN_ON_RESET REG_BIT(1) -#define PWR_STATE_TARGET REG_BIT(0) /* * Indicates that all dependencies of the panel are on: * @@ -4921,6 +4924,7 @@ enum { #define PFIT_ENABLE (1 << 31) #define PFIT_PIPE_MASK (3 << 29) #define PFIT_PIPE_SHIFT 29 +#define PFIT_PIPE(pipe) ((pipe) << 29) #define VERT_INTERP_DISABLE (0 << 10) #define VERT_INTERP_BILINEAR (1 << 10) #define VERT_INTERP_MASK (3 << 10) @@ -5870,7 +5874,6 @@ enum { #define _PIPEAGCMAX 0x70010 #define _PIPEBGCMAX 0x71010 -#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0) #define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) #define _PIPE_MISC_A 0x70030 @@ -5879,6 +5882,7 @@ enum { #define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */ #define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ #define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) +#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ #define PIPEMISC_DITHER_BPC_MASK (7 << 5) #define PIPEMISC_DITHER_8_BPC (0 << 5) #define PIPEMISC_DITHER_10_BPC (1 << 5) @@ -7745,9 +7749,9 @@ enum { #define DISP_ARB_CTL2 _MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1 << 6) #define DISP_IPC_ENABLE (1 << 3) -#define DBUF_CTL _MMIO(0x45008) -#define DBUF_CTL_S1 _MMIO(0x45008) -#define DBUF_CTL_S2 _MMIO(0x44FE8) +#define _DBUF_CTL_S1 0x45008 +#define _DBUF_CTL_S2 0x44FE8 +#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2)) #define DBUF_POWER_REQUEST (1 << 31) #define DBUF_POWER_STATE (1 << 30) #define GEN7_MSG_CTL _MMIO(0x45010) @@ -7767,6 +7771,7 @@ enum { #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) #define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) +#define CNL_DELAY_PMRSP (1 << 22) #define MASK_WAKEMEM (1 << 13) #define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7) @@ -8988,6 +8993,8 @@ enum { #define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF #define GEN7_PCODE_TIMEOUT 0x2 #define GEN7_PCODE_ILLEGAL_DATA 0x3 +#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4 +#define GEN11_PCODE_LOCKED 0x6 #define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10 #define GEN6_PCODE_WRITE_RC6VIDS 0x4 #define GEN6_PCODE_READ_RC6VIDS 0x5 @@ -9136,12 +9143,19 @@ enum { #define THROTTLE_12_5 (7 << 2) #define DISABLE_EARLY_EOT (1 << 1) -#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN12_DISABLE_EARLY_READ REG_BIT(14) +#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) + #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1 << 0) #define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) #define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) +#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) +#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) +#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) + #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -9242,6 +9256,10 @@ enum { #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16) #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16) #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16) 
#define AUD_CONFIG_DISABLE_NCTS (1 << 3) /* HSW Audio */ @@ -10533,13 +10551,13 @@ enum skl_power_gate { #define D_COMP_COMP_DISABLE (1 << 0) /* Pipe WM_LINETIME - watermark line time */ -#define _PIPE_WM_LINETIME_A 0x45270 -#define _PIPE_WM_LINETIME_B 0x45274 -#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B) -#define PIPE_WM_LINETIME_MASK (0x1ff) -#define PIPE_WM_LINETIME_TIME(x) ((x)) -#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16) -#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16) +#define _WM_LINETIME_A 0x45270 +#define _WM_LINETIME_B 0x45274 +#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B) +#define HSW_LINETIME_MASK REG_GENMASK(8, 0) +#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x)) +#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16) +#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x)) /* SFUSE_STRAP */ #define SFUSE_STRAP _MMIO(0xc2014) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a18b2a244706..e2b78db685ea 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -51,7 +51,6 @@ struct execute_cb { static struct i915_global_request { struct i915_global base; struct kmem_cache *slab_requests; - struct kmem_cache *slab_dependencies; struct kmem_cache *slab_execute_cbs; } global; @@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request) } } +static void __i915_request_fill(struct i915_request *rq, u8 val) +{ + void *vaddr = rq->ring->vaddr; + u32 head; + + head = rq->infix; + if (rq->postfix < head) { + memset(vaddr + head, val, rq->ring->size - head); + head = 0; + } + memset(vaddr + head, val, rq->postfix - head); +} + static void remove_from_engine(struct i915_request *rq) { struct intel_engine_cs *engine, *locked; @@ -247,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq) */ GEM_BUG_ON(!list_is_first(&rq->link, &i915_request_timeline(rq)->requests)); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + /* Poison before we release our space in the ring */ + __i915_request_fill(rq, POISON_FREE); rq->ring->head = rq->postfix; /* @@ -275,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq) spin_unlock_irq(&rq->lock); remove_from_client(rq); - list_del_rcu(&rq->link); + __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ intel_context_exit(rq->context); intel_context_unpin(rq->context); @@ -348,6 +363,50 @@ __await_execution(struct i915_request *rq, return 0; } +static bool fatal_error(int error) +{ + switch (error) { + case 0: /* not an error! */ + case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */ + case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */ + return false; + default: + return true; + } +} + +void __i915_request_skip(struct i915_request *rq) +{ + GEM_BUG_ON(!fatal_error(rq->fence.error)); + + if (rq->infix == rq->postfix) + return; + + /* + * As this request likely depends on state from the lost + * context, clear out all the user operations leaving the + * breadcrumb at the end (so we get the fence notifications). 
+ */ + __i915_request_fill(rq, 0); + rq->infix = rq->postfix; +} + +void i915_request_set_error_once(struct i915_request *rq, int error) +{ + int old; + + GEM_BUG_ON(!IS_ERR_VALUE((long)error)); + + if (i915_request_signaled(rq)) + return; + + old = READ_ONCE(rq->fence.error); + do { + if (fatal_error(old)) + return; + } while (!try_cmpxchg(&rq->fence.error, &old, error)); +} + bool __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; @@ -377,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request) if (i915_request_completed(request)) goto xfer; - if (intel_context_is_banned(request->context)) - i915_request_skip(request, -EIO); + if (unlikely(intel_context_is_banned(request->context))) + i915_request_set_error_once(request, -EIO); + if (unlikely(fatal_error(request->fence.error))) + __i915_request_skip(request); /* * Are we using semaphores when the gpu is already saturated? @@ -504,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) trace_i915_request_submit(request); if (unlikely(fence->error)) - i915_request_skip(request, fence->error); + i915_request_set_error_once(request, fence->error); /* * We need to serialize use of the submit_request() callback @@ -688,6 +749,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) RCU_INIT_POINTER(rq->timeline, tl); RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); rq->hwsp_seqno = tl->hwsp_seqno; + GEM_BUG_ON(i915_request_completed(rq)); rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ @@ -857,7 +919,7 @@ already_busywaiting(struct i915_request *rq) * * See the are-we-too-late? check in __i915_request_submit(). */ - return rq->sched.semaphores | rq->engine->saturated; + return rq->sched.semaphores | READ_ONCE(rq->engine->saturated); } static int @@ -915,8 +977,16 @@ emit_semaphore_wait(struct i915_request *to, struct i915_request *from, gfp_t gfp) { + const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask; + + if (!intel_context_use_semaphores(to->context)) + goto await_fence; + + if (!rcu_access_pointer(from->hwsp_cacheline)) + goto await_fence; + /* Just emit the first semaphore we see as request space is limited. 
*/ - if (already_busywaiting(to) & from->engine->mask) + if (already_busywaiting(to) & mask) goto await_fence; if (i915_request_await_start(to, from) < 0) @@ -929,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to, if (__emit_semaphore_wait(to, from, from->fence.seqno)) goto await_fence; - to->sched.semaphores |= from->engine->mask; + to->sched.semaphores |= mask; to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; return 0; @@ -947,11 +1017,15 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) GEM_BUG_ON(to == from); GEM_BUG_ON(to->timeline == from->timeline); - if (i915_request_completed(from)) + if (i915_request_completed(from)) { + i915_sw_fence_set_error_once(&to->submit, from->fence.error); return 0; + } if (to->engine->schedule) { - ret = i915_sched_node_add_dependency(&to->sched, &from->sched); + ret = i915_sched_node_add_dependency(&to->sched, + &from->sched, + I915_DEPENDENCY_EXTERNAL); if (ret < 0) return ret; } @@ -960,12 +1034,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, &from->submit, I915_FENCE_GFP); - else if (intel_context_use_semaphores(to->context)) - ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); else - ret = i915_sw_fence_await_dma_fence(&to->submit, - &from->fence, 0, - I915_FENCE_GFP); + ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); if (ret < 0) return ret; @@ -1064,6 +1134,8 @@ __i915_request_await_execution(struct i915_request *to, { int err; + GEM_BUG_ON(intel_context_is_barrier(from->context)); + /* Submit both requests at the same time */ err = __await_execution(to, from, hook, I915_FENCE_GFP); if (err) @@ -1074,17 +1146,50 @@ __i915_request_await_execution(struct i915_request *to, &from->fence)) return 0; - /* Ensure both start together [after all semaphores in signal] */ - if (intel_engine_has_semaphores(to->engine)) - err = __emit_semaphore_wait(to, from, from->fence.seqno - 1); - else - err = i915_request_await_start(to, from); + /* + * Wait until the start of this request. + * + * The execution cb fires when we submit the request to HW. But in + * many cases this may be long before the request itself is ready to + * run (consider that we submit 2 requests for the same context, where + * the request of interest is behind an indefinite spinner). So we hook + * up to both to reduce our queues and keep the execution lag minimised + * in the worst case, though we hope that the await_start is elided. + */ + err = i915_request_await_start(to, from); if (err < 0) return err; + /* + * Ensure both start together [after all semaphores in signal] + * + * Now that we are queued to the HW at roughly the same time (thanks + * to the execute cb) and are ready to run at roughly the same time + * (thanks to the await start), our signaler may still be indefinitely + * delayed by waiting on a semaphore from a remote engine. If our + * signaler depends on a semaphore, so indirectly do we, and we do not + * want to start our payload until our signaler also starts theirs. + * So we wait. + * + * However, there is also a second condition for which we need to wait + * for the precise start of the signaler. Consider that the signaler + * was submitted in a chain of requests following another context + * (with just an ordinary intra-engine fence dependency between the + * two). In this case the signaler is queued to HW, but not for + * immediate execution, and so we must wait until it reaches the + * active slot. 
+ */ + if (intel_engine_has_semaphores(to->engine)) { + err = __emit_semaphore_wait(to, from, from->fence.seqno - 1); + if (err < 0) + return err; + } + /* Couple the dependency tree for PI on this exposed to->fence */ if (to->engine->schedule) { - err = i915_sched_node_add_dependency(&to->sched, &from->sched); + err = i915_sched_node_add_dependency(&to->sched, + &from->sched, + I915_DEPENDENCY_WEAK); if (err < 0) return err; } @@ -1202,31 +1307,6 @@ i915_request_await_object(struct i915_request *to, return ret; } -void i915_request_skip(struct i915_request *rq, int error) -{ - void *vaddr = rq->ring->vaddr; - u32 head; - - GEM_BUG_ON(!IS_ERR_VALUE((long)error)); - dma_fence_set_error(&rq->fence, error); - - if (rq->infix == rq->postfix) - return; - - /* - * As this request likely depends on state from the lost - * context, clear out all the user operations leaving the - * breadcrumb at the end (so we get the fence notifications). - */ - head = rq->infix; - if (rq->postfix < head) { - memset(vaddr + head, 0, rq->ring->size - head); - head = 0; - } - memset(vaddr + head, 0, rq->postfix - head); - rq->infix = rq->postfix; -} - static struct i915_request * __i915_request_add_to_timeline(struct i915_request *rq) { @@ -1256,7 +1336,17 @@ __i915_request_add_to_timeline(struct i915_request *rq) prev = to_request(__i915_active_fence_set(&timeline->last_request, &rq->fence)); if (prev && !i915_request_completed(prev)) { - if (is_power_of_2(prev->engine->mask | rq->engine->mask)) + /* + * The requests are supposed to be kept in order. However, + * we need to be wary in case the timeline->last_request + * is used as a barrier for external modification to this + * context. + */ + GEM_BUG_ON(prev->context == rq->context && + i915_seqno_passed(prev->fence.seqno, + rq->fence.seqno)); + + if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) i915_sw_fence_await_sw_fence(&rq->submit, &prev->submit, &rq->submitq); @@ -1340,39 +1430,23 @@ void i915_request_add(struct i915_request *rq) { struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_sched_attr attr = {}; - struct i915_request *prev; + struct i915_gem_context *ctx; lockdep_assert_held(&tl->mutex); lockdep_unpin_lock(&tl->mutex, rq->cookie); trace_i915_request_add(rq); + __i915_request_commit(rq); - prev = __i915_request_commit(rq); - - if (rcu_access_pointer(rq->context->gem_context)) - attr = i915_request_gem_context(rq)->sched; + /* XXX placeholder for selftests */ + rcu_read_lock(); + ctx = rcu_dereference(rq->context->gem_context); + if (ctx) + attr = ctx->sched; + rcu_read_unlock(); - /* - * Boost actual workloads past semaphores! - * - * With semaphores we spin on one engine waiting for another, - * simply to reduce the latency of starting our work when - * the signaler completes. However, if there is any other - * work that we could be doing on this engine instead, that - * is better utilisation and will reduce the overall duration - * of the current work. To avoid PI boosting a semaphore - * far in the distance past over useful work, we keep a history - * of any semaphore use along our dependency chain. - */ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN)) attr.priority |= I915_PRIORITY_NOSEMAPHORE; - - /* - * Boost priorities to new clients (new request flows). - * - * Allow interactive/synchronous clients to jump ahead of - * the bulk clients. 
(FQ_CODEL) - */ if (list_empty(&rq->sched.signalers_list)) attr.priority |= I915_PRIORITY_WAIT; @@ -1380,32 +1454,10 @@ void i915_request_add(struct i915_request *rq) __i915_request_queue(rq, &attr); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ - /* - * In typical scenarios, we do not expect the previous request on - * the timeline to be still tracked by timeline->last_request if it - * has been completed. If the completed request is still here, that - * implies that request retirement is a long way behind submission, - * suggesting that we haven't been retiring frequently enough from - * the combination of retire-before-alloc, waiters and the background - * retirement worker. So if the last request on this timeline was - * already completed, do a catch up pass, flushing the retirement queue - * up to this client. Since we have now moved the heaviest operations - * during retirement onto secondary workers, such as freeing objects - * or contexts, retiring a bunch of requests is mostly list management - * (and cache misses), and so we should not be overly penalizing this - * client by performing excess work, though we may still performing - * work on behalf of others -- but instead we should benefit from - * improved resource management. (Well, that's the theory at least.) - */ - if (prev && - i915_request_completed(prev) && - rcu_access_pointer(prev->timeline) == tl) - i915_request_retire_upto(prev); - mutex_unlock(&tl->mutex); } -static unsigned long local_clock_us(unsigned int *cpu) +static unsigned long local_clock_ns(unsigned int *cpu) { unsigned long t; @@ -1422,7 +1474,7 @@ static unsigned long local_clock_us(unsigned int *cpu) * stop busywaiting, see busywait_stop(). */ *cpu = get_cpu(); - t = local_clock() >> 10; + t = local_clock(); put_cpu(); return t; @@ -1432,15 +1484,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu) { unsigned int this_cpu; - if (time_after(local_clock_us(&this_cpu), timeout)) + if (time_after(local_clock_ns(&this_cpu), timeout)) return true; return this_cpu != cpu; } -static bool __i915_spin_request(const struct i915_request * const rq, - int state, unsigned long timeout_us) +static bool __i915_spin_request(const struct i915_request * const rq, int state) { + unsigned long timeout_ns; unsigned int cpu; /* @@ -1468,7 +1520,8 @@ static bool __i915_spin_request(const struct i915_request * const rq, * takes to sleep on a request, on the order of a microsecond. */ - timeout_us += local_clock_us(&cpu); + timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); + timeout_ns += local_clock_ns(&cpu); do { if (i915_request_completed(rq)) return true; @@ -1476,7 +1529,7 @@ static bool __i915_spin_request(const struct i915_request * const rq, if (signal_pending_state(state, current)) break; - if (busywait_stop(timeout_us, cpu)) + if (busywait_stop(timeout_ns, cpu)) break; cpu_relax(); @@ -1562,8 +1615,8 @@ long i915_request_wait(struct i915_request *rq, * completion. That requires having a good predictor for the request * duration, which we currently lack. 
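	 *
	 * The spin budget itself is read from the engine
	 * (rq->engine->props.max_busywait_duration_ns in
	 * __i915_spin_request() below), so it may be tuned per engine at
	 * runtime rather than being fixed at compile time.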
*/ - if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) && - __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) { + if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) && + __i915_spin_request(rq, state)) { dma_fence_signal(&rq->fence); goto out; } @@ -1598,6 +1651,8 @@ long i915_request_wait(struct i915_request *rq, break; } + intel_engine_flush_submission(rq->engine); + if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; @@ -1608,7 +1663,6 @@ long i915_request_wait(struct i915_request *rq, break; } - intel_engine_flush_submission(rq->engine); timeout = io_schedule_timeout(timeout); } __set_current_state(TASK_RUNNING); @@ -1628,14 +1682,12 @@ out: static void i915_global_request_shrink(void) { - kmem_cache_shrink(global.slab_dependencies); kmem_cache_shrink(global.slab_execute_cbs); kmem_cache_shrink(global.slab_requests); } static void i915_global_request_exit(void) { - kmem_cache_destroy(global.slab_dependencies); kmem_cache_destroy(global.slab_execute_cbs); kmem_cache_destroy(global.slab_requests); } @@ -1665,17 +1717,9 @@ int __init i915_global_request_init(void) if (!global.slab_execute_cbs) goto err_requests; - global.slab_dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!global.slab_dependencies) - goto err_execute_cbs; - i915_global_register(&global.base); return 0; -err_execute_cbs: - kmem_cache_destroy(global.slab_execute_cbs); err_requests: kmem_cache_destroy(global.slab_requests); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index fccc339949ec..3c552bfea67a 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -305,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp); struct i915_request * __must_check i915_request_create(struct intel_context *ce); +void i915_request_set_error_once(struct i915_request *rq, int error); +void __i915_request_skip(struct i915_request *rq); + struct i915_request *__i915_request_commit(struct i915_request *request); void __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr); @@ -354,8 +357,6 @@ void i915_request_add(struct i915_request *rq); bool __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); -void i915_request_skip(struct i915_request *request, int error); - void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); @@ -397,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2) static inline u32 __hwsp_seqno(const struct i915_request *rq) { - return READ_ONCE(*rq->hwsp_seqno); + const u32 *hwsp = READ_ONCE(rq->hwsp_seqno); + + return READ_ONCE(*hwsp); } /** @@ -481,7 +484,7 @@ static inline bool i915_request_is_running(const struct i915_request *rq) } /** - * i915_request_is_running - check if the request is ready for execution + * i915_request_is_ready - check if the request is ready for execution * @rq: the request * * Upon construction, the request is instructed to wait upon various @@ -511,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq) static inline void i915_request_mark_complete(struct i915_request *rq) { - rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */ + WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */ + (u32 *)&rq->fence.seqno); } static inline bool i915_request_has_waitboost(const struct i915_request *rq) diff --git 
a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 34b654b4e58a..f0a9e8958ca0 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine, if (!inflight) goto unlock; + engine->execlists.queue_priority_hint = prio; + /* * If we are already the currently executing context, don't * bother evaluating if we should preempt ourselves. @@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine, if (inflight->context == rq->context) goto unlock; - engine->execlists.queue_priority_hint = prio; if (need_preempt(prio, rq_prio(inflight))) tasklet_hi_schedule(&engine->execlists.tasklet); @@ -227,10 +228,10 @@ unlock: static void __i915_schedule(struct i915_sched_node *node, const struct i915_sched_attr *attr) { + const int prio = max(attr->priority, node->attr.priority); struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; - const int prio = attr->priority; struct sched_cache cache; LIST_HEAD(dfs); @@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node, lockdep_assert_held(&schedule_lock); GEM_BUG_ON(prio == I915_PRIORITY_INVALID); - if (prio <= READ_ONCE(node->attr.priority)) - return; - if (node_signaled(node)) return; @@ -324,7 +322,7 @@ static void __i915_schedule(struct i915_sched_node *node, GEM_BUG_ON(node_to_request(node)->engine != engine); - node->attr.priority = prio; + WRITE_ONCE(node->attr.priority, prio); /* * Once the request is ready, it will be placed into the @@ -363,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump) { struct i915_sched_attr attr = node->attr; + if (attr.priority & bump) + return; + attr.priority |= bump; __i915_schedule(node, &attr); } @@ -433,7 +434,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; /* All set, now publish. Beware the lockless walkers. 
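	 * list_add_rcu() orders the initialisation of the new entry before
	 * its publication, so a walker traversing signalers_list under
	 * rcu_read_lock() never observes a half-constructed dependency.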
*/ - list_add(&dep->signal_link, &node->signalers_list); + list_add_rcu(&dep->signal_link, &node->signalers_list); list_add_rcu(&dep->wait_link, &signal->waiters_list); /* @@ -455,7 +456,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, } int i915_sched_node_add_dependency(struct i915_sched_node *node, - struct i915_sched_node *signal) + struct i915_sched_node *signal, + unsigned long flags) { struct i915_dependency *dep; @@ -464,8 +466,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node, return -ENOMEM; if (!__i915_sched_node_add_dependency(node, signal, dep, - I915_DEPENDENCY_EXTERNAL | - I915_DEPENDENCY_ALLOC)) + flags | I915_DEPENDENCY_ALLOC)) i915_dependency_free(dep); return 0; @@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node) list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { GEM_BUG_ON(!list_empty(&dep->dfs_link)); - list_del(&dep->wait_link); + list_del_rcu(&dep->wait_link); if (dep->flags & I915_DEPENDENCY_ALLOC) i915_dependency_free(dep); } @@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node) GEM_BUG_ON(dep->signaler != node); GEM_BUG_ON(!list_empty(&dep->dfs_link)); - list_del(&dep->signal_link); + list_del_rcu(&dep->signal_link); if (dep->flags & I915_DEPENDENCY_ALLOC) i915_dependency_free(dep); } @@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { { int __init i915_global_scheduler_init(void) { global.slab_dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN); + SLAB_HWCACHE_ALIGN | + SLAB_TYPESAFE_BY_RCU); if (!global.slab_dependencies) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index d1dc4efef77b..6f0bf00fc569 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, unsigned long flags); int i915_sched_node_add_dependency(struct i915_sched_node *node, - struct i915_sched_node *signal); + struct i915_sched_node *signal, + unsigned long flags); void i915_sched_node_fini(struct i915_sched_node *node); diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index d18e70550054..7186875088a0 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -78,6 +78,7 @@ struct i915_dependency { unsigned long flags; #define I915_DEPENDENCY_ALLOC BIT(0) #define I915_DEPENDENCY_EXTERNAL BIT(1) +#define I915_DEPENDENCY_WEAK BIT(2) }; #endif /* _I915_SCHEDULER_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 8812cdd9007f..ed2be3489f8e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -24,8 +24,6 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <drm/i915_drm.h> - #include "display/intel_fbc.h" #include "display/intel_gmbus.h" #include "display/intel_vga.h" diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 51ba97daf2a0..a3d38e089b6e 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence) __i915_sw_fence_complete(fence, NULL); } -void i915_sw_fence_await(struct i915_sw_fence *fence) +bool i915_sw_fence_await(struct i915_sw_fence *fence) { - debug_fence_assert(fence); - WARN_ON(atomic_inc_return(&fence->pending) <= 1); + int pending; + + /* + * It is only safe to add a new await to the fence while it has + * not yet been signaled (i.e. there are still existing signalers). + */ + pending = atomic_read(&fence->pending); + do { + if (pending < 1) + return false; + } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1)); + + return true; } void __i915_sw_fence_init(struct i915_sw_fence *fence, diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 19e806ce43bc..30a863353ee6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -91,7 +91,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp); -void i915_sw_fence_await(struct i915_sw_fence *fence); +bool i915_sw_fence_await(struct i915_sw_fence *fence); void i915_sw_fence_complete(struct i915_sw_fence *fence); static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence) diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index 39c79e1c5b52..ed69b5d4a375 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
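	 *
	 * open_count is an atomic_t here, so the atomic_read() below is only
	 * a point-in-time snapshot; the count may change again before the
	 * caller acts on it, which this path tolerates.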
*/ - return i915 && i915->drm.open_count == 0; + return i915 && atomic_read(&i915->drm.open_count) == 0; } static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 0cef3130db05..45d32ef42787 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,7 @@ #include "gt/intel_rc6.h" #include "gt/intel_rps.h" +#include "gt/sysfs_engines.h" #include "i915_drv.h" #include "i915_sysfs.h" @@ -525,7 +526,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj, struct device *kdev = kobj_to_dev(kobj); struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - DRM_DEBUG_DRIVER("Resetting error state\n"); + drm_dbg(&dev_priv->drm, "Resetting error state\n"); i915_reset_error_state(dev_priv); return count; @@ -564,31 +565,36 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) ret = sysfs_merge_group(&kdev->kobj, &rc6_attr_group); if (ret) - DRM_ERROR("RC6 residency sysfs setup failed\n"); + drm_err(&dev_priv->drm, + "RC6 residency sysfs setup failed\n"); } if (HAS_RC6p(dev_priv)) { ret = sysfs_merge_group(&kdev->kobj, &rc6p_attr_group); if (ret) - DRM_ERROR("RC6p residency sysfs setup failed\n"); + drm_err(&dev_priv->drm, + "RC6p residency sysfs setup failed\n"); } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { ret = sysfs_merge_group(&kdev->kobj, &media_rc6_attr_group); if (ret) - DRM_ERROR("Media RC6 residency sysfs setup failed\n"); + drm_err(&dev_priv->drm, + "Media RC6 residency sysfs setup failed\n"); } #endif if (HAS_L3_DPF(dev_priv)) { ret = device_create_bin_file(kdev, &dpf_attrs); if (ret) - DRM_ERROR("l3 parity sysfs setup failed\n"); + drm_err(&dev_priv->drm, + "l3 parity sysfs setup failed\n"); if (NUM_L3_SLICES(dev_priv) > 1) { ret = device_create_bin_file(kdev, &dpf_attrs_1); if (ret) - DRM_ERROR("l3 parity slice 1 setup failed\n"); + drm_err(&dev_priv->drm, + "l3 parity slice 1 setup failed\n"); } } @@ -598,9 +604,11 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 6) ret = sysfs_create_files(&kdev->kobj, gen6_attrs); if (ret) - DRM_ERROR("RPS sysfs setup failed\n"); + drm_err(&dev_priv->drm, "RPS sysfs setup failed\n"); i915_setup_error_capture(kdev); + + intel_engines_add_sysfs(dev_priv); } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 233a97a2c276..bc854ad60954 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -339,6 +339,68 @@ TRACE_EVENT(intel_disable_plane, __entry->frame, __entry->scanline) ); +/* fbc */ + +TRACE_EVENT(intel_fbc_activate, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_deactivate, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + 
), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + +TRACE_EVENT(intel_fbc_nuke, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline) +); + /* pipe updates */ TRACE_EVENT(intel_pipe_update_start, @@ -738,7 +800,7 @@ TRACE_EVENT(i915_request_in, __field(u16, instance) __field(u32, seqno) __field(u32, port) - __field(u32, prio) + __field(s32, prio) ), TP_fast_assign( @@ -751,7 +813,7 @@ TRACE_EVENT(i915_request_in, __entry->port = port; ), - TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u", __entry->dev, __entry->class, __entry->instance, __entry->ctx, __entry->seqno, __entry->prio, __entry->port) diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 632d6953c78d..029854ae65fc 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -8,7 +8,6 @@ #include "i915_drv.h" #include "i915_utils.h" -#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" #define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details." void diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index d34141f7dcd8..03a73d2bd50d 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -34,6 +34,8 @@ struct drm_i915_private; struct timer_list; +#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" + #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ #if 0 @@ -100,12 +102,24 @@ bool i915_error_injected(void); typeof(max) max__ = (max); \ (void)(&start__ == &size__); \ (void)(&start__ == &max__); \ - start__ > max__ || size__ > max__ - start__; \ + start__ >= max__ || size__ > max__ - start__; \ }) #define range_overflows_t(type, start, size, max) \ range_overflows((type)(start), (type)(size), (type)(max)) +#define range_overflows_end(start, size, max) ({ \ + typeof(start) start__ = (start); \ + typeof(size) size__ = (size); \ + typeof(max) max__ = (max); \ + (void)(&start__ == &size__); \ + (void)(&start__ == &max__); \ + start__ > max__ || size__ > max__ - start__; \ +}) + +#define range_overflows_end_t(type, start, size, max) \ + range_overflows_end((type)(start), (type)(size), (type)(max)) + /* Note we don't consider signbits :| */ #define overflows_type(x, T) \ (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) @@ -246,6 +260,12 @@ static inline void __list_del_many(struct list_head *head, WRITE_ONCE(head->next, first); } +static inline int list_is_last_rcu(const struct list_head *list, + const struct list_head *head) +{ + return READ_ONCE(list->next) == head; +} + /* * Wait until the work is finally complete, even if it tries to postpone * by requeueing itself. Note, that if the worker never cancels itself, diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 968be26735c5..70fca72f5162 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -21,6 +21,8 @@ * SOFTWARE. 
*/ +#include "i915_drv.h" +#include "i915_pvinfo.h" #include "i915_vgpu.h" /** @@ -51,13 +53,13 @@ */ /** - * i915_detect_vgpu - detect virtual GPU + * intel_vgpu_detect - detect virtual GPU * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_detect_vgpu(struct drm_i915_private *dev_priv) +void intel_vgpu_detect(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; u64 magic; @@ -77,7 +79,8 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv) shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE); if (!shared_area) { - DRM_ERROR("failed to map MMIO bar to check for VGT\n"); + drm_err(&dev_priv->drm, + "failed to map MMIO bar to check for VGT\n"); return; } @@ -87,7 +90,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv) version_major = readw(shared_area + vgtif_offset(version_major)); if (version_major < VGT_VERSION_MAJOR) { - DRM_INFO("VGT interface version mismatch!\n"); + drm_info(&dev_priv->drm, "VGT interface version mismatch!\n"); goto out; } @@ -95,17 +98,42 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv) dev_priv->vgpu.active = true; mutex_init(&dev_priv->vgpu.lock); - DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); + drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n"); out: pci_iounmap(pdev, shared_area); } +void intel_vgpu_register(struct drm_i915_private *i915) +{ + /* + * Notify a valid surface after modesetting, when running inside a VM. + */ + if (intel_vgpu_active(i915)) + intel_uncore_write(&i915->uncore, vgtif_reg(display_ready), + VGT_DRV_DISPLAY_READY); +} + +bool intel_vgpu_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->vgpu.active; +} + bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv) { return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT; } +bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) +{ + return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION; +} + +bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv) +{ + return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT; +} + struct _balloon_info_ { /* * There are up to 2 regions per mappable/unmappable graphic @@ -120,13 +148,15 @@ static struct _balloon_info_ bl_info; static void vgt_deballoon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + struct drm_i915_private *dev_priv = ggtt->vm.i915; if (!drm_mm_node_allocated(node)) return; - DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", - node->start, - node->start + node->size, - node->size / 1024); + drm_dbg(&dev_priv->drm, + "deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", + node->start, + node->start + node->size, + node->size / 1024); ggtt->vm.reserved -= node->size; drm_mm_remove_node(node); @@ -141,12 +171,13 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, */ void intel_vgt_deballoon(struct i915_ggtt *ggtt) { + struct drm_i915_private *dev_priv = ggtt->vm.i915; int i; if (!intel_vgpu_active(ggtt->vm.i915)) return; - DRM_DEBUG("VGT deballoon.\n"); + drm_dbg(&dev_priv->drm, "VGT deballoon.\n"); for (i = 0; i < 4; i++) vgt_deballoon_space(ggtt, &bl_info.space[i]); @@ -156,13 +187,15 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, struct drm_mm_node *node, unsigned long start, unsigned long end) { + struct drm_i915_private *dev_priv = ggtt->vm.i915; unsigned long size = end - start; int ret; if (start >= end) return -EINVAL; - DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu 
KiB.\n", + drm_info(&dev_priv->drm, + "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", start, end, size / 1024); ret = i915_gem_gtt_reserve(&ggtt->vm, node, size, start, I915_COLOR_UNEVICTABLE, @@ -219,7 +252,8 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, */ int intel_vgt_balloon(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &ggtt->vm.i915->uncore; + struct drm_i915_private *dev_priv = ggtt->vm.i915; + struct intel_uncore *uncore = &dev_priv->uncore; unsigned long ggtt_end = ggtt->vm.total; unsigned long mappable_base, mappable_size, mappable_end; @@ -241,16 +275,18 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt) mappable_end = mappable_base + mappable_size; unmappable_end = unmappable_base + unmappable_size; - DRM_INFO("VGT ballooning configuration:\n"); - DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n", + drm_info(&dev_priv->drm, "VGT ballooning configuration:\n"); + drm_info(&dev_priv->drm, + "Mappable graphic memory: base 0x%lx size %ldKiB\n", mappable_base, mappable_size / 1024); - DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n", + drm_info(&dev_priv->drm, + "Unmappable graphic memory: base 0x%lx size %ldKiB\n", unmappable_base, unmappable_size / 1024); if (mappable_end > ggtt->mappable_end || unmappable_base < ggtt->mappable_end || unmappable_end > ggtt_end) { - DRM_ERROR("Invalid ballooning configuration!\n"); + drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n"); return -EINVAL; } @@ -287,7 +323,7 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt) goto err_below_mappable; } - DRM_INFO("VGT balloon successfully\n"); + drm_info(&dev_priv->drm, "VGT balloon successfully\n"); return 0; err_below_mappable: @@ -297,6 +333,6 @@ err_upon_unmappable: err_upon_mappable: vgt_deballoon_space(ggtt, &bl_info.space[2]); err: - DRM_ERROR("VGT balloon fail\n"); + drm_err(&dev_priv->drm, "VGT balloon fail\n"); return ret; } diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 8b3663dad193..ffbb77d08048 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -24,24 +24,17 @@ #ifndef _I915_VGPU_H_ #define _I915_VGPU_H_ -#include "i915_drv.h" -#include "i915_pvinfo.h" +#include <linux/types.h> -void i915_detect_vgpu(struct drm_i915_private *dev_priv); +struct drm_i915_private; +struct i915_ggtt; -bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv); - -static inline bool -intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) -{ - return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION; -} - -static inline bool -intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv) -{ - return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT; -} +void intel_vgpu_detect(struct drm_i915_private *i915); +bool intel_vgpu_active(struct drm_i915_private *i915); +void intel_vgpu_register(struct drm_i915_private *i915); +bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915); +bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915); +bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915); int intel_vgt_balloon(struct i915_ggtt *ggtt); void intel_vgt_deballoon(struct i915_ggtt *ggtt); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4ff380770b32..2cd7a7e87c0a 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj, GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); + spin_lock(&obj->vma.lock); + if (i915_is_ggtt(vm)) { if 
(unlikely(overflows_type(vma->size, u32))) - goto err_vma; + goto err_unlock; vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, i915_gem_object_get_tiling(obj), i915_gem_object_get_stride(obj)); if (unlikely(vma->fence_size < vma->size || /* overflow */ vma->fence_size > vm->total)) - goto err_vma; + goto err_unlock; GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); @@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj, __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } - spin_lock(&obj->vma.lock); - rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { @@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj, return vma; +err_unlock: + spin_unlock(&obj->vma.lock); err_vma: i915_vma_free(vma); return ERR_PTR(-E2BIG); @@ -294,6 +296,7 @@ struct i915_vma_work { struct dma_fence_work base; struct i915_vma *vma; struct drm_i915_gem_object *pinned; + struct i915_sw_dma_fence_cb cb; enum i915_cache_level cache_level; unsigned int flags; }; @@ -339,6 +342,25 @@ struct i915_vma_work *i915_vma_work(void) return vw; } +int i915_vma_wait_for_bind(struct i915_vma *vma) +{ + int err = 0; + + if (rcu_access_pointer(vma->active.excl.fence)) { + struct dma_fence *fence; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); + rcu_read_unlock(); + if (fence) { + err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(fence); + } + } + + return err; +} + /** * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. * @vma: VMA to map @@ -386,6 +408,8 @@ int i915_vma_bind(struct i915_vma *vma, trace_i915_vma_bind(vma, bind_flags); if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) { + struct dma_fence *prev; + work->vma = vma; work->cache_level = cache_level; work->flags = bind_flags | I915_VMA_ALLOC; @@ -399,8 +423,14 @@ int i915_vma_bind(struct i915_vma *vma, * part of the obj->resv->excl_fence as it only affects * execution and not content or object's backing store lifetime. 
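	 *
	 * A previous async bind may still be in flight, in which case
	 * i915_active_set_exclusive() returns the old fence and the new
	 * work is chained behind it below, keeping rebinds in submission
	 * order.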
*/ - GEM_BUG_ON(i915_active_has_exclusive(&vma->active)); - i915_active_set_exclusive(&vma->active, &work->base.dma); + prev = i915_active_set_exclusive(&vma->active, &work->base.dma); + if (prev) { + __i915_sw_fence_await_dma_fence(&work->base.chain, + prev, + &work->cb); + dma_fence_put(prev); + } + work->base.dma.error = 0; /* enable the queue_work() */ if (vma->obj) { @@ -408,7 +438,6 @@ int i915_vma_bind(struct i915_vma *vma, work->pinned = vma->obj; } } else { - GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags); ret = vma->ops->bind_vma(vma, cache_level, bind_flags); if (ret) return ret; @@ -614,7 +643,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) u64 start, end; int ret; - GEM_BUG_ON(i915_vma_is_closed(vma)); GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); @@ -892,6 +920,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) if (err) goto err_fence; + if (unlikely(i915_vma_is_closed(vma))) { + err = -ENOENT; + goto err_unlock; + } + bound = atomic_read(&vma->flags); if (unlikely(bound & I915_VMA_ERROR)) { err = -ENOMEM; @@ -977,8 +1010,14 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags) do { err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL); - if (err != -ENOSPC) + if (err != -ENOSPC) { + if (!err) { + err = i915_vma_wait_for_bind(vma); + if (err) + i915_vma_unpin(vma); + } return err; + } /* Unlike i915_vma_pin, we don't take no for an answer! */ flush_idle_contexts(vm->gt); @@ -1060,6 +1099,7 @@ void i915_vma_release(struct kref *ref) void i915_vma_parked(struct intel_gt *gt) { struct i915_vma *vma, *next; + LIST_HEAD(closed); spin_lock_irq(&gt->closed_lock); list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) { @@ -1071,28 +1111,26 @@ void i915_vma_parked(struct intel_gt *gt) if (!kref_get_unless_zero(&obj->base.refcount)) continue; - if (i915_vm_tryopen(vm)) { - list_del_init(&vma->closed_link); - } else { + if (!i915_vm_tryopen(vm)) { i915_gem_object_put(obj); - obj = NULL; + continue; } - spin_unlock_irq(&gt->closed_lock); + list_move(&vma->closed_link, &closed); + } + spin_unlock_irq(&gt->closed_lock); - if (obj) { - __i915_vma_put(vma); - i915_gem_object_put(obj); - } + /* As the GT is held idle, no vma can be reopened as we destroy them */ + list_for_each_entry_safe(vma, next, &closed, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; - i915_vm_close(vm); + INIT_LIST_HEAD(&vma->closed_link); + __i915_vma_put(vma); - /* Restart after dropping lock */ - spin_lock_irq(&gt->closed_lock); - next = list_first_entry(&gt->closed_vma, - typeof(*next), closed_link); + i915_gem_object_put(obj); + i915_vm_close(vm); } - spin_unlock_irq(&gt->closed_lock); } static void __i915_vma_iounmap(struct i915_vma *vma) @@ -1136,7 +1174,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) GEM_BUG_ON(!i915_vma_is_pinned(vma)); /* Wait for the vma to be bound before we start! */ - err = i915_request_await_active(rq, &vma->active); + err = i915_request_await_active(rq, &vma->active, 0); if (err) return err; @@ -1190,18 +1228,6 @@ int __i915_vma_unbind(struct i915_vma *vma) lockdep_assert_held(&vma->vm->mutex); - /* - * First wait upon any activity as retiring the request may - * have side-effects such as unpinning or even unbinding this vma.
- * - * XXX Actually waiting under the vm->mutex is a hinderance and - * should be pipelined wherever possible. In cases where that is - * unavoidable, we should lift the wait to before the mutex. - */ - ret = i915_vma_sync(vma); - if (ret) - return ret; - if (i915_vma_is_pinned(vma)) { vma_print_allocator(vma, "is pinned"); return -EAGAIN; @@ -1228,9 +1254,15 @@ int __i915_vma_unbind(struct i915_vma *vma) * before the unbind, other due to non-strict nature of those * indirect writes they may end up referencing the GGTT PTE * after the unbind. + * + * Note that we may be concurrently poking at the GGTT_WRITE + * bit from set-domain, as we mark all GGTT vma associated + * with an object. We know this is for another vma, as we + * are currently unbinding this one -- so if this vma will be + * reused, it will be refaulted and have its dirty bit set + * before the next write. */ i915_vma_flush_writes(vma); - GEM_BUG_ON(i915_vma_has_ggtt_write(vma)); /* release the fence reg _after_ flushing */ ret = i915_vma_revoke_fence(vma); @@ -1250,7 +1282,8 @@ int __i915_vma_unbind(struct i915_vma *vma) trace_i915_vma_unbind(vma); vma->ops->unbind_vma(vma); } - atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags); + atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), + &vma->flags); i915_vma_detach(vma); vma_unbind_pages(vma); @@ -1268,20 +1301,30 @@ int i915_vma_unbind(struct i915_vma *vma) if (!drm_mm_node_allocated(&vma->node)) return 0; + /* Optimistic wait before taking the mutex */ + err = i915_vma_sync(vma); + if (err) + goto out_rpm; + + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); + return -EAGAIN; + } + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) /* XXX not always required: nop_clear_range */ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); err = mutex_lock_interruptible(&vm->mutex); if (err) - return err; + goto out_rpm; err = __i915_vma_unbind(vma); mutex_unlock(&vm->mutex); +out_rpm: if (wakeref) intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); - return err; } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 02b31a62951e..e1ced1df13e1 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -375,6 +375,8 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); void i915_vma_make_shrinkable(struct i915_vma *vma); void i915_vma_make_purgeable(struct i915_vma *vma); +int i915_vma_wait_for_bind(struct i915_vma *vma); + static inline int i915_vma_sync(struct i915_vma *vma) { /* Wait for the asynchronous bindings and pending GPU reads */ diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index e0942efd5236..63831cdb7402 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -273,21 +273,10 @@ struct i915_vma { struct rb_node obj_node; struct hlist_node obj_hash; - /** This vma's place in the execbuf reservation list */ - struct list_head exec_link; - struct list_head reloc_link; - /** This vma's place in the eviction list */ struct list_head evict_link; struct list_head closed_link; - - /** - * Used for performing relocations during execbuffer insertion. 
- */ - unsigned int *exec_flags; - struct hlist_node exec_node; - u32 exec_handle; }; #endif diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 6670a0763be2..d7fe12734db8 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -23,7 +23,9 @@ */ #include <drm/drm_print.h> +#include <drm/i915_pciids.h> +#include "display/intel_cdclk.h" #include "intel_device_info.h" #include "i915_drv.h" @@ -132,6 +134,7 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info, { sseu_dump(&info->sseu, p); + drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); drm_printf(p, "CS timestamp frequency: %u kHz\n", info->cs_timestamp_frequency_khz); } @@ -743,7 +746,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) * hclks." (through the “Clocking Configuration” * (“CLKCFG”) MCHBAR register) */ - return dev_priv->rawclk_freq / 16; + return RUNTIME_INFO(dev_priv)->rawclk_freq / 16; } else if (INTEL_GEN(dev_priv) <= 8) { /* PRMs say: * @@ -974,10 +977,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || (HAS_PCH_CPT(dev_priv) && !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { - DRM_INFO("Display fused off, disabling\n"); + drm_info(&dev_priv->drm, + "Display fused off, disabling\n"); info->pipe_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { - DRM_INFO("PipeC fused off\n"); + drm_info(&dev_priv->drm, "PipeC fused off\n"); info->pipe_mask &= ~BIT(PIPE_C); } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { @@ -1000,8 +1004,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) * in the mask. */ if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1)) - DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n", - enabled_mask); + drm_err(&dev_priv->drm, + "invalid pipe fuse configuration: enabled_mask=0x%x\n", + enabled_mask); else info->pipe_mask = enabled_mask; @@ -1036,12 +1041,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) gen12_sseu_info_init(dev_priv); if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { - DRM_INFO("Disabling ppGTT for VT-d support\n"); + drm_info(&dev_priv->drm, + "Disabling ppGTT for VT-d support\n"); info->ppgtt_type = INTEL_PPGTT_NONE; } + runtime->rawclk_freq = intel_read_rawclk(dev_priv); + drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq); + /* Initialize command stream timestamp frequency */ - runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); + runtime->cs_timestamp_frequency_khz = + read_timestamp_frequency(dev_priv); + if (runtime->cs_timestamp_frequency_khz) { + runtime->cs_timestamp_period_ns = + div_u64(1e6, runtime->cs_timestamp_frequency_khz); + drm_dbg(&dev_priv->drm, + "CS timestamp wraparound in %lldms\n", + div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns, + S32_MAX), + USEC_PER_SEC)); + } } void intel_driver_caps_print(const struct intel_driver_caps *caps, @@ -1084,7 +1103,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) if (!(BIT(i) & vdbox_mask)) { info->engine_mask &= ~BIT(_VCS(i)); - DRM_DEBUG_DRIVER("vcs%u fused off\n", i); + drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i); continue; } @@ -1096,8 +1115,8 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0) RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } - DRM_DEBUG_DRIVER("vdbox 
enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(dev_priv)); + drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(dev_priv)); GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); for (i = 0; i < I915_MAX_VECS; i++) { @@ -1108,10 +1127,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) if (!(BIT(i) & vebox_mask)) { info->engine_mask &= ~BIT(_VECS(i)); - DRM_DEBUG_DRIVER("vecs%u fused off\n", i); + drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i); } } - DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(dev_priv)); + drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(dev_priv)); GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv)); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 2725cb7fc169..1ecb9df2de91 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -180,6 +180,7 @@ struct intel_device_info { } display; u16 ddb_size; /* in blocks */ + u8 num_supported_dbuf_slices; /* number of DBuf slices */ /* Register offsets for the various display pipes and transcoders */ int pipe_offsets[I915_MAX_TRANSCODERS]; @@ -215,7 +216,10 @@ struct intel_runtime_info { /* Slice/subslice/EU info */ struct sseu_dev_info sseu; + u32 rawclk_freq; + u32 cs_timestamp_frequency_khz; + u32 cs_timestamp_period_ns; /* Media engine access to SFC per instance */ u8 vdbox_sfc_access; diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c new file mode 100644 index 000000000000..6b922efb1d7c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_dram.h" + +struct dram_dimm_info { + u8 size, width, ranks; +}; + +struct dram_channel_info { + struct dram_dimm_info dimm_l, dimm_s; + u8 ranks; + bool is_16gb_dimm; +}; + +#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type + +static const char *intel_dram_type_str(enum intel_dram_type type) +{ + static const char * const str[] = { + DRAM_TYPE_STR(UNKNOWN), + DRAM_TYPE_STR(DDR3), + DRAM_TYPE_STR(DDR4), + DRAM_TYPE_STR(LPDDR3), + DRAM_TYPE_STR(LPDDR4), + }; + + if (type >= ARRAY_SIZE(str)) + type = INTEL_DRAM_UNKNOWN; + + return str[type]; +} + +#undef DRAM_TYPE_STR + +static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) +{ + return dimm->ranks * 64 / (dimm->width ?: 1); +} + +/* Returns total GB for the whole DIMM */ +static int skl_get_dimm_size(u16 val) +{ + return val & SKL_DRAM_SIZE_MASK; +} + +static int skl_get_dimm_width(u16 val) +{ + if (skl_get_dimm_size(val) == 0) + return 0; + + switch (val & SKL_DRAM_WIDTH_MASK) { + case SKL_DRAM_WIDTH_X8: + case SKL_DRAM_WIDTH_X16: + case SKL_DRAM_WIDTH_X32: + val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int skl_get_dimm_ranks(u16 val) +{ + if (skl_get_dimm_size(val) == 0) + return 0; + + val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; + + return val + 1; +} + +/* Returns total GB for the whole DIMM */ +static int cnl_get_dimm_size(u16 val) +{ + return (val & CNL_DRAM_SIZE_MASK) / 2; +} + +static int cnl_get_dimm_width(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; + + switch (val & CNL_DRAM_WIDTH_MASK) { + case CNL_DRAM_WIDTH_X8: + case CNL_DRAM_WIDTH_X16: + case CNL_DRAM_WIDTH_X32: + val = 
(val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int cnl_get_dimm_ranks(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; + + val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; + + return val + 1; +} + +static bool +skl_is_16gb_dimm(const struct dram_dimm_info *dimm) +{ + /* Convert total GB to Gb per DRAM device */ + return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; +} + +static void +skl_dram_get_dimm_info(struct drm_i915_private *i915, + struct dram_dimm_info *dimm, + int channel, char dimm_name, u16 val) +{ + if (INTEL_GEN(i915) >= 10) { + dimm->size = cnl_get_dimm_size(val); + dimm->width = cnl_get_dimm_width(val); + dimm->ranks = cnl_get_dimm_ranks(val); + } else { + dimm->size = skl_get_dimm_size(val); + dimm->width = skl_get_dimm_width(val); + dimm->ranks = skl_get_dimm_ranks(val); + } + + drm_dbg_kms(&i915->drm, + "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", + channel, dimm_name, dimm->size, dimm->width, dimm->ranks, + yesno(skl_is_16gb_dimm(dimm))); +} + +static int +skl_dram_get_channel_info(struct drm_i915_private *i915, + struct dram_channel_info *ch, + int channel, u32 val) +{ + skl_dram_get_dimm_info(i915, &ch->dimm_l, + channel, 'L', val & 0xffff); + skl_dram_get_dimm_info(i915, &ch->dimm_s, + channel, 'S', val >> 16); + + if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { + drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel); + return -EINVAL; + } + + if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2) + ch->ranks = 2; + else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) + ch->ranks = 2; + else + ch->ranks = 1; + + ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) || + skl_is_16gb_dimm(&ch->dimm_s); + + drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n", + channel, ch->ranks, yesno(ch->is_16gb_dimm)); + + return 0; +} + +static bool +intel_is_dram_symmetric(const struct dram_channel_info *ch0, + const struct dram_channel_info *ch1) +{ + return !memcmp(ch0, ch1, sizeof(*ch0)) && + (ch0->dimm_s.size == 0 || + !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); +} + +static int +skl_dram_get_channels_info(struct drm_i915_private *i915) +{ + struct dram_info *dram_info = &i915->dram_info; + struct dram_channel_info ch0 = {}, ch1 = {}; + u32 val; + int ret; + + val = intel_uncore_read(&i915->uncore, + SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(i915, &ch0, 0, val); + if (ret == 0) + dram_info->num_channels++; + + val = intel_uncore_read(&i915->uncore, + SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(i915, &ch1, 1, val); + if (ret == 0) + dram_info->num_channels++; + + if (dram_info->num_channels == 0) { + drm_info(&i915->drm, "Number of memory channels is zero\n"); + return -EINVAL; + } + + /* + * If any of the channel is single rank channel, worst case output + * will be same as if single rank memory, so consider single rank + * memory. + */ + if (ch0.ranks == 1 || ch1.ranks == 1) + dram_info->ranks = 1; + else + dram_info->ranks = max(ch0.ranks, ch1.ranks); + + if (dram_info->ranks == 0) { + drm_info(&i915->drm, "couldn't get memory rank information\n"); + return -EINVAL; + } + + dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; + + dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); + + drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? 
%s\n", + yesno(dram_info->symmetric_memory)); + + return 0; +} + +static enum intel_dram_type +skl_get_dram_type(struct drm_i915_private *i915) +{ + u32 val; + + val = intel_uncore_read(&i915->uncore, + SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); + + switch (val & SKL_DRAM_DDR_TYPE_MASK) { + case SKL_DRAM_DDR_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case SKL_DRAM_DDR_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case SKL_DRAM_DDR_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case SKL_DRAM_DDR_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + +static int +skl_get_dram_info(struct drm_i915_private *i915) +{ + struct dram_info *dram_info = &i915->dram_info; + u32 mem_freq_khz, val; + int ret; + + dram_info->type = skl_get_dram_type(i915); + drm_dbg_kms(&i915->drm, "DRAM type: %s\n", + intel_dram_type_str(dram_info->type)); + + ret = skl_dram_get_channels_info(i915); + if (ret) + return ret; + + val = intel_uncore_read(&i915->uncore, + SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); + mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * + SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); + + dram_info->bandwidth_kbps = dram_info->num_channels * + mem_freq_khz * 8; + + if (dram_info->bandwidth_kbps == 0) { + drm_info(&i915->drm, + "Couldn't get system memory bandwidth\n"); + return -EINVAL; + } + + dram_info->valid = true; + return 0; +} + +/* Returns Gb per DRAM device */ +static int bxt_get_dimm_size(u32 val) +{ + switch (val & BXT_DRAM_SIZE_MASK) { + case BXT_DRAM_SIZE_4GBIT: + return 4; + case BXT_DRAM_SIZE_6GBIT: + return 6; + case BXT_DRAM_SIZE_8GBIT: + return 8; + case BXT_DRAM_SIZE_12GBIT: + return 12; + case BXT_DRAM_SIZE_16GBIT: + return 16; + default: + MISSING_CASE(val); + return 0; + } +} + +static int bxt_get_dimm_width(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; + + return 8 << val; +} + +static int bxt_get_dimm_ranks(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + switch (val & BXT_DRAM_RANK_MASK) { + case BXT_DRAM_RANK_SINGLE: + return 1; + case BXT_DRAM_RANK_DUAL: + return 2; + default: + MISSING_CASE(val); + return 0; + } +} + +static enum intel_dram_type bxt_get_dimm_type(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return INTEL_DRAM_UNKNOWN; + + switch (val & BXT_DRAM_TYPE_MASK) { + case BXT_DRAM_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case BXT_DRAM_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case BXT_DRAM_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case BXT_DRAM_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + +static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val) +{ + dimm->width = bxt_get_dimm_width(val); + dimm->ranks = bxt_get_dimm_ranks(val); + + /* + * Size in register is Gb per DRAM device. Convert to total + * GB to match the way we report this for non-LP platforms. 
+ */ + dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; +} + +static int bxt_get_dram_info(struct drm_i915_private *i915) +{ + struct dram_info *dram_info = &i915->dram_info; + u32 dram_channels; + u32 mem_freq_khz, val; + u8 num_active_channels; + int i; + + val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0); + mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * + BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); + + dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; + num_active_channels = hweight32(dram_channels); + + /* Each active bit represents 4-byte channel */ + dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4); + + if (dram_info->bandwidth_kbps == 0) { + drm_info(&i915->drm, + "Couldn't get system memory bandwidth\n"); + return -EINVAL; + } + + /* + * Now read each DUNIT8/9/10/11 to check the rank of each dimms. + */ + for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { + struct dram_dimm_info dimm; + enum intel_dram_type type; + + val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i)); + if (val == 0xFFFFFFFF) + continue; + + dram_info->num_channels++; + + bxt_get_dimm_info(&dimm, val); + type = bxt_get_dimm_type(val); + + drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN && + dram_info->type != INTEL_DRAM_UNKNOWN && + dram_info->type != type); + + drm_dbg_kms(&i915->drm, + "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", + i - BXT_D_CR_DRP0_DUNIT_START, + dimm.size, dimm.width, dimm.ranks, + intel_dram_type_str(type)); + + /* + * If any of the channel is single rank channel, + * worst case output will be same as if single rank + * memory, so consider single rank memory. + */ + if (dram_info->ranks == 0) + dram_info->ranks = dimm.ranks; + else if (dimm.ranks == 1) + dram_info->ranks = 1; + + if (type != INTEL_DRAM_UNKNOWN) + dram_info->type = type; + } + + if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) { + drm_info(&i915->drm, "couldn't get memory information\n"); + return -EINVAL; + } + + dram_info->valid = true; + + return 0; +} + +void intel_dram_detect(struct drm_i915_private *i915) +{ + struct dram_info *dram_info = &i915->dram_info; + int ret; + + /* + * Assume 16Gb DIMMs are present until proven otherwise. + * This is only used for the level 0 watermark latency + * w/a which does not apply to bxt/glk. 
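+	 *
+	 * (IS_GEN9_LP() covers exactly bxt/glk, hence the negation below.)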
+ */ + dram_info->is_16gb_dimm = !IS_GEN9_LP(i915); + + if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915)) + return; + + if (IS_GEN9_LP(i915)) + ret = bxt_get_dram_info(i915); + else + ret = skl_get_dram_info(i915); + if (ret) + return; + + drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n", + dram_info->bandwidth_kbps, dram_info->num_channels); + + drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n", + dram_info->ranks, yesno(dram_info->is_16gb_dimm)); +} + +static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap) +{ + static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; + static const u8 sets[4] = { 1, 1, 2, 2 }; + + return EDRAM_NUM_BANKS(cap) * + ways[EDRAM_WAYS_IDX(cap)] * + sets[EDRAM_SETS_IDX(cap)]; +} + +void intel_dram_edram_detect(struct drm_i915_private *i915) +{ + u32 edram_cap = 0; + + if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9)) + return; + + edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP); + + /* NB: We can't write IDICR yet because we don't have gt funcs set up */ + + if (!(edram_cap & EDRAM_ENABLED)) + return; + + /* + * The needed capability bits for size calculation are not there with + * pre gen9 so return 128MB always. + */ + if (INTEL_GEN(i915) < 9) + i915->edram_size_mb = 128; + else + i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap); + + dev_info(i915->drm.dev, + "Found %uMB of eDRAM\n", i915->edram_size_mb); +} diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h new file mode 100644 index 000000000000..4ba13c13162c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dram.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef __INTEL_DRAM_H__ +#define __INTEL_DRAM_H__ + +struct drm_i915_private; + +void intel_dram_edram_detect(struct drm_i915_private *i915); +void intel_dram_detect(struct drm_i915_private *i915); + +#endif /* __INTEL_DRAM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 2b6c016387c2..21b91313cc5d 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -22,6 +22,7 @@ */ #include "i915_drv.h" +#include "i915_vgpu.h" #include "intel_gvt.h" /** @@ -67,12 +68,13 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) return; if (intel_vgpu_active(dev_priv)) { - DRM_INFO("GVT-g is disabled for guest\n"); + drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n"); goto bail; } if (!is_supported_device(dev_priv)) { - DRM_INFO("Unsupported device. GVT-g is disabled\n"); + drm_info(&dev_priv->drm, + "Unsupported device. 
GVT-g is disabled\n"); goto bail; } @@ -99,18 +101,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return -ENODEV; if (!i915_modparams.enable_gvt) { - DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); + drm_dbg(&dev_priv->drm, + "GVT-g is disabled by kernel params\n"); return 0; } - if (USES_GUC_SUBMISSION(dev_priv)) { - DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); + if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) { + drm_err(&dev_priv->drm, + "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); return -EIO; } ret = intel_gvt_init_device(dev_priv); if (ret) { - DRM_DEBUG_DRIVER("Fail to init GVT device\n"); + drm_dbg(&dev_priv->drm, "Fail to init GVT device\n"); goto bail; } @@ -121,6 +125,11 @@ bail: return 0; } +static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) +{ + return dev_priv->gvt; +} + /** * intel_gvt_driver_remove - cleanup GVT components when i915 driver is * unbinding diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index d0d038b3cd79..6b5e9d88646d 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -265,7 +265,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915) if (IS_ERR(mem)) { err = PTR_ERR(mem); - DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type); + drm_err(&i915->drm, + "Failed to setup region(%d) type=%d\n", + err, type); goto out_cleanup; } diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 4ed60e1f01db..20ab9a5023b5 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -13,91 +13,106 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) switch (id) { case INTEL_PCH_IBX_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n"); - WARN_ON(!IS_GEN(dev_priv, 5)); + drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5)); return PCH_IBX; case INTEL_PCH_CPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n"); - WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); return PCH_CPT; case INTEL_PCH_PPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n"); - WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv)); /* PantherPoint is CPT compatible */ return PCH_CPT; case INTEL_PCH_LPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); - WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); - WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); return PCH_LPT; case INTEL_PCH_WPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n"); - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); - 
WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); /* WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n"); - WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); - WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); /* WildcatPoint is LPT compatible */ return PCH_LPT; case INTEL_PCH_SPT_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n"); - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n"); - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && - !IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n"); - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && - !IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, + !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); /* KBP is SPT compatible */ return PCH_SPT; case INTEL_PCH_CNP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n"); - WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake LP PCH (CNP-LP)\n"); - WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CMP_DEVICE_ID_TYPE: case INTEL_PCH_CMP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n"); - WARN_ON(!IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ return PCH_CNP; case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n"); - WARN_ON(!IS_COFFEELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv)); /* Comet Lake V PCH is based on KBP, which is SPT compatible */ return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n"); - WARN_ON(!IS_ICELAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n"); - WARN_ON(!IS_ELKHARTLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; case INTEL_PCH_TGP_DEVICE_ID_TYPE: case INTEL_PCH_TGP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n"); - WARN_ON(!IS_TIGERLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv)); return PCH_TGP; case INTEL_PCH_JSP_DEVICE_ID_TYPE: case INTEL_PCH_JSP2_DEVICE_ID_TYPE: drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n"); - WARN_ON(!IS_ELKHARTLAKE(dev_priv)); + drm_WARN_ON(&dev_priv->drm, 
!IS_ELKHARTLAKE(dev_priv)); return PCH_JSP; default: return PCH_NONE; @@ -188,7 +203,8 @@ void intel_detect_pch(struct drm_i915_private *dev_priv) pch_type = intel_pch_type(dev_priv, id); /* Sanity check virtual PCH id */ - if (WARN_ON(id && pch_type == PCH_NONE)) + if (drm_WARN_ON(&dev_priv->drm, + id && pch_type == PCH_NONE)) id = 0; dev_priv->pch_type = pch_type; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index bd2d30ecc030..a52986a9e7a6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -40,12 +40,36 @@ #include "gt/intel_llc.h" #include "i915_drv.h" +#include "i915_fixed.h" #include "i915_irq.h" #include "i915_trace.h" #include "intel_pm.h" #include "intel_sideband.h" #include "../../../platform/x86/intel_ips.h" +/* Stores plane specific WM parameters */ +struct skl_wm_params { + bool x_tiled, y_tiled; + bool rc_surface; + bool is_planar; + u32 width; + u8 cpp; + u32 plane_pixel_rate; + u32 y_min_scanlines; + u32 plane_bytes_per_line; + uint_fixed_16_16_t plane_blocks_per_line; + uint_fixed_16_16_t y_tile_minimum; + u32 linetime_us; + u32 dbuf_block_size; +}; + +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; +}; + static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { if (HAS_LLC(dev_priv)) { @@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); - - /* WaDDIIOTimeout:glk */ - if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) { - u32 val = I915_READ(CHICKEN_MISC_2); - val &= ~(GLK_CL0_PWR_DOWN | - GLK_CL1_PWR_DOWN | - GLK_CL2_PWR_DOWN); - I915_WRITE(CHICKEN_MISC_2, val); - } - } static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) @@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; enum pipe pipe = crtc->pipe; int sprite0_start, sprite1_start; + u32 dsparb, dsparb2, dsparb3; switch (pipe) { - u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = I915_READ(DSPARB); dsparb2 = I915_READ(DSPARB2); @@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; int sprite0_start, sprite1_start, fifo_size; + u32 dsparb, dsparb2, dsparb3; if (!crtc_state->fifo_changed) return; @@ -1977,8 +1992,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; - WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63); - WARN_ON(fifo_size != 511); + drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); + drm_WARN_ON(&dev_priv->drm, fifo_size != 511); trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); @@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state, spin_lock(&uncore->lock); switch (crtc->pipe) { - u32 dsparb, dsparb2, dsparb3; case PIPE_A: dsparb = intel_uncore_read_fw(uncore, DSPARB); dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); @@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level, } static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, - const struct intel_crtc *intel_crtc, + const struct intel_crtc *crtc, int level, struct 
intel_crtc_state *crtc_state, const struct intel_plane_state *pristate, @@ -2810,34 +2824,6 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, result->enable = true; } -static u32 -hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state) -{ - const struct intel_atomic_state *intel_state = - to_intel_atomic_state(crtc_state->uapi.state); - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - u32 linetime, ips_linetime; - - if (!crtc_state->hw.active) - return 0; - if (WARN_ON(adjusted_mode->crtc_clock == 0)) - return 0; - if (WARN_ON(intel_state->cdclk.logical.cdclk == 0)) - return 0; - - /* The WM are computed with base on how long it takes to fill a single - * row at the given clock rate, multiplied by 8. - * */ - linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - adjusted_mode->crtc_clock); - ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - intel_state->cdclk.logical.cdclk); - - return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | - PIPE_WM_LINETIME_TIME(linetime); -} - static void intel_read_wm_latency(struct drm_i915_private *dev_priv, u16 wm[8]) { @@ -3135,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_pipe_wm *pipe_wm; struct intel_plane *plane; const struct intel_plane_state *plane_state; @@ -3175,12 +3161,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) usable_level = 0; memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); - ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state, + ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, pristate, sprstate, curstate, &pipe_wm->wm[0]); - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state); - if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) return -EINVAL; @@ -3189,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) for (level = 1; level <= usable_level; level++) { struct intel_wm_level *wm = &pipe_wm->wm[level]; - ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state, + ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, pristate, sprstate, curstate, wm); /* @@ -3417,7 +3400,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, * level is disabled. Doing otherwise could cause underruns. 
*/ if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) { - WARN_ON(wm_lp != 1); + drm_WARN_ON(&dev_priv->drm, wm_lp != 1); results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; } else results->wm_lp_spr[wm_lp - 1] = r->spr_val; @@ -3426,14 +3409,12 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, /* LP0 register values */ for_each_intel_crtc(&dev_priv->drm, intel_crtc) { enum pipe pipe = intel_crtc->pipe; - const struct intel_wm_level *r = - &intel_crtc->wm.active.ilk.wm[0]; + const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk; + const struct intel_wm_level *r = &pipe_wm->wm[0]; - if (WARN_ON(!r->enable)) + if (drm_WARN_ON(&dev_priv->drm, !r->enable)) continue; - results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime; - results->wm_pipe[pipe] = (r->pri_val << WM0_PIPE_PLANE_SHIFT) | (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | @@ -3472,7 +3453,6 @@ ilk_find_best_result(struct drm_i915_private *dev_priv, /* dirty bits used to track which watermarks need changes */ #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) -#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) #define WM_DIRTY_FBC (1 << 24) @@ -3487,12 +3467,6 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, int wm_lp; for_each_pipe(dev_priv, pipe) { - if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { - dirty |= WM_DIRTY_LINETIME(pipe); - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { dirty |= WM_DIRTY_PIPE(pipe); /* Must disable LP1+ watermarks too */ @@ -3584,13 +3558,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, if (dirty & WM_DIRTY_PIPE(PIPE_C)) I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); - if (dirty & WM_DIRTY_LINETIME(PIPE_A)) - I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); - if (dirty & WM_DIRTY_LINETIME(PIPE_B)) - I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); - if (dirty & WM_DIRTY_LINETIME(PIPE_C)) - I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); - if (dirty & WM_DIRTY_DDB) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { val = I915_READ(WM_MISC); @@ -3644,26 +3611,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); } -static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv) +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv) { - u8 enabled_slices; - - /* Slice 1 will always be enabled */ - enabled_slices = 1; - - /* Gen prior to GEN11 have only one DBuf slice */ - if (INTEL_GEN(dev_priv) < 11) - return enabled_slices; + int i; + int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices; + u8 enabled_slices_mask = 0; - /* - * FIXME: for now we'll only ever use 1 slice; pretend that we have - * only that 1 slice enabled until we have a proper way for on-demand - * toggling of the second slice. 
- */ - if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) - enabled_slices++; + for (i = 0; i < max_slices; i++) { + if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE) + enabled_slices_mask |= BIT(i); + } - return enabled_slices; + return enabled_slices_mask; } /* @@ -3864,47 +3823,46 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) return true; } -static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *crtc_state, - const u64 total_data_rate, - const int num_active, - struct skl_ddb_allocation *ddb) +/* + * Calculate the initial DBuf slice offset, based on the slice size + * and the slice mask (e.g. if the slice size is 1024 and the second + * slice is enabled, the offset is 1024) + */ +static unsigned int +icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask, + u32 slice_size, + u32 ddb_size) +{ + unsigned int offset = 0; + + if (!dbuf_slice_mask) + return 0; + + offset = (ffs(dbuf_slice_mask) - 1) * slice_size; + + WARN_ON(offset >= ddb_size); + return offset; +} +
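The offset/size arithmetic above is easier to follow with concrete numbers. Below is a minimal user-space sketch of the same computation; the 1024-block slice size and the DBUF_S1/DBUF_S2 bit positions are illustrative assumptions, and __builtin_popcount() stands in for the kernel's hweight8():

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define BIT(n)	(1u << (n))
#define DBUF_S1	0
#define DBUF_S2	1

int main(void)
{
	unsigned int slice_size = 1024;		/* blocks per slice (assumed) */
	unsigned int mask = BIT(DBUF_S2);	/* pipe may only use slice 2 */

	/* The first allowed slice decides where the pipe's DDB window starts. */
	unsigned int offset = (ffs(mask) - 1) * slice_size;

	/* The window spans one slice per bit set in the mask. */
	unsigned int range = __builtin_popcount(mask) * slice_size;

	printf("window = [%u, %u)\n", offset, offset + range);	/* [1024, 2048) */
	return 0;
}

Per-pipe allocations are then carved out of this window in proportion to display width, which is what the *_in_range variables below implement.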
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv) { - const struct drm_display_mode *adjusted_mode; - u64 total_data_bw; u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size; - WARN_ON(ddb_size == 0); + drm_WARN_ON(&dev_priv->drm, ddb_size == 0); if (INTEL_GEN(dev_priv) < 11) return ddb_size - 4; /* 4 blocks for bypass path allocation */ - adjusted_mode = &crtc_state->hw.adjusted_mode; - total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); - - /* - * 12GB/s is maximum BW supported by single DBuf slice. - * - * FIXME dbuf slice code is broken: - * - must wait for planes to stop using the slice before powering it off - * - plane straddling both slices is illegal in multi-pipe scenarios - * - should validate we stay within the hw bandwidth limits - */ - if (0 && (num_active > 1 || total_data_bw >= GBps(12))) { - ddb->enabled_slices = 2; - } else { - ddb->enabled_slices = 1; - ddb_size /= 2; - } - return ddb_size; } +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, + u8 active_pipes); + static void skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state, const u64 total_data_rate, - struct skl_ddb_allocation *ddb, struct skl_ddb_entry *alloc, /* out */ int *num_active /* out */) { @@ -3912,12 +3870,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_crtc *for_crtc = crtc_state->uapi.crtc; const struct intel_crtc *crtc; - u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; + u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0; enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; u16 ddb_size; + u32 ddb_range_size; u32 i; - - if (WARN_ON(!state) || !crtc_state->hw.active) { + u32 dbuf_slice_mask; + u32 active_pipes; + u32 offset; + u32 slice_size; + u32 total_slice_mask; + u32 start, end; + + if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) { alloc->start = 0; alloc->end = 0; *num_active = hweight8(dev_priv->active_pipes); @@ -3925,12 +3890,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, } if (intel_state->active_pipe_changes) - *num_active = hweight8(intel_state->active_pipes); + active_pipes = intel_state->active_pipes; else - *num_active = hweight8(dev_priv->active_pipes); + active_pipes = dev_priv->active_pipes; + + *num_active = hweight8(active_pipes); - ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, - *num_active, ddb); + ddb_size = intel_get_ddb_size(dev_priv); + + slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices; /* * If the state doesn't change the active CRTC's or there is no @@ -3950,30 +3918,95 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, } /* + * Get the allowed DBuf slices for the corresponding pipe and platform. + */ + dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes); + + DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n", + dbuf_slice_mask, + pipe_name(for_pipe), active_pipes); + + /* + * Figure out at which DBuf slice we start, e.g. if we start at DBuf S2 + * and the slice size is 1024, the offset is 1024 + */ + offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask, + slice_size, ddb_size); + + /* + * Figure out the total size of the allowed DBuf slices, which is + * simply the number of slices allowed for that pipe multiplied by + * the slice size. Within this range, DDB entries are still allocated + * in proportion to display width. + */ + ddb_range_size = hweight8(dbuf_slice_mask) * slice_size; + + /* * Watermark/ddb requirement highly depends upon width of the * framebuffer, So instead of allocating DDB equally among pipes * distribute DDB based on resolution/width of the display. */ + total_slice_mask = dbuf_slice_mask; for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; enum pipe pipe = crtc->pipe; int hdisplay, vdisplay; + u32 pipe_dbuf_slice_mask; + + if (!crtc_state->hw.active) + continue; + + pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, + active_pipes); + + /* + * According to BSpec, a pipe can share one DBuf slice with other + * pipes, or use multiple slices; in both cases we account for + * other pipes only if they have exactly the same mask. We still + * need to track how many slices must be enabled in total. + */ + total_slice_mask |= pipe_dbuf_slice_mask; - if (!crtc_state->hw.enable) + /* + * Do not account for pipes using other slice sets. Luckily, as of + * the current BSpec, slice sets never partially intersect (pipes + * share either the same single slice or the same slice set), so + * checking for equality is enough for now. + */ + if (dbuf_slice_mask != pipe_dbuf_slice_mask) continue; drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); - total_width += hdisplay; + + total_width_in_range += hdisplay; if (pipe < for_pipe) - width_before_pipe += hdisplay; + width_before_pipe_in_range += hdisplay; else if (pipe == for_pipe) pipe_width = hdisplay; } - alloc->start = ddb_size * width_before_pipe / total_width; - alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width; + /* + * FIXME: For now we always enable slice S1 as per + * the Bspec display initialization sequence. 
+ */ + intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1); + + start = ddb_range_size * width_before_pipe_in_range / total_width_in_range; + end = ddb_range_size * + (width_before_pipe_in_range + pipe_width) / total_width_in_range; + + alloc->start = offset + start; + alloc->end = offset + end; + + DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe, + alloc->start, alloc->end); + DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n", + intel_state->enabled_dbuf_slices_mask, + INTEL_INFO(dev_priv)->num_supported_dbuf_slices); } static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, @@ -4002,7 +4035,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, DRM_FORMAT_MOD_LINEAR, DRM_MODE_ROTATE_0, crtc_state->pixel_rate, &wp, 0); - WARN_ON(ret); + drm_WARN_ON(&dev_priv->drm, ret); for (level = 0; level <= max_level; level++) { skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm); @@ -4091,10 +4124,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, intel_display_power_put(dev_priv, power_domain, wakeref); } -void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb /* out */) +void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv) { - ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv); + dev_priv->enabled_dbuf_slices_mask = + intel_enabled_dbuf_slices_mask(dev_priv); } /* @@ -4144,6 +4177,254 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, return mul_fixed16(downscale_w, downscale_h); } +struct dbuf_slice_conf_entry { + u8 active_pipes; + u8 dbuf_mask[I915_MAX_PIPES]; +}; + +/* + * Table taken from Bspec 12716 + * Pipes do have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * For more DBuf slices algorithm can get even more messy + * and less readable, so decided to use a table almost + * as is from BSpec itself - that way it is at least easier + * to compare, change and check. + */ +static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + {} +}; + +/* + * Table taken from Bspec 49255 + * Pipes do have some preferred DBuf slice affinity, + * plus there are some hardcoded requirements on how + * those should be distributed for multipipe scenarios. + * For more DBuf slices algorithm can get even more messy + * and less readable, so decided to use a table almost + * as is from BSpec itself - that way it is at least easier + * to compare, change and check. 
+ */ +static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] = +/* Autogenerated with igt/tools/intel_dbuf_map tool: */ +{ + { + .active_pipes = BIT(PIPE_A), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S2), + [PIPE_B] = BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_C), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_D), + .dbuf_mask = { + [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_C] = BIT(DBUF_S1), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + { + .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .dbuf_mask = { + [PIPE_A] = BIT(DBUF_S1), + [PIPE_B] = BIT(DBUF_S1), + [PIPE_C] = BIT(DBUF_S2), + [PIPE_D] = BIT(DBUF_S2), + }, + }, + {} +}; + +static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, + const struct dbuf_slice_conf_entry *dbuf_slices) +{ + int i; + + /* The terminating {} entry has active_pipes == 0 and ends the walk. */ + for (i = 0; dbuf_slices[i].active_pipes != 0; i++) { + if (dbuf_slices[i].active_pipes == active_pipes) + return dbuf_slices[i].dbuf_mask[pipe]; + } + return 0; +} +
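To see how a lookup resolves, take the three-pipe row of the TGL table above. A user-space sketch of the same sentinel-terminated walk over a reduced copy of the table (the pipe and slice constants are illustrative stand-ins for the kernel enums):

#include <stdio.h>

#define BIT(n)	(1u << (n))
enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D, I915_MAX_PIPES };
#define DBUF_S1	0
#define DBUF_S2	1

struct dbuf_slice_conf_entry {
	unsigned char active_pipes;
	unsigned char dbuf_mask[I915_MAX_PIPES];
};

/* Reduced copy of tgl_allowed_dbufs[]: only the A+B+C row plus the sentinel. */
static const struct dbuf_slice_conf_entry table[] = {
	{
		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
		.dbuf_mask = {
			[PIPE_A] = BIT(DBUF_S1),
			[PIPE_B] = BIT(DBUF_S1),
			[PIPE_C] = BIT(DBUF_S2),
		},
	},
	{}	/* active_pipes == 0 terminates the walk */
};

static unsigned char lookup(enum pipe pipe, unsigned char active_pipes)
{
	int i;

	for (i = 0; table[i].active_pipes != 0; i++)
		if (table[i].active_pipes == active_pipes)
			return table[i].dbuf_mask[pipe];
	return 0;	/* unknown combination: no slices */
}

int main(void)
{
	unsigned char active = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);

	/* Pipes A and B share slice 1, pipe C gets slice 2: prints "1 1 2". */
	printf("%u %u %u\n", lookup(PIPE_A, active),
	       lookup(PIPE_B, active), lookup(PIPE_C, active));
	return 0;
}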
+/* + * Find the entry with the same enabled pipe configuration and return + * the corresponding DBuf slice mask, as stated in the BSpec for the + * particular platform. + */ +static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +{ + /* + * FIXME: For ICL this is still a bit unclear, as a previous BSpec + * revision required calculating a "pipe ratio" to determine whether + * one or two slices can be used for single pipe configurations, as + * an additional constraint on the existing table. + * However, based on recent info it should not be a "pipe ratio" but + * rather the ratio between pixel_rate and cdclk with additional + * constants, so for now we use only the table until this is + * clarified. This is also why the crtc_state param is still around - + * we will need it once those additional constraints pop up. + */ + return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs); +} + +static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes) +{ + return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs); +} + +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state, + u8 active_pipes) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + if (IS_GEN(dev_priv, 12)) + return tgl_compute_dbuf_slices(pipe, active_pipes); + else if (IS_GEN(dev_priv, 11)) + return icl_compute_dbuf_slices(pipe, active_pipes); + /* + * For anything else just return one slice for now. + * Should be extended for other platforms. + */ + return BIT(DBUF_S1); +} + static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, @@ -4195,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate, u64 *uv_plane_data_rate) { - struct drm_atomic_state *state = crtc_state->uapi.state; struct intel_plane *plane; const struct intel_plane_state *plane_state; u64 total_data_rate = 0; - if (WARN_ON(!state)) - return 0; - /* Calculate and cache data rate for each plane */ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { enum plane_id plane_id = plane->id; @@ -4230,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state; u64 total_data_rate = 0; - if (WARN_ON(!crtc_state->uapi.state)) - return 0; - /* Calculate and cache data rate for each plane */ intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { enum plane_id plane_id = plane->id; @@ -4271,13 +4545,10 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, } static int -skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, - struct skl_ddb_allocation *ddb /* out */) +skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = crtc_state->uapi.state; - struct drm_crtc *crtc = crtc_state->uapi.crtc; - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; u16 alloc_size, start = 0; u16 total[I915_MAX_PLANES] = {}; @@ -4294,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv)); - if (WARN_ON(!state)) - return 0; - if (!crtc_state->hw.active) { alloc->start = alloc->end = 0; return 0; @@ -4314,7 +4582,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate, - ddb, alloc, &num_active); + alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) return 0; @@ -4335,13 +4603,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, */ for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { blocks = 0; - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { if 
(wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) { - WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX); + drm_WARN_ON(&dev_priv->drm, + wm->wm[level].min_ddb_alloc != U16_MAX); blocks = U32_MAX; break; } @@ -4371,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, * watermark level, plus an extra share of the leftover blocks * proportional to its relative data rate. */ - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; u64 rate; @@ -4406,11 +4675,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, alloc_size -= extra; total_data_rate -= rate; } - WARN_ON(alloc_size != 0 || total_data_rate != 0); + drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0); /* Set the actual DDB start/end points for each plane */ start = alloc->start; - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_ddb_entry *plane_alloc = &crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_ddb_entry *uv_plane_alloc = @@ -4420,7 +4689,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, continue; /* Gen11+ uses a separate plane for UV watermarks */ - WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]); + drm_WARN_ON(&dev_priv->drm, + INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]); /* Leave disabled planes at (0,0) */ if (total[plane_id]) { @@ -4443,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, * that aren't actually possible. */ for (level++; level <= ilk_wm_max_level(dev_priv); level++) { - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
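The proportional hand-out above drains alloc_size and total_data_rate in step, so integer rounding can neither over- nor under-commit blocks; that is what the drm_WARN_ON() after the loop asserts. A user-space sketch with made-up numbers (two planes, data rates in arbitrary units; the ceiling division mirrors DIV64_U64_ROUND_UP()):

#include <stdio.h>

int main(void)
{
	unsigned long long rate[] = { 8000, 2000 };	/* per-plane data rate */
	unsigned long long total_rate = 10000;
	unsigned int alloc_size = 100;	/* leftover DDB blocks to hand out */
	int i;

	for (i = 0; i < 2; i++) {
		/* extra = min(alloc_size, ceil(alloc_size * rate / total_rate)) */
		unsigned long long extra =
			(alloc_size * rate[i] + total_rate - 1) / total_rate;
		if (extra > alloc_size)
			extra = alloc_size;

		printf("plane %d: +%llu blocks\n", i, extra);	/* 80, then 20 */

		alloc_size -= extra;	/* drain both counters together ... */
		total_rate -= rate[i];	/* ... so the last plane takes the rest */
	}

	/* Both counters are exactly zero here, matching the kernel assertion. */
	printf("left: %u blocks, %llu rate\n", alloc_size, total_rate);
	return 0;
}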
@@ -4480,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, * Go back and disable the transition watermark if it turns out we * don't have enough DDB blocks for it. */ - for_each_plane_id_on_crtc(intel_crtc, plane_id) { + for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; @@ -4722,7 +4992,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, * WaIncreaseLatencyIPCEnabled: kbl,cfl * Display WA #1141: kbl,cfl */ - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) || + if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && dev_priv->ipc_enabled) latency += 4; @@ -4844,45 +5114,36 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, } } -static u32 -skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state) -{ - struct drm_atomic_state *state = crtc_state->uapi.state; - struct drm_i915_private *dev_priv = to_i915(state->dev); - uint_fixed_16_16_t linetime_us; - u32 linetime_wm; - - linetime_us = intel_get_linetime_us(crtc_state); - linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); - - /* Display WA #1135: BXT:ALL GLK:ALL */ - if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled) - linetime_wm /= 2; - - return linetime_wm; -} - static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wp, struct skl_plane_wm *wm) { struct drm_device *dev = crtc_state->uapi.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); - u16 trans_min, trans_y_tile_min; - const u16 trans_amount = 10; /* This is configurable amount */ + u16 trans_min, trans_amount, trans_y_tile_min; u16 wm0_sel_res_b, trans_offset_b, res_blocks; - /* Transition WM are not recommended by HW team for GEN9 */ - if (INTEL_GEN(dev_priv) <= 9) - return; - /* Transition WM don't make any sense if ipc is disabled */ if (!dev_priv->ipc_enabled) return; - trans_min = 14; + /* + * WaDisableTWM:skl,kbl,cfl,bxt + * Transition WM are not recommended by HW team for GEN9 + */ + if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) + return; + if (INTEL_GEN(dev_priv) >= 11) trans_min = 4; + else + trans_min = 14; + + /* Display WA #1140: glk,cnl */ + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + trans_amount = 0; + else + trans_amount = 10; /* This is a configurable amount */ trans_offset_b = trans_min + trans_amount; @@ -4909,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, /* WA BUG:1938466 add one block for non y-tile planes */ if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) res_blocks += 1; - } /* @@ -5049,8 +5309,6 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) return ret; } - pipe_wm->linetime = skl_compute_linetime_wm(crtc_state); - return 0; } @@ -5059,9 +5317,10 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, const struct skl_ddb_entry *entry) { if (entry->end) - I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start); + intel_de_write_fw(dev_priv, reg, + (entry->end - 1) << 16 | entry->start); else - I915_WRITE_FW(reg, 0); + intel_de_write_fw(dev_priv, reg, 0); } static void skl_write_wm_level(struct drm_i915_private *dev_priv, @@ -5077,7 +5336,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, val |= level->plane_res_b; val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; - I915_WRITE_FW(reg, val); + intel_de_write_fw(dev_priv, reg, val); } void skl_write_plane_wm(struct intel_plane *plane, @@ -5153,31 +5412,18 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv, int level, max_level = ilk_wm_max_level(dev_priv); for (level = 0; level <= max_level; level++) { - if 
(!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) || - !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level])) + /* + * We don't check uv_wm as the hardware doesn't actually + * use it. It only gets used for calculating the required + * ddb allocation. + */ + if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level])) return false; } return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm); } -static bool skl_pipe_wm_equals(struct intel_crtc *crtc, - const struct skl_pipe_wm *wm1, - const struct skl_pipe_wm *wm2) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) { - if (!skl_plane_wm_equals(dev_priv, - &wm1->planes[plane_id], - &wm2->planes[plane_id])) - return false; - } - - return wm1->linetime == wm2->linetime; -} - static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, const struct skl_ddb_entry *b) { @@ -5231,18 +5477,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, static int skl_compute_ddb(struct intel_atomic_state *state) { - const struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct skl_ddb_allocation *ddb = &state->wm_results.ddb; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state; struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int ret, i; - memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); + state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - ret = skl_allocate_pipe_ddb(new_crtc_state, ddb); + ret = skl_allocate_pipe_ddb(new_crtc_state); if (ret) return ret; @@ -5439,8 +5684,6 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state) * to grab the lock on *all* CRTC's. */ if (state->active_pipe_changes || state->modeset) { - state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask; - ret = intel_add_all_pipes(state); if (ret) return ret; @@ -5515,12 +5758,8 @@ skl_compute_wm(struct intel_atomic_state *state) struct intel_crtc *crtc; struct intel_crtc_state *new_crtc_state; struct intel_crtc_state *old_crtc_state; - struct skl_ddb_values *results = &state->wm_results; int ret, i; - /* Clear all dirty flags */ - results->dirty_pipes = 0; - ret = skl_ddb_add_affected_pipes(state); if (ret) return ret; @@ -5528,68 +5767,36 @@ skl_compute_wm(struct intel_atomic_state *state) /* * Calculate WM's for all pipes that are part of this transaction. * Note that skl_ddb_add_affected_pipes may have added more CRTC's that - * weren't otherwise being modified (and set bits in dirty_pipes) if - * pipe allocations had to change. + * weren't otherwise being modified if pipe allocations had to change. */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { ret = skl_build_pipe_wm(new_crtc_state); if (ret) return ret; - - ret = skl_wm_add_affected_planes(state, crtc); - if (ret) - return ret; - - if (!skl_pipe_wm_equals(crtc, - &old_crtc_state->wm.skl.optimal, - &new_crtc_state->wm.skl.optimal)) - results->dirty_pipes |= BIT(crtc->pipe); } ret = skl_compute_ddb(state); if (ret) return ret; + /* + * skl_compute_ddb() will have adjusted the final watermarks + * based on how much ddb is available. Now we can actually + * check if the final watermarks changed. 
+ */ + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + ret = skl_wm_add_affected_planes(state, crtc); + if (ret) + return ret; + } + skl_print_wm_changes(state); return 0; } -static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - enum pipe pipe = crtc->pipe; - - if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0) - return; - - I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); -} - -static void skl_initial_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct skl_ddb_values *results = &state->wm_results; - - if ((results->dirty_pipes & BIT(crtc->pipe)) == 0) - return; - - mutex_lock(&dev_priv->wm.wm_mutex); - - if (crtc_state->uapi.active_changed) - skl_atomic_update_crtc_wm(state, crtc); - - mutex_unlock(&dev_priv->wm.wm_mutex); -} - static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, struct intel_wm_config *config) { @@ -5712,25 +5919,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, if (!crtc->active) return; - - out->linetime = I915_READ(PIPE_WM_LINETIME(pipe)); } void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) { - struct skl_ddb_values *hw = &dev_priv->wm.skl_hw; - struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; - skl_ddb_get_hw_state(dev_priv, ddb); + skl_ddb_get_hw_state(dev_priv); for_each_intel_crtc(&dev_priv->drm, crtc) { crtc_state = to_intel_crtc_state(crtc->base.state); skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); - - if (crtc->active) - hw->dirty_pipes |= BIT(crtc->pipe); } if (dev_priv->active_pipes) { @@ -5754,8 +5954,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) }; hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); memset(active, 0, sizeof(*active)); @@ -5774,7 +5972,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; - active->linetime = hw->wm_linetime[pipe]; } else { int level, max_level = ilk_wm_max_level(dev_priv); @@ -6629,20 +6826,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); - /* WaEnable32PlaneMode:icl */ - I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, - _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); - - /* - * Wa_1408615072:icl,ehl (vsunit) - * Wa_1407596294:icl,ehl (hsunit) - */ - intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, - 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - - /* Wa_1407352427:icl,ehl */ - intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, - 0, PSDUNIT_CLKGATE_DIS); + /*Wa_14010594013:icl, ehl */ + intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, + 0, CNL_DELAY_PMRSP); } static void tgl_init_clock_gating(struct 
drm_i915_private *dev_priv) @@ -6650,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) u32 vd_pg_enable = 0; unsigned int i; - /* Wa_1408615072:tgl */ - intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2, - 0, VSUNIT_CLKGATE_DIS_TGL); - /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ for (i = 0; i < I915_MAX_VCS; i++) { if (HAS_ENGINE(dev_priv, _VCS(i))) @@ -6663,6 +6845,11 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(POWERGATE_ENABLE, I915_READ(POWERGATE_ENABLE) | vd_pg_enable); + + /* Wa_1409825376:tgl (pre-prod)*/ + if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0)) + I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | + TGL_VRH_GATING_DIS); } static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7248,8 +7435,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv) /* For FIFO watermark updates */ if (INTEL_GEN(dev_priv) >= 9) { skl_setup_wm_latency(dev_priv); - dev_priv->display.initial_watermarks = skl_initial_wm; - dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm; dev_priv->display.compute_global_watermarks = skl_compute_wm; } else if (HAS_PCH_SPLIT(dev_priv)) { ilk_setup_wm_latency(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index c06c6a846d9a..d60a85421c5a 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -17,7 +17,6 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_plane; -struct skl_ddb_allocation; struct skl_ddb_entry; struct skl_pipe_wm; struct skl_wm_level; @@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); void skl_wm_get_hw_state(struct drm_i915_private *dev_priv); +u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv); void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc, struct skl_ddb_entry *ddb_y, struct skl_ddb_entry *ddb_uv); -void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb /* out */); +void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv); void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index cbfb7171d62d..3f13baaef058 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915) * to the Valleyview P-unit and not all sideband communications. */ if (IS_VALLEYVIEW(i915)) { - pm_qos_update_request(&i915->sb_qos, 0); + cpu_latency_qos_update_request(&i915->sb_qos, 0); on_each_cpu(ping, NULL, 1); } } @@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915) static void __vlv_punit_put(struct drm_i915_private *i915) { if (IS_VALLEYVIEW(i915)) - pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&i915->sb_qos, + PM_QOS_DEFAULT_VALUE); iosf_mbi_punit_release(); } @@ -241,8 +242,9 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) * FIXME: There might be some registers where all 1's is a valid value, * so ideally we should check the register offset instead... 
*/ - WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n", - pipe_name(pipe), reg, val); + drm_WARN(&i915->drm, val == 0xffffffff, + "DPIO read pipe %c reg 0x%x == 0x%x\n", + pipe_name(pipe), reg, val); return val; } @@ -365,6 +367,10 @@ static inline int gen7_check_mailbox_status(u32 mbox) return -ETIMEDOUT; case GEN7_PCODE_ILLEGAL_DATA: return -EINVAL; + case GEN11_PCODE_ILLEGAL_SUBCOMMAND: + return -ENXIO; + case GEN11_PCODE_LOCKED: + return -EBUSY; case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: return -EOVERFLOW; default: @@ -525,7 +531,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, */ drm_dbg_kms(&i915->drm, "PCODE timeout, retrying with preemption disabled\n"); - WARN_ON_ONCE(timeout_base_ms > 3); + drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3); preempt_disable(); ret = wait_for_atomic(COND, 50); preempt_enable(); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 5f2cf6f43b8b..abb18b90d7c3 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -324,8 +324,9 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore) * w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. */ - WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000), - "GT thread status wait timed out\n"); + drm_WARN_ONCE(&uncore->i915->drm, + wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000), + "GT thread status wait timed out\n"); } static void fw_domains_get_with_thread_status(struct intel_uncore *uncore, @@ -441,7 +442,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore) cond_resched(); } - WARN_ON(active_domains); + drm_WARN_ON(&uncore->i915->drm, active_domains); fw = uncore->fw_domains_active; if (fw) @@ -757,9 +758,9 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore) if (!uncore->funcs.force_wake_get) return; - WARN(uncore->fw_domains_active, - "Expected all fw_domains to be inactive, but %08x are still on\n", - uncore->fw_domains_active); + drm_WARN(&uncore->i915->drm, uncore->fw_domains_active, + "Expected all fw_domains to be inactive, but %08x are still on\n", + uncore->fw_domains_active); } void assert_forcewakes_active(struct intel_uncore *uncore, @@ -779,9 +780,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore, assert_rpm_wakelock_held(uncore->rpm); fw_domains &= uncore->fw_domains; - WARN(fw_domains & ~uncore->fw_domains_active, - "Expected %08x fw_domains to be active, but %08x are off\n", - fw_domains, fw_domains & ~uncore->fw_domains_active); + drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active, + "Expected %08x fw_domains to be active, but %08x are off\n", + fw_domains, fw_domains & ~uncore->fw_domains_active); /* * Check that the caller has an explicit wakeref and we don't mistake @@ -794,9 +795,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore, if (uncore->fw_domains_timer & domain->mask) expect++; /* pending automatic release */ - if (WARN(actual < expect, - "Expected domain %d to be held awake by caller, count=%d\n", - domain->id, actual)) + if (drm_WARN(&uncore->i915->drm, actual < expect, + "Expected domain %d to be held awake by caller, count=%d\n", + domain->id, actual)) break; } @@ -866,9 +867,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset) if (entry->domains == FORCEWAKE_ALL) return uncore->fw_domains; - WARN(entry->domains & ~uncore->fw_domains, - "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", - 
entry->domains & ~uncore->fw_domains, offset); + drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains, + "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", + entry->domains & ~uncore->fw_domains, offset); return entry->domains; } @@ -1158,10 +1159,11 @@ __unclaimed_reg_debug(struct intel_uncore *uncore, const bool read, const bool before) { - if (WARN(check_for_unclaimed_mmio(uncore) && !before, - "Unclaimed %s register 0x%x\n", - read ? "read from" : "write to", - i915_mmio_reg_offset(reg))) + if (drm_WARN(&uncore->i915->drm, + check_for_unclaimed_mmio(uncore) && !before, + "Unclaimed %s register 0x%x\n", + read ? "read from" : "write to", + i915_mmio_reg_offset(reg))) /* Only report the first N failures */ i915_modparams.mmio_debug--; } @@ -1436,8 +1438,8 @@ static int __fw_domain_init(struct intel_uncore *uncore, if (!d) return -ENOMEM; - WARN_ON(!i915_mmio_reg_valid(reg_set)); - WARN_ON(!i915_mmio_reg_valid(reg_ack)); + drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set)); + drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack)); d->uncore = uncore; d->wake_count = 0; @@ -1482,8 +1484,8 @@ static void fw_domain_fini(struct intel_uncore *uncore, return; uncore->fw_domains &= ~BIT(domain_id); - WARN_ON(d->wake_count); - WARN_ON(hrtimer_cancel(&d->timer)); + drm_WARN_ON(&uncore->i915->drm, d->wake_count); + drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer)); kfree(d); } @@ -1613,7 +1615,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) #undef fw_domain_init /* All future platforms are expected to require complex power gating */ - WARN_ON(!ret && uncore->fw_domains == 0); + drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0); out: if (ret) @@ -2108,7 +2110,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, { enum forcewake_domains fw_domains = 0; - WARN_ON(!op); + drm_WARN_ON(&uncore->i915->drm, !op); if (!intel_uncore_has_forcewake(uncore)) return 0; @@ -2119,7 +2121,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, if (op & FW_REG_WRITE) fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); - WARN_ON(fw_domains & ~uncore->fw_domains); + drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains); return fw_domains; } diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index ef572a0c2566..68bbb1580162 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -201,11 +201,57 @@ static int live_active_retire(void *arg) return err; } +static int live_active_barrier(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct live_active *active; + int err = 0; + + /* Check that we get a callback when requests retire upon waiting */ + + active = __live_alloc(i915); + if (!active) + return -ENOMEM; + + err = i915_active_acquire(&active->base); + if (err) + goto out; + + for_each_uabi_engine(engine, i915) { + err = i915_active_acquire_preallocate_barrier(&active->base, + engine); + if (err) + break; + + i915_active_acquire_barrier(&active->base); + } + + i915_active_release(&active->base); + + if (err == 0) + err = i915_active_wait(&active->base); + + if (err == 0 && !READ_ONCE(active->retired)) { + pr_err("i915_active not retired after flushing barriers!\n"); + err = -EINVAL; + } + +out: + __live_put(active); + + if (igt_flush_test(i915)) + err = -EIO; + + return err; +} + int i915_active_live_selftests(struct drm_i915_private *i915) { 
static const struct i915_subtest tests[] = { SUBTEST(live_active_wait), SUBTEST(live_active_retire), + SUBTEST(live_active_barrier), }; if (intel_gt_is_wedged(&i915->gt)) @@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock) spin_unlock_irq(lock); } +static void active_flush(struct i915_active *ref, + struct i915_active_fence *active) +{ + struct dma_fence *fence; + + fence = xchg(__active_fence_slot(active), NULL); + if (!fence) + return; + + spin_lock_irq(fence->lock); + __list_del_entry(&active->cb.node); + spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */ + atomic_dec(&ref->count); + + GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); +} + void i915_active_unlock_wait(struct i915_active *ref) { if (i915_active_acquire_if_busy(ref)) { struct active_node *it, *n; + /* Wait for all active callbacks */ rcu_read_lock(); - rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - struct dma_fence *f; - - /* Wait for all active callbacks */ - f = rcu_dereference(it->base.fence); - if (f) - spin_unlock_wait(f->lock); - } + active_flush(ref, &ref->excl); + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) + active_flush(ref, &it->base); rcu_read_unlock(); i915_active_release(ref); } /* And wait for the retire callback */ - spin_lock_irq(&ref->tree_lock); - spin_unlock_irq(&ref->tree_lock); + spin_unlock_wait(&ref->tree_lock); /* ... which may have been on a thread instead */ flush_work(&ref->work); diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c index 1b856bae67b5..939a6caebb03 100644 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c @@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size) static int igt_buddy_alloc_smoke(void *arg) { struct i915_buddy_mm mm; - int max_order; + IGT_TIMEOUT(end_time); + I915_RND_STATE(prng); u64 chunk_size; u64 mm_size; - int err; + int *order; + int err, i; igt_mm_config(&mm_size, &chunk_size); @@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg) return err; } - for (max_order = mm.max_order; max_order >= 0; max_order--) { + order = i915_random_order(mm.max_order + 1, &prng); + if (!order) + goto out_fini; + + for (i = 0; i <= mm.max_order; ++i) { struct i915_buddy_block *block; - int order; + int max_order = order[i]; + bool timeout = false; LIST_HEAD(blocks); + int order; u64 total; err = igt_check_mm(&mm); @@ -360,6 +368,11 @@ retry: } total += i915_buddy_block_size(&mm, block); + + if (__igt_timeout(end_time, NULL)) { + timeout = true; + break; + } } while (total < mm.size); if (!err) @@ -373,7 +386,7 @@ retry: pr_err("post-mm check failed\n"); } - if (err) + if (err || timeout) break; cond_resched(); @@ -382,6 +395,8 @@ retry: if (err == -ENOMEM) err = 0; + kfree(order); +out_fini: i915_buddy_fini(&mm); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 78f36faf2bbe..623759b73bb4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915) intel_wakeref_t wakeref; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - i915_gem_suspend_gtt_mappings(i915); + i915_ggtt_suspend(&i915->ggtt); i915_gem_suspend_late(i915); } } @@ -108,7 +108,7 @@ static void pm_hibernate(struct drm_i915_private *i915) intel_wakeref_t wakeref; with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - 
	i915_gem_suspend_gtt_mappings(i915);
+	i915_ggtt_suspend(&i915->ggtt);

	i915_gem_freeze(i915);
	i915_gem_freeze_late(i915);
@@ -124,7 +124,7 @@ static void pm_resume(struct drm_i915_private *i915)
	 * that runtime-pm just works.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		i915_gem_restore_gtt_mappings(i915);
+		i915_ggtt_resume(&i915->ggtt);
		i915_gem_restore_fences(&i915->ggtt);
		i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 5a577a1332f5..3bf7f53e9924 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -17,3 +17,4 @@
 */
selftest(engine_cs, intel_engine_cs_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 58b5f40a07dd..af89c7fc8f59 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -173,7 +173,7 @@ static int igt_vma_create(void *arg)
	}

	nc = 0;
-	for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) {
+	for_each_prime_number(num_ctx, 2 * BITS_PER_LONG) {
		for (; nc < num_ctx; nc++) {
			ctx = mock_context(i915, "mock");
			if (!ctx)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
	if (err) {
-		i915_request_skip(rq, err);
+		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3ef3620e0da5..2a1d4ba1f9f3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -4,6 +4,7 @@
 */

#include <linux/prime_numbers.h>
+#include <linux/sort.h>

#include "../i915_selftest.h"
@@ -19,6 +20,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
@@ -572,6 +574,195 @@ out_put:
	return err;
}

+static const char *repr_type(u32 type)
+{
+	switch (type) {
+	case I915_MAP_WB:
+		return "WB";
+	case I915_MAP_WC:
+		return "WC";
+	}
+
+	return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+			  void **out_addr)
+{
+	struct drm_i915_gem_object *obj;
+	void *addr;
+
+	obj = i915_gem_object_create_region(mr, size, 0);
+	if (IS_ERR(obj))
+		return obj;
+
+	addr = i915_gem_object_pin_map(obj, type);
+	if (IS_ERR(addr)) {
+		i915_gem_object_put(obj);
+		if (PTR_ERR(addr) == -ENXIO)
+			return ERR_PTR(-ENODEV);
+		return addr;
+	}
+
+	*out_addr = addr;
+	return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+	const ktime_t *a = A, *b = B;
+
+	return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+	unsigned long *tmp = dst;
+	const unsigned long *s = src;
+
+	size = size / sizeof(unsigned long);
+	while (size--)
+		*tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+	memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+	i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+			struct intel_memory_region *dst_mr,
+			u64 size, u32 src_type, u32 dst_type)
+{
+	struct drm_i915_private *i915 = src_mr->i915;
+	const struct {
+		const char *name;
+		void (*copy)(void *dst, const void *src, size_t size);
+		bool skip;
+	} tests[] = {
+		{
+			"memcpy",
+			igt_memcpy,
+		},
+		{
+			"memcpy_long",
+			igt_memcpy_long,
+		},
+		{
+			"memcpy_from_wc",
+			igt_memcpy_from_wc,
+			!i915_has_memcpy_from_wc(),
+		},
+	};
+	struct drm_i915_gem_object *src, *dst;
+	void *src_addr, *dst_addr;
+	int ret = 0;
+	int i;
+
+	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+	if (IS_ERR(src)) {
+		ret = PTR_ERR(src);
+		goto out;
+	}
+
+	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+	if (IS_ERR(dst)) {
+		ret = PTR_ERR(dst);
+		goto out_unpin_src;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+		ktime_t t[5];
+		int pass;
+
+		if (tests[i].skip)
+			continue;
+
+		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+			ktime_t t0, t1;
+
+			t0 = ktime_get();
+
+			tests[i].copy(dst_addr, src_addr, size);
+
+			t1 = ktime_get();
+			t[pass] = ktime_sub(t1, t0);
+		}
+
+		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+			__func__,
+			src_mr->name,
+			repr_type(src_type),
+			dst_mr->name,
+			repr_type(dst_type),
+			tests[i].name,
+			size >> 10,
+			div64_u64(mul_u32_u32(4 * size,
+					      1000 * 1000 * 1000),
+				  t[1] + 2 * t[2] + t[3]) >> 20);
+
+		cond_resched();
+	}
+
+	i915_gem_object_unpin_map(dst);
+	i915_gem_object_put(dst);
+out_unpin_src:
+	i915_gem_object_unpin_map(src);
+	i915_gem_object_put(src);
+
+	i915_gem_drain_freed_objects(i915);
+out:
+	if (ret == -ENODEV)
+		ret = 0;
+
+	return ret;
+}
+
+static int perf_memcpy(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static const u32 types[] = {
+		I915_MAP_WB,
+		I915_MAP_WC,
+	};
+	static const u32 sizes[] = {
+		SZ_4K,
+		SZ_64K,
+		SZ_4M,
+	};
+	struct intel_memory_region *src_mr, *dst_mr;
+	int src_id, dst_id;
+	int i, j, k;
+	int ret;
+
+	for_each_memory_region(src_mr, i915, src_id) {
+		for_each_memory_region(dst_mr, i915, dst_id) {
+			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+				for (j = 0; j < ARRAY_SIZE(types); ++j) {
+					for (k = 0; k < ARRAY_SIZE(types); ++k) {
+						ret = _perf_memcpy(src_mr,
+								   dst_mr,
+								   sizes[i],
+								   types[j],
+								   types[k]);
+						if (ret)
+							return ret;
+					}
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -619,3 +810,15 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 	return i915_live_subtests(tests, i915);
 }
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(perf_memcpy),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
		goto put_device;
	}
	i915->drm.pdev = pdev;
-	i915->drm.dev_private = i915;

	intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
new file mode 100644
index 000000000000..23adb64d640a
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_pm.h"
+#include "vlv_suspend.h"
+
+struct vlv_s0ix_state {
+	/* GAM */
+	u32 wr_watermark;
+	u32 gfx_prio_ctrl;
+	u32 arb_mode;
+	u32 gfx_pend_tlb0;
+	u32 gfx_pend_tlb1;
+	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+	u32 media_max_req_count;
+	u32 gfx_max_req_count;
+	u32 render_hwsp;
+	u32 ecochk;
+	u32 bsd_hwsp;
+	u32 blt_hwsp;
+	u32 tlb_rd_addr;
+
+	/* MBC */
+	u32 g3dctl;
+	u32 gsckgctl;
+	u32 mbctl;
+
+	/* GCP */
+	u32 ucgctl1;
+	u32 ucgctl3;
+	u32 rcgctl1;
+	u32 rcgctl2;
+	u32 rstctl;
+	u32 misccpctl;
+
+	/* GPM */
+	u32 gfxpause;
+	u32 rpdeuhwtc;
+	u32 rpdeuc;
+	u32 ecobus;
+	u32 pwrdwnupctl;
+	u32 rp_down_timeout;
+	u32 rp_deucsw;
+	u32 rcubmabdtmr;
+	u32 rcedata;
+	u32 spare2gh;
+
+	/* Display 1 CZ domain */
+	u32 gt_imr;
+	u32 gt_ier;
+	u32 pm_imr;
+	u32 pm_ier;
+	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+	/* GT SA CZ domain */
+	u32 tilectl;
+	u32 gt_fifoctl;
+	u32 gtlc_wake_ctrl;
+	u32 gtlc_survive;
+	u32 pmwgicz;
+
+	/* Display 2 CZ domain */
+	u32 gu_ctl0;
+	u32 gu_ctl1;
+	u32 pcbr;
+	u32 clock_gate_dis2;
+};
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ *                    used internally by the HW in a way that doesn't depend
+ *                    on keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ *   These have no effect on the driver's operation, so we don't save/restore
+ *   them to reduce the overhead.
+ * - Registers that are fully set up by an initialization function called from
+ *   the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that, based on the above 3 criteria, can be
+ * safely ignored, we save/restore all others, practically treating the HW
+ * context as a black-box for the driver. Further investigation is needed to
+ * reduce the saved/restored registers even further, by following the same 3
+ * criteria.
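+ *
+ * Note that both the save and the restore path bail out early when the
+ * vlv_s0ix_state object was never allocated, see vlv_suspend_init().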
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+	struct intel_uncore *uncore = &i915->uncore;
+	int i;
+
+	if (!s)
+		return;
+
+	/* GAM 0x4000-0x4770 */
+	s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
+	s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
+	s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
+	s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
+	s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));
+
+	s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
+	s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);
+
+	s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
+	s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+	s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
+	s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);
+
+	s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
+	s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
+	s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
+	s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
+	s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
+	s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
+	s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
+	s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
+	s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
+	s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
+	s->ecobus = intel_uncore_read(uncore, ECOBUS);
+	s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
+	s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
+	s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
+	s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
+	s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
+	s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	s->gt_imr = intel_uncore_read(uncore, GTIMR);
+	s->gt_ier = intel_uncore_read(uncore, GTIER);
+	s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+	s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	s->tilectl = intel_uncore_read(uncore, TILECTL);
+	s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
+	s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
+	s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
+	s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
+	s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);
+
+	/*
+	 * Not saving any of:
+	 * DFT, 0x9800-0x9EC0
+	 * SARB, 0xB000-0xB1FC
+	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
+	 * PCI CFG
+	 */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+	struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 val;
+	int i;
+
+	if (!s)
+		return;
+
+	/* GAM 0x4000-0x4770 */
+	intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
+	intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+	intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
+	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+	intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+		intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+	intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+	intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+	intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
+	intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
+	intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+	intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+	intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+	/* MBC 0x9024-0x91D0, 0x8500 */
+	intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
+	intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
+	intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);
+
+	/* GCP 0x9400-0x9424, 0x8100-0x810C */
+	intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
+	intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
+	intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
+	intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
+	intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
+	intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);
+
+	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+	intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
+	intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
+	intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
+	intel_uncore_write(uncore, ECOBUS, s->ecobus);
+	intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+	intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+	intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
+	intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+	intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
+	intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);
+
+	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+	intel_uncore_write(uncore, GTIMR, s->gt_imr);
+	intel_uncore_write(uncore, GTIER, s->gt_ier);
+	intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
+	intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);
+
+	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+		intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+	/* GT SA CZ domain, 0x100000-0x138124 */
+	intel_uncore_write(uncore, TILECTL, s->tilectl);
+	intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
+	/*
+	 * Preserve the GT allow wake and GFX force clock bits; they are not
+	 * restored here, as they are used to control the s0ix suspend/resume
+	 * sequence by the caller.
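+	 * Below, only those live bits are kept from the current register
+	 * values; all other bits are restored from the saved state.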
+	 */
+	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	val &= VLV_GTLC_ALLOWWAKEREQ;
+	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+
+	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	val &= VLV_GFX_CLK_FORCE_ON_BIT;
+	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+	intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
+
+	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
+	intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
+	intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
+	intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
+	intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
+				  u32 mask, u32 val)
+{
+	i915_reg_t reg = VLV_GTLC_PW_STATUS;
+	u32 reg_value;
+	int ret;
+
+	/* The HW does not like us polling for PW_STATUS frequently, so
+	 * use the sleeping loop rather than risk the busy spin within
+	 * intel_wait_for_register().
+	 *
+	 * Transitioning between RC6 states should be at most 2ms (see
+	 * valleyview_enable_rps) so use a 3ms timeout.
+	 */
+	ret = wait_for(((reg_value =
+			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+		       == val, 3);
+
+	/* just trace the final value */
+	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+	return ret;
+}
+
+static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 val;
+	int err;
+
+	val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+	if (force_on)
+		val |= VLV_GFX_CLK_FORCE_ON_BIT;
+	intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+	if (!force_on)
+		return 0;
+
+	err = intel_wait_for_register(uncore,
+				      VLV_GTLC_SURVIVABILITY_REG,
+				      VLV_GFX_CLK_STATUS_BIT,
+				      VLV_GFX_CLK_STATUS_BIT,
+				      20);
+	if (err)
+		drm_err(&i915->drm,
+			"timeout waiting for GFX clock force-on (%08x)\n",
+			intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
+
+	return err;
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+	u32 mask;
+	u32 val;
+	int err;
+
+	val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+	val &= ~VLV_GTLC_ALLOWWAKEREQ;
+	if (allow)
+		val |= VLV_GTLC_ALLOWWAKEREQ;
+	intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+	intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
+
+	mask = VLV_GTLC_ALLOWWAKEACK;
+	val = allow ? mask : 0;
+
+	err = vlv_wait_for_pw_status(i915, mask, val);
+	if (err)
+		drm_err(&i915->drm, "timeout disabling GT waking\n");
+
+	return err;
+}
+
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+				  bool wait_for_on)
+{
+	u32 mask;
+	u32 val;
+
+	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+	val = wait_for_on ? mask : 0;
+
+	/*
+	 * RC6 transitioning can be delayed up to 2 msec (see
+	 * valleyview_enable_rps), use 3 msec for safety.
+	 *
+	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
+	 * reset and we are trying to force the machine to sleep.
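+	 * Hence a timeout below is reported only as a debug message rather
+	 * than treated as an error.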
+	 */
+	if (vlv_wait_for_pw_status(dev_priv, mask, val))
+		drm_dbg(&dev_priv->drm,
+			"timeout waiting for GT wells to go %s\n",
+			onoff(wait_for_on));
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
+{
+	struct intel_uncore *uncore = &i915->uncore;
+
+	if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+		return;
+
+	drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
+	intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+	u32 mask;
+	int err;
+
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		return 0;
+
+	/*
+	 * Bspec defines the following GT well on flags as debug only, so
+	 * don't treat them as hard failures.
+	 */
+	vlv_wait_for_gt_wells(dev_priv, false);
+
+	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+	drm_WARN_ON(&dev_priv->drm,
+		    (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+	vlv_check_no_gt_access(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, true);
+	if (err)
+		goto err1;
+
+	err = vlv_allow_gt_wake(dev_priv, false);
+	if (err)
+		goto err2;
+
+	vlv_save_gunit_s0ix_state(dev_priv);
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (err)
+		goto err2;
+
+	return 0;
+
+err2:
+	/* For safety always re-enable waking and disable gfx clock forcing */
+	vlv_allow_gt_wake(dev_priv, true);
+err1:
+	vlv_force_gfx_clock(dev_priv, false);
+
+	return err;
+}
+
+int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
+{
+	int err;
+	int ret;
+
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+		return 0;
+
+	/*
+	 * If any of the steps fail just try to continue; that's the best we
+	 * can do at this point. Return the first error code (which will also
+	 * leave RPM permanently disabled).
+	 */
+	ret = vlv_force_gfx_clock(dev_priv, true);
+
+	vlv_restore_gunit_s0ix_state(dev_priv);
+
+	err = vlv_allow_gt_wake(dev_priv, true);
+	if (!ret)
+		ret = err;
+
+	err = vlv_force_gfx_clock(dev_priv, false);
+	if (!ret)
+		ret = err;
+
+	vlv_check_no_gt_access(dev_priv);
+
+	if (rpm_resume)
+		intel_init_clock_gating(dev_priv);
+
+	return ret;
+}
+
+int vlv_suspend_init(struct drm_i915_private *i915)
+{
+	if (!IS_VALLEYVIEW(i915))
+		return 0;
+
+	/* we write all the values in the struct, so no need to zero it out */
+	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+				       GFP_KERNEL);
+	if (!i915->vlv_s0ix_state)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void vlv_suspend_cleanup(struct drm_i915_private *i915)
+{
+	if (!i915->vlv_s0ix_state)
+		return;
+
+	kfree(i915->vlv_s0ix_state);
+	i915->vlv_s0ix_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/vlv_suspend.h b/drivers/gpu/drm/i915/vlv_suspend.h
new file mode 100644
index 000000000000..895091cb1f62
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __VLV_SUSPEND_H__
+#define __VLV_SUSPEND_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int vlv_suspend_init(struct drm_i915_private *i915);
+void vlv_suspend_cleanup(struct drm_i915_private *i915);
+int vlv_suspend_complete(struct drm_i915_private *i915);
+int vlv_resume_prepare(struct drm_i915_private *i915, bool rpm_resume);
+
+#endif /* __VLV_SUSPEND_H__ */
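
For reference, the MiB/s figure that _perf_memcpy() prints above is a trimmed estimate: the five per-pass copy times are sorted, the fastest and the slowest sample are dropped, and the middle three are combined with 1:2:1 weights, i.e. bandwidth = 4 * size / (t[1] + 2 * t[2] + t[3]). Below is a minimal standalone userspace sketch of the same estimator; the sample timings are made up and the program is illustrative only, not part of the patch.

/*
 * Hypothetical userspace sketch of the trimmed-bandwidth estimate used by
 * _perf_memcpy(): sort 5 samples, drop the extremes, weight the middle
 * three 1:2:1 (hence "4 * size" bytes over the summed time).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
	const uint64_t *x = a, *y = b;

	return (*x > *y) - (*x < *y);
}

int main(void)
{
	/* made-up per-pass copy times in nanoseconds for a 4 MiB copy */
	uint64_t t[5] = { 410000, 395000, 430000, 400000, 405000 };
	const uint64_t size = 4ull << 20;
	uint64_t mib_per_s;

	qsort(t, 5, sizeof(t[0]), cmp_u64);
	/* bytes/ns scaled by 1e9 gives bytes/s; >> 20 converts to MiB/s */
	mib_per_s = 4 * size * 1000000000ull / (t[1] + 2 * t[2] + t[3]) >> 20;
	printf("%llu MiB/s\n", (unsigned long long)mib_per_s);

	return 0;
}

Discarding the extremes keeps a single preempted or cache-cold pass from skewing the reported number, which matters for short selftest runs.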