Diffstat (limited to 'drivers/gpu/drm/i915')
161 files changed, 12197 insertions, 7999 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index dfd95889f4b7..5c607f2c707b 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -23,6 +23,7 @@ config DRM_I915 select SYNC_FILE select IOSF_MBI select CRC32 + select SND_HDA_I915 if SND_HDA_CORE help Choose this option if you have a system that has "Intel Graphics Media Accelerator" or "HD Graphics" integrated graphics, diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 9de8b1c51a5c..459f8f88a34c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -51,6 +51,18 @@ config DRM_I915_DEBUG_GEM If in doubt, say "N". +config DRM_I915_ERRLOG_GEM + bool "Insert extra logging (very verbose) for common GEM errors" + default n + depends on DRM_I915_DEBUG_GEM + help + Enable additional logging that may help track down the cause of + principally userspace errors. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_I915_TRACE_GEM bool "Insert extra ftrace output from the GEM internals" depends on DRM_I915_DEBUG_GEM diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 4c6adae23e18..5794f102f9b8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -135,15 +135,14 @@ i915-y += dvo_ch7017.o \ dvo_ns2501.o \ dvo_sil164.o \ dvo_tfp410.o \ + icl_dsi.o \ intel_crt.o \ intel_ddi.o \ intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ - intel_dsi.o \ intel_dsi_dcs_backlight.o \ - intel_dsi_pll.o \ intel_dsi_vbt.o \ intel_dvo.o \ intel_hdmi.o \ @@ -152,7 +151,9 @@ i915-y += dvo_ch7017.o \ intel_lvds.o \ intel_panel.o \ intel_sdvo.o \ - intel_tv.o + intel_tv.o \ + vlv_dsi.o \ + vlv_dsi_pll.o # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c index 80b3e16cf48c..caac9942e1e3 100644 --- a/drivers/gpu/drm/i915/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/dvo_ch7017.c @@ -159,7 +159,7 @@ #define CH7017_BANG_LIMIT_CONTROL 0x7f struct ch7017_priv { - uint8_t dummy; + u8 dummy; }; static void ch7017_dump_regs(struct intel_dvo_device *dvo); @@ -186,7 +186,7 @@ static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val) { - uint8_t buf[2] = { addr, val }; + u8 buf[2] = { addr, val }; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, @@ -258,11 +258,11 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; - uint8_t outputs_enable, lvds_control_2, lvds_power_down; - uint8_t horizontal_active_pixel_input; - uint8_t horizontal_active_pixel_output, vertical_active_line_output; - uint8_t active_input_line_output; + u8 lvds_pll_feedback_div, lvds_pll_vco_control; + u8 outputs_enable, lvds_control_2, lvds_power_down; + u8 horizontal_active_pixel_input; + u8 horizontal_active_pixel_output, vertical_active_line_output; + u8 active_input_line_output; DRM_DEBUG_KMS("Registers before mode setting\n"); ch7017_dump_regs(dvo); @@ -333,7 +333,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo, /* set the CH7017 power state */ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) { - uint8_t val; + u8 val; ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); 
@@ -361,7 +361,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable) static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) { - uint8_t val; + u8 val; ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val); @@ -373,7 +373,7 @@ static bool ch7017_get_hw_state(struct intel_dvo_device *dvo) static void ch7017_dump_regs(struct intel_dvo_device *dvo) { - uint8_t val; + u8 val; #define DUMP(reg) \ do { \ diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 7aeeffd2428b..397ac5233726 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -85,7 +85,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ static struct ch7xxx_id_struct { - uint8_t vid; + u8 vid; char *name; } ch7xxx_ids[] = { { CH7011_VID, "CH7011" }, @@ -96,7 +96,7 @@ static struct ch7xxx_id_struct { }; static struct ch7xxx_did_struct { - uint8_t did; + u8 did; char *name; } ch7xxx_dids[] = { { CH7xxx_DID, "CH7XXX" }, @@ -107,7 +107,7 @@ struct ch7xxx_priv { bool quiet; }; -static char *ch7xxx_get_id(uint8_t vid) +static char *ch7xxx_get_id(u8 vid) { int i; @@ -119,7 +119,7 @@ static char *ch7xxx_get_id(uint8_t vid) return NULL; } -static char *ch7xxx_get_did(uint8_t did) +static char *ch7xxx_get_did(u8 did) { int i; @@ -132,7 +132,7 @@ static char *ch7xxx_get_did(uint8_t did) } /** Reads an 8 bit register */ -static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -170,11 +170,11 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) } /** Writes an 8 bit register */ -static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct ch7xxx_priv *ch7xxx = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; + u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, @@ -201,7 +201,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo, { /* this will detect the CH7xxx chip on the specified i2c bus */ struct ch7xxx_priv *ch7xxx; - uint8_t vendor, device; + u8 vendor, device; char *name, *devid; ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL); @@ -244,7 +244,7 @@ out: static enum drm_connector_status ch7xxx_detect(struct intel_dvo_device *dvo) { - uint8_t cdet, orig_pm, pm; + u8 cdet, orig_pm, pm; ch7xxx_readb(dvo, CH7xxx_PM, &orig_pm); @@ -276,7 +276,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - uint8_t tvco, tpcp, tpd, tlpf, idf; + u8 tvco, tpcp, tpd, tlpf, idf; if (mode->clock <= 65000) { tvco = 0x23; @@ -336,7 +336,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) int i; for (i = 0; i < CH7xxx_NUM_REGS; i++) { - uint8_t val; + u8 val; if ((i % 8) == 0) DRM_DEBUG_KMS("\n %02X: ", i); ch7xxx_readb(dvo, i, &val); diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c index c73aff163908..24278cc49090 100644 --- a/drivers/gpu/drm/i915/dvo_ivch.c +++ b/drivers/gpu/drm/i915/dvo_ivch.c @@ -161,7 +161,7 @@ * instead. The following list contains all registers that * require saving. 
*/ -static const uint16_t backup_addresses[] = { +static const u16 backup_addresses[] = { 0x11, 0x12, 0x18, 0x19, 0x1a, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, @@ -174,11 +174,11 @@ static const uint16_t backup_addresses[] = { struct ivch_priv { bool quiet; - uint16_t width, height; + u16 width, height; /* Register backup */ - uint16_t reg_backup[ARRAY_SIZE(backup_addresses)]; + u16 reg_backup[ARRAY_SIZE(backup_addresses)]; }; @@ -188,7 +188,7 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo); * * Each of the 256 registers are 16 bits long. */ -static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) +static bool ivch_read(struct intel_dvo_device *dvo, int addr, u16 *data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -231,7 +231,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data) } /* Writes a 16-bit register on the ivch */ -static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) +static bool ivch_write(struct intel_dvo_device *dvo, int addr, u16 data) { struct ivch_priv *priv = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -263,7 +263,7 @@ static bool ivch_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { struct ivch_priv *priv; - uint16_t temp; + u16 temp; int i; priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL); @@ -342,7 +342,7 @@ static void ivch_reset(struct intel_dvo_device *dvo) static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) { int i; - uint16_t vr01, vr30, backlight; + u16 vr01, vr30, backlight; ivch_reset(dvo); @@ -379,7 +379,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) static bool ivch_get_hw_state(struct intel_dvo_device *dvo) { - uint16_t vr01; + u16 vr01; ivch_reset(dvo); @@ -398,9 +398,9 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, const struct drm_display_mode *adjusted_mode) { struct ivch_priv *priv = dvo->dev_priv; - uint16_t vr40 = 0; - uint16_t vr01 = 0; - uint16_t vr10; + u16 vr40 = 0; + u16 vr01 = 0; + u16 vr10; ivch_reset(dvo); @@ -416,7 +416,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, if (mode->hdisplay != adjusted_mode->crtc_hdisplay || mode->vdisplay != adjusted_mode->crtc_vdisplay) { - uint16_t x_ratio, y_ratio; + u16 x_ratio, y_ratio; vr01 |= VR01_PANEL_FIT_ENABLE; vr40 |= VR40_CLOCK_GATING_ENABLE; @@ -438,7 +438,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo, static void ivch_dump_regs(struct intel_dvo_device *dvo) { - uint16_t val; + u16 val; ivch_read(dvo, VR00, &val); DRM_DEBUG_KMS("VR00: 0x%04x\n", val); diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c index 2379c33cfe51..c584e01dc8dc 100644 --- a/drivers/gpu/drm/i915/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/dvo_ns2501.c @@ -191,8 +191,8 @@ enum { }; struct ns2501_reg { - uint8_t offset; - uint8_t value; + u8 offset; + u8 value; }; /* @@ -202,23 +202,23 @@ struct ns2501_reg { * read all this with a grain of salt. 
*/ struct ns2501_configuration { - uint8_t sync; /* configuration of the C0 register */ - uint8_t conf; /* configuration register 8 */ - uint8_t syncb; /* configuration register 41 */ - uint8_t dither; /* configuration of the dithering */ - uint8_t pll_a; /* PLL configuration, register A, 1B */ - uint16_t pll_b; /* PLL configuration, register B, 1C/1D */ - uint16_t hstart; /* horizontal start, registers C1/C2 */ - uint16_t hstop; /* horizontal total, registers C3/C4 */ - uint16_t vstart; /* vertical start, registers C5/C6 */ - uint16_t vstop; /* vertical total, registers C7/C8 */ - uint16_t vsync; /* manual vertical sync start, 80/81 */ - uint16_t vtotal; /* number of lines generated, 82/83 */ - uint16_t hpos; /* horizontal position + 256, 98/99 */ - uint16_t vpos; /* vertical position, 8e/8f */ - uint16_t voffs; /* vertical output offset, 9c/9d */ - uint16_t hscale; /* horizontal scaling factor, b8/b9 */ - uint16_t vscale; /* vertical scaling factor, 10/11 */ + u8 sync; /* configuration of the C0 register */ + u8 conf; /* configuration register 8 */ + u8 syncb; /* configuration register 41 */ + u8 dither; /* configuration of the dithering */ + u8 pll_a; /* PLL configuration, register A, 1B */ + u16 pll_b; /* PLL configuration, register B, 1C/1D */ + u16 hstart; /* horizontal start, registers C1/C2 */ + u16 hstop; /* horizontal total, registers C3/C4 */ + u16 vstart; /* vertical start, registers C5/C6 */ + u16 vstop; /* vertical total, registers C7/C8 */ + u16 vsync; /* manual vertical sync start, 80/81 */ + u16 vtotal; /* number of lines generated, 82/83 */ + u16 hpos; /* horizontal position + 256, 98/99 */ + u16 vpos; /* vertical position, 8e/8f */ + u16 voffs; /* vertical output offset, 9c/9d */ + u16 hscale; /* horizontal scaling factor, b8/b9 */ + u16 vscale; /* vertical scaling factor, 10/11 */ }; /* @@ -389,7 +389,7 @@ struct ns2501_priv { ** If it returns false, it might be wise to enable the ** DVO with the above function. */ -static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) +static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -434,11 +434,11 @@ static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch) ** If it returns false, it might be wise to enable the ** DVO with the above function. 
*/ -static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct ns2501_priv *ns = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; + u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c index 1c1a0674dbab..4ae5d8fd9ff0 100644 --- a/drivers/gpu/drm/i915/dvo_sil164.c +++ b/drivers/gpu/drm/i915/dvo_sil164.c @@ -65,7 +65,7 @@ struct sil164_priv { #define SILPTR(d) ((SIL164Ptr)(d->DriverPrivate.ptr)) -static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +static bool sil164_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -102,11 +102,11 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) return false; } -static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct sil164_priv *sil = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; + u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, @@ -173,7 +173,7 @@ out: static enum drm_connector_status sil164_detect(struct intel_dvo_device *dvo) { - uint8_t reg9; + u8 reg9; sil164_readb(dvo, SIL164_REG9, ®9); @@ -243,7 +243,7 @@ static bool sil164_get_hw_state(struct intel_dvo_device *dvo) static void sil164_dump_regs(struct intel_dvo_device *dvo) { - uint8_t val; + u8 val; sil164_readb(dvo, SIL164_FREQ_LO, &val); DRM_DEBUG_KMS("SIL164_FREQ_LO: 0x%02x\n", val); diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c index 31e181da93db..d603bc2f2506 100644 --- a/drivers/gpu/drm/i915/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/dvo_tfp410.c @@ -90,7 +90,7 @@ struct tfp410_priv { bool quiet; }; -static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) +static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, u8 *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; @@ -127,11 +127,11 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) return false; } -static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) +static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, u8 ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; - uint8_t out_buf[2]; + u8 out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, @@ -155,7 +155,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) static int tfp410_getid(struct intel_dvo_device *dvo, int addr) { - uint8_t ch1, ch2; + u8 ch1, ch2; if (tfp410_readb(dvo, addr+0, &ch1) && tfp410_readb(dvo, addr+1, &ch2)) @@ -203,7 +203,7 @@ out: static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) { enum drm_connector_status ret = connector_status_disconnected; - uint8_t ctl2; + u8 ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { if (ctl2 & TFP410_CTL_2_RSEN) @@ -236,7 +236,7 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo, /* set the tfp410 power state */ static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable) { - uint8_t ctl1; + u8 ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; @@ -251,7 +251,7 @@ static void tfp410_dpms(struct 
intel_dvo_device *dvo, bool enable) static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) { - uint8_t ctl1; + u8 ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return false; @@ -264,7 +264,7 @@ static bool tfp410_get_hw_state(struct intel_dvo_device *dvo) static void tfp410_dump_regs(struct intel_dvo_device *dvo) { - uint8_t val, val2; + u8 val, val2; tfp410_readb(dvo, TFP410_REV, &val); DRM_DEBUG_KMS("TFP410_REV: 0x%02X\n", val); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 7c9ec4f4f36c..380eeb2a0e83 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) } mutex_lock(&dev_priv->drm.struct_mutex); - ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, + ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7f562410f9cf..45e89b1e0481 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -172,6 +172,7 @@ struct decode_info { #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2) #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3) #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4) +#define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5) #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0) #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2) @@ -1279,7 +1280,9 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1307,7 +1310,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip( set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1331,7 +1336,9 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1340,26 +1347,14 @@ static int decode_mi_display_flip(struct parser_exec_state *s, static int check_mi_display_flip(struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - - if (IS_BROADWELL(dev_priv) - || IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv)) - return gen8_check_mi_display_flip(s, info); - return -ENODEV; + return gen8_check_mi_display_flip(s, info); } static int update_plane_mmio_from_mi_display_flip( struct parser_exec_state *s, struct mi_display_flip_command_info *info) { - struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - - if (IS_BROADWELL(dev_priv) - || IS_SKYLAKE(dev_priv) - || IS_KABYLAKE(dev_priv)) - return 
gen8_update_plane_mmio_from_mi_display_flip(s, info); - return -ENODEV; + return gen8_update_plane_mmio_from_mi_display_flip(s, info); } static int cmd_handler_mi_display_flip(struct parser_exec_state *s) @@ -1638,15 +1633,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, */ static int batch_buffer_needs_scan(struct parser_exec_state *s) { - struct intel_gvt *gvt = s->vgpu->gvt; - - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - /* BDW decides privilege based on address space */ - if (cmd_val(s, 0) & (1 << 8) && + /* Decide privilege based on address space */ + if (cmd_val(s, 0) & (1 << 8) && !(s->vgpu->scan_nonprivbb & (1 << s->ring_id))) - return 0; - } + return 0; return 1; } @@ -2372,6 +2362,9 @@ static struct cmd_info cmd_info[] = { {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, + {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL, + 0, 16, NULL}, + {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL}, {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 4b072ade8c38..3019dbc39aef 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -171,6 +171,29 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int pipe; + if (IS_BROXTON(dev_priv)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA | + BXT_DE_PORT_HP_DDIB | + BXT_DE_PORT_HP_DDIC); + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIA; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIB; + } + + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= + BXT_DE_PORT_HP_DDIC; + } + + return; + } + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -273,8 +296,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; @@ -337,26 +360,28 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt) struct intel_gvt_irq *irq = &gvt->irq; struct intel_vgpu *vgpu; int pipe, id; + int found = false; - if (WARN_ON(!mutex_is_locked(&gvt->lock))) - return; - + mutex_lock(&gvt->lock); for_each_active_vgpu(gvt, vgpu, id) { for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { - if (pipe_is_enabled(vgpu, pipe)) - goto out; + if (pipe_is_enabled(vgpu, pipe)) { + found = true; + break; + } } + if (found) + break; } /* all the pipes are disabled */ - hrtimer_cancel(&irq->vblank_timer.timer); - return; - -out: - hrtimer_start(&irq->vblank_timer.timer, - ktime_add_ns(ktime_get(), irq->vblank_timer.period), - HRTIMER_MODE_ABS); - + if (!found) + hrtimer_cancel(&irq->vblank_timer.timer); + else + hrtimer_start(&irq->vblank_timer.timer, + ktime_add_ns(ktime_get(), irq->vblank_timer.period), + HRTIMER_MODE_ABS); + 
mutex_unlock(&gvt->lock); } static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) @@ -393,8 +418,10 @@ static void emulate_vblank(struct intel_vgpu *vgpu) { int pipe; + mutex_lock(&vgpu->vgpu_lock); for_each_pipe(vgpu->gvt->dev_priv, pipe) emulate_vblank_on_pipe(vgpu, pipe); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -409,11 +436,10 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) struct intel_vgpu *vgpu; int id; - if (WARN_ON(!mutex_is_locked(&gvt->lock))) - return; - + mutex_lock(&gvt->lock); for_each_active_vgpu(gvt, vgpu, id) emulate_vblank(vgpu); + mutex_unlock(&gvt->lock); } /** diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 6f4f8e941fc2..6e3f56684f4e 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -164,7 +164,9 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { unsigned int tiling_mode = 0; unsigned int stride = 0; @@ -192,6 +194,14 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, return obj; } +static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c) +{ + if (c && c->x_hot <= c->width && c->y_hot <= c->height) + return true; + else + return false; +} + static int vgpu_get_plane_info(struct drm_device *dev, struct intel_vgpu *vgpu, struct intel_vgpu_fb_info *info, @@ -229,12 +239,14 @@ static int vgpu_get_plane_info(struct drm_device *dev, info->x_pos = c.x_pos; info->y_pos = c.y_pos; - /* The invalid cursor hotspot value is delivered to host - * until we find a way to get the cursor hotspot info of - * guest OS. 
- */ - info->x_hot = UINT_MAX; - info->y_hot = UINT_MAX; + if (validate_hotspot(&c)) { + info->x_hot = c.x_hot; + info->y_hot = c.y_hot; + } else { + info->x_hot = UINT_MAX; + info->y_hot = UINT_MAX; + } + info->size = (((info->stride * c.height * c.bpp) / 8) + (PAGE_SIZE - 1)) >> PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index f61337632969..4b98539025c5 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -77,6 +77,20 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) return chr; } +static inline int bxt_get_port_from_gmbus0(u32 gmbus0) +{ + int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; + int port = -EINVAL; + + if (port_select == 1) + port = PORT_B; + else if (port_select == 2) + port = PORT_C; + else if (port_select == 3) + port = PORT_D; + return port; +} + static inline int get_port_from_gmbus0(u32 gmbus0) { int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK; @@ -105,6 +119,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu) static int gmbus0_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int port, pin_select; memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); @@ -116,7 +131,10 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu, if (pin_select == 0) return 0; - port = get_port_from_gmbus0(pin_select); + if (IS_BROXTON(dev_priv)) + port = bxt_get_port_from_gmbus0(pin_select); + else + port = get_port_from_gmbus0(pin_select); if (WARN_ON(port < 0)) return 0; diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index 427e40e64d41..714d709829a2 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -146,14 +146,11 @@ struct execlist_ring_context { u32 nop4; u32 lri_cmd_2; struct execlist_mmio_pair ctx_timestamp; - struct execlist_mmio_pair pdp3_UDW; - struct execlist_mmio_pair pdp3_LDW; - struct execlist_mmio_pair pdp2_UDW; - struct execlist_mmio_pair pdp2_LDW; - struct execlist_mmio_pair pdp1_UDW; - struct execlist_mmio_pair pdp1_LDW; - struct execlist_mmio_pair pdp0_UDW; - struct execlist_mmio_pair pdp0_LDW; + /* + * pdps[8]={ pdp3_UDW, pdp3_LDW, pdp2_UDW, pdp2_LDW, + * pdp1_UDW, pdp1_LDW, pdp0_UDW, pdp0_LDW} + */ + struct execlist_mmio_pair pdps[8]; }; struct intel_vgpu_elsp_dwords { diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 1c120683e958..face664be3e8 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -36,6 +36,7 @@ #include <uapi/drm/drm_fourcc.h> #include "i915_drv.h" #include "gvt.h" +#include "i915_pvinfo.h" #define PRIMARY_FORMAT_NUM 16 struct pixel_format { @@ -150,7 +151,9 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe, u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask; u32 stride = stride_reg; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { switch (tiled) { case PLANE_CTL_TILED_LINEAR: stride = stride_reg * 64; @@ -214,7 +217,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (!plane->enabled) return -ENODEV; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) { plane->tiled = (val & PLANE_CTL_TILED_MASK) >> _PLANE_CTL_TILED_SHIFT; fmt = skl_format_to_drm( @@ 
-256,7 +261,9 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, } plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10), - (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ? + (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) ? (_PRI_PLANE_STRIDE_MASK >> 6) : _PRI_PLANE_STRIDE_MASK, plane->bpp); @@ -300,16 +307,16 @@ static int cursor_mode_to_drm(int mode) int cursor_pixel_formats_index = 4; switch (mode) { - case CURSOR_MODE_128_ARGB_AX: + case MCURSOR_MODE_128_ARGB_AX: cursor_pixel_formats_index = 0; break; - case CURSOR_MODE_256_ARGB_AX: + case MCURSOR_MODE_256_ARGB_AX: cursor_pixel_formats_index = 1; break; - case CURSOR_MODE_64_ARGB_AX: + case MCURSOR_MODE_64_ARGB_AX: cursor_pixel_formats_index = 2; break; - case CURSOR_MODE_64_32B_AX: + case MCURSOR_MODE_64_32B_AX: cursor_pixel_formats_index = 3; break; @@ -342,8 +349,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, return -ENODEV; val = vgpu_vreg_t(vgpu, CURCNTR(pipe)); - mode = val & CURSOR_MODE; - plane->enabled = (mode != CURSOR_MODE_DISABLE); + mode = val & MCURSOR_MODE; + plane->enabled = (mode != MCURSOR_MODE_DISABLE); if (!plane->enabled) return -ENODEV; @@ -384,6 +391,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT; plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT; + plane->x_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)); + plane->y_hot = vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index a73e1d418c22..4ac18b447247 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -162,7 +162,7 @@ static int verify_firmware(struct intel_gvt *gvt, h = (struct gvt_firmware_header *)fw->data; - crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; + crc32_start = offsetofend(struct gvt_firmware_header, crc32); mem = fw->data + crc32_start; #define VERIFY(s, a, b) do { \ diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 4efec8fa6c1d..00aad8164dec 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_PPGTT_PDE_PT, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_2M_ENTRY), + /* We take IPS bit as 'PSE' for PTE level. 
*/ GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, - GTT_TYPE_INVALID), + GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_4K_ENTRY, GTT_TYPE_PPGTT_PTE_PT, GTT_TYPE_INVALID, - GTT_TYPE_INVALID), + GTT_TYPE_PPGTT_PTE_64K_ENTRY), + GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY, + GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_PT, + GTT_TYPE_INVALID, + GTT_TYPE_PPGTT_PTE_64K_ENTRY), GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PDE_ENTRY, GTT_TYPE_PPGTT_PDE_PT, @@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt, #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30) #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21) +#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16) #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12) +#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52) +#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */ + +#define GTT_64K_PTE_STRIDE 16 + static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) { unsigned long pfn; @@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; + else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) + pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT; else pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; return pfn; @@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { e->val64 &= ~ADDR_2M_MASK; pfn &= (ADDR_2M_MASK >> PAGE_SHIFT); + } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) { + e->val64 &= ~ADDR_64K_MASK; + pfn &= (ADDR_64K_MASK >> PAGE_SHIFT); } else { e->val64 &= ~ADDR_4K_MASK; pfn &= (ADDR_4K_MASK >> PAGE_SHIFT); @@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn) static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e) { - /* Entry doesn't have PSE bit. 
*/ - if (get_pse_type(e->type) == GTT_TYPE_INVALID) - return false; + return !!(e->val64 & _PAGE_PSE); +} - e->type = get_entry_type(e->type); - if (!(e->val64 & _PAGE_PSE)) +static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e) +{ + if (gen8_gtt_test_pse(e)) { + switch (e->type) { + case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + e->val64 &= ~_PAGE_PSE; + e->type = GTT_TYPE_PPGTT_PDE_ENTRY; + break; + case GTT_TYPE_PPGTT_PTE_1G_ENTRY: + e->type = GTT_TYPE_PPGTT_PDP_ENTRY; + e->val64 &= ~_PAGE_PSE; + break; + default: + WARN_ON(1); + } + } +} + +static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e) +{ + if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) return false; - e->type = get_pse_type(e->type); - return true; + return !!(e->val64 & GEN8_PDE_IPS_64K); +} + +static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e) +{ + if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) + return; + + e->val64 &= ~GEN8_PDE_IPS_64K; } static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) @@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) e->val64 |= _PAGE_PRESENT; } +static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) +{ + return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED); +} + +static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e) +{ + e->val64 |= GTT_SPTE_FLAG_64K_SPLITED; +} + +static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e) +{ + e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED; +} + /* * Per-platform GMA routines. */ @@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .set_present = gtt_entry_set_present, .test_present = gen8_gtt_test_present, .test_pse = gen8_gtt_test_pse, + .clear_pse = gen8_gtt_clear_pse, + .clear_ips = gen8_gtt_clear_ips, + .test_ips = gen8_gtt_test_ips, + .clear_64k_splited = gen8_gtt_clear_64k_splited, + .set_64k_splited = gen8_gtt_set_64k_splited, + .test_64k_splited = gen8_gtt_test_64k_splited, .get_pfn = gen8_gtt_get_pfn, .set_pfn = gen8_gtt_set_pfn, }; @@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { .gma_to_pml4_index = gen8_gma_to_pml4_index, }; +/* Update entry type per pse and ips bit. */ +static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops, + struct intel_gvt_gtt_entry *entry, bool ips) +{ + switch (entry->type) { + case GTT_TYPE_PPGTT_PDE_ENTRY: + case GTT_TYPE_PPGTT_PDP_ENTRY: + if (pte_ops->test_pse(entry)) + entry->type = get_pse_type(entry->type); + break; + case GTT_TYPE_PPGTT_PTE_4K_ENTRY: + if (ips) + entry->type = get_pse_type(entry->type); + break; + default: + GEM_BUG_ON(!gtt_type_is_entry(entry->type)); + } + + GEM_BUG_ON(entry->type == GTT_TYPE_INVALID); +} + /* * MM helpers. */ @@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, entry, index, false, 0, mm->vgpu); - - pte_ops->test_pse(entry); + update_entry_type_for_real(pte_ops, entry, false); } static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm, @@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry( if (ret) return ret; - ops->test_pse(e); + update_entry_type_for_real(ops, e, guest ? 
+ spt->guest_page.pde_ips : false); gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n", type, e->type, index, e->val64); @@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); - if (spt->guest_page.oos_page) - detach_oos_page(spt->vgpu, spt->guest_page.oos_page); + if (spt->guest_page.gfn) { + if (spt->guest_page.oos_page) + detach_oos_page(spt->vgpu, spt->guest_page.oos_page); - intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); + intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); + } list_del_init(&spt->post_shadow_list); free_spt(spt); @@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); +/* Allocate shadow page table without guest page. */ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( - struct intel_vgpu *vgpu, int type, unsigned long gfn) + struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) { struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; struct intel_vgpu_ppgtt_spt *spt = NULL; @@ -753,26 +840,12 @@ retry: spt->shadow_page.vaddr = page_address(spt->shadow_page.page); spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; - /* - * Init guest_page. - */ - spt->guest_page.type = type; - spt->guest_page.gfn = gfn; - - ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn, - ppgtt_write_protection_handler, spt); - if (ret) - goto err_unmap_dma; - ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); if (ret) - goto err_unreg_page_track; + goto err_unmap_dma; - trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); return spt; -err_unreg_page_track: - intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn); err_unmap_dma: dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); err_free_spt: @@ -780,6 +853,37 @@ err_free_spt: return ERR_PTR(ret); } +/* Allocate shadow page table associated with specific gfn. */ +static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( + struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type, + unsigned long gfn, bool guest_pde_ips) +{ + struct intel_vgpu_ppgtt_spt *spt; + int ret; + + spt = ppgtt_alloc_spt(vgpu, type); + if (IS_ERR(spt)) + return spt; + + /* + * Init guest_page. + */ + ret = intel_vgpu_register_page_track(vgpu, gfn, + ppgtt_write_protection_handler, spt); + if (ret) { + ppgtt_free_spt(spt); + return ERR_PTR(ret); + } + + spt->guest_page.type = type; + spt->guest_page.gfn = gfn; + spt->guest_page.pde_ips = guest_pde_ips; + + trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); + + return spt; +} + #define pt_entry_size_shift(spt) \ ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) @@ -787,24 +891,38 @@ err_free_spt: (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt)) #define for_each_present_guest_entry(spt, e, i) \ - for (i = 0; i < pt_entries(spt); i++) \ + for (i = 0; i < pt_entries(spt); \ + i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_guest_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) #define for_each_present_shadow_entry(spt, e, i) \ - for (i = 0; i < pt_entries(spt); i++) \ + for (i = 0; i < pt_entries(spt); \ + i += spt->shadow_page.pde_ips ? 
GTT_64K_PTE_STRIDE : 1) \ if (!ppgtt_get_shadow_entry(spt, e, i) && \ spt->vgpu->gvt->gtt.pte_ops->test_present(e)) -static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) +#define for_each_shadow_entry(spt, e, i) \ + for (i = 0; i < pt_entries(spt); \ + i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \ + if (!ppgtt_get_shadow_entry(spt, e, i)) + +static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt) { int v = atomic_read(&spt->refcount); trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); - atomic_inc(&spt->refcount); } +static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt) +{ + int v = atomic_read(&spt->refcount); + + trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); + return atomic_dec_return(&spt->refcount); +} + static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt); static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, @@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, pfn = ops->get_pfn(entry); type = spt->shadow_page.type; - if (pfn == vgpu->gtt.scratch_pt[type].page_mfn) + /* Uninitialized spte or unshadowed spte. */ + if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) return; intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT); @@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) struct intel_gvt_gtt_entry e; unsigned long index; int ret; - int v = atomic_read(&spt->refcount); trace_spt_change(spt->vgpu->id, "die", spt, spt->guest_page.gfn, spt->shadow_page.type); - trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); - - if (atomic_dec_return(&spt->refcount) > 0) + if (ppgtt_put_spt(spt) > 0) return 0; for_each_present_shadow_entry(spt, &e, index) { @@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt) gvt_vdbg_mm("invalidate 4K entry\n"); ppgtt_invalidate_pte(spt, &e); break; + case GTT_TYPE_PPGTT_PTE_64K_ENTRY: + /* We don't setup 64K shadow entry so far. */ + WARN(1, "suspicious 64K gtt entry\n"); + continue; case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + gvt_vdbg_mm("invalidate 2M entry\n"); + continue; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: - WARN(1, "GVT doesn't support 2M/1GB page\n"); + WARN(1, "GVT doesn't support 1GB page\n"); continue; case GTT_TYPE_PPGTT_PML4_ENTRY: case GTT_TYPE_PPGTT_PDP_ENTRY: @@ -899,6 +1021,22 @@ fail: return ret; } +static bool vgpu_ips_enabled(struct intel_vgpu *vgpu) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + + if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) { + u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) & + GAMW_ECO_ENABLE_64K_IPS_FIELD; + + return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD; + } else if (INTEL_GEN(dev_priv) >= 11) { + /* 64K paging only controlled by IPS bit in PTE now. 
*/ + return true; + } else + return false; +} + static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( @@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( { struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = NULL; + bool ips = false; int ret; GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); + if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) + ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); + spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); - if (spt) + if (spt) { ppgtt_get_spt(spt); - else { + + if (ips != spt->guest_page.pde_ips) { + spt->guest_page.pde_ips = ips; + + gvt_dbg_mm("reshadow PDE since ips changed\n"); + clear_page(spt->shadow_page.vaddr); + ret = ppgtt_populate_spt(spt); + if (ret) { + ppgtt_put_spt(spt); + goto err; + } + } + } else { int type = get_next_pt_type(we->type); - spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we)); + spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); if (IS_ERR(spt)) { ret = PTR_ERR(spt); - goto fail; + goto err; } ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); if (ret) - goto fail; + goto err_free_spt; ret = ppgtt_populate_spt(spt); if (ret) - goto fail; + goto err_free_spt; trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, spt->shadow_page.type); } return spt; -fail: + +err_free_spt: + ppgtt_free_spt(spt); +err: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", spt, we->val64, we->type); return ERR_PTR(ret); @@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, se->type = ge->type; se->val64 = ge->val64; + /* Because we always split 64KB pages, so clear IPS in shadow PDE. */ + if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY) + ops->clear_ips(se); + ops->set_pfn(se, s->shadow_page.mfn); } +/** + * Return 1 if 2MB huge gtt shadowing is possilbe, 0 if miscondition, + * negtive if found err. + */ +static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M)) + return 0; + + pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry)); + if (pfn == INTEL_GVT_INVALID_ADDR) + return -EINVAL; + + return PageTransHuge(pfn_to_page(pfn)); +} + +static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, + struct intel_vgpu_ppgtt_spt *spt, unsigned long index, + struct intel_gvt_gtt_entry *se) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + struct intel_vgpu_ppgtt_spt *sub_spt; + struct intel_gvt_gtt_entry sub_se; + unsigned long start_gfn; + dma_addr_t dma_addr; + unsigned long sub_index; + int ret; + + gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index); + + start_gfn = ops->get_pfn(se); + + sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT); + if (IS_ERR(sub_spt)) + return PTR_ERR(sub_spt); + + for_each_shadow_entry(sub_spt, &sub_se, sub_index) { + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, + start_gfn + sub_index, PAGE_SIZE, &dma_addr); + if (ret) { + ppgtt_invalidate_spt(spt); + return ret; + } + sub_se.val64 = se->val64; + + /* Copy the PAT field from PDE. 
*/ + sub_se.val64 &= ~_PAGE_PAT; + sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5; + + ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT); + ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index); + } + + /* Clear dirty field. */ + se->val64 &= ~_PAGE_DIRTY; + + ops->clear_pse(se); + ops->clear_ips(se); + ops->set_pfn(se, sub_spt->shadow_page.mfn); + ppgtt_set_shadow_entry(spt, se, index); + return 0; +} + +static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, + struct intel_vgpu_ppgtt_spt *spt, unsigned long index, + struct intel_gvt_gtt_entry *se) +{ + struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + struct intel_gvt_gtt_entry entry = *se; + unsigned long start_gfn; + dma_addr_t dma_addr; + int i, ret; + + gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index); + + GEM_BUG_ON(index % GTT_64K_PTE_STRIDE); + + start_gfn = ops->get_pfn(se); + + entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY; + ops->set_64k_splited(&entry); + + for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, + start_gfn + i, PAGE_SIZE, &dma_addr); + if (ret) + return ret; + + ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT); + ppgtt_set_shadow_entry(spt, &entry, index + i); + } + return 0; +} + static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *ge) { struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; - unsigned long gfn; + unsigned long gfn, page_size = PAGE_SIZE; dma_addr_t dma_addr; int ret; @@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, case GTT_TYPE_PPGTT_PTE_4K_ENTRY: gvt_vdbg_mm("shadow 4K gtt entry\n"); break; + case GTT_TYPE_PPGTT_PTE_64K_ENTRY: + gvt_vdbg_mm("shadow 64K gtt entry\n"); + /* + * The layout of 64K page is special, the page size is + * controlled by uper PDE. To be simple, we always split + * 64K page to smaller 4K pages in shadow PT. + */ + return split_64KB_gtt_entry(vgpu, spt, index, &se); case GTT_TYPE_PPGTT_PTE_2M_ENTRY: + gvt_vdbg_mm("shadow 2M gtt entry\n"); + ret = is_2MB_gtt_possible(vgpu, ge); + if (ret == 0) + return split_2MB_gtt_entry(vgpu, spt, index, &se); + else if (ret < 0) + return ret; + page_size = I915_GTT_PAGE_SIZE_2M; + break; case GTT_TYPE_PPGTT_PTE_1G_ENTRY: - gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n"); + gvt_vgpu_err("GVT doesn't support 1GB entry\n"); return -EINVAL; default: GEM_BUG_ON(1); }; /* direct shadow */ - ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr); + ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size, + &dma_addr); if (ret) return -ENXIO; @@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, ret = ppgtt_invalidate_spt(s); if (ret) goto fail; - } else + } else { + /* We don't setup 64K shadow entry so far. */ + WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY, + "suspicious 64K entry\n"); ppgtt_invalidate_pte(spt, se); + } return 0; fail: @@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table( struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry old_se; int new_present; - int ret; + int i, ret; new_present = ops->test_present(we); @@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table( goto fail; if (!new_present) { - ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn); - ppgtt_set_shadow_entry(spt, &old_se, index); + /* For 64KB splited entries, we need clear them all. 
*/ + if (ops->test_64k_splited(&old_se) && + !(index % GTT_64K_PTE_STRIDE)) { + gvt_vdbg_mm("remove splited 64K shadow entries\n"); + for (i = 0; i < GTT_64K_PTE_STRIDE; i++) { + ops->clear_64k_splited(&old_se); + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index + i); + } + } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY || + old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { + ops->clear_pse(&old_se); + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index); + } else { + ops->set_pfn(&old_se, + vgpu->gtt.scratch_pt[type].page_mfn); + ppgtt_set_shadow_entry(spt, &old_se, index); + } } return 0; @@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes( ppgtt_get_guest_entry(spt, &we, index); - ops->test_pse(&we); + /* + * For page table which has 64K gtt entry, only PTE#0, PTE#16, + * PTE#32, ... PTE#496 are used. Unused PTEs update should be + * ignored. + */ + if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY && + (index % GTT_64K_PTE_STRIDE)) { + gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n", + index); + return 0; + } if (bytes == info->gtt_entry_size) { ret = ppgtt_handle_guest_write_page_table(spt, &we, index); @@ -1939,7 +2248,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, } ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, - &dma_addr); + PAGE_SIZE, &dma_addr); if (ret) { gvt_vgpu_err("fail to populate guest ggtt entry\n"); /* guest driver may read/write the entry when partial @@ -2031,7 +2340,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 
*/ - if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { + if (type > GTT_TYPE_PPGTT_PTE_PT) { struct intel_gvt_gtt_entry se; memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); @@ -2315,13 +2624,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - gvt->gtt.pte_ops = &gen8_gtt_pte_ops; - gvt->gtt.gma_ops = &gen8_gtt_gma_ops; - } else { - return -ENODEV; - } + gvt->gtt.pte_ops = &gen8_gtt_pte_ops; + gvt->gtt.gma_ops = &gen8_gtt_gma_ops; page = (void *)get_zeroed_page(GFP_KERNEL); if (!page) { diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 97e62647418a..7a9b36176efb 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops { void (*clear_present)(struct intel_gvt_gtt_entry *e); void (*set_present)(struct intel_gvt_gtt_entry *e); bool (*test_pse)(struct intel_gvt_gtt_entry *e); + void (*clear_pse)(struct intel_gvt_gtt_entry *e); + bool (*test_ips)(struct intel_gvt_gtt_entry *e); + void (*clear_ips)(struct intel_gvt_gtt_entry *e); + bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e); + void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e); + void (*set_64k_splited)(struct intel_gvt_gtt_entry *e); void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn); unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e); }; @@ -95,6 +101,7 @@ typedef enum { GTT_TYPE_GGTT_PTE, GTT_TYPE_PPGTT_PTE_4K_ENTRY, + GTT_TYPE_PPGTT_PTE_64K_ENTRY, GTT_TYPE_PPGTT_PTE_2M_ENTRY, GTT_TYPE_PPGTT_PTE_1G_ENTRY, @@ -222,6 +229,7 @@ struct intel_vgpu_ppgtt_spt { struct { intel_gvt_gtt_type_t type; + bool pde_ips; /* for 64KB PTEs */ void *vaddr; struct page *page; unsigned long mfn; @@ -229,6 +237,7 @@ struct intel_vgpu_ppgtt_spt { struct { intel_gvt_gtt_type_t type; + bool pde_ips; /* for 64KB PTEs */ unsigned long gfn; unsigned long write_cnt; struct intel_vgpu_oos_page *oos_page; diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 61bd14fcb649..712f9d14e720 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -238,18 +238,15 @@ static void init_device_info(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - info->max_support_vgpus = 8; - info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; - info->mmio_size = 2 * 1024 * 1024; - info->mmio_bar = 0; - info->gtt_start_offset = 8 * 1024 * 1024; - info->gtt_entry_size = 8; - info->gtt_entry_size_shift = 3; - info->gmadr_bytes_in_cmd = 8; - info->max_surface_size = 36 * 1024 * 1024; - } + info->max_support_vgpus = 8; + info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; + info->mmio_size = 2 * 1024 * 1024; + info->mmio_bar = 0; + info->gtt_start_offset = 8 * 1024 * 1024; + info->gtt_entry_size = 8; + info->gtt_entry_size_shift = 3; + info->gmadr_bytes_in_cmd = 8; + info->max_surface_size = 36 * 1024 * 1024; info->msi_cap_offset = pdev->msi_cap; } @@ -271,11 +268,8 @@ static int gvt_service_thread(void *data) continue; if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK, - (void *)&gvt->service_request)) { - mutex_lock(&gvt->lock); + (void *)&gvt->service_request)) intel_gvt_emulate_vblank(gvt); - mutex_unlock(&gvt->lock); - } if (test_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request) || @@ 
-379,6 +373,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) idr_init(&gvt->vgpu_idr); spin_lock_init(&gvt->scheduler.mmio_context_lock); mutex_init(&gvt->lock); + mutex_init(&gvt->sched_lock); gvt->dev_priv = dev_priv; init_device_info(gvt); @@ -473,3 +468,7 @@ out_clean_idr: kfree(gvt); return ret; } + +#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) +MODULE_SOFTDEP("pre: kvmgt"); +#endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 858967daf04b..9a9671522774 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -170,12 +170,18 @@ struct intel_vgpu_submission { struct intel_vgpu { struct intel_gvt *gvt; + struct mutex vgpu_lock; int id; unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ bool active; bool pv_notified; bool failsafe; unsigned int resetting_eng; + + /* Both sched_data and sched_ctl can be seen a part of the global gvt + * scheduler structure. So below 2 vgpu data are protected + * by sched_lock, not vgpu_lock. + */ void *sched_data; struct vgpu_sched_ctl sched_ctl; @@ -296,7 +302,13 @@ struct intel_vgpu_type { }; struct intel_gvt { + /* GVT scope lock, protect GVT itself, and all resource currently + * not yet protected by special locks(vgpu and scheduler lock). + */ struct mutex lock; + /* scheduler scope lock, protect gvt and vgpu schedule related data */ + struct mutex sched_lock; + struct drm_i915_private *dev_priv; struct idr vgpu_idr; /* vGPU IDR pool */ @@ -316,6 +328,10 @@ struct intel_gvt { struct task_struct *service_thread; wait_queue_head_t service_thread_wq; + + /* service_request is always used in bit operation, we should always + * use it with atomic bit ops so that no need to use gvt big lock. + */ unsigned long service_request; struct { @@ -363,9 +379,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); #define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) -#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total) +#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total) #define gvt_ggtt_sz(gvt) \ - ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3) + ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3) #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) #define gvt_aperture_gmadr_base(gvt) (0) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8f1caacdc78a..7a58ca555197 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -55,6 +55,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_SKL; else if (IS_KABYLAKE(gvt->dev_priv)) return D_KBL; + else if (IS_BROXTON(gvt->dev_priv)) + return D_BXT; return 0; } @@ -208,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, return 0; } +static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD; + + if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) { + if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD) + gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id); + else if (!ips) + gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id); + else { + /* All engines must be enabled together for vGPU, + * since we don't know which engine the ppgtt will + * bind to when shadowing. 
+ */ + gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n", + ips); + return -EINVAL; + } + } + + write_vreg(vgpu, offset, p_data, bytes); + return 0; +} + static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -255,7 +282,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); if (IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) { + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -316,6 +344,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } } + /* vgpu_lock already hold by emulate mmio r/w */ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); /* sw will wait for the device to ack the reset request */ @@ -420,7 +449,10 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; else vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + /* vgpu_lock already hold by emulate mmio r/w */ + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_check_vblank_emulation(vgpu->gvt); + mutex_lock(&vgpu->vgpu_lock); return 0; } @@ -857,7 +889,8 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, data = vgpu_vreg(vgpu, offset); if ((IS_SKYLAKE(vgpu->gvt->dev_priv) - || IS_KABYLAKE(vgpu->gvt->dev_priv)) + || IS_KABYLAKE(vgpu->gvt->dev_priv) + || IS_BROXTON(vgpu->gvt->dev_priv)) && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; @@ -1209,8 +1242,8 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ret = handle_g2v_notification(vgpu, data); break; /* add xhot and yhot to handled list to avoid error log */ - case 0x78830: - case 0x78834: + case _vgtif_reg(cursor_x_hot): + case _vgtif_reg(cursor_y_hot): case _vgtif_reg(pdp[0].lo): case _vgtif_reg(pdp[0].hi): case _vgtif_reg(pdp[1].lo): @@ -1369,6 +1402,16 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, *data0 = 0x1e1a1100; else *data0 = 0x61514b3d; + } else if (IS_BROXTON(vgpu->gvt->dev_priv)) { + /** + * "Read memory latency" command on gen9. + * Below memory latency values are read + * from Broxton MRB. 
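The gamw_echo_dev_rw_ia_write() handler above enforces an all-or-nothing rule on the 64K IPS enable bits: since GVT cannot know which engine a guest PPGTT will be shadowed on, per-engine enablement cannot be honoured, so anything other than "all set" or "all clear" is rejected. A minimal stand-alone sketch of that check follows; DEMO_IPS_FIELD is an invented mask, not the real GAMW_ECO_ENABLE_64K_IPS_FIELD.

#include <stdbool.h>
#include <stdint.h>

#define DEMO_IPS_FIELD 0x3u /* hypothetical: one enable bit per engine group */

/* Accept only "all engines enabled" or "all engines disabled". */
static bool demo_ips_write_is_valid(uint32_t val)
{
        uint32_t ips = val & DEMO_IPS_FIELD;

        return ips == DEMO_IPS_FIELD || ips == 0;
}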
+ */ + if (!*data0) + *data0 = 0x16080707; + else + *data0 = 0x16161616; } break; case SKL_PCODE_CDCLK_CONTROL: @@ -1426,8 +1469,11 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu, { u32 v = *(u32 *)p_data; - v &= (1 << 31) | (1 << 29) | (1 << 9) | - (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); + if (IS_BROXTON(vgpu->gvt->dev_priv)) + v &= (1 << 31) | (1 << 29); + else + v &= (1 << 31) | (1 << 29) | (1 << 9) | + (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1); v |= (v >> 1); return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); @@ -1447,6 +1493,109 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BXT_DE_PLL_PLL_ENABLE) + v |= BXT_DE_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & PORT_PLL_ENABLE) + v |= PORT_PLL_LOCK; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0; + + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data; + vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data; + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = vgpu_vreg(vgpu, offset); + + v &= ~UNIQUE_TRANGE_EN_METHOD; + + vgpu_vreg(vgpu, offset) = v; + + return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) { + vgpu_vreg(vgpu, offset - 0x600) = v; + vgpu_vreg(vgpu, offset - 0x800) = v; + } else { + vgpu_vreg(vgpu, offset - 0x400) = v; + vgpu_vreg(vgpu, offset - 0x600) = v; + } + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + u32 v = *(u32 *)p_data; + + if (v & BIT(0)) { + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= + ~PHY_RESERVED; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |= + PHY_POWER_GOOD; + } + + if (v & BIT(1)) { + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &= + ~PHY_RESERVED; + vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |= + PHY_POWER_GOOD; + } + + + vgpu_vreg(vgpu, offset) = v; + + return 0; +} + +static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + vgpu_vreg(vgpu, offset) = 0; + return 0; +} + static int mmio_read_from_hw(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1657,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); - MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); + MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL, + gamw_echo_dev_rw_ia_write); + MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, 
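The new bxt_*_write handlers above share one emulation pattern: when the guest sets an enable bit there is no real PLL or PHY to wait on, so the handler immediately folds the corresponding lock/status bit into the value stored in the virtual register. A rough sketch of that pattern, with invented bit positions rather than the Broxton ones:

#include <stdint.h>

#define DEMO_PLL_ENABLE (1u << 31) /* hypothetical enable bit */
#define DEMO_PLL_LOCK   (1u << 30) /* hypothetical lock/status bit */

/* Compute the value to store in the virtual register: reflect the
 * "locked" status back as soon as the guest asks for enable. */
static uint32_t demo_emulate_pll_write(uint32_t val)
{
        if (val & DEMO_PLL_ENABLE)
                val |= DEMO_PLL_LOCK;

        return val;
}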
NULL, NULL); @@ -2670,17 +2821,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x45504), D_SKL_PLUS); MMIO_D(_MMIO(0x45520), D_SKL_PLUS); MMIO_D(_MMIO(0x46000), D_SKL_PLUS); - MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write); - MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write); - MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL); - MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL); + MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write); + MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write); + MMIO_D(_MMIO(0x6C040), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C048), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C050), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C044), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS); + MMIO_D(_MMIO(0x6C054), D_SKL_PLUS); + MMIO_D(_MMIO(0x6c058), D_SKL_PLUS); + MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); + MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); @@ -2805,53 +2956,57 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL); - MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL); - MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL); + MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); + MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); + MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); - MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL); + MMIO_D(_MMIO(0xb11c), D_SKL_PLUS); - MMIO_D(_MMIO(0x51000), D_SKL | D_KBL); + MMIO_D(_MMIO(0x51000), D_SKL_PLUS); MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS); - MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); - MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + NULL, NULL); + MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS, + NULL, NULL); MMIO_D(RPM_CONFIG0, D_SKL_PLUS); MMIO_D(_MMIO(0xd08), D_SKL_PLUS); MMIO_D(RC6_LOCATION, D_SKL_PLUS); MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL); - MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, + NULL, NULL); /* TRTT */ - MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write); - MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write); + MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS, + NULL, gen9_trtte_write); + MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write); - MMIO_D(_MMIO(0x45008), 
D_SKL | D_KBL); + MMIO_D(_MMIO(0x45008), D_SKL_PLUS); - MMIO_D(_MMIO(0x46430), D_SKL | D_KBL); + MMIO_D(_MMIO(0x46430), D_SKL_PLUS); - MMIO_D(_MMIO(0x46520), D_SKL | D_KBL); + MMIO_D(_MMIO(0x46520), D_SKL_PLUS); - MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL); + MMIO_D(_MMIO(0xc403c), D_SKL_PLUS); MMIO_D(_MMIO(0xb004), D_SKL_PLUS); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); MMIO_D(_MMIO(0x65900), D_SKL_PLUS); - MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL); - MMIO_D(_MMIO(0x4068), D_SKL | D_KBL); - MMIO_D(_MMIO(0x67054), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL); - MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL); - MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL); - MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL); - MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL); - MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL); + MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS); + MMIO_D(_MMIO(0x4068), D_SKL_PLUS); + MMIO_D(_MMIO(0x67054), D_SKL_PLUS); + MMIO_D(_MMIO(0x6e560), D_SKL_PLUS); + MMIO_D(_MMIO(0x6e554), D_SKL_PLUS); + MMIO_D(_MMIO(0x2b20), D_SKL_PLUS); + MMIO_D(_MMIO(0x65f00), D_SKL_PLUS); + MMIO_D(_MMIO(0x65f08), D_SKL_PLUS); + MMIO_D(_MMIO(0x320f0), D_SKL_PLUS); MMIO_D(_MMIO(0x70034), D_SKL_PLUS); MMIO_D(_MMIO(0x71034), D_SKL_PLUS); @@ -2869,11 +3024,188 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x44500), D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); MMIO_D(_MMIO(0x4ab8), D_KBL); - MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL); + MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); + + return 0; +} + +static int init_bxt_mmio_info(struct intel_gvt *gvt) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + int ret; + + MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT); + MMIO_D(GEN7_ROW_INSTDONE, D_BXT); + MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT); + MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT); + MMIO_D(ERROR_GEN6, D_BXT); + MMIO_D(DONE_REG, D_BXT); + MMIO_D(EIR, D_BXT); + MMIO_D(PGTBL_ER, D_BXT); + MMIO_D(_MMIO(0x4194), D_BXT); + MMIO_D(_MMIO(0x4294), D_BXT); + MMIO_D(_MMIO(0x4494), D_BXT); + + MMIO_RING_D(RING_PSMI_CTL, D_BXT); + MMIO_RING_D(RING_DMA_FADD, D_BXT); + MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT); + MMIO_RING_D(RING_IPEHR, D_BXT); + MMIO_RING_D(RING_INSTPS, D_BXT); + MMIO_RING_D(RING_BBADDR_UDW, D_BXT); + MMIO_RING_D(RING_BBSTATE, D_BXT); + MMIO_RING_D(RING_IPEIR, D_BXT); + + MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL); + + MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write); + MMIO_D(BXT_RP_STATE_CAP, D_BXT); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT, + NULL, bxt_phy_ctl_family_write); + MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT, + NULL, bxt_phy_ctl_family_write); + MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT); + MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT, + NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT, + NULL, bxt_port_pll_enable_write); + MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL, + bxt_port_pll_enable_write); + + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT); + 
MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT); + + MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT); + MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT); + 
MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT); + + MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT, + NULL, bxt_pcs_dw12_grp_write); + MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT, + bxt_port_tx_dw3_read, NULL); + MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT); + MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT); + + MMIO_D(BXT_DE_PLL_CTL, D_BXT); + MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write); + MMIO_D(BXT_DSI_PLL_CTL, D_BXT); + MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); + + MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); + + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); + MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); + + MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); + MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); + + MMIO_D(RC6_CTX_BASE, D_BXT); + + MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); + MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT); + MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); + MMIO_D(GEN6_GFXPAUSE, D_BXT); + MMIO_D(GEN8_L3SQCREG1, D_BXT); + + MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); return 0; } @@ -2965,6 +3297,16 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_skl_mmio_info(gvt); if (ret) goto err; + } else if (IS_BROXTON(dev_priv)) { + ret = init_broadwell_mmio_info(gvt); + if (ret) + goto err; + ret = init_skl_mmio_info(gvt); + if (ret) + goto err; + ret = init_bxt_mmio_info(gvt); + if (ret) + goto err; } gvt->mmio.mmio_block = mmio_blocks; diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index f6dd9f717888..5af11cf1b482 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -53,7 +53,7 @@ struct intel_gvt_mpt { unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn); int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn, - dma_addr_t *dma_addr); + unsigned long size, dma_addr_t *dma_addr); void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr); int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn, diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 7a041b368f68..5daa23ae566b 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ 
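Each MMIO_* registration above carries a device bitmask (D_BXT for the Broxton-only table, D_SKL_PLUS for entries shared with Skylake and Kabylake) that is matched against the running platform when the handler tables are built, so adding init_bxt_mmio_info() does not disturb other platforms. A loose sketch of that gating idea, using invented demo_* names rather than the gvt macros:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical device bits mirroring the D_BDW/D_SKL/D_KBL/D_BXT idea. */
enum {
        DEMO_D_BDW = 1u << 0,
        DEMO_D_SKL = 1u << 1,
        DEMO_D_KBL = 1u << 2,
        DEMO_D_BXT = 1u << 3,
};
#define DEMO_D_SKL_PLUS (DEMO_D_SKL | DEMO_D_KBL | DEMO_D_BXT)

/* An entry is only installed when the current device's bit is in the
 * entry's mask, e.g. a DEMO_D_BXT-only entry is skipped on Skylake. */
static bool demo_entry_applies(uint32_t entry_mask, uint32_t cur_device)
{
        return (entry_mask & cur_device) != 0;
}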
b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -350,7 +350,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu, clear_bits |= (1 << bit); } - WARN_ON(!up_irq_info); + if (WARN_ON(!up_irq_info)) + return; if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) { u32 isr = i915_mmio_reg_offset(up_irq_info->reg_base); @@ -580,7 +581,9 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -690,14 +693,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv)) { - irq->ops = &gen8_irq_ops; - irq->irq_map = gen8_irq_map; - } else { - WARN_ON(1); - return -ENODEV; - } + irq->ops = &gen8_irq_ops; + irq->irq_map = gen8_irq_map; /* common event initialization */ init_events(irq); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index df4e4a07db3d..4d2f53ae9f0f 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -94,6 +94,7 @@ struct gvt_dma { struct rb_node dma_addr_node; gfn_t gfn; dma_addr_t dma_addr; + unsigned long size; struct kref ref; }; @@ -106,51 +107,103 @@ static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); -static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, - dma_addr_t *dma_addr) +static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size) { - struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - struct page *page; - unsigned long pfn; + int total_pages; + int npage; int ret; - /* Pin the page first. */ - ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); - if (ret != 1) { - gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", - gfn, ret); - return -EINVAL; + total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; + + for (npage = 0; npage < total_pages; npage++) { + unsigned long cur_gfn = gfn + npage; + + ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1); + WARN_ON(ret != 1); } +} - if (!pfn_valid(pfn)) { - gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn); - vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - return -EINVAL; +/* Pin a normal or compound guest page for dma. */ +static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, + unsigned long size, struct page **page) +{ + unsigned long base_pfn = 0; + int total_pages; + int npage; + int ret; + + total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; + /* + * We pin the pages one-by-one to avoid allocating a big arrary + * on stack to hold pfns. 
+ */ + for (npage = 0; npage < total_pages; npage++) { + unsigned long cur_gfn = gfn + npage; + unsigned long pfn; + + ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); + if (ret != 1) { + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", + cur_gfn, ret); + goto err; + } + + if (!pfn_valid(pfn)) { + gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn); + npage++; + ret = -EFAULT; + goto err; + } + + if (npage == 0) + base_pfn = pfn; + else if (base_pfn + npage != pfn) { + gvt_vgpu_err("The pages are not continuous\n"); + ret = -EINVAL; + npage++; + goto err; + } } + *page = pfn_to_page(base_pfn); + return 0; +err: + gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); + return ret; +} + +static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, + dma_addr_t *dma_addr, unsigned long size) +{ + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + struct page *page = NULL; + int ret; + + ret = gvt_pin_guest_page(vgpu, gfn, size, &page); + if (ret) + return ret; + /* Setup DMA mapping. */ - page = pfn_to_page(pfn); - *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, *dma_addr)) { - gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn); - vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - return -ENOMEM; + *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL); + ret = dma_mapping_error(dev, *dma_addr); + if (ret) { + gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n", + page_to_pfn(page), ret); + gvt_unpin_guest_page(vgpu, gfn, size); } - return 0; + return ret; } static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, - dma_addr_t dma_addr) + dma_addr_t dma_addr, unsigned long size) { struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; - int ret; - dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1); - WARN_ON(ret != 1); + dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + gvt_unpin_guest_page(vgpu, gfn, size); } static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu, @@ -191,7 +244,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) } static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, - dma_addr_t dma_addr) + dma_addr_t dma_addr, unsigned long size) { struct gvt_dma *new, *itr; struct rb_node **link, *parent = NULL; @@ -203,6 +256,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, new->vgpu = vgpu; new->gfn = gfn; new->dma_addr = dma_addr; + new->size = size; kref_init(&new->ref); /* gfn_cache maps gfn to struct gvt_dma. 
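gvt_pin_guest_page() above pins a guest page of arbitrary size one 4 KiB page at a time, accepts the result only if the pinned pfns turn out to be physically contiguous, and unwinds whatever was already pinned on any failure. A simplified, self-contained version of that loop is sketched below; the demo_* callbacks stand in for vfio_pin_pages()/vfio_unpin_pages() and are not kvmgt symbols.

#include <errno.h>

#define DEMO_PAGE_SIZE 4096UL

/* Stand-ins for the real pin/unpin operations used by kvmgt. */
typedef int (*demo_pin_fn)(unsigned long gfn, unsigned long *pfn);
typedef void (*demo_unpin_fn)(unsigned long gfn);

static int demo_pin_guest_range(unsigned long gfn, unsigned long size,
                                unsigned long *base_pfn,
                                demo_pin_fn pin, demo_unpin_fn unpin)
{
        unsigned long total = (size + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
        unsigned long npage, pfn, base = 0;
        int ret = 0;

        for (npage = 0; npage < total; npage++) {
                ret = pin(gfn + npage, &pfn);
                if (ret)
                        goto err;

                if (npage == 0) {
                        base = pfn;
                } else if (base + npage != pfn) {
                        /* Not physically contiguous: a huge mapping cannot
                         * be built from these pages, so unwind and fail. */
                        npage++;
                        ret = -EINVAL;
                        goto err;
                }
        }

        *base_pfn = base;
        return 0;

err:
        while (npage--)
                unpin(gfn + npage);
        return ret;
}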
*/ @@ -260,7 +314,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) break; } dma = rb_entry(node, struct gvt_dma, gfn_node); - gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr); + gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size); __gvt_cache_remove_entry(vgpu, dma); mutex_unlock(&vgpu->vdev.cache_lock); } @@ -515,7 +569,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, if (!entry) continue; - gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr); + gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr, + entry->size); __gvt_cache_remove_entry(vgpu, entry); } mutex_unlock(&vgpu->vdev.cache_lock); @@ -1648,7 +1703,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) } int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, - dma_addr_t *dma_addr) + unsigned long size, dma_addr_t *dma_addr) { struct kvmgt_guest_info *info; struct intel_vgpu *vgpu; @@ -1665,11 +1720,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, entry = __gvt_cache_find_gfn(info->vgpu, gfn); if (!entry) { - ret = gvt_dma_map_page(vgpu, gfn, dma_addr); + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; - ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr); + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); if (ret) goto err_unmap; } else { @@ -1681,7 +1736,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, return 0; err_unmap: - gvt_dma_unmap_page(vgpu, gfn, *dma_addr); + gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size); err_unlock: mutex_unlock(&info->vgpu->vdev.cache_lock); return ret; @@ -1691,7 +1746,8 @@ static void __gvt_dma_release(struct kref *ref) { struct gvt_dma *entry = container_of(ref, typeof(*entry), ref); - gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr); + gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr, + entry->size); __gvt_cache_remove_entry(entry->vgpu, entry); } diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index b31eb36fc102..994366035364 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -67,7 +67,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, return; gvt = vgpu->gvt; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); if (reg_is_mmio(gvt, offset)) { if (read) @@ -85,7 +85,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, memcpy(pt, p_data, bytes); } - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -109,7 +109,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); return 0; } - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); @@ -156,7 +156,7 @@ err: gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", offset, bytes); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -182,7 +182,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, return 0; } - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); @@ -220,7 +220,7 @@ err: gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, bytes); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 
dac8c6401e26..1ffc69eba30e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -42,15 +42,16 @@ struct intel_vgpu; #define D_BDW (1 << 0) #define D_SKL (1 << 1) #define D_KBL (1 << 2) +#define D_BXT (1 << 3) -#define D_GEN9PLUS (D_SKL | D_KBL) -#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN9PLUS (D_SKL | D_KBL | D_BXT) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL | D_BXT) -#define D_SKL_PLUS (D_SKL | D_KBL) -#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL | D_BXT) #define D_PRE_SKL (D_BDW) -#define D_ALL (D_BDW | D_SKL | D_KBL) +#define D_ALL (D_BDW | D_SKL | D_KBL | D_BXT) typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *, unsigned int); diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 5ca9caf7552a..42e1e6bdcc2c 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -364,7 +364,8 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || + IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -401,7 +402,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (IS_KABYLAKE(dev_priv) && ring_id == RCS) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS) return; if (!pre && !gen9_render_mocs.initialized) @@ -446,9 +447,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, #define CTX_CONTEXT_CONTROL_VAL 0x03 -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id) +bool is_inhibit_context(struct intel_context *ce) { - u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state; + const u32 *reg_state = ce->lrc_reg_state; u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); @@ -467,7 +468,9 @@ static void switch_mmio(struct intel_vgpu *pre, u32 old_v, new_v; dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv) + || IS_BROXTON(dev_priv)) switch_mocs(pre, next, ring_id); for (mmio = dev_priv->gvt->engine_mmio_list.mmio; @@ -479,7 +482,8 @@ static void switch_mmio(struct intel_vgpu *pre, * state image on kabylake, it's initialized by lri command and * save or restore with context together. */ - if (IS_KABYLAKE(dev_priv) && mmio->in_context) + if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) + && mmio->in_context) continue; // save @@ -501,7 +505,7 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. 
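is_inhibit_context() above tests a masked-bit field: in these context-control words the upper 16 bits select which of the lower 16 bits are being written, so "inhibit enabled" means the select bit and the value bit are both set. A tiny stand-alone illustration, with an invented helper in the spirit of _MASKED_BIT_ENABLE():

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical masked-bit helper: high half selects, low half is the value. */
#define DEMO_MASKED_BIT_ENABLE(b) ((uint32_t)(((b) << 16) | (b)))

static bool demo_masked_bit_is_enabled(uint32_t reg_val, uint32_t bit)
{
        uint32_t mask = DEMO_MASKED_BIT_ENABLE(bit);

        /* Both the select (high) and value (low) halves must be set. */
        return (reg_val & mask) == mask;
}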
*/ if (mmio->in_context && - !is_inhibit_context(s->shadow_ctx, ring_id)) + !is_inhibit_context(&s->shadow_ctx->__engine[ring_id])) continue; if (mmio->mask) @@ -574,7 +578,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) { struct engine_mmio *mmio; - if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) + if (IS_SKYLAKE(gvt->dev_priv) || + IS_KABYLAKE(gvt->dev_priv) || + IS_BROXTON(gvt->dev_priv)) gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; else gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 0439eb8057a8..5c3b9ff9f96a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt); -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id); +bool is_inhibit_context(struct intel_context *ce); int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, struct i915_request *req); diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 32ffcd566cdd..67f19992b226 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn( /** * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page * @vgpu: a vGPU - * @gpfn: guest pfn + * @gfn: guest pfn + * @size: page size * @dma_addr: retrieve allocated dma addr * * Returns: * 0 on success, negative error code if failed. */ static inline int intel_gvt_hypervisor_dma_map_guest_page( - struct intel_vgpu *vgpu, unsigned long gfn, + struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) { - return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, + return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, dma_addr); } diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c index 53e2bd79c97d..256d0db8bbb1 100644 --- a/drivers/gpu/drm/i915/gvt/page_track.c +++ b/drivers/gpu/drm/i915/gvt/page_track.c @@ -157,11 +157,10 @@ int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, void *data, unsigned int bytes) { - struct intel_gvt *gvt = vgpu->gvt; struct intel_vgpu_page_track *page_track; int ret = 0; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT); if (!page_track) { @@ -179,6 +178,6 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa, } out: - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index d053cbe1dc94..09d7bb72b4ff 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -228,7 +228,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt) struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; ktime_t cur_time; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); cur_time = ktime_get(); if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, @@ -244,7 +244,7 @@ void intel_gvt_schedule(struct intel_gvt *gvt) vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time); tbs_sched_func(sched_data); - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); } static enum 
hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) @@ -359,39 +359,65 @@ static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { int intel_gvt_init_sched_policy(struct intel_gvt *gvt) { + int ret; + + mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops = &tbs_schedule_ops; + ret = gvt->scheduler.sched_ops->init(gvt); + mutex_unlock(&gvt->sched_lock); - return gvt->scheduler.sched_ops->init(gvt); + return ret; } void intel_gvt_clean_sched_policy(struct intel_gvt *gvt) { + mutex_lock(&gvt->sched_lock); gvt->scheduler.sched_ops->clean(gvt); + mutex_unlock(&gvt->sched_lock); } +/* for per-vgpu scheduler policy, there are 2 per-vgpu data: + * sched_data, and sched_ctl. We see these 2 data as part of + * the global scheduler which are proteced by gvt->sched_lock. + * Caller should make their decision if the vgpu_lock should + * be hold outside. + */ + int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu) { - return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu); + int ret; + + mutex_lock(&vgpu->gvt->sched_lock); + ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); + + return ret; } void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->gvt->sched_lock); vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu); + mutex_unlock(&vgpu->gvt->sched_lock); } void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + mutex_lock(&vgpu->gvt->sched_lock); if (!vgpu_data->active) { gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } + mutex_unlock(&vgpu->gvt->sched_lock); } void intel_gvt_kick_schedule(struct intel_gvt *gvt) { + mutex_lock(&gvt->sched_lock); intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); + mutex_unlock(&gvt->sched_lock); } void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) @@ -406,6 +432,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); + mutex_lock(&vgpu->gvt->sched_lock); scheduler->sched_ops->stop_schedule(vgpu); if (scheduler->next_vgpu == vgpu) @@ -425,4 +452,5 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index c2d183b91500..b0e566956b8d 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -45,20 +45,16 @@ static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) { - struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW; int i; for (i = 0; i < 8; i++) - pdp_pair[i].val = pdp[7 - i]; + ring_context->pdps[i].val = pdp[7 - i]; } static void update_shadow_pdps(struct intel_vgpu_workload *workload) { - struct intel_vgpu *vgpu = workload->vgpu; - int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx; struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; + workload->req->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; @@ -128,9 +124,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; int ring_id = workload->ring_id; - struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx; struct drm_i915_gem_object *ctx_obj = - 
shadow_ctx->__engine[ring_id].state->obj; + workload->req->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *dst; @@ -205,7 +200,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) static inline bool is_gvt_request(struct i915_request *req) { - return i915_gem_context_force_single_submission(req->ctx); + return i915_gem_context_force_single_submission(req->gem_context); } static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) @@ -280,10 +275,8 @@ static int shadow_context_status_change(struct notifier_block *nb, return NOTIFY_OK; } -static void shadow_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static void shadow_context_descriptor_update(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); u64 desc = 0; desc = ce->lrc_desc; @@ -292,7 +285,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, * like GEN8_CTX_* cached in desc_template */ desc &= U64_MAX << 12; - desc |= ctx->desc_template & ((1ULL << 12) - 1); + desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1); ce->lrc_desc = desc; } @@ -300,12 +293,12 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx, static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct i915_request *req = workload->req; void *shadow_ring_buffer_va; u32 *cs; - struct i915_request *req = workload->req; - if (IS_KABYLAKE(req->i915) && - is_inhibit_context(req->ctx, req->engine->id)) + if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)) + && is_inhibit_context(req->hw_context)) intel_vgpu_restore_inhibit_context(vgpu, req); /* allocate shadow ring buffer */ @@ -353,92 +346,67 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) struct intel_vgpu_submission *s = &vgpu->submission; struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - int ring_id = workload->ring_id; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct intel_ring *ring; + struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; + struct intel_context *ce; + struct i915_request *rq; int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (workload->shadowed) + if (workload->req) return 0; + /* pin shadow context by gvt even the shadow context will be pinned + * when i915 alloc request. That is because gvt will update the guest + * context from shadow context when workload is completed, and at that + * moment, i915 may already unpined the shadow context to make the + * shadow_ctx pages invalid. So gvt need to pin itself. After update + * the guest context, gvt can unpin the shadow_ctx safely. 
+ */ + ce = intel_context_pin(shadow_ctx, engine); + if (IS_ERR(ce)) { + gvt_vgpu_err("fail to pin shadow context\n"); + return PTR_ERR(ce); + } + shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated)) - shadow_context_descriptor_update(shadow_ctx, - dev_priv->engine[ring_id]); + if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated)) + shadow_context_descriptor_update(ce); ret = intel_gvt_scan_and_shadow_ringbuffer(workload); if (ret) - goto err_scan; + goto err_unpin; if ((workload->ring_id == RCS) && (workload->wa_ctx.indirect_ctx.size != 0)) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) - goto err_scan; + goto err_shadow; } - /* pin shadow context by gvt even the shadow context will be pinned - * when i915 alloc request. That is because gvt will update the guest - * context from shadow context when workload is completed, and at that - * moment, i915 may already unpined the shadow context to make the - * shadow_ctx pages invalid. So gvt need to pin itself. After update - * the guest context, gvt can unpin the shadow_ctx safely. - */ - ring = intel_context_pin(shadow_ctx, engine); - if (IS_ERR(ring)) { - ret = PTR_ERR(ring); - gvt_vgpu_err("fail to pin shadow context\n"); + rq = i915_request_alloc(engine, shadow_ctx); + if (IS_ERR(rq)) { + gvt_vgpu_err("fail to allocate gem request\n"); + ret = PTR_ERR(rq); goto err_shadow; } + workload->req = i915_request_get(rq); ret = populate_shadow_context(workload); if (ret) - goto err_unpin; - workload->shadowed = true; - return 0; + goto err_req; -err_unpin: - intel_context_unpin(shadow_ctx, engine); + return 0; +err_req: + rq = fetch_and_zero(&workload->req); + i915_request_put(rq); err_shadow: release_shadow_wa_ctx(&workload->wa_ctx); -err_scan: - return ret; -} - -static int intel_gvt_generate_request(struct intel_vgpu_workload *workload) -{ - int ring_id = workload->ring_id; - struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - struct i915_request *rq; - struct intel_vgpu *vgpu = workload->vgpu; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - int ret; - - rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx); - if (IS_ERR(rq)) { - gvt_vgpu_err("fail to allocate gem request\n"); - ret = PTR_ERR(rq); - goto err_unpin; - } - - gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq); - - workload->req = i915_request_get(rq); - ret = copy_workload_to_ring_buffer(workload); - if (ret) - goto err_unpin; - return 0; - err_unpin: - intel_context_unpin(shadow_ctx, engine); - release_shadow_wa_ctx(&workload->wa_ctx); + intel_context_unpin(ce); return ret; } @@ -508,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) i915_gem_obj_finish_shmem_access(bb->obj); bb->accessing = false; - i915_vma_move_to_active(bb->vma, workload->req, 0); + ret = i915_vma_move_to_active(bb->vma, + workload->req, + 0); + if (ret) + goto err; } } return 0; @@ -517,21 +489,13 @@ err: return ret; } -static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) +static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - struct intel_vgpu_workload *workload = container_of(wa_ctx, - struct intel_vgpu_workload, - wa_ctx); - int ring_id = 
workload->ring_id; - struct intel_vgpu_submission *s = &workload->vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; - struct execlist_ring_context *shadow_ring_context; - struct page *page; - - page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); - shadow_ring_context = kmap_atomic(page); + struct intel_vgpu_workload *workload = + container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx); + struct i915_request *rq = workload->req; + struct execlist_ring_context *shadow_ring_context = + (struct execlist_ring_context *)rq->hw_context->lrc_reg_state; shadow_ring_context->bb_per_ctx_ptr.val = (shadow_ring_context->bb_per_ctx_ptr.val & @@ -539,9 +503,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) shadow_ring_context->rcs_indirect_ctx.val = (shadow_ring_context->rcs_indirect_ctx.val & (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma; - - kunmap_atomic(shadow_ring_context); - return 0; } static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) @@ -633,7 +594,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload) goto err_unpin_mm; } - ret = intel_gvt_generate_request(workload); + ret = copy_workload_to_ring_buffer(workload); if (ret) { gvt_vgpu_err("fail to generate request\n"); goto err_unpin_mm; @@ -670,16 +631,14 @@ err_unpin_mm: static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ring_id = workload->ring_id; - struct intel_engine_cs *engine = dev_priv->engine[ring_id]; - int ret = 0; + int ret; gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", ring_id, workload); + mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); @@ -687,10 +646,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) goto out; ret = prepare_workload(workload); - if (ret) { - intel_context_unpin(shadow_ctx, engine); - goto out; - } out: if (ret) @@ -704,6 +659,7 @@ out: } mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -713,7 +669,7 @@ static struct intel_vgpu_workload *pick_next_workload( struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; struct intel_vgpu_workload *workload = NULL; - mutex_lock(&gvt->lock); + mutex_lock(&gvt->sched_lock); /* * no current vgpu / will be scheduled out / no workload @@ -759,33 +715,29 @@ static struct intel_vgpu_workload *pick_next_workload( atomic_inc(&workload->vgpu->submission.running_workload_num); out: - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); return workload; } static void update_guest_context(struct intel_vgpu_workload *workload) { + struct i915_request *rq = workload->req; struct intel_vgpu *vgpu = workload->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_vgpu_submission *s = &vgpu->submission; - struct i915_gem_context *shadow_ctx = s->shadow_ctx; - int ring_id = workload->ring_id; - struct drm_i915_gem_object *ctx_obj = - shadow_ctx->__engine[ring_id].state->obj; + struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj; struct execlist_ring_context *shadow_ring_context; struct page *page; void *src; unsigned long context_gpa, context_page_num; int i; - gvt_dbg_sched("ring id %d 
workload lrca %x\n", ring_id, - workload->ctx_desc.lrca); - - context_page_num = gvt->dev_priv->engine[ring_id]->context_size; + gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, + workload->ctx_desc.lrca); + context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) context_page_num = 19; i = 2; @@ -858,19 +810,17 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) scheduler->current_workload[ring_id]; struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; + struct i915_request *rq = workload->req; int event; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); + mutex_lock(&gvt->sched_lock); /* For the workload w/ request, needs to wait for the context * switch to make sure request is completed. * For the workload w/o request, directly complete the workload. */ - if (workload->req) { - struct drm_i915_private *dev_priv = - workload->vgpu->gvt->dev_priv; - struct intel_engine_cs *engine = - dev_priv->engine[workload->ring_id]; + if (rq) { wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); @@ -886,8 +836,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload->status = 0; } - i915_request_put(fetch_and_zero(&workload->req)); - if (!workload->status && !(vgpu->resetting_eng & ENGINE_MASK(ring_id))) { update_guest_context(workload); @@ -896,10 +844,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) INTEL_GVT_EVENT_MAX) intel_vgpu_trigger_virtual_event(vgpu, event); } - mutex_lock(&dev_priv->drm.struct_mutex); + /* unpin shadow ctx as the shadow_ctx update is done */ - intel_context_unpin(s->shadow_ctx, engine); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_lock(&rq->i915->drm.struct_mutex); + intel_context_unpin(rq->hw_context); + mutex_unlock(&rq->i915->drm.struct_mutex); + + i915_request_put(fetch_and_zero(&workload->req)); } gvt_dbg_sched("ring id %d complete workload %p status %d\n", @@ -939,7 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) if (gvt->scheduler.need_reschedule) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); - mutex_unlock(&gvt->lock); + mutex_unlock(&gvt->sched_lock); + mutex_unlock(&vgpu->vgpu_lock); } struct workload_thread_param { @@ -957,7 +909,8 @@ static int workload_thread(void *priv) struct intel_vgpu *vgpu = NULL; int ret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) - || IS_KABYLAKE(gvt->dev_priv); + || IS_KABYLAKE(gvt->dev_priv) + || IS_BROXTON(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); @@ -991,9 +944,7 @@ static int workload_thread(void *priv) intel_uncore_forcewake_get(gvt->dev_priv, FORCEWAKE_ALL); - mutex_lock(&gvt->lock); ret = dispatch_workload(workload); - mutex_unlock(&gvt->lock); if (ret) { vgpu = workload->vgpu; @@ -1270,7 +1221,6 @@ alloc_workload(struct intel_vgpu *vgpu) atomic_set(&workload->shadow_ctx_active, 0); workload->status = -EINPROGRESS; - workload->shadowed = false; workload->vgpu = vgpu; return workload; @@ -1285,7 +1235,7 @@ static void read_guest_pdps(struct intel_vgpu *vgpu, u64 gpa; int i; - gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val); + gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val); for (i = 0; i < 8; i++) intel_gvt_hypervisor_read_gpa(vgpu, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h 
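complete_current_workload() above nests the two new locks in a fixed order, vgpu->vgpu_lock outside gvt->sched_lock, while purely per-vGPU paths (MMIO emulation, page tracking) take only vgpu_lock and scheduler-only paths take only sched_lock; keeping a single nesting order is what lets the old big gvt->lock be split without introducing deadlocks. A schematic of that ordering with stand-in pthread types, not the kernel structures:

#include <pthread.h>

/* Stand-ins for the gvt/vgpu structures; only the locks matter here. */
struct demo_gvt  { pthread_mutex_t sched_lock; };
struct demo_vgpu { pthread_mutex_t vgpu_lock; struct demo_gvt *gvt; };

/* Workload completion touches both per-vGPU and scheduler state, so it
 * takes vgpu_lock first and sched_lock inside it, the one nesting order
 * this series relies on. */
static void demo_complete_workload(struct demo_vgpu *vgpu)
{
        pthread_mutex_lock(&vgpu->vgpu_lock);
        pthread_mutex_lock(&vgpu->gvt->sched_lock);

        /* ... update per-vGPU and scheduler bookkeeping ... */

        pthread_mutex_unlock(&vgpu->gvt->sched_lock);
        pthread_mutex_unlock(&vgpu->vgpu_lock);
}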
b/drivers/gpu/drm/i915/gvt/scheduler.h index 6c644782193e..21eddab4a9cd 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -83,7 +83,6 @@ struct intel_vgpu_workload { struct i915_request *req; /* if this workload has been dispatched to i915? */ bool dispatched; - bool shadowed; int status; struct intel_vgpu_mm *shadow_mm; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 572a18c2bfb5..f6fa916517c3 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT; vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) = vgpu_aperture_gmadr_base(vgpu); @@ -58,6 +59,9 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu); + vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX; + vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX; + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); @@ -223,22 +227,20 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { - struct intel_gvt *gvt = vgpu->gvt; - - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); vgpu->active = false; if (atomic_read(&vgpu->submission.running_workload_num)) { - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_stop_schedule(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } /** @@ -252,14 +254,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); WARN(vgpu->active, "vGPU is still active!\n"); intel_gvt_debugfs_remove_vgpu(vgpu); - idr_remove(&gvt->vgpu_idr, vgpu->id); - if (idr_is_empty(&gvt->vgpu_idr)) - intel_gvt_clean_irq(gvt); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_submission(vgpu); intel_vgpu_clean_display(vgpu); @@ -269,10 +268,16 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_free_resource(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); - vfree(vgpu); + mutex_unlock(&vgpu->vgpu_lock); + mutex_lock(&gvt->lock); + idr_remove(&gvt->vgpu_idr, vgpu->id); + if (idr_is_empty(&gvt->vgpu_idr)) + intel_gvt_clean_irq(gvt); intel_gvt_update_vgpu_types(gvt); mutex_unlock(&gvt->lock); + + vfree(vgpu); } #define IDLE_VGPU_IDR 0 @@ -298,6 +303,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) vgpu->id = IDLE_VGPU_IDR; vgpu->gvt = gvt; + mutex_init(&vgpu->vgpu_lock); for (i = 0; i < I915_NUM_ENGINES; i++) INIT_LIST_HEAD(&vgpu->submission.workload_q_head[i]); @@ -324,7 +330,10 @@ out_free_vgpu: */ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) { + mutex_lock(&vgpu->vgpu_lock); intel_vgpu_clean_sched_policy(vgpu); + mutex_unlock(&vgpu->vgpu_lock); + vfree(vgpu); } @@ -342,8 +351,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (!vgpu) return ERR_PTR(-ENOMEM); - mutex_lock(&gvt->lock); - ret = idr_alloc(&gvt->vgpu_idr, vgpu, 
IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, GFP_KERNEL); if (ret < 0) @@ -353,6 +360,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, vgpu->handle = param->handle; vgpu->gvt = gvt; vgpu->sched_ctl.weight = param->weight; + mutex_init(&vgpu->vgpu_lock); INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head); INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); idr_init(&vgpu->object_idr); @@ -400,8 +408,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - mutex_unlock(&gvt->lock); - return vgpu; out_clean_sched_policy: @@ -424,7 +430,6 @@ out_clean_idr: idr_remove(&gvt->vgpu_idr, vgpu->id); out_free_vgpu: vfree(vgpu); - mutex_unlock(&gvt->lock); return ERR_PTR(ret); } @@ -456,12 +461,12 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz); + mutex_lock(&gvt->lock); vgpu = __intel_gvt_create_vgpu(gvt, ¶m); - if (IS_ERR(vgpu)) - return vgpu; - - /* calculate left instance change for types */ - intel_gvt_update_vgpu_types(gvt); + if (!IS_ERR(vgpu)) + /* calculate left instance change for types */ + intel_gvt_update_vgpu_types(gvt); + mutex_unlock(&gvt->lock); return vgpu; } @@ -473,7 +478,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, * @engine_mask: engines to reset for GT reset * * This function is called when user wants to reset a virtual GPU through - * device model reset or GT reset. The caller should hold the gvt lock. + * device model reset or GT reset. The caller should hold the vgpu lock. * * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset * the whole vGPU to default state as when it is created. This vGPU function @@ -513,9 +518,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * scheduler when the reset is triggered by current vgpu. */ if (scheduler->current_vgpu == NULL) { - mutex_unlock(&gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); - mutex_lock(&gvt->lock); + mutex_lock(&vgpu->vgpu_lock); } intel_vgpu_reset_submission(vgpu, resetting_eng); @@ -555,7 +560,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, */ void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) { - mutex_lock(&vgpu->gvt->lock); + mutex_lock(&vgpu->vgpu_lock); intel_gvt_reset_vgpu_locked(vgpu, true, 0); - mutex_unlock(&vgpu->gvt->lock); + mutex_unlock(&vgpu->vgpu_lock); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 13e7b9e4a6e6..f9ce35da4123 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data) } else { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); - if (ppgtt->base.file != stats->file_priv) + if (ppgtt->vm.file != stats->file_priv) continue; } @@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) dpy_count, dpy_size); seq_printf(m, "%llu [%pa] gtt total\n", - ggtt->base.total, &ggtt->mappable_end); + ggtt->vm.total, &ggtt->mappable_end); seq_printf(m, "Supported page sizes: %s\n", stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes, buf, sizeof(buf))); @@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct i915_request, client_link); rcu_read_lock(); - task = pid_task(request && request->ctx->pid ? 
- request->ctx->pid : file->pid, + task = pid_task(request && request->gem_context->pid ? + request->gem_context->pid : file->pid, PIDTYPE_PID); print_file_stats(m, task ? task->comm : "<unknown>", stats); rcu_read_unlock(); @@ -1162,19 +1162,28 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { - pm_ier = I915_READ(GEN6_PMIER); - pm_imr = I915_READ(GEN6_PMIMR); - pm_isr = I915_READ(GEN6_PMISR); - pm_iir = I915_READ(GEN6_PMIIR); - pm_mask = I915_READ(GEN6_PMINTRMSK); - } else { + if (INTEL_GEN(dev_priv) >= 11) { + pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); + pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK); + /* + * The equivalent to the PM ISR & IIR cannot be read + * without affecting the current state of the system + */ + pm_isr = 0; + pm_iir = 0; + } else if (INTEL_GEN(dev_priv) >= 8) { pm_ier = I915_READ(GEN8_GT_IER(2)); pm_imr = I915_READ(GEN8_GT_IMR(2)); pm_isr = I915_READ(GEN8_GT_ISR(2)); pm_iir = I915_READ(GEN8_GT_IIR(2)); - pm_mask = I915_READ(GEN6_PMINTRMSK); + } else { + pm_ier = I915_READ(GEN6_PMIER); + pm_imr = I915_READ(GEN6_PMIMR); + pm_isr = I915_READ(GEN6_PMISR); + pm_iir = I915_READ(GEN6_PMIIR); } + pm_mask = I915_READ(GEN6_PMINTRMSK); + seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); seq_printf(m, "HW control enabled: %s\n", @@ -1182,8 +1191,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "SW control enabled: %s\n", yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); - seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", - pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); + + seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", + pm_ier, pm_imr, pm_mask); + if (INTEL_GEN(dev_priv) <= 10) + seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", + pm_isr, pm_iir); seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", rps->pm_intrmsk_mbz); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); @@ -1205,7 +1218,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); seq_printf(m, "RP PREV UP: %d (%dus)\n", rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); - seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold); + seq_printf(m, "Up threshold: %d%%\n", + rps->power.up_threshold); seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); @@ -1213,7 +1227,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); seq_printf(m, "RP PREV DOWN: %d (%dus)\n", rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); - seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold); + seq_printf(m, "Down threshold: %d%%\n", + rps->power.down_threshold); max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff; @@ -1346,11 +1361,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "\tseqno = %x [current %x, last %x]\n", engine->hangcheck.seqno, seqno[id], intel_engine_last_submit(engine)); - seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n", + seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? 
%s\n", yesno(intel_engine_has_waiter(engine)), yesno(test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)), - yesno(engine->hangcheck.stalled)); + yesno(engine->hangcheck.stalled), + yesno(engine->hangcheck.wedged)); spin_lock_irq(&b->rb_lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { @@ -1645,11 +1661,6 @@ static int i915_fbc_status(struct seq_file *m, void *unused) else seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - if (fbc->work.scheduled) - seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n", - fbc->work.scheduled_vblank, - drm_crtc_vblank_count(&fbc->crtc->base)); - if (intel_fbc_is_active(dev_priv)) { u32 mask; @@ -1895,7 +1906,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fbdev_fb->base.format->cpp[0] * 8, fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); - describe_obj(m, fbdev_fb->obj); + describe_obj(m, intel_fb_obj(&fbdev_fb->base)); seq_putc(m, '\n'); } #endif @@ -1913,7 +1924,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.format->cpp[0] * 8, fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); - describe_obj(m, fb->obj); + describe_obj(m, intel_fb_obj(&fb->base)); seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.fb_lock); @@ -2209,6 +2220,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); + seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); seq_printf(m, "Frequency requested %d\n", intel_gpu_freq(dev_priv, rps->cur_freq)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", @@ -2252,13 +2264,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", - rps_power_to_str(rps->power)); + rps_power_to_str(rps->power.mode)); seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", rpup && rpupei ? 100 * rpup / rpupei : 0, - rps->up_threshold); + rps->power.up_threshold); seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", rpdown && rpdownei ? 
100 * rpdown / rpdownei : 0, - rps->down_threshold); + rps->power.down_threshold); } else { seq_puts(m, "\nRPS Autotuning inactive\n"); } @@ -2523,7 +2535,7 @@ static int i915_guc_log_level_get(void *data, u64 *val) if (!USES_GUC(dev_priv)) return -ENODEV; - *val = intel_guc_log_level_get(&dev_priv->guc.log); + *val = intel_guc_log_get_level(&dev_priv->guc.log); return 0; } @@ -2535,7 +2547,7 @@ static int i915_guc_log_level_set(void *data, u64 val) if (!USES_GUC(dev_priv)) return -ENODEV; - return intel_guc_log_level_set(&dev_priv->guc.log, val); + return intel_guc_log_set_level(&dev_priv->guc.log, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, @@ -2583,31 +2595,9 @@ static const struct file_operations i915_guc_log_relay_fops = { .release = i915_guc_log_relay_release, }; -static const char *psr2_live_status(u32 val) -{ - static const char * const live_status[] = { - "IDLE", - "CAPTURE", - "CAPTURE_FS", - "SLEEP", - "BUFON_FW", - "ML_UP", - "SU_STANDBY", - "FAST_SLEEP", - "DEEP_SLEEP", - "BUF_ON", - "TG_ON" - }; - - val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; - if (val < ARRAY_SIZE(live_status)) - return live_status[val]; - - return "unknown"; -} - -static const char *psr_sink_status(u8 val) +static int i915_psr_sink_status_show(struct seq_file *m, void *data) { + u8 val; static const char * const sink_status[] = { "inactive", "transition to active, capture and display", @@ -2616,22 +2606,94 @@ static const char *psr_sink_status(u8 val) "transition to inactive, capture and display, timing re-sync", "reserved", "reserved", - "sink internal error" + "sink internal error", }; + struct drm_connector *connector = m->private; + struct drm_i915_private *dev_priv = to_i915(connector->dev); + struct intel_dp *intel_dp = + enc_to_intel_dp(&intel_attached_encoder(connector)->base); + int ret; - val &= DP_PSR_SINK_STATE_MASK; - if (val < ARRAY_SIZE(sink_status)) - return sink_status[val]; + if (!CAN_PSR(dev_priv)) { + seq_puts(m, "PSR Unsupported\n"); + return -ENODEV; + } - return "unknown"; + if (connector->status != connector_status_connected) + return -ENODEV; + + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); + + if (ret == 1) { + const char *str = "unknown"; + + val &= DP_PSR_SINK_STATE_MASK; + if (val < ARRAY_SIZE(sink_status)) + str = sink_status[val]; + seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); + } else { + return ret; + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); + +static void +psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) +{ + u32 val, psr_status; + + if (dev_priv->psr.psr2_enabled) { + static const char * const live_status[] = { + "IDLE", + "CAPTURE", + "CAPTURE_FS", + "SLEEP", + "BUFON_FW", + "ML_UP", + "SU_STANDBY", + "FAST_SLEEP", + "DEEP_SLEEP", + "BUF_ON", + "TG_ON" + }; + psr_status = I915_READ(EDP_PSR2_STATUS); + val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >> + EDP_PSR2_STATUS_STATE_SHIFT; + if (val < ARRAY_SIZE(live_status)) { + seq_printf(m, "Source PSR status: 0x%x [%s]\n", + psr_status, live_status[val]); + return; + } + } else { + static const char * const live_status[] = { + "IDLE", + "SRDONACK", + "SRDENT", + "BUFOFF", + "BUFON", + "AUXACK", + "SRDOFFACK", + "SRDENT_ON", + }; + psr_status = I915_READ(EDP_PSR_STATUS); + val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >> + EDP_PSR_STATUS_STATE_SHIFT; + if (val < ARRAY_SIZE(live_status)) { + seq_printf(m, "Source PSR status: 0x%x [%s]\n", + psr_status, live_status[val]); + return; + } + } + + seq_printf(m, 
"Source PSR status: 0x%x [%s]\n", psr_status, "unknown"); } static int i915_edp_psr_status(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); u32 psrperf = 0; - u32 stat[3]; - enum pipe pipe; bool enabled = false; bool sink_support; @@ -2649,50 +2711,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", dev_priv->psr.busy_frontbuffer_bits); - seq_printf(m, "Re-enable work scheduled: %s\n", - yesno(work_busy(&dev_priv->psr.work.work))); - - if (HAS_DDI(dev_priv)) { - if (dev_priv->psr.psr2_enabled) - enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; - else - enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; - } else { - for_each_pipe(dev_priv, pipe) { - enum transcoder cpu_transcoder = - intel_pipe_to_cpu_transcoder(dev_priv, pipe); - enum intel_display_power_domain power_domain; - power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - if (!intel_display_power_get_if_enabled(dev_priv, - power_domain)) - continue; - - stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & - VLV_EDP_PSR_CURR_STATE_MASK; - if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || - (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) - enabled = true; - - intel_display_power_put(dev_priv, power_domain); - } - } + if (dev_priv->psr.psr2_enabled) + enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; + else + enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; seq_printf(m, "Main link in standby mode: %s\n", yesno(dev_priv->psr.link_standby)); - seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); - - if (!HAS_DDI(dev_priv)) - for_each_pipe(dev_priv, pipe) { - if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || - (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) - seq_printf(m, " pipe %c", pipe_name(pipe)); - } - seq_puts(m, "\n"); + seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); /* - * VLV/CHV PSR has no kind of performance counter * SKL+ Perf counter is reset to 0 everytime DC state is entered */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -2701,21 +2731,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Performance_Counter: %u\n", psrperf); } - if (dev_priv->psr.psr2_enabled) { - u32 psr2 = I915_READ(EDP_PSR2_STATUS); - - seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n", - psr2, psr2_live_status(psr2)); - } - if (dev_priv->psr.enabled) { - struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux; - u8 val; - - if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1) - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, - psr_sink_status(val)); - } + psr_source_status(dev_priv, m); mutex_unlock(&dev_priv->psr.lock); if (READ_ONCE(dev_priv->psr.debug)) { @@ -2762,86 +2779,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, i915_edp_psr_debug_get, i915_edp_psr_debug_set, "%llu\n"); -static int i915_sink_crc(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_connector *connector; - struct drm_connector_list_iter conn_iter; - struct intel_dp *intel_dp = NULL; - struct drm_modeset_acquire_ctx ctx; - int ret; - u8 crc[6]; - - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - - drm_connector_list_iter_begin(dev, &conn_iter); - - for_each_intel_connector_iter(connector, &conn_iter) { - struct drm_crtc *crtc; - struct drm_connector_state *state; - struct intel_crtc_state *crtc_state; - - 
if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) - continue; - -retry: - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); - if (ret) - goto err; - - state = connector->base.state; - if (!state->best_encoder) - continue; - - crtc = state->crtc; - ret = drm_modeset_lock(&crtc->mutex, &ctx); - if (ret) - goto err; - - crtc_state = to_intel_crtc_state(crtc->state); - if (!crtc_state->base.active) - continue; - - /* - * We need to wait for all crtc updates to complete, to make - * sure any pending modesets and plane updates are completed. - */ - if (crtc_state->base.commit) { - ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); - - if (ret) - goto err; - } - - intel_dp = enc_to_intel_dp(state->best_encoder); - - ret = intel_dp_sink_crc(intel_dp, crtc_state, crc); - if (ret) - goto err; - - seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", - crc[0], crc[1], crc[2], - crc[3], crc[4], crc[5]); - goto out; - -err: - if (ret == -EDEADLK) { - ret = drm_modeset_backoff(&ctx); - if (!ret) - goto retry; - } - goto out; - } - ret = -ENODEV; -out: - drm_connector_list_iter_end(&conn_iter); - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - - return ret; -} - static int i915_energy_uJ(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -3398,28 +3335,13 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_workarounds *workarounds = &dev_priv->workarounds; + struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds; int i; - intel_runtime_pm_get(dev_priv); - - seq_printf(m, "Workarounds applied: %d\n", workarounds->count); - for (i = 0; i < workarounds->count; ++i) { - i915_reg_t addr; - u32 mask, value, read; - bool ok; - - addr = workarounds->reg[i].addr; - mask = workarounds->reg[i].mask; - value = workarounds->reg[i].value; - read = I915_READ(addr); - ok = (value & mask) == (read & mask); - seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", - i915_mmio_reg_offset(addr), value, mask, read, ok ? 
"OK" : "FAIL"); - } - - intel_runtime_pm_put(dev_priv); + seq_printf(m, "Workarounds applied: %d\n", wa->count); + for (i = 0; i < wa->count; ++i) + seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", + wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask); return 0; } @@ -4121,7 +4043,8 @@ fault_irq_set(struct drm_i915_private *i915, err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) goto err_unlock; @@ -4226,7 +4149,8 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (val & DROP_RETIRE) i915_retire_requests(dev_priv); @@ -4245,8 +4169,13 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_shrink_all(dev_priv); fs_reclaim_release(GFP_KERNEL); - if (val & DROP_IDLE) - drain_delayed_work(&dev_priv->gt.idle_work); + if (val & DROP_IDLE) { + do { + if (READ_ONCE(dev_priv->gt.active_requests)) + flush_delayed_work(&dev_priv->gt.retire_work); + drain_delayed_work(&dev_priv->gt.idle_work); + } while (READ_ONCE(dev_priv->gt.awake)); + } if (val & DROP_FREED) i915_gem_drain_freed_objects(dev_priv); @@ -4795,7 +4724,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_llc", i915_llc, 0}, {"i915_edp_psr_status", i915_edp_psr_status, 0}, - {"i915_sink_crc_eDP1", i915_sink_crc, 0}, {"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_power_domain_info", i915_power_domain_info, 0}, @@ -4829,7 +4757,6 @@ static const struct i915_debugfs_files { #endif {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, {"i915_next_seqno", &i915_next_seqno_fops}, - {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, @@ -4849,7 +4776,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) { struct drm_minor *minor = dev_priv->drm.primary; struct dentry *ent; - int ret, i; + int i; ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, to_i915(minor->dev), @@ -4857,10 +4784,6 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv) if (!ent) return -ENOMEM; - ret = intel_pipe_crc_create(minor); - if (ret) - return ret; - for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { ent = debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR, @@ -4982,9 +4905,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector) debugfs_create_file("i915_dpcd", S_IRUGO, root, connector, &i915_dpcd_fops); - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { debugfs_create_file("i915_panel_timings", S_IRUGO, root, connector, &i915_panel_fops); + debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, + connector, &i915_psr_sink_status_fops); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9c449b8d8eab..f8cfd16be534 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -67,11 +67,18 @@ bool __i915_inject_load_failure(const char *func, int line) if (++i915_load_fail_count == i915_modparams.inject_load_failure) { DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", i915_modparams.inject_load_failure, func, line); + 
i915_modparams.inject_load_failure = 0; return true; } return false; } + +bool i915_error_injected(void) +{ + return i915_load_fail_count && !i915_modparams.inject_load_failure; +} + #endif #define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" @@ -97,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, vaf.fmt = fmt; vaf.va = &args; - dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", - __builtin_return_address(0), &vaf); + if (is_error) + dev_printk(level, kdev, "%pV", &vaf); + else + dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV", + __builtin_return_address(0), &vaf); + + va_end(args); if (is_error && !shown_bug_once) { /* @@ -110,25 +122,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, dev_notice(kdev, "%s", FDO_BUG_MSG); shown_bug_once = true; } - - va_end(args); -} - -static bool i915_error_injected(struct drm_i915_private *dev_priv) -{ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) - return i915_modparams.inject_load_failure && - i915_load_fail_count == i915_modparams.inject_load_failure; -#else - return false; -#endif } -#define i915_load_error(dev_priv, fmt, ...) \ - __i915_printk(dev_priv, \ - i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ - fmt, ##__VA_ARGS__) - /* Map PCH device id to PCH type, or PCH_NONE if unknown. */ static enum intel_pch intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) @@ -233,6 +228,8 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) id = INTEL_PCH_SPT_DEVICE_ID_TYPE; else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) id = INTEL_PCH_CNP_DEVICE_ID_TYPE; + else if (IS_ICELAKE(dev_priv)) + id = INTEL_PCH_ICP_DEVICE_ID_TYPE; if (id) DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); @@ -246,14 +243,6 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) { struct pci_dev *pch = NULL; - /* In all current cases, num_pipes is equivalent to the PCH_NOP setting - * (which really amounts to a PCH but no South Display). - */ - if (INTEL_INFO(dev_priv)->num_pipes == 0) { - dev_priv->pch_type = PCH_NOP; - return; - } - /* * The reason to probe ISA bridge instead of Dev31:Fun0 is to * make graphics device passthrough work easy for VMM, that only @@ -282,18 +271,28 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) } else if (intel_is_virt_pch(id, pch->subsystem_vendor, pch->subsystem_device)) { id = intel_virt_detect_pch(dev_priv); - if (id) { - pch_type = intel_pch_type(dev_priv, id); - if (WARN_ON(pch_type == PCH_NONE)) - pch_type = PCH_NOP; - } else { - pch_type = PCH_NOP; - } + pch_type = intel_pch_type(dev_priv, id); + + /* Sanity check virtual PCH id */ + if (WARN_ON(id && pch_type == PCH_NONE)) + id = 0; + dev_priv->pch_type = pch_type; dev_priv->pch_id = id; break; } } + + /* + * Use PCH_NOP (PCH but no South Display) for PCH platforms without + * display. + */ + if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) { + DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n"); + dev_priv->pch_type = PCH_NOP; + dev_priv->pch_id = 0; + } + if (!pch) DRM_DEBUG_KMS("No PCH found.\n"); @@ -634,26 +633,6 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; -static void i915_gem_fini(struct drm_i915_private *dev_priv) -{ - /* Flush any outstanding unpin_work. 
*/ - i915_gem_drain_workqueue(dev_priv); - - mutex_lock(&dev_priv->drm.struct_mutex); - intel_uc_fini_hw(dev_priv); - intel_uc_fini(dev_priv); - i915_gem_cleanup_engines(dev_priv); - i915_gem_contexts_fini(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - intel_uc_fini_misc(dev_priv); - i915_gem_cleanup_userptr(dev_priv); - - i915_gem_drain_freed_objects(dev_priv); - - WARN_ON(!list_empty(&dev_priv->contexts.list)); -} - static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -703,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = i915_gem_init(dev_priv); if (ret) - goto cleanup_irq; + goto cleanup_modeset; intel_setup_overlay(dev_priv); @@ -723,6 +702,8 @@ cleanup_gem: if (i915_gem_suspend(dev_priv)) DRM_ERROR("failed to idle hardware; continuing to unload!\n"); i915_gem_fini(dev_priv); +cleanup_modeset: + intel_modeset_cleanup(dev); cleanup_irq: drm_irq_uninstall(dev); intel_teardown_gmbus(dev_priv); @@ -919,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, spin_lock_init(&dev_priv->uncore.lock); mutex_init(&dev_priv->sb_lock); - mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); @@ -1173,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) intel_uncore_sanitize(dev_priv); - intel_opregion_setup(dev_priv); - i915_gem_load_init_fences(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1189,6 +1167,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * get lost on g4x as well, and interrupt delivery seems to stay * properly dead afterwards. So we'll just disable them for all * pre-gen5 chipsets. + * + * dp aux and gmbus irq on gen4 seems to be able to generate legacy + * interrupts even when in MSI mode. This results in spurious + * interrupt warnings if the legacy irq no. is shared with another + * device. The kernel then disables that interrupt source and so + * prevents the other device from working properly. */ if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) @@ -1197,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto err_ggtt; + goto err_msi; + + intel_opregion_setup(dev_priv); return 0; +err_msi: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pm_qos_remove_request(&dev_priv->pm_qos); err_ggtt: i915_ggtt_cleanup_hw(dev_priv); err_perf: @@ -1433,6 +1423,7 @@ out_fini: drm_dev_fini(&dev_priv->drm); out_free: kfree(dev_priv); + pci_set_drvdata(pdev, NULL); return ret; } @@ -1553,17 +1544,30 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) return false; } +static int i915_drm_prepare(struct drm_device *dev) +{ + struct drm_i915_private *i915 = to_i915(dev); + int err; + + /* + * NB intel_display_suspend() may issue new requests after we've + * ostensibly marked the GPU as ready-to-sleep here. We need to + * split out that work and pull it forward so that after point, + * the GPU is not woken again. 
+ */ + err = i915_gem_suspend(i915); + if (err) + dev_err(&i915->drm.pdev->dev, + "GEM idle failed, suspend/resume might fail\n"); + + return err; +} + static int i915_drm_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; pci_power_t opregion_target_state; - int error; - - /* ignore lid events during suspend */ - mutex_lock(&dev_priv->modeset_restore_lock); - dev_priv->modeset_restore = MODESET_SUSPENDED; - mutex_unlock(&dev_priv->modeset_restore_lock); disable_rpm_wakeref_asserts(dev_priv); @@ -1575,16 +1579,9 @@ static int i915_drm_suspend(struct drm_device *dev) pci_save_state(pdev); - error = i915_gem_suspend(dev_priv); - if (error) { - dev_err(&pdev->dev, - "GEM idle failed, resume might fail\n"); - goto out; - } - intel_display_suspend(dev); - intel_dp_mst_suspend(dev); + intel_dp_mst_suspend(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); intel_hpd_cancel_work(dev_priv); @@ -1600,7 +1597,6 @@ static int i915_drm_suspend(struct drm_device *dev) opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; intel_opregion_notify_adapter(dev_priv, opregion_target_state); - intel_uncore_suspend(dev_priv); intel_opregion_unregister(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -1609,10 +1605,9 @@ static int i915_drm_suspend(struct drm_device *dev) intel_csr_ucode_suspend(dev_priv); -out: enable_rpm_wakeref_asserts(dev_priv); - return error; + return 0; } static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) @@ -1623,7 +1618,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) disable_rpm_wakeref_asserts(dev_priv); + i915_gem_suspend_late(dev_priv); + intel_display_set_init_power(dev_priv, false); + intel_uncore_suspend(dev_priv); /* * In case of firmware assisted context save/restore don't manually @@ -1710,6 +1708,8 @@ static int i915_drm_resume(struct drm_device *dev) disable_rpm_wakeref_asserts(dev_priv); intel_sanitize_gt_powersave(dev_priv); + i915_gem_sanitize(dev_priv); + ret = i915_ggtt_enable_hw(dev_priv); if (ret) DRM_ERROR("failed to re-enable GGTT\n"); @@ -1746,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_dp_mst_resume(dev); + intel_dp_mst_resume(dev_priv); intel_display_resume(dev); @@ -1764,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev) intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); - mutex_lock(&dev_priv->modeset_restore_lock); - dev_priv->modeset_restore = MODESET_DONE; - mutex_unlock(&dev_priv->modeset_restore_lock); - intel_opregion_notify_adapter(dev_priv, PCI_D0); enable_rpm_wakeref_asserts(dev_priv); @@ -1851,7 +1847,7 @@ static int i915_drm_resume_early(struct drm_device *dev) else intel_display_set_init_power(dev_priv, true); - i915_gem_sanitize(dev_priv); + intel_engines_sanitize(dev_priv); enable_rpm_wakeref_asserts(dev_priv); @@ -2081,6 +2077,22 @@ out: return ret; } +static int i915_pm_prepare(struct device *kdev) +{ + struct pci_dev *pdev = to_pci_dev(kdev); + struct drm_device *dev = pci_get_drvdata(pdev); + + if (!dev) { + dev_err(kdev, "DRM not initialized, aborting suspend.\n"); + return -ENODEV; + } + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + + return i915_drm_prepare(dev); +} + static int i915_pm_suspend(struct device *kdev) { struct pci_dev *pdev = to_pci_dev(kdev); @@ -2731,6 +2743,7 @@ const struct dev_pm_ops 
i915_pm_ops = { * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, * PMSG_RESUME] */ + .prepare = i915_pm_prepare, .suspend = i915_pm_suspend, .suspend_late = i915_pm_suspend_late, .resume_early = i915_pm_resume_early, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 71e1aa54f774..4aca5344863d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -40,6 +40,7 @@ #include <linux/hash.h> #include <linux/intel-iommu.h> #include <linux/kref.h> +#include <linux/mm_types.h> #include <linux/perf_event.h> #include <linux/pm_qos.h> #include <linux/reservation.h> @@ -85,8 +86,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20180514" -#define DRIVER_TIMESTAMP 1526300884 +#define DRIVER_DATE "20180719" +#define DRIVER_TIMESTAMP 1532015279 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -107,13 +108,24 @@ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) + bool __i915_inject_load_failure(const char *func, int line); #define i915_inject_load_failure() \ __i915_inject_load_failure(__func__, __LINE__) + +bool i915_error_injected(void); + #else + #define i915_inject_load_failure() false +#define i915_error_injected() false + #endif +#define i915_load_error(i915, fmt, ...) \ + __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ + fmt, ##__VA_ARGS__) + typedef struct { uint32_t val; } uint_fixed_16_16_t; @@ -287,7 +299,6 @@ struct i915_hotplug { u32 event_bits; struct delayed_work reenable_work; - struct intel_digital_port *irq_port[I915_MAX_PORTS]; u32 long_port_mask; u32 short_port_mask; struct work_struct dig_port_work; @@ -500,6 +511,7 @@ struct intel_fbc { bool enabled; bool active; + bool flip_pending; bool underrun_detected; struct work_struct underrun_work; @@ -567,12 +579,6 @@ struct intel_fbc { unsigned int gen9_wa_cfb_stride; } params; - struct intel_fbc_work { - bool scheduled; - u64 scheduled_vblank; - struct work_struct work; - } work; - const char *no_fbc_reason; }; @@ -608,26 +614,17 @@ struct i915_psr { bool sink_support; struct intel_dp *enabled; bool active; - struct delayed_work work; + struct work_struct work; unsigned busy_frontbuffer_bits; bool sink_psr2_support; bool link_standby; bool colorimetry_support; bool alpm; - bool has_hw_tracking; bool psr2_enabled; u8 sink_sync_latency; bool debug; ktime_t last_entry_attempt; ktime_t last_exit; - - void (*enable_source)(struct intel_dp *, - const struct intel_crtc_state *); - void (*disable_source)(struct intel_dp *, - const struct intel_crtc_state *); - void (*enable_sink)(struct intel_dp *); - void (*activate)(struct intel_dp *); - void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *); }; enum intel_pch { @@ -639,7 +636,7 @@ enum intel_pch { PCH_KBP, /* Kaby Lake PCH */ PCH_CNP, /* Cannon Lake PCH */ PCH_ICP, /* Ice Lake PCH */ - PCH_NOP, + PCH_NOP, /* PCH without south display */ }; enum intel_sbi_destination { @@ -782,11 +779,17 @@ struct intel_rps { u8 rp0_freq; /* Non-overclocked max frequency. 
*/ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ - u8 up_threshold; /* Current %busy required to uplock */ - u8 down_threshold; /* Current %busy required to downclock */ - int last_adj; - enum { LOW_POWER, BETWEEN, HIGH_POWER } power; + + struct { + struct mutex mutex; + + enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; + unsigned int interactive; + + u8 up_threshold; /* Current %busy required to uplock */ + u8 down_threshold; /* Current %busy required to downclock */ + } power; bool enabled; atomic_t num_waiters; @@ -955,7 +958,7 @@ struct i915_gem_mm { /** * Small stash of WC pages */ - struct pagevec wc_stash; + struct pagestash wc_stash; /** * tmpfs instance used for shmem backed objects @@ -1003,16 +1006,13 @@ struct i915_gem_mm { #define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */ #define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */ -enum modeset_restore { - MODESET_ON_LID_OPEN, - MODESET_DONE, - MODESET_SUSPENDED, -}; +#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ #define DP_AUX_A 0x40 #define DP_AUX_B 0x10 #define DP_AUX_C 0x20 #define DP_AUX_D 0x30 +#define DP_AUX_E 0x50 #define DP_AUX_F 0x60 #define DDC_PIN_B 0x05 @@ -1057,9 +1057,9 @@ struct intel_vbt_data { /* Feature bits */ unsigned int int_tv_support:1; unsigned int lvds_dither:1; - unsigned int lvds_vbt:1; unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; + unsigned int int_lvds_support:1; unsigned int display_clock_mode:1; unsigned int fdi_rx_polarity_inverted:1; unsigned int panel_type:4; @@ -1075,7 +1075,6 @@ struct intel_vbt_data { int vswing; bool low_vswing; bool initialized; - bool support; int bpp; struct edp_power_seq pps; } edp; @@ -1086,8 +1085,8 @@ struct intel_vbt_data { bool require_aux_wakeup; int idle_frames; enum psr_lines_to_wait lines_to_wait; - int tp1_wakeup_time; - int tp2_tp3_wakeup_time; + int tp1_wakeup_time_us; + int tp2_tp3_wakeup_time_us; } psr; struct { @@ -1272,20 +1271,11 @@ enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_MAX, }; -struct intel_pipe_crc_entry { - uint32_t frame; - uint32_t crc[5]; -}; - #define INTEL_PIPE_CRC_ENTRIES_NR 128 struct intel_pipe_crc { spinlock_t lock; - bool opened; /* exclusive access to the result file */ - struct intel_pipe_crc_entry *entries; - enum intel_pipe_crc_source source; - int head, tail; - wait_queue_head_t wq; int skipped; + enum intel_pipe_crc_source source; }; struct i915_frontbuffer_tracking { @@ -1300,7 +1290,7 @@ struct i915_frontbuffer_tracking { }; struct i915_wa_reg { - i915_reg_t addr; + u32 addr; u32 value; /* bitmask representing WA bits */ u32 mask; @@ -1740,12 +1730,9 @@ struct drm_i915_private { unsigned long quirks; - enum modeset_restore modeset_restore; - struct mutex modeset_restore_lock; struct drm_atomic_state *modeset_restore_state; struct drm_modeset_acquire_ctx reset_ctx; - struct list_head vm_list; /* Global list of all address spaces */ struct i915_ggtt ggtt; /* VM representing the global address space */ struct i915_gem_mm mm; @@ -1851,6 +1838,7 @@ struct drm_i915_private { */ struct ida hw_ida; #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ +#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ #define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ } contexts; @@ -1958,7 +1946,9 @@ struct drm_i915_private { */ struct i915_perf_stream *exclusive_stream; + struct intel_context *pinned_ctx; u32 specific_ctx_id; + u32 specific_ctx_id_mask; struct hrtimer poll_check_timer; wait_queue_head_t poll_wq; @@ -2311,6 +2301,7 @@ 
intel_info(const struct drm_i915_private *dev_priv) } #define INTEL_INFO(dev_priv) intel_info((dev_priv)) +#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps) #define INTEL_GEN(dev_priv) ((dev_priv)->info.gen) #define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id) @@ -2563,17 +2554,10 @@ intel_info(const struct drm_i915_private *dev_priv) (IS_CANNONLAKE(dev_priv) || \ IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) -/* - * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts - * even when in MSI mode. This results in spurious interrupt warnings if the - * legacy irq no. is shared with another device. The kernel then disables that - * interrupt source and so prevents the other device from working properly. - * - * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX - * interrupts. - */ -#define HAS_AUX_IRQ(dev_priv) true #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) +#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \ + IS_GEMINILAKE(dev_priv) || \ + IS_KABYLAKE(dev_priv)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. @@ -2748,14 +2732,14 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int intel_engines_init_mmio(struct drm_i915_private *dev_priv); int intel_engines_init(struct drm_i915_private *dev_priv); +u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); + /* intel_hotplug.c */ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); -enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv, - enum hpd_pin pin); enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, enum port port); bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); @@ -3102,9 +3086,6 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) } int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); -void i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags); int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -3169,12 +3150,14 @@ void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); +void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, - unsigned int flags); + unsigned int flags, long timeout); int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); +void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); -int i915_gem_fault(struct vm_fault *vmf); +vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, long timeout, @@ -3213,7 +3196,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, static inline struct i915_hw_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { - return container_of(vm, struct i915_hw_ppgtt, base); + 
return container_of(vm, struct i915_hw_ppgtt, vm); } /* i915_gem_fence_reg.c */ @@ -3320,7 +3303,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); void i915_gem_shrinker_register(struct drm_i915_private *i915); void i915_gem_shrinker_unregister(struct drm_i915_private *i915); - +void i915_gem_shrinker_taints_mutex(struct mutex *mutex); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) @@ -3445,6 +3428,8 @@ extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); +extern void intel_rps_mark_interactive(struct drm_i915_private *i915, + bool interactive); extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); @@ -3678,14 +3663,6 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); } -static inline unsigned long -timespec_to_jiffies_timeout(const struct timespec *value) -{ - unsigned long j = timespec_to_jiffies(value); - - return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); -} - /* * If you need to wait X milliseconds between events A and B, but event B * doesn't happen exactly after event A, you record the timestamp (jiffies) of diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 17c5097721e8..fcc73a6ab503 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size) { memset(node, 0, sizeof(*node)); - return drm_mm_insert_node_in_range(&ggtt->base.mm, node, + return drm_mm_insert_node_in_range(&ggtt->vm.mm, node, size, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); @@ -139,6 +139,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static u32 __i915_gem_park(struct drm_i915_private *i915) { + GEM_TRACE("\n"); + lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(i915->gt.active_requests); GEM_BUG_ON(!list_empty(&i915->gt.active_rings)); @@ -181,6 +183,8 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) void i915_gem_park(struct drm_i915_private *i915) { + GEM_TRACE("\n"); + lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(i915->gt.active_requests); @@ -193,6 +197,8 @@ void i915_gem_park(struct drm_i915_private *i915) void i915_gem_unpark(struct drm_i915_private *i915) { + GEM_TRACE("\n"); + lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!i915->gt.active_requests); @@ -243,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct i915_vma *vma; u64 pinned; - pinned = ggtt->base.reserved; + pinned = ggtt->vm.reserved; mutex_lock(&dev->struct_mutex); - list_for_each_entry(vma, &ggtt->base.active_list, vm_link) + list_for_each_entry(vma, &ggtt->vm.active_list, vm_link) if (i915_vma_is_pinned(vma)) pinned += vma->node.size; - list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) + list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link) if (i915_vma_is_pinned(vma)) pinned += vma->node.size; mutex_unlock(&dev->struct_mutex); - args->aper_size = ggtt->base.total; + args->aper_size = ggtt->vm.total; args->aper_available_size = args->aper_size - pinned; return 0; @@ 
-796,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) * that was!). */ - wmb(); + i915_gem_chipset_flush(dev_priv); intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->uncore.lock); @@ -831,6 +837,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) } break; + case I915_GEM_DOMAIN_WC: + wmb(); + break; + case I915_GEM_DOMAIN_CPU: i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); break; @@ -1217,9 +1227,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, page_length = remain < page_length ? remain : page_length; if (node.allocated) { wmb(); - ggtt->base.insert_page(&ggtt->base, - i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), - node.start, I915_CACHE_NONE, 0); + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, I915_CACHE_NONE, 0); wmb(); } else { page_base += offset & PAGE_MASK; @@ -1240,8 +1250,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, out_unpin: if (node.allocated) { wmb(); - ggtt->base.clear_range(&ggtt->base, - node.start, node.size); + ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { i915_vma_unpin(vma); @@ -1420,9 +1429,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, page_length = remain < page_length ? remain : page_length; if (node.allocated) { wmb(); /* flush the write before we modify the GGTT */ - ggtt->base.insert_page(&ggtt->base, - i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), - node.start, I915_CACHE_NONE, 0); + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), + node.start, I915_CACHE_NONE, 0); wmb(); /* flush modifications to the GGTT (insert_page) */ } else { page_base += offset & PAGE_MASK; @@ -1449,8 +1458,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, out_unpin: if (node.allocated) { wmb(); - ggtt->base.clear_range(&ggtt->base, - node.start, node.size); + ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { i915_vma_unpin(vma); @@ -1619,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto err; } + /* Writes not allowed into this read-only object */ + if (i915_gem_object_is_readonly(obj)) { + ret = -EINVAL; + goto err; + } + trace_i915_gem_object_pwrite(obj, args->offset, args->size); ret = -ENODEV; @@ -1991,9 +2005,9 @@ compute_partial_view(struct drm_i915_gem_object *obj, * The current feature set supported by i915_gem_fault() and thus GTT mmaps * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). */ -int i915_gem_fault(struct vm_fault *vmf) +vm_fault_t i915_gem_fault(struct vm_fault *vmf) { -#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ +#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT) struct vm_area_struct *area = vmf->vma; struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; @@ -2004,6 +2018,10 @@ int i915_gem_fault(struct vm_fault *vmf) pgoff_t page_offset; int ret; + /* Sanity check that we allow writing into this object */ + if (i915_gem_object_is_readonly(obj) && write) + return VM_FAULT_SIGBUS; + /* We don't use vmf->pgoff since that has the fake offset */ page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT; @@ -2114,10 +2132,9 @@ err: * fail). But any other -EIO isn't ours (e.g. swap in failure) * and so needs to be reported. 
*/ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) { - ret = VM_FAULT_SIGBUS; - break; - } + if (!i915_terminally_wedged(&dev_priv->gpu_error)) + return VM_FAULT_SIGBUS; + /* else: fall through */ case -EAGAIN: /* * EAGAIN means the gpu is hung and we'll wait for the error @@ -2132,21 +2149,16 @@ err: * EBUSY is ok: this just means that another thread * already did the job. */ - ret = VM_FAULT_NOPAGE; - break; + return VM_FAULT_NOPAGE; case -ENOMEM: - ret = VM_FAULT_OOM; - break; + return VM_FAULT_OOM; case -ENOSPC: case -EFAULT: - ret = VM_FAULT_SIGBUS; - break; + return VM_FAULT_SIGBUS; default: WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); - ret = VM_FAULT_SIGBUS; - break; + return VM_FAULT_SIGBUS; } - return ret; } static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) @@ -2265,7 +2277,9 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) /* Attempt to reap some mmap space from dead objects */ do { - err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); + err = i915_gem_wait_for_idle(dev_priv, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (err) break; @@ -2410,29 +2424,15 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) rcu_read_unlock(); } -void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, - enum i915_mm_subclass subclass) +static struct sg_table * +__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *pages; - if (i915_gem_object_has_pinned_pages(obj)) - return; - - GEM_BUG_ON(obj->bind_count); - if (!i915_gem_object_has_pages(obj)) - return; - - /* May be called by shrinker from within get_pages() (on another bo) */ - mutex_lock_nested(&obj->mm.lock, subclass); - if (unlikely(atomic_read(&obj->mm.pages_pin_count))) - goto unlock; - - /* ->put_pages might need to allocate memory for the bit17 swizzle - * array, hence protect them from being reaped by removing them from gtt - * lists early. */ pages = fetch_and_zero(&obj->mm.pages); - GEM_BUG_ON(!pages); + if (!pages) + return NULL; spin_lock(&i915->mm.obj_lock); list_del(&obj->mm.link); @@ -2451,12 +2451,37 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, } __i915_gem_object_reset_page_iter(obj); + obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; + + return pages; +} + +void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, + enum i915_mm_subclass subclass) +{ + struct sg_table *pages; + if (i915_gem_object_has_pinned_pages(obj)) + return; + + GEM_BUG_ON(obj->bind_count); + if (!i915_gem_object_has_pages(obj)) + return; + + /* May be called by shrinker from within get_pages() (on another bo) */ + mutex_lock_nested(&obj->mm.lock, subclass); + if (unlikely(atomic_read(&obj->mm.pages_pin_count))) + goto unlock; + + /* + * ->put_pages might need to allocate memory for the bit17 swizzle + * array, hence protect them from being reaped by removing them from gtt + * lists early. 
+ */ + pages = __i915_gem_object_unset_pages(obj); if (!IS_ERR(pages)) obj->ops->put_pages(obj, pages); - obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; - unlock: mutex_unlock(&obj->mm.lock); } @@ -2974,16 +2999,16 @@ static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; - DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n", - ctx->name, atomic_read(&ctx->guilty_count), - score, yesno(banned && bannable)); - /* Cool contexts don't accumulate client ban score */ if (!bannable) return; - if (banned) + if (banned) { + DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", + ctx->name, atomic_read(&ctx->guilty_count), + score); i915_gem_context_set_banned(ctx); + } if (!IS_ERR_OR_NULL(ctx->file_priv)) i915_gem_client_mark_guilty(ctx->file_priv, ctx); @@ -3031,7 +3056,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine) struct i915_request * i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) { - struct i915_request *request = NULL; + struct i915_request *request; /* * During the reset sequence, we must prevent the engine from @@ -3042,52 +3067,7 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) */ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); - /* - * Prevent the signaler thread from updating the request - * state (by calling dma_fence_signal) as we are processing - * the reset. The write from the GPU of the seqno is - * asynchronous and the signaler thread may see a different - * value to us and declare the request complete, even though - * the reset routine have picked that request as the active - * (incomplete) request. This conflict is not handled - * gracefully! - */ - kthread_park(engine->breadcrumbs.signaler); - - /* - * Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its execlists->tasklet *just* as we are - * calling engine->init_hw() and also writing the ELSP. - * Turning off the execlists->tasklet until the reset is over - * prevents the race. - * - * Note that this needs to be a single atomic operation on the - * tasklet (flush existing tasks, prevent new tasks) to prevent - * a race between reset and set-wedged. It is not, so we do the best - * we can atm and make sure we don't lock the machine up in the more - * common case of recursively being called from set-wedged from inside - * i915_reset. - */ - if (!atomic_read(&engine->execlists.tasklet.count)) - tasklet_kill(&engine->execlists.tasklet); - tasklet_disable(&engine->execlists.tasklet); - - /* - * We're using worker to queue preemption requests from the tasklet in - * GuC submission mode. - * Even though tasklet was disabled, we may still have a worker queued. - * Let's make sure that all workers scheduled before disabling the - * tasklet are completed before continuing with the reset. - */ - if (engine->i915->guc.preempt_wq) - flush_workqueue(engine->i915->guc.preempt_wq); - - if (engine->irq_seqno_barrier) - engine->irq_seqno_barrier(engine); - - request = i915_gem_find_active_request(engine); + request = engine->reset.prepare(engine); if (request && request->fence.error == -EIO) request = ERR_PTR(-EIO); /* Previous reset failed! 
*/ @@ -3117,43 +3097,24 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) return err; } -static void skip_request(struct i915_request *request) -{ - void *vaddr = request->ring->vaddr; - u32 head; - - /* As this request likely depends on state from the lost - * context, clear out all the user operations leaving the - * breadcrumb at the end (so we get the fence notifications). - */ - head = request->head; - if (request->postfix < head) { - memset(vaddr + head, 0, request->ring->size - head); - head = 0; - } - memset(vaddr + head, 0, request->postfix - head); - - dma_fence_set_error(&request->fence, -EIO); -} - static void engine_skip_context(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct i915_gem_context *hung_ctx = request->ctx; + struct i915_gem_context *hung_ctx = request->gem_context; struct i915_timeline *timeline = request->timeline; unsigned long flags; GEM_BUG_ON(timeline == &engine->timeline); spin_lock_irqsave(&engine->timeline.lock, flags); - spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING); + spin_lock(&timeline->lock); list_for_each_entry_continue(request, &engine->timeline.requests, link) - if (request->ctx == hung_ctx) - skip_request(request); + if (request->gem_context == hung_ctx) + i915_request_skip(request, -EIO); list_for_each_entry(request, &timeline->requests, link) - skip_request(request); + i915_request_skip(request, -EIO); spin_unlock(&timeline->lock); spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -3195,11 +3156,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine, } if (stalled) { - i915_gem_context_mark_guilty(request->ctx); - skip_request(request); + i915_gem_context_mark_guilty(request->gem_context); + i915_request_skip(request, -EIO); /* If this context is now banned, skip all pending requests. 
*/ - if (i915_gem_context_is_banned(request->ctx)) + if (i915_gem_context_is_banned(request->gem_context)) engine_skip_context(request); } else { /* @@ -3209,15 +3170,17 @@ i915_gem_reset_request(struct intel_engine_cs *engine, */ request = i915_gem_find_active_request(engine); if (request) { - i915_gem_context_mark_innocent(request->ctx); + unsigned long flags; + + i915_gem_context_mark_innocent(request->gem_context); dma_fence_set_error(&request->fence, -EAGAIN); /* Rewind the engine to replay the incomplete rq */ - spin_lock_irq(&engine->timeline.lock); + spin_lock_irqsave(&engine->timeline.lock, flags); request = list_prev_entry(request, link); if (&request->link == &engine->timeline.requests) request = NULL; - spin_unlock_irq(&engine->timeline.lock); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } } @@ -3238,13 +3201,8 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine, if (request) request = i915_gem_reset_request(engine, request, stalled); - if (request) { - DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", - engine->name, request->global_seqno); - } - /* Setup the CS to resume from the breadcrumb of the hung request */ - engine->reset_hw(engine, request); + engine->reset.reset(engine, request); } void i915_gem_reset(struct drm_i915_private *dev_priv, @@ -3258,14 +3216,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, i915_retire_requests(dev_priv); for_each_engine(engine, dev_priv, id) { - struct i915_gem_context *ctx; + struct intel_context *ce; i915_gem_reset_engine(engine, engine->hangcheck.active_request, stalled_mask & ENGINE_MASK(id)); - ctx = fetch_and_zero(&engine->last_retired_context); - if (ctx) - intel_context_unpin(ctx, engine); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); /* * Ostensibily, we always want a context loaded for powersaving, @@ -3283,7 +3241,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, rq = i915_request_alloc(engine, dev_priv->kernel_context); if (!IS_ERR(rq)) - __i915_request_add(rq, false); + i915_request_add(rq); } } @@ -3292,8 +3250,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv, void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) { - tasklet_enable(&engine->execlists.tasklet); - kthread_unpark(engine->breadcrumbs.signaler); + engine->reset.finish(engine); intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); } @@ -3571,6 +3528,22 @@ new_requests_since_last_retire(const struct drm_i915_private *i915) work_pending(&i915->gt.idle_work.work)); } +static void assert_kernel_context_is_current(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + if (i915_terminally_wedged(&i915->gpu_error)) + return; + + GEM_BUG_ON(i915->gt.active_requests); + for_each_engine(engine, i915, id) { + GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request)); + GEM_BUG_ON(engine->last_retired_context != + to_intel_context(i915->kernel_context, engine)); + } +} + static void i915_gem_idle_work_handler(struct work_struct *work) { @@ -3582,6 +3555,24 @@ i915_gem_idle_work_handler(struct work_struct *work) if (!READ_ONCE(dev_priv->gt.awake)) return; + if (READ_ONCE(dev_priv->gt.active_requests)) + return; + + /* + * Flush out the last user context, leaving only the pinned + * kernel context resident. When we are idling on the kernel_context, + * no more new requests (with a context switch) are emitted and we + * can finally rest. 
A consequence is that the idle work handler is + * always called at least twice before idling (and if the system is + * idle that implies a round trip through the retire worker). + */ + mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_switch_to_kernel_context(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + + GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n", + READ_ONCE(dev_priv->gt.active_requests)); + /* * Wait for last execlists context complete, but bail out in case a * new request is submitted. As we don't trust the hardware, we @@ -3615,6 +3606,8 @@ i915_gem_idle_work_handler(struct work_struct *work) epoch = __i915_gem_park(dev_priv); + assert_kernel_context_is_current(dev_priv); + rearm_hangcheck = false; out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -3761,9 +3754,31 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) return ret; } -static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) +static long wait_for_timeline(struct i915_timeline *tl, + unsigned int flags, long timeout) { - return i915_gem_active_wait(&tl->last_request, flags); + struct i915_request *rq; + + rq = i915_gem_active_get_unlocked(&tl->last_request); + if (!rq) + return timeout; + + /* + * "Race-to-idle". + * + * Switching to the kernel context is often used a synchronous + * step prior to idling, e.g. in suspend for flushing all + * current operations to memory before sleeping. These we + * want to complete as quickly as possible to avoid prolonged + * stalls, so allow the gpu to boost to maximum clocks. + */ + if (flags & I915_WAIT_FOR_IDLE_BOOST) + gen6_rps_boost(rq, NULL); + + timeout = i915_request_wait(rq, flags, timeout); + i915_request_put(rq); + + return timeout; } static int wait_for_engines(struct drm_i915_private *i915) @@ -3779,8 +3794,13 @@ static int wait_for_engines(struct drm_i915_private *i915) return 0; } -int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) +int i915_gem_wait_for_idle(struct drm_i915_private *i915, + unsigned int flags, long timeout) { + GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", + flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", + timeout, timeout == MAX_SCHEDULE_TIMEOUT ? 
" (forever)" : ""); + /* If the device is asleep, we have no requests outstanding */ if (!READ_ONCE(i915->gt.awake)) return 0; @@ -3792,26 +3812,31 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) lockdep_assert_held(&i915->drm.struct_mutex); list_for_each_entry(tl, &i915->gt.timelines, link) { - err = wait_for_timeline(tl, flags); - if (err) - return err; + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; } - i915_retire_requests(i915); - return wait_for_engines(i915); + err = wait_for_engines(i915); + if (err) + return err; + + i915_retire_requests(i915); + GEM_BUG_ON(i915->gt.active_requests); } else { struct intel_engine_cs *engine; enum intel_engine_id id; - int err; for_each_engine(engine, i915, id) { - err = wait_for_timeline(&engine->timeline, flags); - if (err) - return err; - } + struct i915_timeline *tl = &engine->timeline; - return 0; + timeout = wait_for_timeline(tl, flags, timeout); + if (timeout < 0) + return timeout; + } } + + return 0; } static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) @@ -4385,7 +4410,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 flags) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_address_space *vm = &dev_priv->ggtt.base; + struct i915_address_space *vm = &dev_priv->ggtt.vm; struct i915_vma *vma; int ret; @@ -4973,25 +4998,25 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) i915_gem_object_put(obj); } -static void assert_kernel_context_is_current(struct drm_i915_private *i915) +void i915_gem_sanitize(struct drm_i915_private *i915) { - struct i915_gem_context *kernel_context = i915->kernel_context; - struct intel_engine_cs *engine; - enum intel_engine_id id; + int err; - for_each_engine(engine, i915, id) { - GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request)); - GEM_BUG_ON(engine->last_retired_context != kernel_context); - } -} + GEM_TRACE("\n"); -void i915_gem_sanitize(struct drm_i915_private *i915) -{ - if (i915_terminally_wedged(&i915->gpu_error)) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&i915->drm.struct_mutex); + + intel_runtime_pm_get(i915); + intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + + /* + * As we have just resumed the machine and woken the device up from + * deep PCI sleep (presumably D3_cold), assume the HW has been reset + * back to defaults, recovering from whatever wedged state we left it + * in and so worth trying to use the device once more. + */ + if (i915_terminally_wedged(&i915->gpu_error)) i915_gem_unset_wedged(i915); - mutex_unlock(&i915->drm.struct_mutex); - } /* * If we inherit context state from the BIOS or earlier occupants @@ -5001,60 +5026,94 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. 
*/ + err = -ENODEV; if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915)) - WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); + err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); + if (!err) + intel_engines_sanitize(i915); + + intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_runtime_pm_put(i915); + + i915_gem_contexts_lost(i915); + mutex_unlock(&i915->drm.struct_mutex); } -int i915_gem_suspend(struct drm_i915_private *dev_priv) +int i915_gem_suspend(struct drm_i915_private *i915) { - struct drm_device *dev = &dev_priv->drm; int ret; - intel_runtime_pm_get(dev_priv); - intel_suspend_gt_powersave(dev_priv); + GEM_TRACE("\n"); - mutex_lock(&dev->struct_mutex); + intel_runtime_pm_get(i915); + intel_suspend_gt_powersave(i915); - /* We have to flush all the executing contexts to main memory so + mutex_lock(&i915->drm.struct_mutex); + + /* + * We have to flush all the executing contexts to main memory so * that they can saved in the hibernation image. To ensure the last * context image is coherent, we have to switch away from it. That - * leaves the dev_priv->kernel_context still active when + * leaves the i915->kernel_context still active when * we actually suspend, and its image in memory may not match the GPU * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. */ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) { - ret = i915_gem_switch_to_kernel_context(dev_priv); + if (!i915_terminally_wedged(&i915->gpu_error)) { + ret = i915_gem_switch_to_kernel_context(i915); if (ret) goto err_unlock; - ret = i915_gem_wait_for_idle(dev_priv, + ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + MAX_SCHEDULE_TIMEOUT); if (ret && ret != -EIO) goto err_unlock; - assert_kernel_context_is_current(dev_priv); + assert_kernel_context_is_current(i915); } - i915_gem_contexts_lost(dev_priv); - mutex_unlock(&dev->struct_mutex); + i915_retire_requests(i915); /* ensure we flush after wedging */ + + mutex_unlock(&i915->drm.struct_mutex); - intel_uc_suspend(dev_priv); + intel_uc_suspend(i915); - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); - cancel_delayed_work_sync(&dev_priv->gt.retire_work); + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gt.retire_work); - /* As the idle_work is rearming if it detects a race, play safe and + /* + * As the idle_work is rearming if it detects a race, play safe and * repeat the flush until it is definitely idle. */ - drain_delayed_work(&dev_priv->gt.idle_work); + drain_delayed_work(&i915->gt.idle_work); - /* Assert that we sucessfully flushed all the work and + /* + * Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. 
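i915_gem_suspend() is being reduced to quiescing the GPU (switch to the kernel context, boosted wait for idle, drain the workers), while the object-domain flush and final sanitize move into the new i915_gem_suspend_late() defined just below. A rough sketch of the intended ordering from the caller's side; the real call sites are outside this hunk and the wrapper name here is hypothetical.

        /* Sketch of the expected ordering (hypothetical caller). */
        static int example_pm_suspend(struct drm_i915_private *i915)
        {
                int err;

                err = i915_gem_suspend(i915);   /* idle the GPU, park workers */
                if (err)
                        return err;

                /* ... display off, interrupts disabled, etc. ... */

                i915_gem_suspend_late(i915);    /* flush object domains, sanitize */
                return 0;
        }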
*/ - WARN_ON(dev_priv->gt.awake); - if (WARN_ON(!intel_engines_are_idle(dev_priv))) - i915_gem_set_wedged(dev_priv); /* no hope, discard everything */ + WARN_ON(i915->gt.awake); + if (WARN_ON(!intel_engines_are_idle(i915))) + i915_gem_set_wedged(i915); /* no hope, discard everything */ + + intel_runtime_pm_put(i915); + return 0; + +err_unlock: + mutex_unlock(&i915->drm.struct_mutex); + intel_runtime_pm_put(i915); + return ret; +} + +void i915_gem_suspend_late(struct drm_i915_private *i915) +{ + struct drm_i915_gem_object *obj; + struct list_head *phases[] = { + &i915->mm.unbound_list, + &i915->mm.bound_list, + NULL + }, **phase; /* * Neither the BIOS, ourselves or any other kernel @@ -5075,20 +5134,22 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machines is a good idea, we don't - just in case it leaves the * machine in an unusable condition. */ - intel_uc_sanitize(dev_priv); - i915_gem_sanitize(dev_priv); - intel_runtime_pm_put(dev_priv); - return 0; + mutex_lock(&i915->drm.struct_mutex); + for (phase = phases; *phase; phase++) { + list_for_each_entry(obj, *phase, mm.link) + WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); + } + mutex_unlock(&i915->drm.struct_mutex); -err_unlock: - mutex_unlock(&dev->struct_mutex); - intel_runtime_pm_put(dev_priv); - return ret; + intel_uc_sanitize(i915); + i915_gem_sanitize(i915); } void i915_gem_resume(struct drm_i915_private *i915) { + GEM_TRACE("\n"); + WARN_ON(i915->gt.awake); mutex_lock(&i915->drm.struct_mutex); @@ -5262,8 +5323,18 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) /* Only when the HW is re-initialised, can we replay the requests */ ret = __i915_gem_restart_engines(dev_priv); + if (ret) + goto cleanup_uc; + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + + return 0; + +cleanup_uc: + intel_uc_fini_hw(dev_priv); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + return ret; } @@ -5300,7 +5371,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (engine->init_context) err = engine->init_context(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (err) goto err_active; } @@ -5309,9 +5380,11 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (err) goto err_active; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); - if (err) + if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) { + i915_gem_set_wedged(i915); + err = -EIO; /* Caller will declare us wedged */ goto err_active; + } assert_kernel_context_is_current(i915); @@ -5374,7 +5447,9 @@ err_active: if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) goto out_ctx; - if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED))) + if (WARN_ON(i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT))) goto out_ctx; i915_gem_contexts_lost(i915); @@ -5385,12 +5460,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) { int ret; - /* - * We need to fallback to 4K pages since gvt gtt handling doesn't - * support huge page entries - we will need to check either hypervisor - * mm can support huge guest page or just do emulation in gvt. - */ - if (intel_vgpu_active(dev_priv)) + /* We need to fallback to 4K pages if host doesn't support huge gtt. 
*/ + if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) mkwrite_device_info(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; @@ -5408,13 +5479,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = intel_wopcm_init(&dev_priv->wopcm); + ret = intel_uc_init_misc(dev_priv); if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); + ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) - return ret; + goto err_uc_misc; /* This is just a security blanket to placate dragons. * On some systems, we very sporadically observe that the first TLBs @@ -5490,8 +5561,14 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * driver doesn't explode during runtime. */ err_init_hw: - i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED); - i915_gem_contexts_lost(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + + WARN_ON(i915_gem_suspend(dev_priv)); + i915_gem_suspend_late(dev_priv); + + i915_gem_drain_workqueue(dev_priv); + + mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(dev_priv); err_uc_init: intel_uc_fini(dev_priv); @@ -5508,6 +5585,7 @@ err_unlock: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); +err_uc_misc: intel_uc_fini_misc(dev_priv); if (ret != -EIO) @@ -5520,7 +5598,8 @@ err_unlock: * for all other failure, such as an allocation failure, bail. */ if (!i915_terminally_wedged(&dev_priv->gpu_error)) { - DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); + i915_load_error(dev_priv, + "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); } ret = 0; @@ -5530,6 +5609,28 @@ err_unlock: return ret; } +void i915_gem_fini(struct drm_i915_private *dev_priv) +{ + i915_gem_suspend_late(dev_priv); + + /* Flush any outstanding unpin_work. */ + i915_gem_drain_workqueue(dev_priv); + + mutex_lock(&dev_priv->drm.struct_mutex); + intel_uc_fini_hw(dev_priv); + intel_uc_fini(dev_priv); + i915_gem_cleanup_engines(dev_priv); + i915_gem_contexts_fini(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + + intel_uc_fini_misc(dev_priv); + i915_gem_cleanup_userptr(dev_priv); + + i915_gem_drain_freed_objects(dev_priv); + + WARN_ON(!list_empty(&dev_priv->contexts.list)); +} + void i915_gem_init_mmio(struct drm_i915_private *i915) { i915_gem_sanitize(i915); @@ -5694,16 +5795,17 @@ int i915_gem_freeze(struct drm_i915_private *dev_priv) return 0; } -int i915_gem_freeze_late(struct drm_i915_private *dev_priv) +int i915_gem_freeze_late(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; struct list_head *phases[] = { - &dev_priv->mm.unbound_list, - &dev_priv->mm.bound_list, + &i915->mm.unbound_list, + &i915->mm.bound_list, NULL - }, **p; + }, **phase; - /* Called just before we write the hibernation image. + /* + * Called just before we write the hibernation image. 
* * We need to update the domain tracking to reflect that the CPU * will be accessing all the pages to create and restore from the @@ -5717,15 +5819,15 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv) * the objects as well, see i915_gem_freeze() */ - i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); - i915_gem_drain_freed_objects(dev_priv); + i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND); + i915_gem_drain_freed_objects(i915); - spin_lock(&dev_priv->mm.obj_lock); - for (p = phases; *p; p++) { - list_for_each_entry(obj, *p, mm.link) - __start_cpu_write(obj); + mutex_lock(&i915->drm.struct_mutex); + for (phase = phases; *phase; phase++) { + list_for_each_entry(obj, *phase, mm.link) + WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true)); } - spin_unlock(&dev_priv->mm.obj_lock); + mutex_unlock(&i915->drm.struct_mutex); return 0; } @@ -6045,16 +6147,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) goto err_unlock; } - pages = fetch_and_zero(&obj->mm.pages); - if (pages) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - __i915_gem_object_reset_page_iter(obj); - - spin_lock(&i915->mm.obj_lock); - list_del(&obj->mm.link); - spin_unlock(&i915->mm.obj_lock); - } + pages = __i915_gem_object_unset_pages(obj); obj->ops = &i915_gem_phys_ops; @@ -6072,7 +6165,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) err_xfer: obj->ops = &i915_gem_object_ops; - obj->mm.pages = pages; + if (!IS_ERR_OR_NULL(pages)) { + unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl); + + __i915_gem_object_set_pages(obj, pages, sg_page_sizes); + } err_unlock: mutex_unlock(&obj->mm.lock); return err; diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 525920404ede..e46592956872 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -26,6 +26,7 @@ #define __I915_GEM_H__ #include <linux/bug.h> +#include <linux/interrupt.h> struct drm_i915_private; @@ -62,9 +63,12 @@ struct drm_i915_private; #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) #define GEM_TRACE(...) trace_printk(__VA_ARGS__) #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL) +#define GEM_TRACE_DUMP_ON(expr) \ + do { if (expr) ftrace_dump(DUMP_ALL); } while (0) #else #define GEM_TRACE(...) 
do { } while (0) #define GEM_TRACE_DUMP() do { } while (0) +#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif #define I915_NUM_ENGINES 8 @@ -72,4 +76,21 @@ struct drm_i915_private; void i915_gem_park(struct drm_i915_private *i915); void i915_gem_unpark(struct drm_i915_private *i915); +static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) +{ + if (atomic_inc_return(&t->count) == 1) + tasklet_unlock_wait(t); +} + +static inline void __tasklet_enable_sync_once(struct tasklet_struct *t) +{ + if (atomic_dec_return(&t->count) == 0) + tasklet_kill(t); +} + +static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) +{ + return !atomic_read(&t->count); +} + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 060335d3d9e0..b10770cfccd2 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -127,14 +127,8 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { struct intel_context *ce = &ctx->__engine[n]; - if (!ce->state) - continue; - - WARN_ON(ce->pin_count); - if (ce->ring) - intel_ring_free(ce->ring); - - __i915_gem_object_release_unless_active(ce->state->obj); + if (ce->ops) + ce->ops->destroy(ce); } kfree(ctx->name); @@ -203,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx) */ lut_close(ctx); if (ctx->ppgtt) - i915_ppgtt_close(&ctx->ppgtt->base); + i915_ppgtt_close(&ctx->ppgtt->vm); ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_put(ctx); @@ -214,10 +208,19 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) int ret; unsigned int max; - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) { max = GEN11_MAX_CONTEXT_HW_ID; - else - max = MAX_CONTEXT_HW_ID; + } else { + /* + * When using GuC in proxy submission, GuC consumes the + * highest bit in the context id to indicate proxy submission. 
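Reserving the top bit of the context hardware ID for GuC proxy submission halves the range that ida_simple_get() may hand out. The real MAX_*_CONTEXT_HW_ID values are defined outside this hunk; the relationship is roughly as below, with the bit width shown only as an example.

        /* Example only: with an N-bit hw id field, the GuC proxy bit costs half. */
        #define EXAMPLE_CTX_ID_BITS           21
        #define EXAMPLE_MAX_CONTEXT_HW_ID     (1 << EXAMPLE_CTX_ID_BITS)
        #define EXAMPLE_MAX_GUC_CONTEXT_HW_ID (EXAMPLE_MAX_CONTEXT_HW_ID / 2)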
+ */ + if (USES_GUC_SUBMISSION(dev_priv)) + max = MAX_GUC_CONTEXT_HW_ID; + else + max = MAX_CONTEXT_HW_ID; + } + ret = ida_simple_get(&dev_priv->contexts.hw_ida, 0, max, GFP_KERNEL); @@ -246,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915, desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; address_mode = INTEL_LEGACY_32B_CONTEXT; - if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) + if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) address_mode = INTEL_LEGACY_64B_CONTEXT; desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; @@ -266,6 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, struct drm_i915_file_private *file_priv) { struct i915_gem_context *ctx; + unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -283,6 +287,12 @@ __create_hw_context(struct drm_i915_private *dev_priv, ctx->i915 = dev_priv; ctx->sched.priority = I915_PRIORITY_NORMAL; + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { + struct intel_context *ce = &ctx->__engine[n]; + + ce->gem_context = ctx; + } + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); @@ -364,7 +374,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, if (USES_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name); + ppgtt = i915_ppgtt_create(dev_priv, file_priv); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -502,8 +512,8 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) } DRM_DEBUG_DRIVER("%s context support initialized\n", - dev_priv->engine[RCS]->context_size ? "logical" : - "fake"); + DRIVER_CAPS(dev_priv)->has_logical_contexts ? + "logical" : "fake"); return 0; } @@ -514,16 +524,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) lockdep_assert_held(&dev_priv->drm.struct_mutex); - for_each_engine(engine, dev_priv, id) { - engine->legacy_active_context = NULL; - engine->legacy_active_ppgtt = NULL; - - if (!engine->last_retired_context) - continue; - - intel_context_unpin(engine->last_retired_context, engine); - engine->last_retired_context = NULL; - } + for_each_engine(engine, dev_priv, id) + intel_engine_lost_context(engine); } void i915_gem_contexts_fini(struct drm_i915_private *i915) @@ -583,68 +585,122 @@ last_request_on_engine(struct i915_timeline *timeline, { struct i915_request *rq; - if (timeline == &engine->timeline) - return NULL; + GEM_BUG_ON(timeline == &engine->timeline); rq = i915_gem_active_raw(&timeline->last_request, &engine->i915->drm.struct_mutex); - if (rq && rq->engine == engine) + if (rq && rq->engine == engine) { + GEM_TRACE("last request for %s on engine %s: %llx:%d\n", + timeline->name, engine->name, + rq->fence.context, rq->fence.seqno); + GEM_BUG_ON(rq->timeline != timeline); return rq; + } return NULL; } -static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine) +static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine) { - struct i915_timeline *timeline; + struct drm_i915_private *i915 = engine->i915; + const struct intel_context * const ce = + to_intel_context(i915->kernel_context, engine); + struct i915_timeline *barrier = ce->ring->timeline; + struct intel_ring *ring; + bool any_active = false; - list_for_each_entry(timeline, &engine->i915->gt.timelines, link) { - if (last_request_on_engine(timeline, engine)) + lockdep_assert_held(&i915->drm.struct_mutex); + list_for_each_entry(ring, &i915->gt.active_rings, active_link) { + struct i915_request *rq; + + rq = 
last_request_on_engine(ring->timeline, engine); + if (!rq) + continue; + + any_active = true; + + if (rq->hw_context == ce) + continue; + + /* + * Was this request submitted after the previous + * switch-to-kernel-context? + */ + if (!i915_timeline_sync_is_later(barrier, &rq->fence)) { + GEM_TRACE("%s needs barrier for %llx:%d\n", + ring->timeline->name, + rq->fence.context, + rq->fence.seqno); return false; + } + + GEM_TRACE("%s has barrier after %llx:%d\n", + ring->timeline->name, + rq->fence.context, + rq->fence.seqno); } - return intel_engine_has_kernel_context(engine); + /* + * If any other timeline was still active and behind the last barrier, + * then our last switch-to-kernel-context must still be queued and + * will run last (leaving the engine in the kernel context when it + * eventually idles). + */ + if (any_active) + return true; + + /* The engine is idle; check that it is idling in the kernel context. */ + return engine->last_retired_context == ce; } -int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) { struct intel_engine_cs *engine; - struct i915_timeline *timeline; enum intel_engine_id id; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake)); - i915_retire_requests(dev_priv); + lockdep_assert_held(&i915->drm.struct_mutex); + GEM_BUG_ON(!i915->kernel_context); + + i915_retire_requests(i915); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, i915, id) { + struct intel_ring *ring; struct i915_request *rq; - if (engine_has_idle_kernel_context(engine)) + GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine)); + if (engine_has_kernel_context_barrier(engine)) continue; - rq = i915_request_alloc(engine, dev_priv->kernel_context); + GEM_TRACE("emit barrier on %s\n", engine->name); + + rq = i915_request_alloc(engine, i915->kernel_context); if (IS_ERR(rq)) return PTR_ERR(rq); /* Queue this switch after all other activity */ - list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { + list_for_each_entry(ring, &i915->gt.active_rings, active_link) { struct i915_request *prev; - prev = last_request_on_engine(timeline, engine); - if (prev) - i915_sw_fence_await_sw_fence_gfp(&rq->submit, - &prev->submit, - I915_FENCE_GFP); + prev = last_request_on_engine(ring->timeline, engine); + if (!prev) + continue; + + if (prev->gem_context == i915->kernel_context) + continue; + + GEM_TRACE("add barrier on %s for %llx:%d\n", + engine->name, + prev->fence.context, + prev->fence.seqno); + i915_sw_fence_await_sw_fence_gfp(&rq->submit, + &prev->submit, + I915_FENCE_GFP); + i915_timeline_sync_set(rq->timeline, &prev->fence); } - /* - * Force a flush after the switch to ensure that all rendering - * and operations prior to switching to the kernel context hits - * memory. This should be guaranteed by the previous request, - * but an extra layer of paranoia before we declare the system - * idle (on suspend etc) is advisable! 
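Taken together, the switch-to-kernel-context rework above replaces the old unconditional flush with explicit barrier tracking: each emitted switch records the other timelines' last fences as sync points on the kernel request's timeline, and engine_has_kernel_context_barrier() later refuses to treat the engine as idle unless all newer activity is ordered behind that barrier. The two halves, condensed from the hunks above (no new code):

        /* Emit side: order the switch after prev and remember it as a barrier. */
        i915_sw_fence_await_sw_fence_gfp(&rq->submit, &prev->submit, I915_FENCE_GFP);
        i915_timeline_sync_set(rq->timeline, &prev->fence);

        /* Check side: any request not behind the barrier forces a new switch. */
        if (!i915_timeline_sync_is_later(barrier, &rq->fence))
                return false;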
- */ - __i915_request_add(rq, true); + i915_request_add(rq); } return 0; @@ -664,7 +720,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct i915_gem_context *ctx; int ret; - if (!dev_priv->engine[RCS]->context_size) + if (!DRIVER_CAPS(dev_priv)->has_logical_contexts) return -ENODEV; if (args->pad != 0) @@ -747,11 +803,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, break; case I915_CONTEXT_PARAM_GTT_SIZE: if (ctx->ppgtt) - args->value = ctx->ppgtt->base.total; + args->value = ctx->ppgtt->vm.total; else if (to_i915(dev)->mm.aliasing_ppgtt) - args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total; + args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; else - args->value = to_i915(dev)->ggtt.base.total; + args->value = to_i915(dev)->ggtt.vm.total; break; case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: args->value = i915_gem_context_no_error_capture(ctx); diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index ace3b129c189..b116e4942c10 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -30,6 +30,7 @@ #include <linux/radix-tree.h> #include "i915_gem.h" +#include "i915_scheduler.h" struct pid; @@ -45,6 +46,13 @@ struct intel_ring; #define DEFAULT_CONTEXT_HANDLE 0 +struct intel_context; + +struct intel_context_ops { + void (*unpin)(struct intel_context *ce); + void (*destroy)(struct intel_context *ce); +}; + /** * struct i915_gem_context - client state * @@ -144,11 +152,14 @@ struct i915_gem_context { /** engine: per-engine logical HW state */ struct intel_context { + struct i915_gem_context *gem_context; struct i915_vma *state; struct intel_ring *ring; u32 *lrc_reg_state; u64 lrc_desc; int pin_count; + + const struct intel_context_ops *ops; } __engine[I915_NUM_ENGINES]; /** ring_size: size for allocating the per-engine ring buffer */ @@ -263,25 +274,26 @@ to_intel_context(struct i915_gem_context *ctx, return &ctx->__engine[engine->id]; } -static inline struct intel_ring * +static inline struct intel_context * intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { return engine->context_pin(engine, ctx); } -static inline void __intel_context_pin(struct i915_gem_context *ctx, - const struct intel_engine_cs *engine) +static inline void __intel_context_pin(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - GEM_BUG_ON(!ce->pin_count); ce->pin_count++; } -static inline void intel_context_unpin(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static inline void intel_context_unpin(struct intel_context *ce) { - engine->context_unpin(engine, ctx); + GEM_BUG_ON(!ce->pin_count); + if (--ce->pin_count) + return; + + GEM_BUG_ON(!ce->ops); + ce->ops->unpin(ce); } /* i915_gem_context.c */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 69a7aec49e84..82e2ca17a441 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -111,15 +111,6 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) i915_gem_object_unpin_map(obj); } -static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) -{ - return NULL; -} - -static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr) -{ - -} static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) { struct drm_i915_gem_object *obj = 
dma_buf_to_obj(dma_buf); @@ -225,9 +216,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = { .unmap_dma_buf = i915_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .map = i915_gem_dmabuf_kmap, - .map_atomic = i915_gem_dmabuf_kmap_atomic, .unmap = i915_gem_dmabuf_kunmap, - .unmap_atomic = i915_gem_dmabuf_kunmap_atomic, .mmap = i915_gem_dmabuf_mmap, .vmap = i915_gem_dmabuf_vmap, .vunmap = i915_gem_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 54814a196ee4..02b83a5ed96c 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -69,7 +69,8 @@ static int ggtt_flush(struct drm_i915_private *i915) err = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 22df17c8ca9b..3f0c612d42e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -66,6 +66,15 @@ enum { #define __I915_EXEC_ILLEGAL_FLAGS \ (__I915_EXEC_UNKNOWN_FLAGS | I915_EXEC_CONSTANTS_MASK) +/* Catch emission of unexpected errors for CI! */ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +#undef EINVAL +#define EINVAL ({ \ + DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \ + 22; \ +}) +#endif + /** * DOC: User command execution * @@ -534,7 +543,8 @@ eb_add_vma(struct i915_execbuffer *eb, * paranoia do it everywhere. */ if (i == batch_idx) { - if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) + if (entry->relocation_count && + !(eb->flags[i] & EXEC_OBJECT_PINNED)) eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; if (eb->reloc_cache.has_fence) eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; @@ -723,7 +733,7 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->ctx = ctx; - eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base; + eb->vm = ctx->ppgtt ? 
&ctx->ppgtt->vm : &eb->i915->ggtt.vm; eb->context_flags = 0; if (ctx->flags & CONTEXT_NO_ZEROMAP) @@ -921,7 +931,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache) i915_gem_object_unpin_map(cache->rq->batch->obj); i915_gem_chipset_flush(cache->rq->i915); - __i915_request_add(cache->rq, true); + i915_request_add(cache->rq); cache->rq = NULL; } @@ -948,9 +958,9 @@ static void reloc_cache_reset(struct reloc_cache *cache) if (cache->node.allocated) { struct i915_ggtt *ggtt = cache_to_ggtt(cache); - ggtt->base.clear_range(&ggtt->base, - cache->node.start, - cache->node.size); + ggtt->vm.clear_range(&ggtt->vm, + cache->node.start, + cache->node.size); drm_mm_remove_node(&cache->node); } else { i915_vma_unpin((struct i915_vma *)cache->node.mm); @@ -1021,7 +1031,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) { memset(&cache->node, 0, sizeof(cache->node)); err = drm_mm_insert_node_in_range - (&ggtt->base.mm, &cache->node, + (&ggtt->vm.mm, &cache->node, PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); @@ -1042,9 +1052,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, offset = cache->node.start; if (cache->node.allocated) { wmb(); - ggtt->base.insert_page(&ggtt->base, - i915_gem_object_get_dma_address(obj, page), - offset, I915_CACHE_NONE, 0); + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, page), + offset, I915_CACHE_NONE, 0); } else { offset += page << PAGE_SHIFT; } @@ -1155,18 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_request; GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true)); - i915_vma_move_to_active(batch, rq, 0); - reservation_object_lock(batch->resv, NULL); - reservation_object_add_excl_fence(batch->resv, &rq->fence); - reservation_object_unlock(batch->resv); - i915_vma_unpin(batch); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; rq->batch = batch; + i915_vma_unpin(batch); cache->rq = rq; cache->rq_cmd = cmd; @@ -1175,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, /* Return with batch mapping (cmd) still pinned */ return 0; +skip_request: + i915_request_skip(rq, err); err_request: i915_request_add(rq); err_unpin: @@ -1761,25 +1771,6 @@ slow: return eb_relocate_slow(eb); } -static void eb_export_fence(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - struct reservation_object *resv = vma->resv; - - /* - * Ignore errors from failing to allocate the new fence, we can't - * handle an error right now. Worst case should be missed - * synchronisation leading to rendering corruption. 
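With eb_export_fence() removed, i915_vma_move_to_active() takes over the reservation-object fence export and now reports failure, so callers in this series adopt the same check-and-skip shape (see __reloc_gpu_alloc above and eb_move_to_gpu just below). Condensed caller pattern:

        err = i915_vma_move_to_active(vma, rq, flags);
        if (unlikely(err)) {
                /* Cancel the payload but keep the request/fence bookkeeping sane. */
                i915_request_skip(rq, err);
                return err;
        }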
- */ - reservation_object_lock(resv, NULL); - if (flags & EXEC_OBJECT_WRITE) - reservation_object_add_excl_fence(resv, &rq->fence); - else if (reservation_object_reserve_shared(resv) == 0) - reservation_object_add_shared_fence(resv, &rq->fence); - reservation_object_unlock(resv); -} - static int eb_move_to_gpu(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; @@ -1833,8 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) unsigned int flags = eb->flags[i]; struct i915_vma *vma = eb->vma[i]; - i915_vma_move_to_active(vma, eb->request, flags); - eb_export_fence(vma, eb->request, flags); + err = i915_vma_move_to_active(vma, eb->request, flags); + if (unlikely(err)) { + i915_request_skip(eb->request, err); + return err; + } __eb_unreserve_vma(vma, flags); vma->exec_flags = NULL; @@ -1874,45 +1868,6 @@ static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) return true; } -void i915_vma_move_to_active(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - struct drm_i915_gem_object *obj = vma->obj; - const unsigned int idx = rq->engine->id; - - lockdep_assert_held(&rq->i915->drm.struct_mutex); - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - - /* - * Add a reference if we're newly entering the active list. - * The order in which we add operations to the retirement queue is - * vital here: mark_active adds to the start of the callback list, - * such that subsequent callbacks are called first. Therefore we - * add the active reference first and queue for it to be dropped - * *last*. - */ - if (!i915_vma_is_active(vma)) - obj->active_count++; - i915_vma_set_active(vma, idx); - i915_gem_active_set(&vma->last_read[idx], rq); - list_move_tail(&vma->vm_link, &vma->vm->active_list); - - obj->write_domain = 0; - if (flags & EXEC_OBJECT_WRITE) { - obj->write_domain = I915_GEM_DOMAIN_RENDER; - - if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) - i915_gem_active_set(&obj->frontbuffer_write, rq); - - obj->read_domains = 0; - } - obj->read_domains |= I915_GEM_GPU_DOMAINS; - - if (flags & EXEC_OBJECT_NEEDS_FENCE) - i915_gem_active_set(&vma->last_fence, rq); -} - static int i915_reset_gen7_sol_offsets(struct i915_request *rq) { u32 *cs; @@ -2438,7 +2393,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, trace_i915_request_queue(eb.request, eb.batch_flags); err = eb_submit(&eb); err_request: - __i915_request_add(eb.request, err == 0); + i915_request_add(eb.request); add_to_client(eb.request, file); if (fences) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 996ab2ad6c45..f00c7fbef79e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -42,7 +42,7 @@ #include "intel_drv.h" #include "intel_frontbuffer.h" -#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM) +#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) /** * DOC: Global GTT views @@ -195,18 +195,18 @@ static int ppgtt_bind_vma(struct i915_vma *vma, u32 unused) { u32 pte_flags; - int ret; + int err; if (!(vma->flags & I915_VMA_LOCAL_BIND)) { - ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, - vma->size); - if (ret) - return ret; + err = vma->vm->allocate_va_range(vma->vm, + vma->node.start, vma->size); + if (err) + return err; } - /* Currently applicable only to VLV */ + /* Applicable to VLV, and gen8+ */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma, 
cache_level, pte_flags); @@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma) } static gen8_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level) + enum i915_cache_level level, + u32 flags) { - gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW; - pte |= addr; + gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + + if (unlikely(flags & PTE_READ_ONLY)) + pte &= ~_PAGE_RW; switch (level) { case I915_CACHE_NONE: @@ -375,37 +378,70 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +static void stash_init(struct pagestash *stash) +{ + pagevec_init(&stash->pvec); + spin_lock_init(&stash->lock); +} + +static struct page *stash_pop_page(struct pagestash *stash) +{ + struct page *page = NULL; + + spin_lock(&stash->lock); + if (likely(stash->pvec.nr)) + page = stash->pvec.pages[--stash->pvec.nr]; + spin_unlock(&stash->lock); + + return page; +} + +static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec) +{ + int nr; + + spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING); + + nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec)); + memcpy(stash->pvec.pages + stash->pvec.nr, + pvec->pages + pvec->nr - nr, + sizeof(pvec->pages[0]) * nr); + stash->pvec.nr += nr; + + spin_unlock(&stash->lock); + + pvec->nr -= nr; +} + static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) { - struct pagevec *pvec = &vm->free_pages; - struct pagevec stash; + struct pagevec stack; + struct page *page; if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) i915_gem_shrink_all(vm->i915); - if (likely(pvec->nr)) - return pvec->pages[--pvec->nr]; + page = stash_pop_page(&vm->free_pages); + if (page) + return page; if (!vm->pt_kmap_wc) return alloc_page(gfp); - /* A placeholder for a specific mutex to guard the WC stash */ - lockdep_assert_held(&vm->i915->drm.struct_mutex); - /* Look in our global stash of WC pages... */ - pvec = &vm->i915->mm.wc_stash; - if (likely(pvec->nr)) - return pvec->pages[--pvec->nr]; + page = stash_pop_page(&vm->i915->mm.wc_stash); + if (page) + return page; /* - * Otherwise batch allocate pages to amoritize cost of set_pages_wc. + * Otherwise batch allocate pages to amortize cost of set_pages_wc. * * We have to be careful as page allocation may trigger the shrinker * (via direct reclaim) which will fill up the WC stash underneath us. * So we add our WB pages into a temporary pvec on the stack and merge * them into the WC stash after all the allocations are complete. */ - pagevec_init(&stash); + pagevec_init(&stack); do { struct page *page; @@ -413,59 +449,67 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!page)) break; - stash.pages[stash.nr++] = page; - } while (stash.nr < pagevec_space(pvec)); + stack.pages[stack.nr++] = page; + } while (pagevec_space(&stack)); - if (stash.nr) { - int nr = min_t(int, stash.nr, pagevec_space(pvec)); - struct page **pages = stash.pages + stash.nr - nr; + if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) { + page = stack.pages[--stack.nr]; - if (nr && !set_pages_array_wc(pages, nr)) { - memcpy(pvec->pages + pvec->nr, - pages, sizeof(pages[0]) * nr); - pvec->nr += nr; - stash.nr -= nr; - } + /* Merge spare WC pages to the global stash */ + stash_push_pagevec(&vm->i915->mm.wc_stash, &stack); - pagevec_release(&stash); + /* Push any surplus WC pages onto the local VM stash */ + if (stack.nr) + stash_push_pagevec(&vm->free_pages, &stack); } - return likely(pvec->nr) ? 
pvec->pages[--pvec->nr] : NULL; + /* Return unwanted leftovers */ + if (unlikely(stack.nr)) { + WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr)); + __pagevec_release(&stack); + } + + return page; } static void vm_free_pages_release(struct i915_address_space *vm, bool immediate) { - struct pagevec *pvec = &vm->free_pages; + struct pagevec *pvec = &vm->free_pages.pvec; + struct pagevec stack; + lockdep_assert_held(&vm->free_pages.lock); GEM_BUG_ON(!pagevec_count(pvec)); if (vm->pt_kmap_wc) { - struct pagevec *stash = &vm->i915->mm.wc_stash; - - /* When we use WC, first fill up the global stash and then + /* + * When we use WC, first fill up the global stash and then * only if full immediately free the overflow. */ + stash_push_pagevec(&vm->i915->mm.wc_stash, pvec); - lockdep_assert_held(&vm->i915->drm.struct_mutex); - if (pagevec_space(stash)) { - do { - stash->pages[stash->nr++] = - pvec->pages[--pvec->nr]; - if (!pvec->nr) - return; - } while (pagevec_space(stash)); - - /* As we have made some room in the VM's free_pages, - * we can wait for it to fill again. Unless we are - * inside i915_address_space_fini() and must - * immediately release the pages! - */ - if (!immediate) - return; - } + /* + * As we have made some room in the VM's free_pages, + * we can wait for it to fill again. Unless we are + * inside i915_address_space_fini() and must + * immediately release the pages! + */ + if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1)) + return; + + /* + * We have to drop the lock to allow ourselves to sleep, + * so take a copy of the pvec and clear the stash for + * others to use it as we sleep. + */ + stack = *pvec; + pagevec_reinit(pvec); + spin_unlock(&vm->free_pages.lock); + pvec = &stack; set_pages_array_wb(pvec->pages, pvec->nr); + + spin_lock(&vm->free_pages.lock); } __pagevec_release(pvec); @@ -481,20 +525,60 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) * unconditional might_sleep() for everybody. */ might_sleep(); - if (!pagevec_add(&vm->free_pages, page)) + spin_lock(&vm->free_pages.lock); + if (!pagevec_add(&vm->free_pages.pvec, page)) vm_free_pages_release(vm, false); + spin_unlock(&vm->free_pages.lock); +} + +static void i915_address_space_init(struct i915_address_space *vm, + struct drm_i915_private *dev_priv) +{ + /* + * The vm->mutex must be reclaim safe (for use in the shrinker). + * Do a dummy acquire now under fs_reclaim so that any allocation + * attempt holding the lock is immediately reported by lockdep. 
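i915_gem_shrinker_taints_mutex() itself is outside this diff; the usual shape of such a helper is a dummy lock/unlock cycle inside an fs_reclaim section so that lockdep records the mutex as one that may be taken from reclaim context. A sketch of that idea, assuming the standard fs_reclaim_acquire()/fs_reclaim_release() annotations:

        #include <linux/gfp.h>
        #include <linux/mutex.h>
        #include <linux/sched/mm.h>

        /* Sketch: teach lockdep that @mutex may be acquired under memory reclaim. */
        static void example_taint_mutex_for_reclaim(struct mutex *mutex)
        {
                if (!IS_ENABLED(CONFIG_LOCKDEP))
                        return;

                fs_reclaim_acquire(GFP_KERNEL);
                mutex_lock(mutex);
                mutex_unlock(mutex);
                fs_reclaim_release(GFP_KERNEL);
        }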
+ */ + mutex_init(&vm->mutex); + i915_gem_shrinker_taints_mutex(&vm->mutex); + + GEM_BUG_ON(!vm->total); + drm_mm_init(&vm->mm, 0, vm->total); + vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; + + stash_init(&vm->free_pages); + + INIT_LIST_HEAD(&vm->active_list); + INIT_LIST_HEAD(&vm->inactive_list); + INIT_LIST_HEAD(&vm->unbound_list); +} + +static void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); } static int __setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p, gfp_t gfp) { - p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY); + p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL); if (unlikely(!p->page)) return -ENOMEM; - p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE, - PCI_DMA_BIDIRECTIONAL); + p->daddr = dma_map_page_attrs(vm->dma, + p->page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); if (unlikely(dma_mapping_error(vm->dma, p->daddr))) { vm_free_page(vm, p->page); return -ENOMEM; @@ -506,7 +590,7 @@ static int __setup_page_dma(struct i915_address_space *vm, static int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p) { - return __setup_page_dma(vm, p, I915_GFP_DMA); + return __setup_page_dma(vm, p, __GFP_HIGHMEM); } static void cleanup_page_dma(struct i915_address_space *vm, @@ -520,8 +604,8 @@ static void cleanup_page_dma(struct i915_address_space *vm, #define setup_px(vm, px) setup_page_dma((vm), px_base(px)) #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) -#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v)) -#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v)) +#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) +#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) static void fill_page_dma(struct i915_address_space *vm, struct i915_page_dma *p, @@ -575,8 +659,11 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!page)) goto skip; - addr = dma_map_page(vm->dma, page, 0, size, - PCI_DMA_BIDIRECTIONAL); + addr = dma_map_page_attrs(vm->dma, + page, 0, size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_WARN); if (unlikely(dma_mapping_error(vm->dma, addr))) goto free_page; @@ -614,7 +701,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) { struct i915_page_table *pt; - pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN); + pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL); if (unlikely(!pt)) return ERR_PTR(-ENOMEM); @@ -637,21 +724,20 @@ static void gen8_initialize_pt(struct i915_address_space *vm, struct i915_page_table *pt) { fill_px(vm, pt, - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC)); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); } -static void gen6_initialize_pt(struct i915_address_space *vm, +static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, struct i915_page_table *pt) { - fill32_px(vm, pt, - vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); + fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte); } static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) { struct i915_page_directory *pd; - pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN); + pd = kzalloc(sizeof(*pd), 
I915_GFP_ALLOW_FAIL); if (unlikely(!pd)) return ERR_PTR(-ENOMEM); @@ -685,7 +771,7 @@ static int __pdp_init(struct i915_address_space *vm, const unsigned int pdpes = i915_pdpes_per_pdp(vm); pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), - GFP_KERNEL | __GFP_NOWARN); + I915_GFP_ALLOW_FAIL); if (unlikely(!pdp->page_directory)) return -ENOMEM; @@ -765,53 +851,6 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); } -/* Broadwell Page Directory Pointer Descriptors */ -static int gen8_write_pdp(struct i915_request *rq, - unsigned entry, - dma_addr_t addr) -{ - struct intel_engine_cs *engine = rq->engine; - u32 *cs; - - BUG_ON(entry >= 4); - - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry)); - *cs++ = upper_32_bits(addr); - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry)); - *cs++ = lower_32_bits(addr); - intel_ring_advance(rq, cs); - - return 0; -} - -static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - int i, ret; - - for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - - ret = gen8_write_pdp(rq, i, pd_daddr); - if (ret) - return ret; - } - - return 0; -} - -static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4)); -} - /* PDE TLBs are a pain to invalidate on GEN8+. When we modify * the page table structures, we mark them dirty so that * context switching/execlist queuing code takes extra steps @@ -819,7 +858,7 @@ static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, */ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { - ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask; + ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; } /* Removes entries from a single page table, releasing it if it's empty. 
@@ -833,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, unsigned int pte = gen8_pte_index(start); unsigned int pte_end = pte + num_entries; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -1005,14 +1044,15 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, struct i915_page_directory_pointer *pdp, struct sgt_dma *iter, struct gen8_insert_pte *idx, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); gen8_pte_t *vaddr; bool ret; - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base)); + GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); pd = pdp->page_directory[idx->pdpe]; vaddr = kmap_atomic_px(pd->page_table[idx->pde]); do { @@ -1043,7 +1083,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, break; } - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base)); + GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); pd = pdp->page_directory[idx->pdpe]; } @@ -1059,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt, static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx, - cache_level); + cache_level, flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } @@ -1074,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, struct i915_page_directory_pointer **pdps, struct sgt_dma *iter, - enum i915_cache_level cache_level) + enum i915_cache_level cache_level, + u32 flags) { - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); u64 start = vma->node.start; dma_addr_t rem = iter->sg->length; @@ -1192,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps; if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) { - gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level); + gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level, + flags); } else { struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], - &iter, &idx, cache_level)) + &iter, &idx, cache_level, + flags)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -1229,7 +1272,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) { int ret; - ret = setup_scratch_page(vm, I915_GFP_DMA); + ret = setup_scratch_page(vm, __GFP_HIGHMEM); if (ret) return ret; @@ -1272,7 +1315,7 @@ free_scratch_page: static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) { - struct 
i915_address_space *vm = &ppgtt->base; + struct i915_address_space *vm = &ppgtt->vm; struct drm_i915_private *dev_priv = vm->i915; enum vgt_g2v_type msg; int i; @@ -1333,13 +1376,13 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) int i; for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { - if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp) + if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp) continue; - gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]); + gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]); } - cleanup_px(&ppgtt->base, &ppgtt->pml4); + cleanup_px(&ppgtt->vm, &ppgtt->pml4); } static void gen8_ppgtt_cleanup(struct i915_address_space *vm) @@ -1353,7 +1396,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) if (use_4lvl(vm)) gen8_ppgtt_cleanup_4lvl(ppgtt); else - gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp); + gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp); gen8_free_scratch(vm); } @@ -1489,7 +1532,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, gen8_pte_t scratch_pte, struct seq_file *m) { - struct i915_address_space *vm = &ppgtt->base; + struct i915_address_space *vm = &ppgtt->vm; struct i915_page_directory *pd; u32 pdpe; @@ -1499,7 +1542,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, u64 pd_start = start; u32 pde; - if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd) + if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd) continue; seq_printf(m, "\tPDPE #%d\n", pdpe); @@ -1507,7 +1550,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, u32 pte; gen8_pte_t *pt_vaddr; - if (pd->page_table[pde] == ppgtt->base.scratch_pt) + if (pd->page_table[pde] == ppgtt->vm.scratch_pt) continue; pt_vaddr = kmap_atomic_px(pt); @@ -1540,10 +1583,10 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { - struct i915_address_space *vm = &ppgtt->base; + struct i915_address_space *vm = &ppgtt->vm; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); - u64 start = 0, length = ppgtt->base.total; + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); + u64 start = 0, length = ppgtt->vm.total; if (use_4lvl(vm)) { u64 pml4e; @@ -1551,7 +1594,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) struct i915_page_directory_pointer *pdp; gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp) + if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp) continue; seq_printf(m, " PML4E #%llu\n", pml4e); @@ -1564,10 +1607,10 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt) { - struct i915_address_space *vm = &ppgtt->base; + struct i915_address_space *vm = &ppgtt->vm; struct i915_page_directory_pointer *pdp = &ppgtt->pdp; struct i915_page_directory *pd; - u64 start = 0, length = ppgtt->base.total; + u64 start = 0, length = ppgtt->vm.total; u64 from = start; unsigned int pdpe; @@ -1601,211 +1644,153 @@ unwind: * space. 
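One functional addition threaded through the PPGTT hunks (ppgtt_bind_vma and gen8_pte_encode above, gen8_ppgtt_create below) is read-only PPGTT pages: read-only objects get PTE_READ_ONLY in their bind flags, and the gen8 PTE encoder then drops the RW bit. Condensed into one illustrative helper; the _PAGE_* bits come from the x86 page-table headers and this is not code from the patch.

        /* Illustration only: how a read-only binding loses the RW bit. */
        static u64 example_gen8_pte(u64 addr, bool readonly)
        {
                u64 pte = addr | _PAGE_PRESENT | _PAGE_RW;

                if (readonly)
                        pte &= ~_PAGE_RW;

                return pte;
        }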
* */ -static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) { - struct i915_address_space *vm = &ppgtt->base; - struct drm_i915_private *dev_priv = vm->i915; - int ret; + struct i915_hw_ppgtt *ppgtt; + int err; + + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + kref_init(&ppgtt->ref); + + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; - ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ? + ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? 1ULL << 48 : 1ULL << 32; + /* + * From bdw, there is support for read-only pages in the PPGTT. + * + * XXX GVT is not honouring the lack of RW in the PTE bits. + */ + ppgtt->vm.has_read_only = !intel_vgpu_active(i915); + + i915_address_space_init(&ppgtt->vm, i915); + /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. */ - if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) - ppgtt->base.pt_kmap_wc = true; + if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915)) + ppgtt->vm.pt_kmap_wc = true; - ret = gen8_init_scratch(&ppgtt->base); - if (ret) { - ppgtt->base.total = 0; - return ret; - } + err = gen8_init_scratch(&ppgtt->vm); + if (err) + goto err_free; - if (use_4lvl(vm)) { - ret = setup_px(&ppgtt->base, &ppgtt->pml4); - if (ret) - goto free_scratch; + if (use_4lvl(&ppgtt->vm)) { + err = setup_px(&ppgtt->vm, &ppgtt->pml4); + if (err) + goto err_scratch; - gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4); + gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4); - ppgtt->switch_mm = gen8_mm_switch_4lvl; - ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl; - ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl; - ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl; + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; + ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; + ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; } else { - ret = __pdp_init(&ppgtt->base, &ppgtt->pdp); - if (ret) - goto free_scratch; + err = __pdp_init(&ppgtt->vm, &ppgtt->pdp); + if (err) + goto err_scratch; - if (intel_vgpu_active(dev_priv)) { - ret = gen8_preallocate_top_level_pdp(ppgtt); - if (ret) { + if (intel_vgpu_active(i915)) { + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) { __pdp_fini(&ppgtt->pdp); - goto free_scratch; + goto err_scratch; } } - ppgtt->switch_mm = gen8_mm_switch_3lvl; - ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl; - ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl; - ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl; + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; + ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; + ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; } - if (intel_vgpu_active(dev_priv)) + if (intel_vgpu_active(i915)) gen8_ppgtt_notify_vgt(ppgtt, true); - ppgtt->base.cleanup = gen8_ppgtt_cleanup; - ppgtt->base.unbind_vma = ppgtt_unbind_vma; - ppgtt->base.bind_vma = ppgtt_bind_vma; - ppgtt->base.set_pages = ppgtt_set_pages; - ppgtt->base.clear_pages = clear_pages; + ppgtt->vm.cleanup = gen8_ppgtt_cleanup; ppgtt->debug_dump = gen8_dump_ppgtt; - return 0; + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->vm.vma_ops.clear_pages = clear_pages; -free_scratch: - gen8_free_scratch(&ppgtt->base); - return ret; + return ppgtt; + +err_scratch: + gen8_free_scratch(&ppgtt->vm); +err_free: + kfree(ppgtt); + return ERR_PTR(err); } -static 
void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) +static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) { - struct i915_address_space *vm = &ppgtt->base; - struct i915_page_table *unused; - gen6_pte_t scratch_pte; - u32 pd_entry, pte, pde; - u32 start = 0, length = ppgtt->base.total; + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + const gen6_pte_t scratch_pte = ppgtt->scratch_pte; + struct i915_page_table *pt; + u32 pte, pde; - scratch_pte = vm->pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, 0); + gen6_for_all_pdes(pt, &base->pd, pde) { + gen6_pte_t *vaddr; + + if (pt == base->vm.scratch_pt) + continue; + + if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { + u32 expected = + GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | + GEN6_PDE_VALID; + u32 pd_entry = readl(ppgtt->pd_addr + pde); + + if (pd_entry != expected) + seq_printf(m, + "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", + pde, + pd_entry, + expected); + + seq_printf(m, "\tPDE: %x\n", pd_entry); + } - gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) { - u32 expected; - gen6_pte_t *pt_vaddr; - const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); - pd_entry = readl(ppgtt->pd_addr + pde); - expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); - - if (pd_entry != expected) - seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n", - pde, - pd_entry, - expected); - seq_printf(m, "\tPDE: %x\n", pd_entry); - - pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]); - - for (pte = 0; pte < GEN6_PTES; pte+=4) { - unsigned long va = - (pde * PAGE_SIZE * GEN6_PTES) + - (pte * PAGE_SIZE); + vaddr = kmap_atomic_px(base->pd.page_table[pde]); + for (pte = 0; pte < GEN6_PTES; pte += 4) { int i; - bool found = false; + for (i = 0; i < 4; i++) - if (pt_vaddr[pte + i] != scratch_pte) - found = true; - if (!found) + if (vaddr[pte + i] != scratch_pte) + break; + if (i == 4) continue; - seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte); + seq_printf(m, "\t\t(%03d, %04d) %08lx: ", + pde, pte, + (pde * GEN6_PTES + pte) * PAGE_SIZE); for (i = 0; i < 4; i++) { - if (pt_vaddr[pte + i] != scratch_pte) - seq_printf(m, " %08x", pt_vaddr[pte + i]); + if (vaddr[pte + i] != scratch_pte) + seq_printf(m, " %08x", vaddr[pte + i]); else - seq_puts(m, " SCRATCH "); + seq_puts(m, " SCRATCH"); } seq_puts(m, "\n"); } - kunmap_atomic(pt_vaddr); + kunmap_atomic(vaddr); } } /* Write pde (index) from the page directory @pd to the page table @pt */ -static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt, +static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, const unsigned int pde, const struct i915_page_table *pt) { /* Caller needs to make sure the write completes if necessary */ - writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, - ppgtt->pd_addr + pde); -} - -/* Write all the page tables found in the ppgtt structure to incrementing page - * directories. 
*/ -static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt, - u32 start, u32 length) -{ - struct i915_page_table *pt; - unsigned int pde; - - gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) - gen6_write_pde(ppgtt, pde, pt); - - mark_tlbs_dirty(ppgtt); - wmb(); -} - -static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt) -{ - GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f); - return ppgtt->pd.base.ggtt_offset << 10; -} - -static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - u32 *cs; - - /* NB: TLBs must be flushed and invalidated before a switch */ - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); - *cs++ = PP_DIR_DCLV_2G; - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); - *cs++ = get_pd_offset(ppgtt); - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - -static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - u32 *cs; - - /* NB: TLBs must be flushed and invalidated before a switch */ - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); - *cs++ = PP_DIR_DCLV_2G; - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); - *cs++ = get_pd_offset(ppgtt); - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - -static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - struct drm_i915_private *dev_priv = rq->i915; - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - return 0; + iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID, + ppgtt->pd_addr + pde); } static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) @@ -1867,22 +1852,30 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) static void gen6_ppgtt_clear_range(struct i915_address_space *vm, u64 start, u64 length) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); unsigned int first_entry = start >> PAGE_SHIFT; unsigned int pde = first_entry / GEN6_PTES; unsigned int pte = first_entry % GEN6_PTES; unsigned int num_entries = length >> PAGE_SHIFT; - gen6_pte_t scratch_pte = - vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); + const gen6_pte_t scratch_pte = ppgtt->scratch_pte; while (num_entries) { - struct i915_page_table *pt = ppgtt->pd.page_table[pde++]; - unsigned int end = min(pte + num_entries, GEN6_PTES); + struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; + const unsigned int end = min(pte + num_entries, GEN6_PTES); + const unsigned int count = end - pte; gen6_pte_t *vaddr; - num_entries -= end - pte; + GEM_BUG_ON(pt == vm->scratch_pt); + + num_entries -= count; + + GEM_BUG_ON(count > pt->used_ptes); + pt->used_ptes -= count; + if (!pt->used_ptes) + ppgtt->scan_for_unused_pt = true; - /* Note that the hw doesn't support removing PDE on the fly + /* + * Note that the hw doesn't support removing PDE on the fly * (they are cached inside the context with no means to * invalidate the cache), so we can only reset the PTE * entries back to scratch. 
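gen6_ppgtt_clear_range() above now keeps a per-page-table count of live entries and flags the ppgtt for a later scan when a table empties, so pd_vma_unbind() can free it. A standalone sketch of that bookkeeping, with all names invented and the layout reduced to a single table:

/* Standalone sketch of the occupancy counting added to
 * gen6_ppgtt_clear_range(): each table remembers how many entries are live,
 * and dropping to zero marks it as a candidate for freeing later
 * (the real driver defers the free to pd_vma_unbind()). */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_PTES 512

struct toy_pt {
	unsigned int used_ptes;
	unsigned int pte[TOY_PTES];
};

struct toy_ppgtt {
	struct toy_pt pt;
	bool scan_for_unused_pt;
};

static void toy_clear_range(struct toy_ppgtt *ppgtt,
			    unsigned int first, unsigned int count)
{
	struct toy_pt *pt = &ppgtt->pt;
	unsigned int i;

	assert(count <= pt->used_ptes);
	pt->used_ptes -= count;
	if (!pt->used_ptes)
		ppgtt->scan_for_unused_pt = true; /* free it on next unbind */

	for (i = first; i < first + count; i++)
		pt->pte[i] = 0;                   /* reset to "scratch" */
}

int main(void)
{
	struct toy_ppgtt ppgtt = { .pt.used_ptes = 4 };

	toy_clear_range(&ppgtt, 0, 4);
	printf("table empty: %s\n", ppgtt.scan_for_unused_pt ? "yes" : "no");
	return 0;
}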
@@ -1911,6 +1904,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma); gen6_pte_t *vaddr; + GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt); + vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); do { vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); @@ -1939,218 +1934,280 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, static int gen6_alloc_va_range(struct i915_address_space *vm, u64 start, u64 length) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct i915_page_table *pt; u64 from = start; unsigned int pde; bool flush = false; - gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { + gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { + const unsigned int count = gen6_pte_count(start, length); + if (pt == vm->scratch_pt) { pt = alloc_pt(vm); if (IS_ERR(pt)) goto unwind_out; - gen6_initialize_pt(vm, pt); - ppgtt->pd.page_table[pde] = pt; - gen6_write_pde(ppgtt, pde, pt); - flush = true; + gen6_initialize_pt(ppgtt, pt); + ppgtt->base.pd.page_table[pde] = pt; + + if (i915_vma_is_bound(ppgtt->vma, + I915_VMA_GLOBAL_BIND)) { + gen6_write_pde(ppgtt, pde, pt); + flush = true; + } + + GEM_BUG_ON(pt->used_ptes); } + + pt->used_ptes += count; } if (flush) { - mark_tlbs_dirty(ppgtt); - wmb(); + mark_tlbs_dirty(&ppgtt->base); + gen6_ggtt_invalidate(ppgtt->base.vm.i915); } return 0; unwind_out: - gen6_ppgtt_clear_range(vm, from, start); + gen6_ppgtt_clear_range(vm, from, start - from); return -ENOMEM; } -static int gen6_init_scratch(struct i915_address_space *vm) +static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) { + struct i915_address_space * const vm = &ppgtt->base.vm; + struct i915_page_table *unused; + u32 pde; int ret; - ret = setup_scratch_page(vm, I915_GFP_DMA); + ret = setup_scratch_page(vm, __GFP_HIGHMEM); if (ret) return ret; + ppgtt->scratch_pte = + vm->pte_encode(vm->scratch_page.daddr, + I915_CACHE_NONE, PTE_READ_ONLY); + vm->scratch_pt = alloc_pt(vm); if (IS_ERR(vm->scratch_pt)) { cleanup_scratch_page(vm); return PTR_ERR(vm->scratch_pt); } - gen6_initialize_pt(vm, vm->scratch_pt); + gen6_initialize_pt(ppgtt, vm->scratch_pt); + gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) + ppgtt->base.pd.page_table[pde] = vm->scratch_pt; return 0; } -static void gen6_free_scratch(struct i915_address_space *vm) +static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) { free_pt(vm, vm->scratch_pt); cleanup_scratch_page(vm); } -static void gen6_ppgtt_cleanup(struct i915_address_space *vm) +static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt) { - struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory *pd = &ppgtt->pd; struct i915_page_table *pt; u32 pde; - drm_mm_remove_node(&ppgtt->node); + gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) + if (pt != ppgtt->base.vm.scratch_pt) + free_pt(&ppgtt->base.vm, pt); +} + +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - gen6_for_all_pdes(pt, pd, pde) - if (pt != vm->scratch_pt) - free_pt(vm, pt); + i915_vma_destroy(ppgtt->vma); - gen6_free_scratch(vm); + gen6_ppgtt_free_pd(ppgtt); + gen6_ppgtt_free_scratch(vm); } -static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) +static int pd_vma_set_pages(struct i915_vma *vma) { - struct i915_address_space *vm = &ppgtt->base; - struct drm_i915_private *dev_priv = 
ppgtt->base.i915; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - int ret; + vma->pages = ERR_PTR(-ENODEV); + return 0; +} - /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The - * allocator works in address space sizes, so it's multiplied by page - * size. We allocate at the top of the GTT to avoid fragmentation. - */ - BUG_ON(!drm_mm_initialized(&ggtt->base.mm)); +static void pd_vma_clear_pages(struct i915_vma *vma) +{ + GEM_BUG_ON(!vma->pages); - ret = gen6_init_scratch(vm); - if (ret) - return ret; + vma->pages = NULL; +} - ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node, - GEN6_PD_SIZE, GEN6_PD_ALIGN, - I915_COLOR_UNEVICTABLE, - 0, ggtt->base.total, - PIN_HIGH); - if (ret) - goto err_out; +static int pd_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 unused) +{ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); + struct gen6_hw_ppgtt *ppgtt = vma->private; + u32 ggtt_offset = i915_ggtt_offset(vma) / PAGE_SIZE; + struct i915_page_table *pt; + unsigned int pde; - if (ppgtt->node.start < ggtt->mappable_end) - DRM_DEBUG("Forced to use aperture for PDEs\n"); + ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; - ppgtt->pd.base.ggtt_offset = - ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); + gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) + gen6_write_pde(ppgtt, pde, pt); - ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + - ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); + mark_tlbs_dirty(&ppgtt->base); + gen6_ggtt_invalidate(ppgtt->base.vm.i915); return 0; - -err_out: - gen6_free_scratch(vm); - return ret; } -static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) +static void pd_vma_unbind(struct i915_vma *vma) { - return gen6_ppgtt_allocate_page_directories(ppgtt); -} + struct gen6_hw_ppgtt *ppgtt = vma->private; + struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; + struct i915_page_table *pt; + unsigned int pde; -static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, - u64 start, u64 length) -{ - struct i915_page_table *unused; - u32 pde; + if (!ppgtt->scan_for_unused_pt) + return; + + /* Free all no longer used page tables */ + gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) { + if (pt->used_ptes || pt == scratch_pt) + continue; - gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) - ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; + free_pt(&ppgtt->base.vm, pt); + ppgtt->base.pd.page_table[pde] = scratch_pt; + } + + ppgtt->scan_for_unused_pt = false; } -static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) -{ - struct drm_i915_private *dev_priv = ppgtt->base.i915; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - int ret; +static const struct i915_vma_ops pd_vma_ops = { + .set_pages = pd_vma_set_pages, + .clear_pages = pd_vma_clear_pages, + .bind_vma = pd_vma_bind, + .unbind_vma = pd_vma_unbind, +}; - ppgtt->base.pte_encode = ggtt->base.pte_encode; - if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv)) - ppgtt->switch_mm = gen6_mm_switch; - else if (IS_HASWELL(dev_priv)) - ppgtt->switch_mm = hsw_mm_switch; - else if (IS_GEN7(dev_priv)) - ppgtt->switch_mm = gen7_mm_switch; - else - BUG(); +static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) +{ + struct drm_i915_private *i915 = ppgtt->base.vm.i915; + struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_vma *vma; - ret = gen6_ppgtt_alloc(ppgtt); - if (ret) - return ret; + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); + GEM_BUG_ON(size > 
ggtt->vm.total); - ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; + vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); + if (!vma) + return ERR_PTR(-ENOMEM); - gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); - gen6_write_page_range(ppgtt, 0, ppgtt->base.total); + init_request_active(&vma->last_fence, NULL); - ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total); - if (ret) { - gen6_ppgtt_cleanup(&ppgtt->base); - return ret; - } + vma->vm = &ggtt->vm; + vma->ops = &pd_vma_ops; + vma->private = ppgtt; - ppgtt->base.clear_range = gen6_ppgtt_clear_range; - ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; - ppgtt->base.unbind_vma = ppgtt_unbind_vma; - ppgtt->base.bind_vma = ppgtt_bind_vma; - ppgtt->base.set_pages = ppgtt_set_pages; - ppgtt->base.clear_pages = clear_pages; - ppgtt->base.cleanup = gen6_ppgtt_cleanup; - ppgtt->debug_dump = gen6_dump_ppgtt; + vma->active = RB_ROOT; - DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", - ppgtt->node.size >> 20, - ppgtt->node.start / PAGE_SIZE); + vma->size = size; + vma->fence_size = size; + vma->flags = I915_VMA_GGTT; + vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n", - ppgtt->pd.base.ggtt_offset << 10); + INIT_LIST_HEAD(&vma->obj_link); + list_add(&vma->vm_link, &vma->vm->unbound_list); - return 0; + return vma; } -static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, - struct drm_i915_private *dev_priv) +int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) { - ppgtt->base.i915 = dev_priv; - ppgtt->base.dma = &dev_priv->drm.pdev->dev; + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); - if (INTEL_GEN(dev_priv) < 8) - return gen6_ppgtt_init(ppgtt); - else - return gen8_ppgtt_init(ppgtt); + /* + * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt + * which will be pinned into every active context. + * (When vma->pin_count becomes atomic, I expect we will naturally + * need a larger, unpacked, type and kill this redundancy.) + */ + if (ppgtt->pin_count++) + return 0; + + /* + * PPGTT PDEs reside in the GGTT and consists of 512 entries. The + * allocator works in address space sizes, so it's multiplied by page + * size. We allocate at the top of the GTT to avoid fragmentation. 
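pd_vma_ops above gives the page-directory vma its own set_pages/clear_pages/bind/unbind callbacks, the same ops-table idiom now carried by struct i915_vma_ops. A standalone sketch of the idiom, with invented toy_* names and printf standing in for the real PDE writes:

/* Standalone sketch of the ops-table pattern behind pd_vma_ops: a vma
 * carries a pointer to a struct of function pointers, so the special
 * page-directory vma can supply bind/unbind behaviour different from an
 * ordinary object-backed vma. */
#include <stdio.h>

struct toy_vma;

struct toy_vma_ops {
	int  (*bind)(struct toy_vma *vma);
	void (*unbind)(struct toy_vma *vma);
};

struct toy_vma {
	const struct toy_vma_ops *ops;
	void *private;           /* e.g. the ppgtt that owns this vma */
};

static int pd_bind(struct toy_vma *vma)
{
	printf("writing PDEs for ppgtt %p into the GGTT\n", vma->private);
	return 0;
}

static void pd_unbind(struct toy_vma *vma)
{
	printf("scanning ppgtt %p for unused page tables\n", vma->private);
}

static const struct toy_vma_ops toy_pd_vma_ops = {
	.bind = pd_bind,
	.unbind = pd_unbind,
};

int main(void)
{
	int dummy_ppgtt;
	struct toy_vma vma = { .ops = &toy_pd_vma_ops, .private = &dummy_ppgtt };

	vma.ops->bind(&vma);     /* what the generic bind path ends up calling */
	vma.ops->unbind(&vma);
	return 0;
}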
+ */ + return i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); } -static void i915_address_space_init(struct i915_address_space *vm, - struct drm_i915_private *dev_priv, - const char *name) +void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) { - drm_mm_init(&vm->mm, 0, vm->total); - vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); - INIT_LIST_HEAD(&vm->active_list); - INIT_LIST_HEAD(&vm->inactive_list); - INIT_LIST_HEAD(&vm->unbound_list); + GEM_BUG_ON(!ppgtt->pin_count); + if (--ppgtt->pin_count) + return; - list_add_tail(&vm->global_link, &dev_priv->vm_list); - pagevec_init(&vm->free_pages); + i915_vma_unpin(ppgtt->vma); } -static void i915_address_space_fini(struct i915_address_space *vm) +static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) { - if (pagevec_count(&vm->free_pages)) - vm_free_pages_release(vm, true); + struct i915_ggtt * const ggtt = &i915->ggtt; + struct gen6_hw_ppgtt *ppgtt; + int err; - drm_mm_takedown(&vm->mm); - list_del(&vm->global_link); + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); + if (!ppgtt) + return ERR_PTR(-ENOMEM); + + kref_init(&ppgtt->base.ref); + + ppgtt->base.vm.i915 = i915; + ppgtt->base.vm.dma = &i915->drm.pdev->dev; + + ppgtt->base.vm.total = I915_PDES * GEN6_PTES * PAGE_SIZE; + + i915_address_space_init(&ppgtt->base.vm, i915); + + ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; + ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; + ppgtt->base.debug_dump = gen6_dump_ppgtt; + + ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; + ppgtt->base.vm.vma_ops.clear_pages = clear_pages; + + ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; + + err = gen6_ppgtt_init_scratch(ppgtt); + if (err) + goto err_free; + + ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + goto err_scratch; + } + + return &ppgtt->base; + +err_scratch: + gen6_ppgtt_free_scratch(&ppgtt->base.vm); +err_free: + kfree(ppgtt); + return ERR_PTR(err); } static void gtt_write_workarounds(struct drm_i915_private *dev_priv) @@ -2212,29 +2269,28 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) return 0; } +static struct i915_hw_ppgtt * +__hw_ppgtt_create(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) < 8) + return gen6_ppgtt_create(i915); + else + return gen8_ppgtt_create(i915); +} + struct i915_hw_ppgtt * -i915_ppgtt_create(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *fpriv, - const char *name) +i915_ppgtt_create(struct drm_i915_private *i915, + struct drm_i915_file_private *fpriv) { struct i915_hw_ppgtt *ppgtt; - int ret; - - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return ERR_PTR(-ENOMEM); - ret = __hw_ppgtt_init(ppgtt, dev_priv); - if (ret) { - kfree(ppgtt); - return ERR_PTR(ret); - } + ppgtt = __hw_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return ppgtt; - kref_init(&ppgtt->ref); - i915_address_space_init(&ppgtt->base, dev_priv, name); - ppgtt->base.file = fpriv; + ppgtt->vm.file = fpriv; - trace_i915_ppgtt_create(&ppgtt->base); + trace_i915_ppgtt_create(&ppgtt->vm); return ppgtt; } @@ -2268,16 +2324,16 @@ void i915_ppgtt_release(struct kref *kref) struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref); - 
trace_i915_ppgtt_release(&ppgtt->base); + trace_i915_ppgtt_release(&ppgtt->vm); - ppgtt_destroy_vma(&ppgtt->base); + ppgtt_destroy_vma(&ppgtt->vm); - GEM_BUG_ON(!list_empty(&ppgtt->base.active_list)); - GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list)); - GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list)); + GEM_BUG_ON(!list_empty(&ppgtt->vm.active_list)); + GEM_BUG_ON(!list_empty(&ppgtt->vm.inactive_list)); + GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list)); - ppgtt->base.cleanup(&ppgtt->base); - i915_address_space_fini(&ppgtt->base); + ppgtt->vm.cleanup(&ppgtt->vm); + i915_address_space_fini(&ppgtt->vm); kfree(ppgtt); } @@ -2373,7 +2429,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) i915_check_and_clear_faults(dev_priv); - ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total); + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); i915_ggtt_invalidate(dev_priv); } @@ -2419,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_pte_t __iomem *pte = (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); - gen8_set_pte(pte, gen8_pte_encode(addr, level)); + gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); ggtt->invalidate(vm->i915); } @@ -2427,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, static void gen8_ggtt_insert_entries(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct sgt_iter sgt_iter; gen8_pte_t __iomem *gtt_entries; - const gen8_pte_t pte_encode = gen8_pte_encode(0, level); + const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0); dma_addr_t addr; + /* + * Note that we ignore PTE_READ_ONLY here. The caller must be careful + * not to allow the user to override access to a read only page. 
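gen6_ppgtt_pin() and gen6_ppgtt_unpin() above use a plain pin_count so only the first pin binds the page-directory vma and only the last unpin releases it. A standalone sketch of that counting, with a boolean standing in for the actual GGTT residency and all names invented:

/* Standalone sketch of the pin_count bookkeeping in gen6_ppgtt_pin()/
 * gen6_ppgtt_unpin(): only the first pin does the expensive work and only
 * the last unpin undoes it. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_ppgtt {
	unsigned int pin_count;
	bool pd_resident;        /* "is the page directory in the GGTT?" */
};

static int toy_pin(struct toy_ppgtt *ppgtt)
{
	if (ppgtt->pin_count++)  /* already pinned: just bump the count */
		return 0;

	ppgtt->pd_resident = true;
	printf("first pin: page directory made resident\n");
	return 0;
}

static void toy_unpin(struct toy_ppgtt *ppgtt)
{
	assert(ppgtt->pin_count);
	if (--ppgtt->pin_count)  /* still pinned by someone else */
		return;

	ppgtt->pd_resident = false;
	printf("last unpin: page directory released\n");
}

int main(void)
{
	struct toy_ppgtt ppgtt = {0};

	toy_pin(&ppgtt);
	toy_pin(&ppgtt);         /* second user piggybacks on the first pin */
	toy_unpin(&ppgtt);
	toy_unpin(&ppgtt);
	return 0;
}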
+ */ + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries += vma->node.start >> PAGE_SHIFT; for_each_sgt_dma(addr, sgt_iter, vma->pages) @@ -2500,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; const gen8_pte_t scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); + gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2561,13 +2622,14 @@ struct insert_entries { struct i915_address_space *vm; struct i915_vma *vma; enum i915_cache_level level; + u32 flags; }; static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) { struct insert_entries *arg = _arg; - gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0); + gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags); bxt_vtd_ggtt_wa(arg->vm); return 0; @@ -2576,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, struct i915_vma *vma, enum i915_cache_level level, - u32 unused) + u32 flags) { - struct insert_entries arg = { vm, vma, level }; + struct insert_entries arg = { vm, vma, level, flags }; stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); } @@ -2669,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma, struct drm_i915_gem_object *obj = vma->obj; u32 pte_flags; - /* Currently applicable only to VLV */ + /* Applicable to VLV (gen8+ do not support RO in the GGTT) */ pte_flags = 0; - if (obj->gt_ro) + if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; intel_runtime_pm_get(i915); @@ -2709,23 +2771,22 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, /* Currently applicable only to VLV */ pte_flags = 0; - if (vma->obj->gt_ro) + if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; - if (!(vma->flags & I915_VMA_LOCAL_BIND) && - appgtt->base.allocate_va_range) { - ret = appgtt->base.allocate_va_range(&appgtt->base, - vma->node.start, - vma->size); + if (!(vma->flags & I915_VMA_LOCAL_BIND)) { + ret = appgtt->vm.allocate_va_range(&appgtt->vm, + vma->node.start, + vma->size); if (ret) return ret; } - appgtt->base.insert_entries(&appgtt->base, vma, cache_level, - pte_flags); + appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, + pte_flags); } if (flags & I915_VMA_GLOBAL_BIND) { @@ -2748,7 +2809,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) } if (vma->flags & I915_VMA_LOCAL_BIND) { - struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base; + struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; vm->clear_range(vm, vma->node.start, vma->size); } @@ -2762,7 +2823,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if (i915_gem_wait_for_idle(dev_priv, 0)) { + if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); @@ -2811,34 +2872,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) struct i915_hw_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]"); + ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); if 
(IS_ERR(ppgtt)) return PTR_ERR(ppgtt); - if (WARN_ON(ppgtt->base.total < ggtt->base.total)) { + if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { err = -ENODEV; goto err_ppgtt; } - if (ppgtt->base.allocate_va_range) { - /* Note we only pre-allocate as far as the end of the global - * GTT. On 48b / 4-level page-tables, the difference is very, - * very significant! We have to preallocate as GVT/vgpu does - * not like the page directory disappearing. - */ - err = ppgtt->base.allocate_va_range(&ppgtt->base, - 0, ggtt->base.total); - if (err) - goto err_ppgtt; - } + /* + * Note we only pre-allocate as far as the end of the global + * GTT. On 48b / 4-level page-tables, the difference is very, + * very significant! We have to preallocate as GVT/vgpu does + * not like the page directory disappearing. + */ + err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total); + if (err) + goto err_ppgtt; i915->mm.aliasing_ppgtt = ppgtt; - GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma); - ggtt->base.bind_vma = aliasing_gtt_bind_vma; + GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); + ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; - GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); - ggtt->base.unbind_vma = aliasing_gtt_unbind_vma; + GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); + ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; return 0; @@ -2858,8 +2917,8 @@ void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) i915_ppgtt_put(ppgtt); - ggtt->base.bind_vma = ggtt_bind_vma; - ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; } int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) @@ -2883,7 +2942,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) return ret; /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture, + ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); @@ -2891,16 +2950,15 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) return ret; /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { + drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt->base.clear_range(&ggtt->base, hole_start, - hole_end - hole_start); + ggtt->vm.clear_range(&ggtt->vm, hole_start, + hole_end - hole_start); } /* And finally clear the reserved guard page */ - ggtt->base.clear_range(&ggtt->base, - ggtt->base.total - PAGE_SIZE, PAGE_SIZE); + ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); @@ -2925,30 +2983,26 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) struct i915_vma *vma, *vn; struct pagevec *pvec; - ggtt->base.closed = true; - - mutex_lock(&dev_priv->drm.struct_mutex); - GEM_BUG_ON(!list_empty(&ggtt->base.active_list)); - list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link) - WARN_ON(i915_vma_unbind(vma)); - mutex_unlock(&dev_priv->drm.struct_mutex); - - i915_gem_cleanup_stolen(&dev_priv->drm); + ggtt->vm.closed = true; mutex_lock(&dev_priv->drm.struct_mutex); i915_gem_fini_aliasing_ppgtt(dev_priv); + GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); + 
list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) + WARN_ON(i915_vma_unbind(vma)); + if (drm_mm_node_allocated(&ggtt->error_capture)) drm_mm_remove_node(&ggtt->error_capture); - if (drm_mm_initialized(&ggtt->base.mm)) { + if (drm_mm_initialized(&ggtt->vm.mm)) { intel_vgt_deballoon(dev_priv); - i915_address_space_fini(&ggtt->base); + i915_address_space_fini(&ggtt->vm); } - ggtt->base.cleanup(&ggtt->base); + ggtt->vm.cleanup(&ggtt->vm); - pvec = &dev_priv->mm.wc_stash; + pvec = &dev_priv->mm.wc_stash.pvec; if (pvec->nr) { set_pages_array_wb(pvec->pages, pvec->nr); __pagevec_release(pvec); @@ -2958,6 +3012,8 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) arch_phys_wc_del(ggtt->mtrr); io_mapping_fini(&ggtt->iomap); + + i915_gem_cleanup_stolen(&dev_priv->drm); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -2996,7 +3052,7 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) { - struct drm_i915_private *dev_priv = ggtt->base.i915; + struct drm_i915_private *dev_priv = ggtt->vm.i915; struct pci_dev *pdev = dev_priv->drm.pdev; phys_addr_t phys_addr; int ret; @@ -3020,7 +3076,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return -ENOMEM; } - ret = setup_scratch_page(&ggtt->base, GFP_DMA32); + ret = setup_scratch_page(&ggtt->vm, GFP_DMA32); if (ret) { DRM_ERROR("Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ @@ -3326,7 +3382,7 @@ static void setup_private_pat(struct drm_i915_private *dev_priv) static int gen8_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = ggtt->base.i915; + struct drm_i915_private *dev_priv = ggtt->vm.i915; struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int size; u16 snb_gmch_ctl; @@ -3350,29 +3406,30 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) else size = gen8_get_total_gtt_size(snb_gmch_ctl); - ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; - ggtt->base.cleanup = gen6_gmch_remove; - ggtt->base.bind_vma = ggtt_bind_vma; - ggtt->base.unbind_vma = ggtt_unbind_vma; - ggtt->base.set_pages = ggtt_set_pages; - ggtt->base.clear_pages = clear_pages; - ggtt->base.insert_page = gen8_ggtt_insert_page; - ggtt->base.clear_range = nop_clear_range; + ggtt->vm.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; + ggtt->vm.cleanup = gen6_gmch_remove; + ggtt->vm.insert_page = gen8_ggtt_insert_page; + ggtt->vm.clear_range = nop_clear_range; if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) - ggtt->base.clear_range = gen8_ggtt_clear_range; + ggtt->vm.clear_range = gen8_ggtt_clear_range; - ggtt->base.insert_entries = gen8_ggtt_insert_entries; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries; /* Serialize GTT updates with aperture access on BXT if VT-d is on. 
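gen8_gmch_probe() above derives ggtt->vm.total from the size of the PTE array: each 8-byte gen8 PTE maps one 4 KiB page, so the addressable range is (bytes of PTEs / 8) << PAGE_SHIFT. A standalone arithmetic sketch with an example value (8 MiB of PTEs maps 4 GiB):

/* Standalone sketch of the total-size arithmetic in gen8_gmch_probe():
 * the GGTT is an array of 8-byte PTEs, each mapping one 4 KiB page, so the
 * addressable range is (gtt_bytes / 8) << PAGE_SHIFT. Values are examples. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t gtt_bytes = 8ULL << 20;                 /* 8 MiB of PTEs */
	uint64_t pte_size  = sizeof(uint64_t);           /* gen8_pte_t */
	uint64_t total     = (gtt_bytes / pte_size) << PAGE_SHIFT;

	/* 8 MiB / 8 bytes = 1M PTEs; 1M pages * 4 KiB = 4 GiB of GGTT */
	printf("GGTT maps %llu GiB\n",
	       (unsigned long long)(total >> 30));
	return 0;
}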
*/ if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { - ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; - ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL; - if (ggtt->base.clear_range != nop_clear_range) - ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL; + ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; + ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; + if (ggtt->vm.clear_range != nop_clear_range) + ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; } ggtt->invalidate = gen6_ggtt_invalidate; + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + setup_private_pat(dev_priv); return ggtt_probe_common(ggtt, size); @@ -3380,7 +3437,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) static int gen6_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = ggtt->base.i915; + struct drm_i915_private *dev_priv = ggtt->vm.i915; struct pci_dev *pdev = dev_priv->drm.pdev; unsigned int size; u16 snb_gmch_ctl; @@ -3407,29 +3464,30 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt) pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); size = gen6_get_total_gtt_size(snb_gmch_ctl); - ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT; + ggtt->vm.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT; - ggtt->base.clear_range = gen6_ggtt_clear_range; - ggtt->base.insert_page = gen6_ggtt_insert_page; - ggtt->base.insert_entries = gen6_ggtt_insert_entries; - ggtt->base.bind_vma = ggtt_bind_vma; - ggtt->base.unbind_vma = ggtt_unbind_vma; - ggtt->base.set_pages = ggtt_set_pages; - ggtt->base.clear_pages = clear_pages; - ggtt->base.cleanup = gen6_gmch_remove; + ggtt->vm.clear_range = gen6_ggtt_clear_range; + ggtt->vm.insert_page = gen6_ggtt_insert_page; + ggtt->vm.insert_entries = gen6_ggtt_insert_entries; + ggtt->vm.cleanup = gen6_gmch_remove; ggtt->invalidate = gen6_ggtt_invalidate; if (HAS_EDRAM(dev_priv)) - ggtt->base.pte_encode = iris_pte_encode; + ggtt->vm.pte_encode = iris_pte_encode; else if (IS_HASWELL(dev_priv)) - ggtt->base.pte_encode = hsw_pte_encode; + ggtt->vm.pte_encode = hsw_pte_encode; else if (IS_VALLEYVIEW(dev_priv)) - ggtt->base.pte_encode = byt_pte_encode; + ggtt->vm.pte_encode = byt_pte_encode; else if (INTEL_GEN(dev_priv) >= 7) - ggtt->base.pte_encode = ivb_pte_encode; + ggtt->vm.pte_encode = ivb_pte_encode; else - ggtt->base.pte_encode = snb_pte_encode; + ggtt->vm.pte_encode = snb_pte_encode; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; return ggtt_probe_common(ggtt, size); } @@ -3441,7 +3499,7 @@ static void i915_gmch_remove(struct i915_address_space *vm) static int i915_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = ggtt->base.i915; + struct drm_i915_private *dev_priv = ggtt->vm.i915; phys_addr_t gmadr_base; int ret; @@ -3451,26 +3509,25 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return -EIO; } - intel_gtt_get(&ggtt->base.total, - &gmadr_base, - &ggtt->mappable_end); + intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end); ggtt->gmadr = (struct resource) DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end); ggtt->do_idle_maps = needs_idle_maps(dev_priv); - ggtt->base.insert_page = i915_ggtt_insert_page; - ggtt->base.insert_entries = i915_ggtt_insert_entries; - ggtt->base.clear_range = 
i915_ggtt_clear_range; - ggtt->base.bind_vma = ggtt_bind_vma; - ggtt->base.unbind_vma = ggtt_unbind_vma; - ggtt->base.set_pages = ggtt_set_pages; - ggtt->base.clear_pages = clear_pages; - ggtt->base.cleanup = i915_gmch_remove; + ggtt->vm.insert_page = i915_ggtt_insert_page; + ggtt->vm.insert_entries = i915_ggtt_insert_entries; + ggtt->vm.clear_range = i915_ggtt_clear_range; + ggtt->vm.cleanup = i915_gmch_remove; ggtt->invalidate = gmch_ggtt_invalidate; + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + if (unlikely(ggtt->do_idle_maps)) DRM_INFO("applying Ironlake quirks for intel_iommu\n"); @@ -3486,8 +3543,8 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - ggtt->base.i915 = dev_priv; - ggtt->base.dma = &dev_priv->drm.pdev->dev; + ggtt->vm.i915 = dev_priv; + ggtt->vm.dma = &dev_priv->drm.pdev->dev; if (INTEL_GEN(dev_priv) <= 5) ret = i915_gmch_probe(ggtt); @@ -3504,27 +3561,29 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) * restriction! */ if (USES_GUC(dev_priv)) { - ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); - ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total); + ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP); + ggtt->mappable_end = + min_t(u64, ggtt->mappable_end, ggtt->vm.total); } - if ((ggtt->base.total - 1) >> 32) { + if ((ggtt->vm.total - 1) >> 32) { DRM_ERROR("We never expected a Global GTT with more than 32bits" " of address space! Found %lldM!\n", - ggtt->base.total >> 20); - ggtt->base.total = 1ULL << 32; - ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total); + ggtt->vm.total >> 20); + ggtt->vm.total = 1ULL << 32; + ggtt->mappable_end = + min_t(u64, ggtt->mappable_end, ggtt->vm.total); } - if (ggtt->mappable_end > ggtt->base.total) { + if (ggtt->mappable_end > ggtt->vm.total) { DRM_ERROR("mappable aperture extends past end of GGTT," " aperture=%pa, total=%llx\n", - &ggtt->mappable_end, ggtt->base.total); - ggtt->mappable_end = ggtt->base.total; + &ggtt->mappable_end, ggtt->vm.total); + ggtt->mappable_end = ggtt->vm.total; } /* GMADR is the PCI mmio aperture into the global GTT. */ - DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20); + DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20); DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); DRM_DEBUG_DRIVER("DSM size = %lluM\n", (u64)resource_size(&intel_graphics_stolen_res) >> 20); @@ -3543,7 +3602,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - INIT_LIST_HEAD(&dev_priv->vm_list); + stash_init(&dev_priv->mm.wc_stash); /* Note that we use page colouring to enforce a guard page at the * end of the address space. This is required as the CS may prefetch @@ -3551,9 +3610,13 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) * and beyond the end of the GTT if we do not provide a guard. 
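The probe path above clamps the reported sizes before use: the GGTT is limited to what the GuC can address, kept within 32 bits, and the mappable aperture is never allowed past the end of the GTT. A standalone sketch of those checks; TOY_GUC_TOP is an invented stand-in for GUC_GGTT_TOP and the probed values are examples only:

/* Standalone sketch of the sanity clamping done after probing the GGTT size
 * in i915_ggtt_probe_hw(). */
#include <stdint.h>
#include <stdio.h>

#define TOY_GUC_TOP 0xFEE00000ULL   /* invented stand-in for GUC_GGTT_TOP */

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	uint64_t total = 1ULL << 33;        /* pretend the probe said 8 GiB */
	uint64_t mappable_end = 1ULL << 32;
	int uses_guc = 1;

	if (uses_guc) {
		total = min_u64(total, TOY_GUC_TOP);
		mappable_end = min_u64(mappable_end, total);
	}

	if ((total - 1) >> 32) {            /* never expect >32b of GGTT */
		total = 1ULL << 32;
		mappable_end = min_u64(mappable_end, total);
	}

	if (mappable_end > total)
		mappable_end = total;

	printf("GGTT %llu MiB, mappable %llu MiB\n",
	       (unsigned long long)(total >> 20),
	       (unsigned long long)(mappable_end >> 20));
	return 0;
}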
*/ mutex_lock(&dev_priv->drm.struct_mutex); - i915_address_space_init(&ggtt->base, dev_priv, "[global]"); + i915_address_space_init(&ggtt->vm, dev_priv); + + /* Only VLV supports read-only GGTT mappings */ + ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); + if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) - ggtt->base.mm.color_adjust = i915_gtt_color_adjust; + ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; mutex_unlock(&dev_priv->drm.struct_mutex); if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, @@ -3576,7 +3639,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) return 0; out_gtt_cleanup: - ggtt->base.cleanup(&ggtt->base); + ggtt->vm.cleanup(&ggtt->vm); return ret; } @@ -3610,34 +3673,35 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915) void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) { struct i915_ggtt *ggtt = &dev_priv->ggtt; - struct drm_i915_gem_object *obj, *on; + struct i915_vma *vma, *vn; i915_check_and_clear_faults(dev_priv); /* First fill our portion of the GTT with scratch pages */ - ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total); + ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */ + ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) { - bool ggtt_bound = false; - struct i915_vma *vma; + GEM_BUG_ON(!list_empty(&ggtt->vm.active_list)); + list_for_each_entry_safe(vma, vn, &ggtt->vm.inactive_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; - for_each_ggtt_vma(vma, obj) { - if (!i915_vma_unbind(vma)) - continue; + if (!(vma->flags & I915_VMA_GLOBAL_BIND)) + continue; - WARN_ON(i915_vma_bind(vma, obj->cache_level, - PIN_UPDATE)); - ggtt_bound = true; - } + if (!i915_vma_unbind(vma)) + continue; - if (ggtt_bound) + WARN_ON(i915_vma_bind(vma, + obj ? 
obj->cache_level : 0, + PIN_UPDATE)); + if (obj) WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); } - ggtt->base.closed = false; + ggtt->vm.closed = false; + i915_ggtt_invalidate(dev_priv); if (INTEL_GEN(dev_priv) >= 8) { struct intel_ppat *ppat = &dev_priv->ppat; @@ -3646,23 +3710,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) dev_priv->ppat.update_hw(dev_priv); return; } - - if (USES_PPGTT(dev_priv)) { - struct i915_address_space *vm; - - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - struct i915_hw_ppgtt *ppgtt; - - if (i915_is_ggtt(vm)) - ppgtt = dev_priv->mm.aliasing_ppgtt; - else - ppgtt = i915_vm_to_ppgtt(vm); - - gen6_write_page_range(ppgtt, 0, ppgtt->base.total); - } - } - - i915_ggtt_invalidate(dev_priv); } static struct scatterlist * @@ -3880,7 +3927,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm, GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); GEM_BUG_ON(range_overflows(offset, size, vm->total)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); + GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); node->size = size; @@ -3977,7 +4024,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, GEM_BUG_ON(start >= end); GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); + GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); if (unlikely(range_overflows(start, size, end))) @@ -3988,7 +4035,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, mode = DRM_MM_INSERT_BEST; if (flags & PIN_HIGH) - mode = DRM_MM_INSERT_HIGH; + mode = DRM_MM_INSERT_HIGHEST; if (flags & PIN_MAPPABLE) mode = DRM_MM_INSERT_LOW; @@ -4008,6 +4055,15 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, if (err != -ENOSPC) return err; + if (mode & DRM_MM_INSERT_ONCE) { + err = drm_mm_insert_node_in_range(&vm->mm, node, + size, alignment, color, + start, end, + DRM_MM_INSERT_BEST); + if (err != -ENOSPC) + return err; + } + if (flags & PIN_NOEVICT) return -ENOSPC; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index aec4f73574f4..2a116a91420b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -58,6 +58,7 @@ struct drm_i915_file_private; struct drm_i915_fence_reg; +struct i915_vma; typedef u32 gen6_pte_t; typedef u64 gen8_pte_t; @@ -65,7 +66,7 @@ typedef u64 gen8_pde_t; typedef u64 gen8_ppgtt_pdpe_t; typedef u64 gen8_ppgtt_pml4e_t; -#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT) +#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -254,6 +255,26 @@ struct i915_pml4 { struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4]; }; +struct i915_vma_ops { + /* Map an object into an address space with the given cache flags. */ + int (*bind_vma)(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); + /* + * Unmap an object from an address space. This usually consists of + * setting the valid PTE entries to a reserved scratch page. 
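i915_gem_gtt_insert() above now asks for DRM_MM_INSERT_HIGHEST on PIN_HIGH and, when that single top-down pass reports -ENOSPC, retries with a best-fit search before considering eviction. A standalone sketch of just that control flow; the allocator is faked and every toy_* name is invented:

/* Standalone sketch of the allocation fallback added to
 * i915_gem_gtt_insert(): a quick pass first, a thorough pass second,
 * eviction only as a last resort. */
#include <errno.h>
#include <stdio.h>

enum toy_mode { TOY_BEST, TOY_HIGHEST_ONCE };

/* Pretend the quick top-down pass fails but the thorough search succeeds. */
static int toy_mm_insert(enum toy_mode mode)
{
	return mode == TOY_HIGHEST_ONCE ? -ENOSPC : 0;
}

static int toy_gtt_insert(int pin_high, int noevict)
{
	enum toy_mode mode = pin_high ? TOY_HIGHEST_ONCE : TOY_BEST;
	int err;

	err = toy_mm_insert(mode);
	if (err != -ENOSPC)
		return err;

	if (mode == TOY_HIGHEST_ONCE) {        /* retry without the shortcut */
		err = toy_mm_insert(TOY_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (noevict)
		return -ENOSPC;

	printf("would evict something and try again\n");
	return 0;
}

int main(void)
{
	return toy_gtt_insert(1, 0);
}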
+ */ + void (*unbind_vma)(struct i915_vma *vma); + + int (*set_pages)(struct i915_vma *vma); + void (*clear_pages)(struct i915_vma *vma); +}; + +struct pagestash { + spinlock_t lock; + struct pagevec pvec; +}; + struct i915_address_space { struct drm_mm mm; struct drm_i915_private *i915; @@ -267,12 +288,13 @@ struct i915_address_space { * assign blame. */ struct drm_i915_file_private *file; - struct list_head global_link; u64 total; /* size addr space maps (ex. 2GB for ggtt) */ u64 reserved; /* size addr space reserved */ bool closed; + struct mutex mutex; /* protects vma and our lists */ + struct i915_page_dma scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; @@ -308,8 +330,13 @@ struct i915_address_space { */ struct list_head unbound_list; - struct pagevec free_pages; - bool pt_kmap_wc; + struct pagestash free_pages; + + /* Some systems require uncached updates of the page directories */ + bool pt_kmap_wc:1; + + /* Some systems support read-only mappings for GGTT and/or PPGTT */ + bool has_read_only:1; /* FIXME: Need a more generic return type */ gen6_pte_t (*pte_encode)(dma_addr_t addr, @@ -331,15 +358,8 @@ struct i915_address_space { enum i915_cache_level cache_level, u32 flags); void (*cleanup)(struct i915_address_space *vm); - /** Unmap an object from an address space. This usually consists of - * setting the valid PTE entries to a reserved scratch page. */ - void (*unbind_vma)(struct i915_vma *vma); - /* Map an object into an address space with the given cache flags. */ - int (*bind_vma)(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags); - int (*set_pages)(struct i915_vma *vma); - void (*clear_pages)(struct i915_vma *vma); + + struct i915_vma_ops vma_ops; I915_SELFTEST_DECLARE(struct fault_attr fault_attr); I915_SELFTEST_DECLARE(bool scrub_64K); @@ -367,7 +387,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm) * the spec. */ struct i915_ggtt { - struct i915_address_space base; + struct i915_address_space vm; struct io_mapping iomap; /* Mapping to our CPU mappable region */ struct resource gmadr; /* GMADR resource */ @@ -385,9 +405,9 @@ struct i915_ggtt { }; struct i915_hw_ppgtt { - struct i915_address_space base; + struct i915_address_space vm; struct kref ref; - struct drm_mm_node node; + unsigned long pd_dirty_rings; union { struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */ @@ -395,13 +415,28 @@ struct i915_hw_ppgtt { struct i915_page_directory pd; /* GEN6-7 */ }; + void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); +}; + +struct gen6_hw_ppgtt { + struct i915_hw_ppgtt base; + + struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; + gen6_pte_t scratch_pte; - int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, - struct i915_request *rq); - void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); + unsigned int pin_count; + bool scan_for_unused_pt; }; +#define __to_gen6_ppgtt(base) container_of(base, struct gen6_hw_ppgtt, base) + +static inline struct gen6_hw_ppgtt *to_gen6_ppgtt(struct i915_hw_ppgtt *base) +{ + BUILD_BUG_ON(offsetof(struct gen6_hw_ppgtt, base)); + return __to_gen6_ppgtt(base); +} + /* * gen6_for_each_pde() iterates over every pde from start until start+length. 
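to_gen6_ppgtt() above recovers the gen6-only wrapper from a generic i915_hw_ppgtt pointer with container_of(), and the BUILD_BUG_ON insists the embedded base stays the first member. A standalone sketch of the same subclassing idiom, with invented toy_* names and a local container_of definition:

/* Standalone sketch of the subclassing pattern behind to_gen6_ppgtt(): the
 * gen6 structure embeds the generic one as its first member, and
 * container_of() recovers the derived pointer from the base pointer. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_ppgtt {            /* generic part, what most code sees */
	unsigned long total;
};

struct toy_gen6_ppgtt {       /* gen6-only bookkeeping wrapped around it */
	struct toy_ppgtt base;
	unsigned int pin_count;
};

static struct toy_gen6_ppgtt *to_toy_gen6(struct toy_ppgtt *base)
{
	/* the cast is only safe while base is the first member */
	_Static_assert(offsetof(struct toy_gen6_ppgtt, base) == 0,
		       "base must stay first");
	return container_of(base, struct toy_gen6_ppgtt, base);
}

int main(void)
{
	struct toy_gen6_ppgtt gen6 = { .base.total = 1UL << 31, .pin_count = 1 };
	struct toy_ppgtt *base = &gen6.base;

	printf("pin_count via downcast: %u\n", to_toy_gen6(base)->pin_count);
	return 0;
}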
* If start and start+length are not perfectly divisible, the macro will round @@ -440,8 +475,8 @@ static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift) const u64 mask = ~((1ULL << pde_shift) - 1); u64 end; - WARN_ON(length == 0); - WARN_ON(offset_in_page(addr|length)); + GEM_BUG_ON(length == 0); + GEM_BUG_ON(offset_in_page(addr | length)); end = addr + length; @@ -543,7 +578,7 @@ static inline struct i915_ggtt * i915_vm_to_ggtt(struct i915_address_space *vm) { GEM_BUG_ON(!i915_is_ggtt(vm)); - return container_of(vm, struct i915_ggtt, base); + return container_of(vm, struct i915_ggtt, vm); } #define INTEL_MAX_PPAT_ENTRIES 8 @@ -591,8 +626,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *fpriv, - const char *name); + struct drm_i915_file_private *fpriv); void i915_ppgtt_close(struct i915_address_space *vm); static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) { @@ -605,6 +639,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) kref_put(&ppgtt->ref, i915_ppgtt_release); } +int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); +void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base); + void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 54f00b350779..83e5e01fa9ea 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -141,7 +141,6 @@ struct drm_i915_gem_object { * Is the object to be mapped as read-only to the GPU * Only honoured if hardware has relevant pte bit */ - unsigned long gt_ro:1; unsigned int cache_level:3; unsigned int cache_coherent:2; #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0) @@ -268,7 +267,6 @@ struct drm_i915_gem_object { union { struct i915_gem_userptr { uintptr_t ptr; - unsigned read_only :1; struct i915_mm_struct *mm; struct i915_mmu_object *mmu_object; @@ -337,26 +335,17 @@ __attribute__((nonnull)) static inline struct drm_i915_gem_object * i915_gem_object_get(struct drm_i915_gem_object *obj) { - drm_gem_object_reference(&obj->base); + drm_gem_object_get(&obj->base); return obj; } -__deprecated -extern void drm_gem_object_reference(struct drm_gem_object *); - __attribute__((nonnull)) static inline void i915_gem_object_put(struct drm_i915_gem_object *obj) { - __drm_gem_object_unreference(&obj->base); + __drm_gem_object_put(&obj->base); } -__deprecated -extern void drm_gem_object_unreference(struct drm_gem_object *); - -__deprecated -extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); - static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) { reservation_object_lock(obj->resv, NULL); @@ -367,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj) reservation_object_unlock(obj->resv); } +static inline void +i915_gem_object_set_readonly(struct drm_i915_gem_object *obj) +{ + obj->base.vma_node.readonly = true; +} + +static inline bool +i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) +{ + return obj->base.vma_node.readonly; +} + static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { diff 
--git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 1036e8686916..90baf9086d0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) if (IS_ERR(so.obj)) return PTR_ERR(so.obj); - so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL); + so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(so.vma)) { err = PTR_ERR(so.vma); goto err_obj; @@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) goto err_unpin; } - i915_vma_move_to_active(so.vma, rq, 0); + err = i915_vma_move_to_active(so.vma, rq, 0); err_unpin: i915_vma_unpin(so.vma); err_vma: diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 5757fb7c4b5a..ea90d3a0d511 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -23,6 +23,7 @@ */ #include <linux/oom.h> +#include <linux/sched/mm.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/swap.h> @@ -172,7 +173,9 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will free as much as we can and hope to get a second chance. */ if (flags & I915_SHRINK_ACTIVE) - i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); trace_i915_gem_shrink(i915, target, flags); i915_retire_requests(i915); @@ -392,7 +395,8 @@ shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock, unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms); do { - if (i915_gem_wait_for_idle(i915, 0) == 0 && + if (i915_gem_wait_for_idle(i915, + 0, MAX_SCHEDULE_TIMEOUT) == 0 && shrinker_lock(i915, unlock)) break; @@ -466,7 +470,9 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; /* Force everything onto the inactive lists */ - ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + ret = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -480,7 +486,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr /* We also want to clear any cached iomaps as they wrap vmap */ list_for_each_entry_safe(vma, next, - &i915->ggtt.base.inactive_list, vm_link) { + &i915->ggtt.vm.inactive_list, vm_link) { unsigned long count = vma->node.size >> PAGE_SHIFT; if (vma->iomap && i915_vma_unbind(vma) == 0) freed_pages += count; @@ -526,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915) WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); unregister_shrinker(&i915->mm.shrinker); } + +void i915_gem_shrinker_taints_mutex(struct mutex *mutex) +{ + if (!IS_ENABLED(CONFIG_LOCKDEP)) + return; + + fs_reclaim_acquire(GFP_KERNEL); + mutex_lock(mutex); + mutex_unlock(mutex); + fs_reclaim_release(GFP_KERNEL); +} diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ad949cc30928..53440bf87650 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -254,6 +254,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *dev_priv, switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) { default: MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK); + /* fall through */ case GEN7_STOLEN_RESERVED_1M: *size = 1024 * 1024; break; @@ -343,6 +344,35 @@ static void 
bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, *size = stolen_top - *base; } +static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv, + resource_size_t *base, + resource_size_t *size) +{ + u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED); + + DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val); + + *base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK; + + switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) { + case GEN8_STOLEN_RESERVED_1M: + *size = 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_2M: + *size = 2 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_4M: + *size = 4 * 1024 * 1024; + break; + case GEN8_STOLEN_RESERVED_8M: + *size = 8 * 1024 * 1024; + break; + default: + *size = 8 * 1024 * 1024; + MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK); + } +} + int i915_gem_init_stolen(struct drm_i915_private *dev_priv) { resource_size_t reserved_base, stolen_top; @@ -399,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) gen7_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; - default: + case 8: + case 9: + case 10: if (IS_LP(dev_priv)) chv_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); @@ -407,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) bdw_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; + case 11: + default: + icl_get_stolen_reserved(dev_priv, &reserved_base, + &reserved_size); + break; } /* @@ -642,7 +679,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv if (ret) goto err; - vma = i915_vma_instance(obj, &ggtt->base, NULL); + vma = i915_vma_instance(obj, &ggtt->vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_pages; @@ -653,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv * setting up the GTT space. The actual reservation will occur * later. */ - ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node, + ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, size, gtt_offset, obj->cache_level, 0); if (ret) { @@ -666,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv vma->pages = obj->mm.pages; vma->flags |= I915_VMA_GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); + list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list); spin_lock(&dev_priv->mm.obj_lock); list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 854bd51b9478..dcd6e230d16a 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) struct mm_struct *mm = obj->userptr.mm->mm; unsigned int flags = 0; - if (!obj->userptr.read_only) + if (!i915_gem_object_is_readonly(obj)) flags |= FOLL_WRITE; ret = -EFAULT; @@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) if (pvec) /* defer to worker if malloc fails */ pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, - !obj->userptr.read_only, + !i915_gem_object_is_readonly(obj), pvec); } @@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -EFAULT; if (args->flags & I915_USERPTR_READ_ONLY) { - /* On almost all of the current hw, we cannot tell the GPU that a - * page is readonly, so this is just a placeholder in the uAPI. 
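icl_get_stolen_reserved() above decodes the reserved-memory size from a few register bits with a mask, a switch, and a conservative default for unknown encodings. A standalone sketch of that decode; the TOY_* masks and field values are invented and do not reflect the real GEN6_STOLEN_RESERVED layout:

/* Standalone sketch of the register decode in icl_get_stolen_reserved():
 * mask out the size field, switch on the known encodings, fall back to the
 * largest size when the encoding is unrecognised. */
#include <stdint.h>
#include <stdio.h>

#define TOY_SIZE_MASK  (0x3u << 7)
#define TOY_SIZE_1M    (0x1u << 7)
#define TOY_SIZE_2M    (0x2u << 7)
#define TOY_SIZE_8M    (0x3u << 7)

static uint32_t toy_decode_reserved_size(uint64_t reg_val)
{
	switch (reg_val & TOY_SIZE_MASK) {
	case TOY_SIZE_1M: return 1u << 20;
	case TOY_SIZE_2M: return 2u << 20;
	case TOY_SIZE_8M: return 8u << 20;
	default:          return 8u << 20;   /* largest size as safe fallback */
	}
}

int main(void)
{
	printf("reserved: %u MiB\n", toy_decode_reserved_size(TOY_SIZE_2M) >> 20);
	return 0;
}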
+ struct i915_hw_ppgtt *ppgtt; + + /* + * On almost all of the older hw, we cannot tell the GPU that + * a page is readonly. */ - return -ENODEV; + ppgtt = dev_priv->kernel_context->ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) + return -ENODEV; } obj = i915_gem_object_alloc(dev_priv); @@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); obj->userptr.ptr = args->user_ptr; - obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY); + if (args->flags & I915_USERPTR_READ_ONLY) + i915_gem_object_set_readonly(obj); /* And keep a pointer to the current->mm for resolving the user pages * at binding. This means that we need to hook into the mmu_notifier diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index df234dc23274..f7f2aa71d8d9 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -31,6 +31,7 @@ #include <linux/stop_machine.h> #include <linux/zlib.h> #include <drm/drm_print.h> +#include <linux/ascii85.h> #include "i915_gpu_error.h" #include "i915_drv.h" @@ -335,21 +336,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, struct drm_i915_error_buffer *err, int count) { - int i; - err_printf(m, "%s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x [ ", + err_printf(m, " %08x_%08x %8u %02x %02x %02x", upper_32_bits(err->gtt_offset), lower_32_bits(err->gtt_offset), err->size, err->read_domains, - err->write_domain); - for (i = 0; i < I915_NUM_ENGINES; i++) - err_printf(m, "%02x ", err->rseqno[i]); - - err_printf(m, "] %02x", err->wseqno); + err->write_domain, + err->wseqno); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); @@ -522,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) va_end(args); } -static int -ascii85_encode_len(int len) -{ - return DIV_ROUND_UP(len, 4); -} - -static bool -ascii85_encode(u32 in, char *out) -{ - int i; - - if (in == 0) - return false; - - out[5] = '\0'; - for (i = 5; i--; ) { - out[i] = '!' 
+ in % 85; - in /= 85; - } - - return true; -} - static void print_error_obj(struct drm_i915_error_state_buf *m, struct intel_engine_cs *engine, const char *name, struct drm_i915_error_object *obj) { - char out[6]; + char out[ASCII85_BUFSZ]; int page; if (!obj) @@ -572,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, len -= obj->unused; len = ascii85_encode_len(len); - for (i = 0; i < len; i++) { - if (ascii85_encode(obj->pages[page][i], out)) - err_puts(m, out); - else - err_puts(m, "z"); - } + for (i = 0; i < len; i++) + err_puts(m, ascii85_encode(obj->pages[page][i], out)); } err_puts(m, "\n"); } @@ -973,8 +942,7 @@ i915_error_object_create(struct drm_i915_private *i915, void __iomem *s; int ret; - ggtt->base.insert_page(&ggtt->base, dma, slot, - I915_CACHE_NONE, 0); + ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); ret = compress_page(&compress, (void __force *)s, dst); @@ -993,7 +961,7 @@ unwind: out: compress_fini(&compress, dst); - ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE); + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); return dst; } @@ -1022,13 +990,10 @@ static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) { struct drm_i915_gem_object *obj = vma->obj; - int i; err->size = obj->base.size; err->name = obj->base.name; - for (i = 0; i < I915_NUM_ENGINES; i++) - err->rseqno[i] = __active_get_seqno(&vma->last_read[i]); err->wseqno = __active_get_seqno(&obj->frontbuffer_write); err->engine = __active_get_engine_id(&obj->frontbuffer_write); @@ -1051,6 +1016,9 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err, int i = 0; list_for_each_entry(vma, head, vm_link) { + if (!vma->obj) + continue; + if (pinned_only && !i915_vma_is_pinned(vma)) continue; @@ -1287,9 +1255,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error, static void record_request(struct i915_request *request, struct drm_i915_error_request *erq) { - erq->context = request->ctx->hw_id; + struct i915_gem_context *ctx = request->gem_context; + + erq->context = ctx->hw_id; erq->sched_attr = request->sched.attr; - erq->ban_score = atomic_read(&request->ctx->ban_score); + erq->ban_score = atomic_read(&ctx->ban_score); erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->start = i915_ggtt_offset(request->ring->vma); @@ -1297,7 +1267,7 @@ static void record_request(struct i915_request *request, erq->tail = request->tail; rcu_read_lock(); - erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0; + erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0; rcu_read_unlock(); } @@ -1461,12 +1431,12 @@ static void gem_record_rings(struct i915_gpu_state *error) request = i915_gem_find_active_request(engine); if (request) { + struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; - ee->vm = request->ctx->ppgtt ? - &request->ctx->ppgtt->base : &ggtt->base; + ee->vm = ctx->ppgtt ? 
&ctx->ppgtt->vm : &ggtt->vm; - record_context(&ee->context, request->ctx); + record_context(&ee->context, ctx); /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten @@ -1483,11 +1453,10 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->ctx = i915_error_object_create(i915, - to_intel_context(request->ctx, - engine)->state); + request->hw_context->state); error->simulated |= - i915_gem_context_no_error_capture(request->ctx); + i915_gem_context_no_error_capture(ctx); ee->rq_head = request->head; ee->rq_post = request->postfix; @@ -1563,17 +1532,17 @@ static void capture_active_buffers(struct i915_gpu_state *error) static void capture_pinned_buffers(struct i915_gpu_state *error) { - struct i915_address_space *vm = &error->i915->ggtt.base; + struct i915_address_space *vm = &error->i915->ggtt.vm; struct drm_i915_error_buffer *bo; struct i915_vma *vma; int count_inactive, count_active; count_inactive = 0; - list_for_each_entry(vma, &vm->active_list, vm_link) + list_for_each_entry(vma, &vm->inactive_list, vm_link) count_inactive++; count_active = 0; - list_for_each_entry(vma, &vm->inactive_list, vm_link) + list_for_each_entry(vma, &vm->active_list, vm_link) count_active++; bo = NULL; @@ -1667,7 +1636,16 @@ static void capture_reg_state(struct i915_gpu_state *error) } /* 4: Everything else */ - if (INTEL_GEN(dev_priv) >= 8) { + if (INTEL_GEN(dev_priv) >= 11) { + error->ier = I915_READ(GEN8_DE_MISC_IER); + error->gtier[0] = I915_READ(GEN11_RENDER_COPY_INTR_ENABLE); + error->gtier[1] = I915_READ(GEN11_VCS_VECS_INTR_ENABLE); + error->gtier[2] = I915_READ(GEN11_GUC_SG_INTR_ENABLE); + error->gtier[3] = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); + error->gtier[4] = I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE); + error->gtier[5] = I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE); + error->ngtier = 6; + } else if (INTEL_GEN(dev_priv) >= 8) { error->ier = I915_READ(GEN8_DE_MISC_IER); for (i = 0; i < 4; i++) error->gtier[i] = I915_READ(GEN8_GT_IER(i)); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index dac0f8c4c1cf..f893a4e8b783 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -58,7 +58,7 @@ struct i915_gpu_state { u32 eir; u32 pgtbl_er; u32 ier; - u32 gtier[4], ngtier; + u32 gtier[6], ngtier; u32 ccid; u32 derrmr; u32 forcewake; @@ -177,7 +177,7 @@ struct i915_gpu_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 rseqno[I915_NUM_ENGINES], wseqno; + u32 wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c16cb025755e..90628a47ae17 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -115,6 +115,22 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC }; +static const u32 hpd_gen11[HPD_NUM_PINS] = { + [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, + [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, + [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, + [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG +}; + +static const u32 hpd_icp[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, + [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, + [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, + [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, + [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, + [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP +}; + /* IIR can theoretically queue up two events. Be paranoid. 
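/*
 * Illustrative sketch (not part of the patch): the "be paranoid" note
 * above is why the GEN*_IRQ_RESET helpers clear IIR twice - the hardware
 * can latch one event in IIR while a second sits queued behind it.
 * Generic reset sequence; EXAMPLE_IMR/EXAMPLE_IIR are placeholders:
 *
 *	I915_WRITE(EXAMPLE_IMR, 0xffffffff);	// mask everything first
 *	POSTING_READ(EXAMPLE_IMR);
 *	I915_WRITE(EXAMPLE_IIR, 0xffffffff);	// clear the visible event
 *	I915_WRITE(EXAMPLE_IIR, 0xffffffff);	// ...and the one queued behind it
 *	POSTING_READ(EXAMPLE_IIR);
 */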
*/ #define GEN8_IRQ_RESET_NDX(type, which) do { \ I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ @@ -247,9 +263,9 @@ static u32 gen11_gt_engine_identity(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit); -bool gen11_reset_one_iir(struct drm_i915_private * const i915, - const unsigned int bank, - const unsigned int bit) +static bool gen11_reset_one_iir(struct drm_i915_private * const i915, + const unsigned int bank, + const unsigned int bit) { void __iomem * const regs = i915->regs; u32 dw; @@ -1138,21 +1154,21 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) static void notify_ring(struct intel_engine_cs *engine) { + const u32 seqno = intel_engine_get_seqno(engine); struct i915_request *rq = NULL; + struct task_struct *tsk = NULL; struct intel_wait *wait; - if (!engine->breadcrumbs.irq_armed) + if (unlikely(!engine->breadcrumbs.irq_armed)) return; - atomic_inc(&engine->irq_count); - set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); + rcu_read_lock(); spin_lock(&engine->breadcrumbs.irq_lock); wait = engine->breadcrumbs.irq_wait; if (wait) { - bool wakeup = engine->irq_seqno_barrier; - - /* We use a callback from the dma-fence to submit + /* + * We use a callback from the dma-fence to submit * requests after waiting on our own requests. To * ensure minimum delay in queuing the next request to * hardware, signal the fence now rather than wait for @@ -1163,19 +1179,26 @@ static void notify_ring(struct intel_engine_cs *engine) * and to handle coalescing of multiple seqno updates * and many waiters. */ - if (i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno)) { + if (i915_seqno_passed(seqno, wait->seqno)) { struct i915_request *waiter = wait->request; - wakeup = true; - if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + if (waiter && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &waiter->fence.flags) && intel_wait_check_request(wait, waiter)) rq = i915_request_get(waiter); + + tsk = wait->tsk; + } else { + if (engine->irq_seqno_barrier && + i915_seqno_passed(seqno, wait->seqno - 1)) { + set_bit(ENGINE_IRQ_BREADCRUMB, + &engine->irq_posted); + tsk = wait->tsk; + } } - if (wakeup) - wake_up_process(wait->tsk); + engine->breadcrumbs.irq_count++; } else { if (engine->breadcrumbs.irq_armed) __intel_engine_disarm_breadcrumbs(engine); @@ -1183,11 +1206,19 @@ static void notify_ring(struct intel_engine_cs *engine) spin_unlock(&engine->breadcrumbs.irq_lock); if (rq) { - dma_fence_signal(&rq->fence); + spin_lock(&rq->lock); + dma_fence_signal_locked(&rq->fence); GEM_BUG_ON(!i915_request_completed(rq)); + spin_unlock(&rq->lock); + i915_request_put(rq); } + if (tsk && tsk->state & TASK_NORMAL) + wake_up_process(tsk); + + rcu_read_unlock(); + trace_intel_engine_notify(engine, wait); } @@ -1234,9 +1265,9 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) c0 = max(render, media); c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ - if (c0 > time * rps->up_threshold) + if (c0 > time * rps->power.up_threshold) events = GEN6_PM_RP_UP_THRESHOLD; - else if (c0 < time * rps->down_threshold) + else if (c0 < time * rps->power.down_threshold) events = GEN6_PM_RP_DOWN_THRESHOLD; } @@ -1462,14 +1493,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) { - struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; - if (iir & GT_CONTEXT_SWITCH_INTERRUPT) { - if 
(READ_ONCE(engine->execlists.active)) - tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST, - &engine->irq_posted); - } + if (iir & GT_CONTEXT_SWITCH_INTERRUPT) + tasklet = true; if (iir & GT_RENDER_USER_INTERRUPT) { notify_ring(engine); @@ -1477,7 +1504,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) } if (tasklet) - tasklet_hi_schedule(&execlists->tasklet); + tasklet_hi_schedule(&engine->execlists.tasklet); } static void gen8_gt_irq_ack(struct drm_i915_private *i915, @@ -1549,78 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, } } -static bool bxt_port_hotplug_long_detect(enum port port, u32 val) +static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_C: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); + case HPD_PORT_D: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); + case HPD_PORT_E: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); + case HPD_PORT_F: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); + default: + return false; + } +} + +static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; - case PORT_B: + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; default: return false; } } -static bool spt_port_hotplug2_long_detect(enum port port, u32 val) +static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + return val & ICP_DDIA_HPD_LONG_DETECT; + case HPD_PORT_B: + return val & ICP_DDIB_HPD_LONG_DETECT; + default: + return false; + } +} + +static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_C: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); + case HPD_PORT_D: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); + case HPD_PORT_E: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); + case HPD_PORT_F: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); + default: + return false; + } +} + +static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_E: + switch (pin) { + case HPD_PORT_E: return val & PORTE_HOTPLUG_LONG_DETECT; default: return false; } } -static bool spt_port_hotplug_long_detect(enum port port, u32 val) +static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_A: return val & PORTA_HOTPLUG_LONG_DETECT; - case PORT_B: + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } -static bool ilk_port_hotplug_long_detect(enum port port, u32 val) +static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_A: + switch (pin) { + case HPD_PORT_A: return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; default: return false; } } -static bool pch_port_hotplug_long_detect(enum port port, u32 val) +static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_B: + switch (pin) { + case HPD_PORT_B: return val & PORTB_HOTPLUG_LONG_DETECT; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_LONG_DETECT; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_LONG_DETECT; default: return false; } } 
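/*
 * Illustrative sketch (not part of the patch): the long-pulse helpers now
 * key on the logical hpd_pin rather than the DDI port, so they pair
 * directly with the hpd_*[HPD_NUM_PINS] trigger tables above.  A new
 * platform adds the same two pieces; the EXAMPLE_* names are made up:
 */
static const u32 hpd_example[HPD_NUM_PINS] = {
	[HPD_PORT_A] = EXAMPLE_DDIA_HOTPLUG,		/* IIR trigger bit */
};

static bool example_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & EXAMPLE_DDIA_HPD_LONG_DETECT;	/* long vs. short pulse */
	default:
		return false;
	}
}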
-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) +static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { - switch (port) { - case PORT_B: + switch (pin) { + case HPD_PORT_B: return val & PORTB_HOTPLUG_INT_LONG_PULSE; - case PORT_C: + case HPD_PORT_C: return val & PORTC_HOTPLUG_INT_LONG_PULSE; - case PORT_D: + case HPD_PORT_D: return val & PORTD_HOTPLUG_INT_LONG_PULSE; default: return false; @@ -1638,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS], - bool long_pulse_detect(enum port port, u32 val)) + bool long_pulse_detect(enum hpd_pin pin, u32 val)) { - enum port port; - int i; + enum hpd_pin pin; - for_each_hpd_pin(i) { - if ((hpd[i] & hotplug_trigger) == 0) + for_each_hpd_pin(pin) { + if ((hpd[pin] & hotplug_trigger) == 0) continue; - *pin_mask |= BIT(i); + *pin_mask |= BIT(pin); - port = intel_hpd_pin_to_port(dev_priv, i); - if (port == PORT_NONE) - continue; - - if (long_pulse_detect(port, dig_hotplug_reg)) - *long_mask |= BIT(i); + if (long_pulse_detect(pin, dig_hotplug_reg)) + *long_mask |= BIT(pin); } - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask); + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } @@ -1680,69 +1746,34 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, uint32_t crc4) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - struct intel_pipe_crc_entry *entry; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - struct drm_driver *driver = dev_priv->drm.driver; uint32_t crcs[5]; - int head, tail; spin_lock(&pipe_crc->lock); - if (pipe_crc->source && !crtc->base.crc.opened) { - if (!pipe_crc->entries) { - spin_unlock(&pipe_crc->lock); - DRM_DEBUG_KMS("spurious interrupt\n"); - return; - } - - head = pipe_crc->head; - tail = pipe_crc->tail; - - if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { - spin_unlock(&pipe_crc->lock); - DRM_ERROR("CRC buffer overflowing\n"); - return; - } - - entry = &pipe_crc->entries[head]; - - entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe); - entry->crc[0] = crc0; - entry->crc[1] = crc1; - entry->crc[2] = crc2; - entry->crc[3] = crc3; - entry->crc[4] = crc4; - - head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); - pipe_crc->head = head; - - spin_unlock(&pipe_crc->lock); - - wake_up_interruptible(&pipe_crc->wq); - } else { - /* - * For some not yet identified reason, the first CRC is - * bonkers. So let's just wait for the next vblank and read - * out the buggy result. - * - * On GEN8+ sometimes the second CRC is bonkers as well, so - * don't trust that one either. - */ - if (pipe_crc->skipped <= 0 || - (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { - pipe_crc->skipped++; - spin_unlock(&pipe_crc->lock); - return; - } + /* + * For some not yet identified reason, the first CRC is + * bonkers. So let's just wait for the next vblank and read + * out the buggy result. + * + * On GEN8+ sometimes the second CRC is bonkers as well, so + * don't trust that one either. 
+ */ + if (pipe_crc->skipped <= 0 || + (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { + pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); - crcs[0] = crc0; - crcs[1] = crc1; - crcs[2] = crc2; - crcs[3] = crc3; - crcs[4] = crc4; - drm_crtc_add_crc_entry(&crtc->base, true, - drm_crtc_accurate_vblank_count(&crtc->base), - crcs); + return; } + spin_unlock(&pipe_crc->lock); + + crcs[0] = crc0; + crcs[1] = crc1; + crcs[2] = crc2; + crcs[3] = crc3; + crcs[4] = crc4; + drm_crtc_add_crc_entry(&crtc->base, true, + drm_crtc_accurate_vblank_count(&crtc->base), + crcs); } #else static inline void @@ -2136,7 +2167,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) I915_WRITE(VLV_IER, ier); I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); - POSTING_READ(VLV_MASTER_IER); if (gt_iir) snb_gt_irq_handler(dev_priv, gt_iir); @@ -2221,7 +2251,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) I915_WRITE(VLV_IER, ier); I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); - POSTING_READ(GEN8_MASTER_IRQ); gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); @@ -2390,6 +2419,43 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) cpt_serr_int_handler(dev_priv); } +static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; + u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + u32 pin_mask = 0, long_mask = 0; + + if (ddi_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); + I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + ddi_hotplug_trigger, + dig_hotplug_reg, hpd_icp, + icp_ddi_port_hotplug_long_detect); + } + + if (tc_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); + I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + tc_hotplug_trigger, + dig_hotplug_reg, hpd_icp, + icp_tc_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_ICP) + gmbus_irq_handler(dev_priv); +} + static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & @@ -2553,7 +2619,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - POSTING_READ(DEIER); /* Disable south interrupts. 
We'll only write to SDEIIR once, so further * interrupts will will be stored on its back queue, and then we'll be @@ -2563,7 +2628,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (!HAS_PCH_NOP(dev_priv)) { sde_ier = I915_READ(SDEIER); I915_WRITE(SDEIER, 0); - POSTING_READ(SDEIER); } /* Find, clear, then process each source of interrupt */ @@ -2598,11 +2662,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) } I915_WRITE(DEIER, de_ier); - POSTING_READ(DEIER); - if (!HAS_PCH_NOP(dev_priv)) { + if (!HAS_PCH_NOP(dev_priv)) I915_WRITE(SDEIER, sde_ier); - POSTING_READ(SDEIER); - } /* IRQs are synced during runtime_suspend, we don't require a wakeref */ enable_rpm_wakeref_asserts(dev_priv); @@ -2626,6 +2687,40 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } +static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +{ + u32 pin_mask = 0, long_mask = 0; + u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; + u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; + + if (trigger_tc) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); + I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, + dig_hotplug_reg, hpd_gen11, + gen11_port_hotplug_long_detect); + } + + if (trigger_tbt) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); + I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, + dig_hotplug_reg, hpd_gen11, + gen11_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + else + DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2661,6 +2756,17 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); } + if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { + iir = I915_READ(GEN11_DE_HPD_IIR); + if (iir) { + I915_WRITE(GEN11_DE_HPD_IIR, iir); + ret = IRQ_HANDLED; + gen11_hpd_irq_handler(dev_priv, iir); + } else { + DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); + } + } + if (master_ctl & GEN8_DE_PORT_IRQ) { iir = I915_READ(GEN8_DE_PORT_IIR); if (iir) { @@ -2676,7 +2782,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; - if (IS_CNL_WITH_PORT_F(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + tmp_mask |= ICL_AUX_CHANNEL_E; + + if (IS_CNL_WITH_PORT_F(dev_priv) || + INTEL_GEN(dev_priv) >= 11) tmp_mask |= CNL_AUX_CHANNEL_F; if (iir & tmp_mask) { @@ -2760,8 +2870,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + if (HAS_PCH_ICP(dev_priv)) + icp_irq_handler(dev_priv, iir); + else if (HAS_PCH_SPT(dev_priv) || + HAS_PCH_KBP(dev_priv) || + HAS_PCH_CNP(dev_priv)) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); @@ -2978,11 +3091,44 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915, spin_unlock(&i915->irq_lock); } +static void +gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, + u32 *iir) +{ + void __iomem * const regs = dev_priv->regs; + + if (!(master_ctl & 
GEN11_GU_MISC_IRQ)) + return; + + *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); + if (likely(*iir)) + raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir); +} + +static void +gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, + const u32 master_ctl, const u32 iir) +{ + if (!(master_ctl & GEN11_GU_MISC_IRQ)) + return; + + if (unlikely(!iir)) { + DRM_ERROR("GU_MISC iir blank!\n"); + return; + } + + if (iir & GEN11_GU_MISC_GSE) + intel_opregion_asle_intr(dev_priv); + else + DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir); +} + static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = to_i915(arg); void __iomem * const regs = i915->regs; u32 master_ctl; + u32 gu_misc_iir; if (!intel_irqs_enabled(i915)) return IRQ_NONE; @@ -3011,9 +3157,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) enable_rpm_wakeref_asserts(i915); } + gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); + /* Acknowledge and enable interrupts. */ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); + gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); + return IRQ_HANDLED; } @@ -3089,7 +3239,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv) */ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); I915_WRITE(EMR, I915_READ(EMR) | eir); - I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); } } @@ -3500,7 +3650,12 @@ static void gen11_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(GEN8_DE_PORT_); GEN3_IRQ_RESET(GEN8_DE_MISC_); + GEN3_IRQ_RESET(GEN11_DE_HPD_); + GEN3_IRQ_RESET(GEN11_GU_MISC_); GEN3_IRQ_RESET(GEN8_PCU_); + + if (HAS_PCH_ICP(dev_priv)) + GEN3_IRQ_RESET(SDE); } void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, @@ -3617,6 +3772,73 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_detection_setup(dev_priv); } +static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug; + + hotplug = I915_READ(SHOTPLUG_CTL_DDI); + hotplug |= ICP_DDIA_HPD_ENABLE | + ICP_DDIB_HPD_ENABLE; + I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); + + hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | + ICP_TC_HPD_ENABLE(PORT_TC2) | + ICP_TC_HPD_ENABLE(PORT_TC3) | + ICP_TC_HPD_ENABLE(PORT_TC4); + I915_WRITE(SHOTPLUG_CTL_TC, hotplug); +} + +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + icp_hpd_detection_setup(dev_priv); +} + +static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug; + + hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); + hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); + + hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); + hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | + GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); + I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); +} + +static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + u32 val; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); + hotplug_irqs = 
GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; + + val = I915_READ(GEN11_DE_HPD_IMR); + val &= ~hotplug_irqs; + I915_WRITE(GEN11_DE_HPD_IMR, val); + POSTING_READ(GEN11_DE_HPD_IMR); + + gen11_hpd_detection_setup(dev_priv); + + if (HAS_PCH_ICP(dev_priv)) + icp_hpd_irq_setup(dev_priv); +} + static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 val, hotplug; @@ -3943,9 +4165,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) uint32_t de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; - u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR; + u32 de_misc_masked = GEN8_DE_EDP_PSR; enum pipe pipe; + if (INTEL_GEN(dev_priv) <= 10) + de_misc_masked |= GEN8_DE_MISC_GSE; + if (INTEL_GEN(dev_priv) >= 9) { de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | @@ -3956,7 +4181,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; } - if (IS_CNL_WITH_PORT_F(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + de_port_masked |= ICL_AUX_CHANNEL_E; + + if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) de_port_masked |= CNL_AUX_CHANNEL_F; de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | @@ -3984,10 +4212,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); - if (IS_GEN9_LP(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) { + u32 de_hpd_masked = 0; + u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | + GEN11_DE_TBT_HOTPLUG_MASK; + + GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); + gen11_hpd_detection_setup(dev_priv); + } else if (IS_GEN9_LP(dev_priv)) { bxt_hpd_detection_setup(dev_priv); - else if (IS_BROADWELL(dev_priv)) + } else if (IS_BROADWELL(dev_priv)) { ilk_hpd_detection_setup(dev_priv); + } } static int gen8_irq_postinstall(struct drm_device *dev) @@ -4036,13 +4272,34 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); } +static void icp_irq_postinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + u32 mask = SDE_GMBUS_ICP; + + WARN_ON(I915_READ(SDEIER) != 0); + I915_WRITE(SDEIER, 0xffffffff); + POSTING_READ(SDEIER); + + gen3_assert_iir_is_zero(dev_priv, SDEIIR); + I915_WRITE(SDEIMR, ~mask); + + icp_hpd_detection_setup(dev_priv); +} + static int gen11_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + u32 gu_misc_masked = GEN11_GU_MISC_GSE; + + if (HAS_PCH_ICP(dev_priv)) + icp_irq_postinstall(dev); gen11_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); + GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); + I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); @@ -4090,11 +4347,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev) /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); @@ -4109,6 +4368,81 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } +static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, + u16 *eir, u16 *eir_stuck) +{ + u16 emr; + + *eir = I915_READ16(EIR); + + if (*eir) + I915_WRITE16(EIR, *eir); + + *eir_stuck = I915_READ16(EIR); + if (*eir_stuck == 0) + return; + + /* + * Toggle all EMR bits to make sure we get an edge + * in the ISR master error bit if we don't clear + * all the EIR bits. Otherwise the edge triggered + * IIR on i965/g4x wouldn't notice that an interrupt + * is still pending. Also some EIR bits can't be + * cleared except by handling the underlying error + * (or by a GPU reset) so we mask any bit that + * remains set. + */ + emr = I915_READ16(EMR); + I915_WRITE16(EMR, 0xffff); + I915_WRITE16(EMR, emr | *eir_stuck); +} + +static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, + u16 eir, u16 eir_stuck) +{ + DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); + + if (eir_stuck) + DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); +} + +static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, + u32 *eir, u32 *eir_stuck) +{ + u32 emr; + + *eir = I915_READ(EIR); + + I915_WRITE(EIR, *eir); + + *eir_stuck = I915_READ(EIR); + if (*eir_stuck == 0) + return; + + /* + * Toggle all EMR bits to make sure we get an edge + * in the ISR master error bit if we don't clear + * all the EIR bits. Otherwise the edge triggered + * IIR on i965/g4x wouldn't notice that an interrupt + * is still pending. Also some EIR bits can't be + * cleared except by handling the underlying error + * (or by a GPU reset) so we mask any bit that + * remains set. 
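/*
 * Illustrative sketch (not part of the patch): a worked example of the
 * EMR toggle described above.  Suppose EIR still reads 0x0010 after the
 * clearing write, i.e. that error bit is stuck:
 *
 *	emr = I915_READ16(EMR);			// e.g. 0x0000, nothing masked yet
 *	I915_WRITE16(EMR, 0xffff);		// mask all -> error summary forced low
 *	I915_WRITE16(EMR, emr | 0x0010);	// restore, keep the stuck bit masked
 *
 * Forcing the summary low and back up guarantees the edge-triggered
 * master-error bit in IIR can fire again for a future, different error.
 */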
+ */ + emr = I915_READ(EMR); + I915_WRITE(EMR, 0xffffffff); + I915_WRITE(EMR, emr | *eir_stuck); +} + +static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, + u32 eir, u32 eir_stuck) +{ + DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); + + if (eir_stuck) + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); +} + static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; @@ -4123,6 +4457,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u16 eir = 0, eir_stuck = 0; u16 iir; iir = I915_READ16(IIR); @@ -4135,13 +4470,16 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE16(IIR, iir); if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i8xx_error_irq_handler(dev_priv, eir, eir_stuck); i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); @@ -4179,12 +4517,14 @@ static int i915_irq_postinstall(struct drm_device *dev) dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev_priv)) { @@ -4222,6 +4562,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 eir = 0, eir_stuck = 0; u32 hotplug_status = 0; u32 iir; @@ -4239,13 +4580,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -4299,14 +4643,14 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_MASTER_ERROR_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (IS_G4X(dev_priv)) @@ -4366,6 +4710,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) do { u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 eir = 0, eir_stuck = 0; u32 hotplug_status = 0; u32 iir; @@ -4382,6 +4727,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) * signalled in iir */ i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); + I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) @@ -4390,8 +4738,8 @@ static irqreturn_t i965_irq_handler(int irq, 
void *arg) if (iir & I915_BSD_USER_INTERRUPT) notify_ring(dev_priv->engine[VCS]); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + if (iir & I915_MASTER_ERROR_INTERRUPT) + i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -4506,7 +4854,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_uninstall = gen11_irq_reset; dev->driver->enable_vblank = gen8_enable_vblank; dev->driver->disable_vblank = gen8_disable_vblank; - dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; + dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; } else if (INTEL_GEN(dev_priv) >= 8) { dev->driver->irq_handler = gen8_irq_handler; dev->driver->irq_preinstall = gen8_irq_reset; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 66ea3552c63e..295e981e4a39 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -i915_param_named_unsafe(panel_ignore_lid, int, 0600, - "Override lid status (0=autodetect, 1=autodetect disabled [default], " - "-1=force lid closed, -2=force lid open)"); - i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. " "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); @@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400, i915_param_named_unsafe(enable_psr, int, 0600, "Enable PSR " - "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "(0=disabled, 1=enabled) " "Default: -1 (use per-chip default)"); i915_param_named_unsafe(alpha_support, bool, 0400, @@ -130,9 +126,6 @@ i915_param_named_unsafe(invert_brightness, int, 0600, i915_param_named(disable_display, bool, 0400, "Disable display (default: false)"); -i915_param_named_unsafe(enable_cmd_parser, bool, 0400, - "Enable command parsing (true=enabled [default], false=disabled)"); - i915_param_named(mmio_debug, int, 0600, "Enable the MMIO debug code for the first N failures (default: off). 
" "This may negatively affect performance."); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 6684025b7af8..6c4d4a21474b 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -36,7 +36,6 @@ struct drm_printer; #define I915_PARAMS_FOR_EACH(param) \ param(char *, vbt_firmware, NULL) \ param(int, modeset, -1) \ - param(int, panel_ignore_lid, 1) \ param(int, lvds_channel_mode, 0) \ param(int, panel_use_ssc, -1) \ param(int, vbt_sdvo_panel_type, -1) \ @@ -58,7 +57,6 @@ struct drm_printer; param(unsigned int, inject_load_failure, 0) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ - param(bool, enable_cmd_parser, true) \ param(bool, enable_hangcheck, true) \ param(bool, fastboot, false) \ param(bool, prefault_disable, false) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 4364922e935d..6a4d1388ad2d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -340,7 +340,6 @@ static const struct intel_device_info intel_valleyview_info = { GEN(7), .is_lp = 1, .num_pipes = 2, - .has_psr = 1, .has_runtime_pm = 1, .has_rc6 = 1, .has_gmch_display = 1, @@ -433,7 +432,6 @@ static const struct intel_device_info intel_cherryview_info = { .is_lp = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .has_64bit_reloc = 1, - .has_psr = 1, .has_runtime_pm = 1, .has_resource_streamer = 1, .has_rc6 = 1, @@ -659,12 +657,15 @@ static const struct pci_device_id pciidlist[] = { INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), + INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info), INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), - INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info), INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), {0, 0, 0} @@ -673,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static void i915_pci_remove(struct pci_dev *pdev) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev; + + dev = pci_get_drvdata(pdev); + if (!dev) /* driver load aborted, nothing to cleanup */ + return; i915_driver_unload(dev); drm_dev_put(dev); + + pci_set_drvdata(pdev, NULL); } static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -711,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; + if (i915_inject_load_failure()) { + i915_pci_remove(pdev); + return -ENODEV; + } + err = i915_live_selftests(pdev); if (err) { i915_pci_remove(pdev); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 019bd2d073ad..6bf10952c724 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -315,7 +315,7 @@ static u32 i915_oa_max_sample_rate = 100000; * code assumes all reports have a power-of-two size and ~(size - 1) can * be used as a mask to align the OA tail pointer. 
*/ -static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { +static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_A13] = { 0, 64 }, [I915_OA_FORMAT_A29] = { 1, 128 }, [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 }, @@ -326,7 +326,7 @@ static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_C4_B8] = { 7, 64 }, }; -static struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { +static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_A12] = { 0, 64 }, [I915_OA_FORMAT_A12_B8_C8] = { 2, 128 }, [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, @@ -737,12 +737,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, continue; } - /* - * XXX: Just keep the lower 21 bits for now since I'm not - * entirely sure if the HW touches any of the higher bits in - * this field - */ - ctx_id = report32[2] & 0x1fffff; + ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask; /* * Squash whatever is in the CTX_ID field if it's marked as @@ -1203,6 +1198,33 @@ static int i915_oa_read(struct i915_perf_stream *stream, return dev_priv->perf.oa.ops.read(stream, buf, count, offset); } +static struct intel_context *oa_pin_context(struct drm_i915_private *i915, + struct i915_gem_context *ctx) +{ + struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_context *ce; + int ret; + + ret = i915_mutex_lock_interruptible(&i915->drm); + if (ret) + return ERR_PTR(ret); + + /* + * As the ID is the gtt offset of the context's vma we + * pin the vma to ensure the ID remains fixed. + * + * NB: implied RCS engine... + */ + ce = intel_context_pin(ctx, engine); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ce)) + return ce; + + i915->perf.oa.pinned_ctx = ce; + + return ce; +} + /** * oa_get_render_ctx_id - determine and hold ctx hw id * @stream: An i915-perf stream opened for OA metrics @@ -1215,40 +1237,76 @@ static int i915_oa_read(struct i915_perf_stream *stream, */ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct drm_i915_private *i915 = stream->dev_priv; + struct intel_context *ce; - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; - } else { - struct intel_engine_cs *engine = dev_priv->engine[RCS]; - struct intel_ring *ring; - int ret; - - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - return ret; + ce = oa_pin_context(i915, stream->ctx); + if (IS_ERR(ce)) + return PTR_ERR(ce); + switch (INTEL_GEN(i915)) { + case 7: { /* - * As the ID is the gtt offset of the context's vma we - * pin the vma to ensure the ID remains fixed. - * - * NB: implied RCS engine... + * On Haswell we don't do any post processing of the reports + * and don't need to use the mask. */ - ring = intel_context_pin(stream->ctx, engine); - mutex_unlock(&dev_priv->drm.struct_mutex); - if (IS_ERR(ring)) - return PTR_ERR(ring); + i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state); + i915->perf.oa.specific_ctx_id_mask = 0; + break; + } + case 8: + case 9: + case 10: + if (USES_GUC_SUBMISSION(i915)) { + /* + * When using GuC, the context descriptor we write in + * i915 is read by GuC and rewritten before it's + * actually written into the hardware. The LRCA is + * what is put into the context id field of the + * context descriptor by GuC. Because it's aligned to + * a page, the lower 12bits are always at 0 and + * dropped by GuC. 
They won't be part of the context + * ID in the OA reports, so squash those lower bits. + */ + i915->perf.oa.specific_ctx_id = + lower_32_bits(ce->lrc_desc) >> 12; - /* - * Explicitly track the ID (instead of calling - * i915_ggtt_offset() on the fly) considering the difference - * with gen8+ and execlists - */ - dev_priv->perf.oa.specific_ctx_id = - i915_ggtt_offset(to_intel_context(stream->ctx, engine)->state); + /* + * GuC uses the top bit to signal proxy submission, so + * ignore that bit. + */ + i915->perf.oa.specific_ctx_id_mask = + (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; + } else { + i915->perf.oa.specific_ctx_id_mask = + (1U << GEN8_CTX_ID_WIDTH) - 1; + i915->perf.oa.specific_ctx_id = + upper_32_bits(ce->lrc_desc); + i915->perf.oa.specific_ctx_id &= + i915->perf.oa.specific_ctx_id_mask; + } + break; + + case 11: { + i915->perf.oa.specific_ctx_id_mask = + ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) | + ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) | + ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32); + i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc); + i915->perf.oa.specific_ctx_id &= + i915->perf.oa.specific_ctx_id_mask; + break; + } + + default: + MISSING_CASE(INTEL_GEN(i915)); } + DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", + i915->perf.oa.specific_ctx_id, + i915->perf.oa.specific_ctx_id_mask); + return 0; } @@ -1262,17 +1320,15 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_context *ce; - if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; - } else { - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; + dev_priv->perf.oa.specific_ctx_id_mask = 0; + ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx); + if (ce) { mutex_lock(&dev_priv->drm.struct_mutex); - - dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; - intel_context_unpin(stream->ctx, engine); - + intel_context_unpin(ce); mutex_unlock(&dev_priv->drm.struct_mutex); } } @@ -1780,7 +1836,9 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, * So far the best way to work around this issue seems to be draining * the GPU from any submitted work. 
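/*
 * Illustrative sketch (not part of the patch): the per-gen
 * specific_ctx_id / specific_ctx_id_mask pair set up above is what the
 * report-filtering path compares against - only the bits the hardware
 * (or GuC) reports reliably take part:
 *
 *	u32 ctx_id = report32[2] & i915->perf.oa.specific_ctx_id_mask;
 *
 *	if (ctx_id == i915->perf.oa.specific_ctx_id)
 *		;	// report belongs to the context being profiled
 */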
*/ - ret = i915_gem_wait_for_idle(dev_priv, wait_flags); + ret = i915_gem_wait_for_idle(dev_priv, + wait_flags, + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index dc87797db500..d6c8f8fdfda5 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -4,6 +4,7 @@ * Copyright © 2017-2018 Intel Corporation */ +#include <linux/irq.h> #include "i915_pmu.h" #include "intel_ringbuffer.h" #include "i915_drv.h" @@ -127,6 +128,7 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) { if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { i915->pmu.timer_enabled = true; + i915->pmu.timer_last = ktime_get(); hrtimer_start_range_ns(&i915->pmu.timer, ns_to_ktime(PERIOD), 0, HRTIMER_MODE_REL_PINNED); @@ -155,12 +157,13 @@ static bool grab_forcewake(struct drm_i915_private *i915, bool fw) } static void -update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val) +add_sample(struct i915_pmu_sample *sample, u32 val) { - sample->cur += mul_u32_u32(val, unit); + sample->cur += val; } -static void engines_sample(struct drm_i915_private *dev_priv) +static void +engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -182,8 +185,9 @@ static void engines_sample(struct drm_i915_private *dev_priv) val = !i915_seqno_passed(current_seqno, last_seqno); - update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], - PERIOD, val); + if (val) + add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], + period_ns); if (val && (engine->pmu.enable & (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) { @@ -194,11 +198,13 @@ static void engines_sample(struct drm_i915_private *dev_priv) val = 0; } - update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], - PERIOD, !!(val & RING_WAIT)); + if (val & RING_WAIT) + add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], + period_ns); - update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], - PERIOD, !!(val & RING_WAIT_SEMAPHORE)); + if (val & RING_WAIT_SEMAPHORE) + add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], + period_ns); } if (fw) @@ -207,7 +213,14 @@ static void engines_sample(struct drm_i915_private *dev_priv) intel_runtime_pm_put(dev_priv); } -static void frequency_sample(struct drm_i915_private *dev_priv) +static void +add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) +{ + sample->cur += mul_u32_u32(val, mul); +} + +static void +frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) { if (dev_priv->pmu.enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { @@ -221,15 +234,17 @@ static void frequency_sample(struct drm_i915_private *dev_priv) intel_runtime_pm_put(dev_priv); } - update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], - 1, intel_gpu_freq(dev_priv, val)); + add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], + intel_gpu_freq(dev_priv, val), + period_ns / 1000); } if (dev_priv->pmu.enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { - update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1, - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.cur_freq)); + add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], + intel_gpu_freq(dev_priv, + dev_priv->gt_pm.rps.cur_freq), + period_ns / 1000); } } @@ -237,14 +252,27 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) { struct drm_i915_private *i915 = container_of(hrtimer, struct drm_i915_private, pmu.timer); + unsigned int period_ns; 
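/*
 * Illustrative sketch (not part of the patch): the sampling timer now
 * credits the measured elapsed time rather than assuming a fixed PERIOD,
 * since hrtimer callbacks may fire late.  Skeleton of the pattern;
 * struct example_pmu and its fields are hypothetical, PERIOD stands for
 * the fixed sampling interval:
 */
static enum hrtimer_restart example_sample(struct hrtimer *hrtimer)
{
	struct example_pmu *pmu = container_of(hrtimer, struct example_pmu, timer);
	ktime_t now = ktime_get();
	u64 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));

	pmu->timer_last = now;
	if (pmu->engine_busy)
		pmu->busy_ns += period_ns;	/* credit real wall-clock time */

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}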
+ ktime_t now; if (!READ_ONCE(i915->pmu.timer_enabled)) return HRTIMER_NORESTART; - engines_sample(i915); - frequency_sample(i915); + now = ktime_get(); + period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last)); + i915->pmu.timer_last = now; + + /* + * Strictly speaking the passed in period may not be 100% accurate for + * all internal calculation, since some amount of time can be spent on + * grabbing the forcewake. However the potential error from timer call- + * back delay greatly dominates this so we keep it simple. + */ + engines_sample(i915, period_ns); + frequency_sample(i915, period_ns); + + hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); - hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD)); return HRTIMER_RESTART; } @@ -519,12 +547,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event) case I915_PMU_ACTUAL_FREQUENCY: val = div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, - FREQUENCY); + USEC_PER_SEC /* to MHz */); break; case I915_PMU_REQUESTED_FREQUENCY: val = div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, - FREQUENCY); + USEC_PER_SEC /* to MHz */); break; case I915_PMU_INTERRUPTS: val = count_interrupts(i915); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 2ba735299f7c..7f164ca3db12 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -65,6 +65,14 @@ struct i915_pmu { * event types. */ u64 enable; + + /** + * @timer_last: + * + * Timestmap of the previous timer invocation. + */ + ktime_t timer_last; + /** * @enable_count: Reference counts for the enabled events. * diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index 195203f298df..eeaa3d506d95 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -54,6 +54,7 @@ enum vgt_g2v_type { */ #define VGT_CAPS_FULL_48BIT_PPGTT BIT(2) #define VGT_CAPS_HWSP_EMULATION BIT(3) +#define VGT_CAPS_HUGE_GTT BIT(4) struct vgt_if { u64 magic; /* VGT_MAGIC */ @@ -93,7 +94,10 @@ struct vgt_if { u32 rsv5[4]; u32 g2v_notify; - u32 rsv6[7]; + u32 rsv6[5]; + + u32 cursor_x_hot; + u32 cursor_y_hot; struct { u32 lo; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7720569f2024..91e7483228e1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -139,23 +139,40 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); } +/* + * Given the first two numbers __a and __b of arbitrarily many evenly spaced + * numbers, pick the 0-based __index'th value. + * + * Always prefer this over _PICK() if the numbers are evenly spaced. + */ +#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) + +/* + * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. + * + * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. + */ #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index]) -#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +/* + * Named helper wrappers around _PICK_EVEN() and _PICK(). 
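/*
 * Illustrative sketch (not part of the patch): _PICK_EVEN() evaluates to
 * (a) + (index) * ((b) - (a)), so only the first two of an evenly spaced
 * register series need spelling out.  With hypothetical per-pipe
 * registers 0x1000 apart:
 */
#define _EXAMPLE_A	0x70030
#define _EXAMPLE_B	0x71030
#define EXAMPLE(pipe)	_MMIO(_PICK_EVEN(pipe, _EXAMPLE_A, _EXAMPLE_B))
/* EXAMPLE(2) -> _MMIO(0x70030 + 2 * (0x71030 - 0x70030)) == _MMIO(0x72030) */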
+ */ +#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) -#define _PLANE(plane, a, b) _PIPE(plane, a, b) +#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) -#define _TRANS(tran, a, b) ((a) + (tran)*((b)-(a))) +#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) -#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) +#define _PORT(port, a, b) _PICK_EVEN(port, a, b) #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _PLL(pll, a, b) ((a) + (pll)*((b)-(a))) +#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) +#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) #define _MASKED_FIELD(mask, value) ({ \ if (__builtin_constant_p(mask)) \ BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \ @@ -164,7 +181,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \ BUILD_BUG_ON_MSG((value) & ~(mask), \ "Incorrect value for mask"); \ - (mask) << 16 | (value); }) + __MASKED_FIELD(mask, value); }) #define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); }) #define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0)) @@ -270,19 +287,19 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) -#define ILK_GRDOM_FULL (0<<1) -#define ILK_GRDOM_RENDER (1<<1) -#define ILK_GRDOM_MEDIA (3<<1) -#define ILK_GRDOM_MASK (3<<1) -#define ILK_GRDOM_RESET_ENABLE (1<<0) +#define ILK_GRDOM_FULL (0 << 1) +#define ILK_GRDOM_RENDER (1 << 1) +#define ILK_GRDOM_MEDIA (3 << 1) +#define ILK_GRDOM_MASK (3 << 1) +#define ILK_GRDOM_RESET_ENABLE (1 << 0) #define GEN6_MBCUNIT_SNPCR _MMIO(0x900c) /* for LLC config */ #define GEN6_MBC_SNPCR_SHIFT 21 -#define GEN6_MBC_SNPCR_MASK (3<<21) -#define GEN6_MBC_SNPCR_MAX (0<<21) -#define GEN6_MBC_SNPCR_MED (1<<21) -#define GEN6_MBC_SNPCR_LOW (2<<21) -#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ +#define GEN6_MBC_SNPCR_MASK (3 << 21) +#define GEN6_MBC_SNPCR_MAX (0 << 21) +#define GEN6_MBC_SNPCR_MED (1 << 21) +#define GEN6_MBC_SNPCR_LOW (2 << 21) +#define GEN6_MBC_SNPCR_MIN (3 << 21) /* only 1/16th of the cache is shared */ #define VLV_G3DCTL _MMIO(0x9024) #define VLV_GSCKGCTL _MMIO(0x9028) @@ -314,13 +331,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_GRDOM_VECS (1 << 13) #define GEN11_GRDOM_VECS2 (1 << 14) -#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base+0x228) -#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base+0x518) -#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base+0x220) +#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228) +#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518) +#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220) #define PP_DIR_DCLV_2G 0xffffffff -#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8 + 4) -#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base+0x270 + (n) * 8) +#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4) +#define GEN8_RING_PDP_LDW(engine, n) 
_MMIO((engine)->mmio_base + 0x270 + (n) * 8) #define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8) #define GEN8_RPCS_ENABLE (1 << 31) @@ -358,25 +375,25 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_SELECTIVE_READ_ADDRESSING_ENABLE (1 << 13) #define GAM_ECOCHK _MMIO(0x4090) -#define BDW_DISABLE_HDC_INVALIDATION (1<<25) -#define ECOCHK_SNB_BIT (1<<10) -#define ECOCHK_DIS_TLB (1<<8) -#define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) -#define ECOCHK_PPGTT_CACHE64B (0x3<<3) -#define ECOCHK_PPGTT_CACHE4B (0x0<<3) -#define ECOCHK_PPGTT_GFDT_IVB (0x1<<4) -#define ECOCHK_PPGTT_LLC_IVB (0x1<<3) -#define ECOCHK_PPGTT_UC_HSW (0x1<<3) -#define ECOCHK_PPGTT_WT_HSW (0x2<<3) -#define ECOCHK_PPGTT_WB_HSW (0x3<<3) +#define BDW_DISABLE_HDC_INVALIDATION (1 << 25) +#define ECOCHK_SNB_BIT (1 << 10) +#define ECOCHK_DIS_TLB (1 << 8) +#define HSW_ECOCHK_ARB_PRIO_SOL (1 << 6) +#define ECOCHK_PPGTT_CACHE64B (0x3 << 3) +#define ECOCHK_PPGTT_CACHE4B (0x0 << 3) +#define ECOCHK_PPGTT_GFDT_IVB (0x1 << 4) +#define ECOCHK_PPGTT_LLC_IVB (0x1 << 3) +#define ECOCHK_PPGTT_UC_HSW (0x1 << 3) +#define ECOCHK_PPGTT_WT_HSW (0x2 << 3) +#define ECOCHK_PPGTT_WB_HSW (0x3 << 3) #define GAC_ECO_BITS _MMIO(0x14090) -#define ECOBITS_SNB_BIT (1<<13) -#define ECOBITS_PPGTT_CACHE64B (3<<8) -#define ECOBITS_PPGTT_CACHE4B (0<<8) +#define ECOBITS_SNB_BIT (1 << 13) +#define ECOBITS_PPGTT_CACHE64B (3 << 8) +#define ECOBITS_PPGTT_CACHE4B (0 << 8) #define GAB_CTL _MMIO(0x24000) -#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) +#define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) #define GEN6_STOLEN_RESERVED _MMIO(0x1082C0) #define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20) @@ -395,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_STOLEN_RESERVED_4M (2 << 7) #define GEN8_STOLEN_RESERVED_8M (3 << 7) #define GEN6_STOLEN_RESERVED_ENABLE (1 << 0) +#define GEN11_STOLEN_RESERVED_ADDR_MASK (0xFFFFFFFFFFFULL << 20) /* VGA stuff */ @@ -404,15 +422,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _VGA_MSR_WRITE _MMIO(0x3c2) #define VGA_MSR_WRITE 0x3c2 #define VGA_MSR_READ 0x3cc -#define VGA_MSR_MEM_EN (1<<1) -#define VGA_MSR_CGA_MODE (1<<0) +#define VGA_MSR_MEM_EN (1 << 1) +#define VGA_MSR_CGA_MODE (1 << 0) #define VGA_SR_INDEX 0x3c4 #define SR01 1 #define VGA_SR_DATA 0x3c5 #define VGA_AR_INDEX 0x3c0 -#define VGA_AR_VID_EN (1<<5) +#define VGA_AR_VID_EN (1 << 5) #define VGA_AR_DATA_WRITE 0x3c0 #define VGA_AR_DATA_READ 0x3c1 @@ -445,8 +463,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4) #define MI_PREDICATE_RESULT_2 _MMIO(0x2214) -#define LOWER_SLICE_ENABLED (1<<0) -#define LOWER_SLICE_DISABLED (0<<0) +#define LOWER_SLICE_ENABLED (1 << 0) +#define LOWER_SLICE_DISABLED (0 << 0) /* * Registers used only by the command parser @@ -504,47 +522,47 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 #define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F #define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6 -#define GEN7_OACONTROL_TIMER_ENABLE (1<<5) -#define GEN7_OACONTROL_FORMAT_A13 (0<<2) -#define GEN7_OACONTROL_FORMAT_A29 (1<<2) -#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2) -#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2) -#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2) -#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2) -#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2) -#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2) +#define GEN7_OACONTROL_TIMER_ENABLE (1 << 5) +#define GEN7_OACONTROL_FORMAT_A13 (0 << 2) +#define GEN7_OACONTROL_FORMAT_A29 (1 << 
2) +#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2 << 2) +#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3 << 2) +#define GEN7_OACONTROL_FORMAT_B4_C8 (4 << 2) +#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5 << 2) +#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6 << 2) +#define GEN7_OACONTROL_FORMAT_C4_B8 (7 << 2) #define GEN7_OACONTROL_FORMAT_SHIFT 2 -#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1) -#define GEN7_OACONTROL_ENABLE (1<<0) +#define GEN7_OACONTROL_PER_CTX_ENABLE (1 << 1) +#define GEN7_OACONTROL_ENABLE (1 << 0) #define GEN8_OACTXID _MMIO(0x2364) #define GEN8_OA_DEBUG _MMIO(0x2B04) -#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1<<5) -#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1<<6) -#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1<<2) -#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1<<1) +#define GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) +#define GEN9_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) +#define GEN9_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) +#define GEN9_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) #define GEN8_OACONTROL _MMIO(0x2B00) -#define GEN8_OA_REPORT_FORMAT_A12 (0<<2) -#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2) -#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2) -#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2) +#define GEN8_OA_REPORT_FORMAT_A12 (0 << 2) +#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2 << 2) +#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5 << 2) +#define GEN8_OA_REPORT_FORMAT_C4_B8 (7 << 2) #define GEN8_OA_REPORT_FORMAT_SHIFT 2 -#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1) -#define GEN8_OA_COUNTER_ENABLE (1<<0) +#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1 << 1) +#define GEN8_OA_COUNTER_ENABLE (1 << 0) #define GEN8_OACTXCONTROL _MMIO(0x2360) #define GEN8_OA_TIMER_PERIOD_MASK 0x3F #define GEN8_OA_TIMER_PERIOD_SHIFT 2 -#define GEN8_OA_TIMER_ENABLE (1<<1) -#define GEN8_OA_COUNTER_RESUME (1<<0) +#define GEN8_OA_TIMER_ENABLE (1 << 1) +#define GEN8_OA_COUNTER_RESUME (1 << 0) #define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */ -#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3) -#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2) -#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1) -#define GEN7_OABUFFER_RESUME (1<<0) +#define GEN7_OABUFFER_OVERRUN_DISABLE (1 << 3) +#define GEN7_OABUFFER_EDGE_TRIGGER (1 << 2) +#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1 << 1) +#define GEN7_OABUFFER_RESUME (1 << 0) #define GEN8_OABUFFER_UDW _MMIO(0x23b4) #define GEN8_OABUFFER _MMIO(0x2b14) @@ -552,33 +570,33 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_OASTATUS1 _MMIO(0x2364) #define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0 -#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2) -#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1) -#define GEN7_OASTATUS1_REPORT_LOST (1<<0) +#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1 << 2) +#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1 << 1) +#define GEN7_OASTATUS1_REPORT_LOST (1 << 0) #define GEN7_OASTATUS2 _MMIO(0x2368) #define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0 #define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */ #define GEN8_OASTATUS _MMIO(0x2b08) -#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3) -#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2) -#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1) -#define GEN8_OASTATUS_REPORT_LOST (1<<0) +#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3) +#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2) +#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1) +#define GEN8_OASTATUS_REPORT_LOST (1 << 0) #define GEN8_OAHEADPTR _MMIO(0x2B0C) #define GEN8_OAHEADPTR_MASK 0xffffffc0 #define GEN8_OATAILPTR _MMIO(0x2B10) #define GEN8_OATAILPTR_MASK 0xffffffc0 -#define 
OABUFFER_SIZE_128K (0<<3) -#define OABUFFER_SIZE_256K (1<<3) -#define OABUFFER_SIZE_512K (2<<3) -#define OABUFFER_SIZE_1M (3<<3) -#define OABUFFER_SIZE_2M (4<<3) -#define OABUFFER_SIZE_4M (5<<3) -#define OABUFFER_SIZE_8M (6<<3) -#define OABUFFER_SIZE_16M (7<<3) +#define OABUFFER_SIZE_128K (0 << 3) +#define OABUFFER_SIZE_256K (1 << 3) +#define OABUFFER_SIZE_512K (2 << 3) +#define OABUFFER_SIZE_1M (3 << 3) +#define OABUFFER_SIZE_2M (4 << 3) +#define OABUFFER_SIZE_4M (5 << 3) +#define OABUFFER_SIZE_8M (6 << 3) +#define OABUFFER_SIZE_16M (7 << 3) /* * Flexible, Aggregate EU Counter Registers. @@ -601,35 +619,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OASTARTTRIG1_THRESHOLD_MASK 0xffff #define OASTARTTRIG2 _MMIO(0x2714) -#define OASTARTTRIG2_INVERT_A_0 (1<<0) -#define OASTARTTRIG2_INVERT_A_1 (1<<1) -#define OASTARTTRIG2_INVERT_A_2 (1<<2) -#define OASTARTTRIG2_INVERT_A_3 (1<<3) -#define OASTARTTRIG2_INVERT_A_4 (1<<4) -#define OASTARTTRIG2_INVERT_A_5 (1<<5) -#define OASTARTTRIG2_INVERT_A_6 (1<<6) -#define OASTARTTRIG2_INVERT_A_7 (1<<7) -#define OASTARTTRIG2_INVERT_A_8 (1<<8) -#define OASTARTTRIG2_INVERT_A_9 (1<<9) -#define OASTARTTRIG2_INVERT_A_10 (1<<10) -#define OASTARTTRIG2_INVERT_A_11 (1<<11) -#define OASTARTTRIG2_INVERT_A_12 (1<<12) -#define OASTARTTRIG2_INVERT_A_13 (1<<13) -#define OASTARTTRIG2_INVERT_A_14 (1<<14) -#define OASTARTTRIG2_INVERT_A_15 (1<<15) -#define OASTARTTRIG2_INVERT_B_0 (1<<16) -#define OASTARTTRIG2_INVERT_B_1 (1<<17) -#define OASTARTTRIG2_INVERT_B_2 (1<<18) -#define OASTARTTRIG2_INVERT_B_3 (1<<19) -#define OASTARTTRIG2_INVERT_C_0 (1<<20) -#define OASTARTTRIG2_INVERT_C_1 (1<<21) -#define OASTARTTRIG2_INVERT_D_0 (1<<22) -#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23) -#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24) -#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28) -#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29) -#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30) -#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31) +#define OASTARTTRIG2_INVERT_A_0 (1 << 0) +#define OASTARTTRIG2_INVERT_A_1 (1 << 1) +#define OASTARTTRIG2_INVERT_A_2 (1 << 2) +#define OASTARTTRIG2_INVERT_A_3 (1 << 3) +#define OASTARTTRIG2_INVERT_A_4 (1 << 4) +#define OASTARTTRIG2_INVERT_A_5 (1 << 5) +#define OASTARTTRIG2_INVERT_A_6 (1 << 6) +#define OASTARTTRIG2_INVERT_A_7 (1 << 7) +#define OASTARTTRIG2_INVERT_A_8 (1 << 8) +#define OASTARTTRIG2_INVERT_A_9 (1 << 9) +#define OASTARTTRIG2_INVERT_A_10 (1 << 10) +#define OASTARTTRIG2_INVERT_A_11 (1 << 11) +#define OASTARTTRIG2_INVERT_A_12 (1 << 12) +#define OASTARTTRIG2_INVERT_A_13 (1 << 13) +#define OASTARTTRIG2_INVERT_A_14 (1 << 14) +#define OASTARTTRIG2_INVERT_A_15 (1 << 15) +#define OASTARTTRIG2_INVERT_B_0 (1 << 16) +#define OASTARTTRIG2_INVERT_B_1 (1 << 17) +#define OASTARTTRIG2_INVERT_B_2 (1 << 18) +#define OASTARTTRIG2_INVERT_B_3 (1 << 19) +#define OASTARTTRIG2_INVERT_C_0 (1 << 20) +#define OASTARTTRIG2_INVERT_C_1 (1 << 21) +#define OASTARTTRIG2_INVERT_D_0 (1 << 22) +#define OASTARTTRIG2_THRESHOLD_ENABLE (1 << 23) +#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1 << 24) +#define OASTARTTRIG2_EVENT_SELECT_0 (1 << 28) +#define OASTARTTRIG2_EVENT_SELECT_1 (1 << 29) +#define OASTARTTRIG2_EVENT_SELECT_2 (1 << 30) +#define OASTARTTRIG2_EVENT_SELECT_3 (1 << 31) #define OASTARTTRIG3 _MMIO(0x2718) #define OASTARTTRIG3_NOA_SELECT_MASK 0xf @@ -658,35 +676,35 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OASTARTTRIG5_THRESHOLD_MASK 0xffff #define OASTARTTRIG6 _MMIO(0x2724) -#define OASTARTTRIG6_INVERT_A_0 (1<<0) -#define OASTARTTRIG6_INVERT_A_1 (1<<1) 
-#define OASTARTTRIG6_INVERT_A_2 (1<<2) -#define OASTARTTRIG6_INVERT_A_3 (1<<3) -#define OASTARTTRIG6_INVERT_A_4 (1<<4) -#define OASTARTTRIG6_INVERT_A_5 (1<<5) -#define OASTARTTRIG6_INVERT_A_6 (1<<6) -#define OASTARTTRIG6_INVERT_A_7 (1<<7) -#define OASTARTTRIG6_INVERT_A_8 (1<<8) -#define OASTARTTRIG6_INVERT_A_9 (1<<9) -#define OASTARTTRIG6_INVERT_A_10 (1<<10) -#define OASTARTTRIG6_INVERT_A_11 (1<<11) -#define OASTARTTRIG6_INVERT_A_12 (1<<12) -#define OASTARTTRIG6_INVERT_A_13 (1<<13) -#define OASTARTTRIG6_INVERT_A_14 (1<<14) -#define OASTARTTRIG6_INVERT_A_15 (1<<15) -#define OASTARTTRIG6_INVERT_B_0 (1<<16) -#define OASTARTTRIG6_INVERT_B_1 (1<<17) -#define OASTARTTRIG6_INVERT_B_2 (1<<18) -#define OASTARTTRIG6_INVERT_B_3 (1<<19) -#define OASTARTTRIG6_INVERT_C_0 (1<<20) -#define OASTARTTRIG6_INVERT_C_1 (1<<21) -#define OASTARTTRIG6_INVERT_D_0 (1<<22) -#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23) -#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24) -#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28) -#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29) -#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30) -#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31) +#define OASTARTTRIG6_INVERT_A_0 (1 << 0) +#define OASTARTTRIG6_INVERT_A_1 (1 << 1) +#define OASTARTTRIG6_INVERT_A_2 (1 << 2) +#define OASTARTTRIG6_INVERT_A_3 (1 << 3) +#define OASTARTTRIG6_INVERT_A_4 (1 << 4) +#define OASTARTTRIG6_INVERT_A_5 (1 << 5) +#define OASTARTTRIG6_INVERT_A_6 (1 << 6) +#define OASTARTTRIG6_INVERT_A_7 (1 << 7) +#define OASTARTTRIG6_INVERT_A_8 (1 << 8) +#define OASTARTTRIG6_INVERT_A_9 (1 << 9) +#define OASTARTTRIG6_INVERT_A_10 (1 << 10) +#define OASTARTTRIG6_INVERT_A_11 (1 << 11) +#define OASTARTTRIG6_INVERT_A_12 (1 << 12) +#define OASTARTTRIG6_INVERT_A_13 (1 << 13) +#define OASTARTTRIG6_INVERT_A_14 (1 << 14) +#define OASTARTTRIG6_INVERT_A_15 (1 << 15) +#define OASTARTTRIG6_INVERT_B_0 (1 << 16) +#define OASTARTTRIG6_INVERT_B_1 (1 << 17) +#define OASTARTTRIG6_INVERT_B_2 (1 << 18) +#define OASTARTTRIG6_INVERT_B_3 (1 << 19) +#define OASTARTTRIG6_INVERT_C_0 (1 << 20) +#define OASTARTTRIG6_INVERT_C_1 (1 << 21) +#define OASTARTTRIG6_INVERT_D_0 (1 << 22) +#define OASTARTTRIG6_THRESHOLD_ENABLE (1 << 23) +#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1 << 24) +#define OASTARTTRIG6_EVENT_SELECT_4 (1 << 28) +#define OASTARTTRIG6_EVENT_SELECT_5 (1 << 29) +#define OASTARTTRIG6_EVENT_SELECT_6 (1 << 30) +#define OASTARTTRIG6_EVENT_SELECT_7 (1 << 31) #define OASTARTTRIG7 _MMIO(0x2728) #define OASTARTTRIG7_NOA_SELECT_MASK 0xf @@ -715,31 +733,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ #define OAREPORTTRIG2 _MMIO(0x2744) -#define OAREPORTTRIG2_INVERT_A_0 (1<<0) -#define OAREPORTTRIG2_INVERT_A_1 (1<<1) -#define OAREPORTTRIG2_INVERT_A_2 (1<<2) -#define OAREPORTTRIG2_INVERT_A_3 (1<<3) -#define OAREPORTTRIG2_INVERT_A_4 (1<<4) -#define OAREPORTTRIG2_INVERT_A_5 (1<<5) -#define OAREPORTTRIG2_INVERT_A_6 (1<<6) -#define OAREPORTTRIG2_INVERT_A_7 (1<<7) -#define OAREPORTTRIG2_INVERT_A_8 (1<<8) -#define OAREPORTTRIG2_INVERT_A_9 (1<<9) -#define OAREPORTTRIG2_INVERT_A_10 (1<<10) -#define OAREPORTTRIG2_INVERT_A_11 (1<<11) -#define OAREPORTTRIG2_INVERT_A_12 (1<<12) -#define OAREPORTTRIG2_INVERT_A_13 (1<<13) -#define OAREPORTTRIG2_INVERT_A_14 (1<<14) -#define OAREPORTTRIG2_INVERT_A_15 (1<<15) -#define OAREPORTTRIG2_INVERT_B_0 (1<<16) -#define OAREPORTTRIG2_INVERT_B_1 (1<<17) -#define OAREPORTTRIG2_INVERT_B_2 (1<<18) -#define OAREPORTTRIG2_INVERT_B_3 (1<<19) -#define OAREPORTTRIG2_INVERT_C_0 (1<<20) 
-#define OAREPORTTRIG2_INVERT_C_1 (1<<21) -#define OAREPORTTRIG2_INVERT_D_0 (1<<22) -#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23) -#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31) +#define OAREPORTTRIG2_INVERT_A_0 (1 << 0) +#define OAREPORTTRIG2_INVERT_A_1 (1 << 1) +#define OAREPORTTRIG2_INVERT_A_2 (1 << 2) +#define OAREPORTTRIG2_INVERT_A_3 (1 << 3) +#define OAREPORTTRIG2_INVERT_A_4 (1 << 4) +#define OAREPORTTRIG2_INVERT_A_5 (1 << 5) +#define OAREPORTTRIG2_INVERT_A_6 (1 << 6) +#define OAREPORTTRIG2_INVERT_A_7 (1 << 7) +#define OAREPORTTRIG2_INVERT_A_8 (1 << 8) +#define OAREPORTTRIG2_INVERT_A_9 (1 << 9) +#define OAREPORTTRIG2_INVERT_A_10 (1 << 10) +#define OAREPORTTRIG2_INVERT_A_11 (1 << 11) +#define OAREPORTTRIG2_INVERT_A_12 (1 << 12) +#define OAREPORTTRIG2_INVERT_A_13 (1 << 13) +#define OAREPORTTRIG2_INVERT_A_14 (1 << 14) +#define OAREPORTTRIG2_INVERT_A_15 (1 << 15) +#define OAREPORTTRIG2_INVERT_B_0 (1 << 16) +#define OAREPORTTRIG2_INVERT_B_1 (1 << 17) +#define OAREPORTTRIG2_INVERT_B_2 (1 << 18) +#define OAREPORTTRIG2_INVERT_B_3 (1 << 19) +#define OAREPORTTRIG2_INVERT_C_0 (1 << 20) +#define OAREPORTTRIG2_INVERT_C_1 (1 << 21) +#define OAREPORTTRIG2_INVERT_D_0 (1 << 22) +#define OAREPORTTRIG2_THRESHOLD_ENABLE (1 << 23) +#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1 << 31) #define OAREPORTTRIG3 _MMIO(0x2748) #define OAREPORTTRIG3_NOA_SELECT_MASK 0xf @@ -768,31 +786,31 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ #define OAREPORTTRIG6 _MMIO(0x2754) -#define OAREPORTTRIG6_INVERT_A_0 (1<<0) -#define OAREPORTTRIG6_INVERT_A_1 (1<<1) -#define OAREPORTTRIG6_INVERT_A_2 (1<<2) -#define OAREPORTTRIG6_INVERT_A_3 (1<<3) -#define OAREPORTTRIG6_INVERT_A_4 (1<<4) -#define OAREPORTTRIG6_INVERT_A_5 (1<<5) -#define OAREPORTTRIG6_INVERT_A_6 (1<<6) -#define OAREPORTTRIG6_INVERT_A_7 (1<<7) -#define OAREPORTTRIG6_INVERT_A_8 (1<<8) -#define OAREPORTTRIG6_INVERT_A_9 (1<<9) -#define OAREPORTTRIG6_INVERT_A_10 (1<<10) -#define OAREPORTTRIG6_INVERT_A_11 (1<<11) -#define OAREPORTTRIG6_INVERT_A_12 (1<<12) -#define OAREPORTTRIG6_INVERT_A_13 (1<<13) -#define OAREPORTTRIG6_INVERT_A_14 (1<<14) -#define OAREPORTTRIG6_INVERT_A_15 (1<<15) -#define OAREPORTTRIG6_INVERT_B_0 (1<<16) -#define OAREPORTTRIG6_INVERT_B_1 (1<<17) -#define OAREPORTTRIG6_INVERT_B_2 (1<<18) -#define OAREPORTTRIG6_INVERT_B_3 (1<<19) -#define OAREPORTTRIG6_INVERT_C_0 (1<<20) -#define OAREPORTTRIG6_INVERT_C_1 (1<<21) -#define OAREPORTTRIG6_INVERT_D_0 (1<<22) -#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23) -#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31) +#define OAREPORTTRIG6_INVERT_A_0 (1 << 0) +#define OAREPORTTRIG6_INVERT_A_1 (1 << 1) +#define OAREPORTTRIG6_INVERT_A_2 (1 << 2) +#define OAREPORTTRIG6_INVERT_A_3 (1 << 3) +#define OAREPORTTRIG6_INVERT_A_4 (1 << 4) +#define OAREPORTTRIG6_INVERT_A_5 (1 << 5) +#define OAREPORTTRIG6_INVERT_A_6 (1 << 6) +#define OAREPORTTRIG6_INVERT_A_7 (1 << 7) +#define OAREPORTTRIG6_INVERT_A_8 (1 << 8) +#define OAREPORTTRIG6_INVERT_A_9 (1 << 9) +#define OAREPORTTRIG6_INVERT_A_10 (1 << 10) +#define OAREPORTTRIG6_INVERT_A_11 (1 << 11) +#define OAREPORTTRIG6_INVERT_A_12 (1 << 12) +#define OAREPORTTRIG6_INVERT_A_13 (1 << 13) +#define OAREPORTTRIG6_INVERT_A_14 (1 << 14) +#define OAREPORTTRIG6_INVERT_A_15 (1 << 15) +#define OAREPORTTRIG6_INVERT_B_0 (1 << 16) +#define OAREPORTTRIG6_INVERT_B_1 (1 << 17) +#define OAREPORTTRIG6_INVERT_B_2 (1 << 18) +#define OAREPORTTRIG6_INVERT_B_3 (1 << 19) +#define OAREPORTTRIG6_INVERT_C_0 (1 << 20) 
+#define OAREPORTTRIG6_INVERT_C_1 (1 << 21) +#define OAREPORTTRIG6_INVERT_D_0 (1 << 22) +#define OAREPORTTRIG6_THRESHOLD_ENABLE (1 << 23) +#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1 << 31) #define OAREPORTTRIG7 _MMIO(0x2758) #define OAREPORTTRIG7_NOA_SELECT_MASK 0xf @@ -828,9 +846,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OACEC_COMPARE_VALUE_MASK 0xffff #define OACEC_COMPARE_VALUE_SHIFT 3 -#define OACEC_SELECT_NOA (0<<19) -#define OACEC_SELECT_PREV (1<<19) -#define OACEC_SELECT_BOOLEAN (2<<19) +#define OACEC_SELECT_NOA (0 << 19) +#define OACEC_SELECT_PREV (1 << 19) +#define OACEC_SELECT_BOOLEAN (2 << 19) /* CECX_1 */ #define OACEC_MASK_MASK 0xffff @@ -948,9 +966,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * Reset registers */ #define DEBUG_RESET_I830 _MMIO(0x6070) -#define DEBUG_RESET_FULL (1<<7) -#define DEBUG_RESET_RENDER (1<<8) -#define DEBUG_RESET_DISPLAY (1<<9) +#define DEBUG_RESET_FULL (1 << 7) +#define DEBUG_RESET_RENDER (1 << 8) +#define DEBUG_RESET_DISPLAY (1 << 9) /* * IOSF sideband @@ -961,7 +979,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IOSF_PORT_SHIFT 8 #define IOSF_BYTE_ENABLES_SHIFT 4 #define IOSF_BAR_SHIFT 1 -#define IOSF_SB_BUSY (1<<0) +#define IOSF_SB_BUSY (1 << 0) #define IOSF_PORT_BUNIT 0x03 #define IOSF_PORT_PUNIT 0x04 #define IOSF_PORT_NC 0x11 @@ -1044,13 +1062,13 @@ enum i915_power_well_id { /* * HSW/BDW - * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) + * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) */ HSW_DISP_PW_GLOBAL = 15, /* * GEN9+ - * - HSW_PWR_WELL_CTL_DRIVER(0) (status bit: id*2, req bit: id*2+1) + * - _HSW_PWR_WELL_CTL1-4 (status bit: id*2, req bit: id*2+1) */ SKL_DISP_PW_MISC_IO = 0, SKL_DISP_PW_DDI_A_E, @@ -1074,17 +1092,54 @@ enum i915_power_well_id { SKL_DISP_PW_2, /* - custom power wells */ - SKL_DISP_PW_DC_OFF, BXT_DPIO_CMN_A, BXT_DPIO_CMN_BC, - GLK_DPIO_CMN_C, /* 19 */ + GLK_DPIO_CMN_C, /* 18 */ + + /* + * GEN11+ + * - _HSW_PWR_WELL_CTL1-4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_1 = 0, + ICL_DISP_PW_2, + ICL_DISP_PW_3, + ICL_DISP_PW_4, + + /* + * - _HSW_PWR_WELL_CTL_AUX1/2/4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_AUX_A = 16, + ICL_DISP_PW_AUX_B, + ICL_DISP_PW_AUX_C, + ICL_DISP_PW_AUX_D, + ICL_DISP_PW_AUX_E, + ICL_DISP_PW_AUX_F, + + ICL_DISP_PW_AUX_TBT1 = 24, + ICL_DISP_PW_AUX_TBT2, + ICL_DISP_PW_AUX_TBT3, + ICL_DISP_PW_AUX_TBT4, + + /* + * - _HSW_PWR_WELL_CTL_DDI1/2/4 + * (status bit: (id&15)*2, req bit:(id&15)*2+1) + */ + ICL_DISP_PW_DDI_A = 32, + ICL_DISP_PW_DDI_B, + ICL_DISP_PW_DDI_C, + ICL_DISP_PW_DDI_D, + ICL_DISP_PW_DDI_E, + ICL_DISP_PW_DDI_F, /* 37 */ /* * Multiple platforms. * Must start following the highest ID of any platform. 
* - custom power wells */ - I915_DISP_PW_ALWAYS_ON = 20, + SKL_DISP_PW_DC_OFF = 38, + I915_DISP_PW_ALWAYS_ON, }; #define PUNIT_REG_PWRGT_CTRL 0x60 @@ -1098,8 +1153,8 @@ enum i915_power_well_id { #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 #define PUNIT_REG_GPU_FREQ_STS 0xd8 -#define GPLLENABLE (1<<4) -#define GENFREQSTATUS (1<<0) +#define GPLLENABLE (1 << 4) +#define GENFREQSTATUS (1 << 0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_REG_CZ_TIMESTAMP 0xce @@ -1141,11 +1196,11 @@ enum i915_power_well_id { #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 -#define VLV_TURBO_SOC_OVERRIDE 0x04 -#define VLV_OVERRIDE_EN 1 -#define VLV_SOC_TDP_EN (1 << 1) -#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) -#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) +#define VLV_TURBO_SOC_OVERRIDE 0x04 +#define VLV_OVERRIDE_EN 1 +#define VLV_SOC_TDP_EN (1 << 1) +#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) +#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) /* vlv2 north clock has */ #define CCK_FUSE_REG 0x8 @@ -1194,10 +1249,10 @@ enum i915_power_well_id { #define DPIO_DEVFN 0 #define DPIO_CTL _MMIO(VLV_DISPLAY_BASE + 0x2110) -#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ -#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ -#define DPIO_SFR_BYPASS (1<<1) -#define DPIO_CMNRST (1<<0) +#define DPIO_MODSEL1 (1 << 3) /* if ref clk b == 27 */ +#define DPIO_MODSEL0 (1 << 2) /* if ref clk a == 27 */ +#define DPIO_SFR_BYPASS (1 << 1) +#define DPIO_CMNRST (1 << 0) #define DPIO_PHY(pipe) ((pipe) >> 1) #define DPIO_PHY_IOSF_PORT(phy) (dev_priv->dpio_phy_iosf_port[phy]) @@ -1215,7 +1270,7 @@ enum i915_power_well_id { #define DPIO_P1_SHIFT (21) /* 3 bits */ #define DPIO_P2_SHIFT (16) /* 5 bits */ #define DPIO_N_SHIFT (12) /* 4 bits */ -#define DPIO_ENABLE_CALIBRATION (1<<11) +#define DPIO_ENABLE_CALIBRATION (1 << 11) #define DPIO_M1DIV_SHIFT (8) /* 3 bits */ #define DPIO_M2DIV_MASK 0xff #define _VLV_PLL_DW3_CH1 0x802c @@ -1264,10 +1319,10 @@ enum i915_power_well_id { #define _VLV_PCS_DW0_CH0 0x8200 #define _VLV_PCS_DW0_CH1 0x8400 -#define DPIO_PCS_TX_LANE2_RESET (1<<16) -#define DPIO_PCS_TX_LANE1_RESET (1<<7) -#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4) -#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3) +#define DPIO_PCS_TX_LANE2_RESET (1 << 16) +#define DPIO_PCS_TX_LANE1_RESET (1 << 7) +#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1 << 4) +#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1 << 3) #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) #define _VLV_PCS01_DW0_CH0 0x200 @@ -1279,11 +1334,11 @@ enum i915_power_well_id { #define _VLV_PCS_DW1_CH0 0x8204 #define _VLV_PCS_DW1_CH1 0x8404 -#define CHV_PCS_REQ_SOFTRESET_EN (1<<23) -#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1<<22) -#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1<<21) +#define CHV_PCS_REQ_SOFTRESET_EN (1 << 23) +#define DPIO_PCS_CLK_CRI_RXEB_EIOS_EN (1 << 22) +#define DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN (1 << 21) #define DPIO_PCS_CLK_DATAWIDTH_SHIFT (6) -#define DPIO_PCS_CLK_SOFT_RESET (1<<5) +#define DPIO_PCS_CLK_SOFT_RESET (1 << 5) #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1) #define _VLV_PCS01_DW1_CH0 0x204 @@ -1308,12 +1363,12 @@ enum i915_power_well_id { #define _VLV_PCS_DW9_CH0 0x8224 #define _VLV_PCS_DW9_CH1 0x8424 -#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13) -#define DPIO_PCS_TX2MARGIN_000 (0<<13) -#define DPIO_PCS_TX2MARGIN_101 (1<<13) -#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10) -#define DPIO_PCS_TX1MARGIN_000 (0<<10) -#define DPIO_PCS_TX1MARGIN_101 (1<<10) +#define DPIO_PCS_TX2MARGIN_MASK 
(0x7 << 13) +#define DPIO_PCS_TX2MARGIN_000 (0 << 13) +#define DPIO_PCS_TX2MARGIN_101 (1 << 13) +#define DPIO_PCS_TX1MARGIN_MASK (0x7 << 10) +#define DPIO_PCS_TX1MARGIN_000 (0 << 10) +#define DPIO_PCS_TX1MARGIN_101 (1 << 10) #define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) #define _VLV_PCS01_DW9_CH0 0x224 @@ -1325,14 +1380,14 @@ enum i915_power_well_id { #define _CHV_PCS_DW10_CH0 0x8228 #define _CHV_PCS_DW10_CH1 0x8428 -#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30) -#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31) -#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24) -#define DPIO_PCS_TX2DEEMP_9P5 (0<<24) -#define DPIO_PCS_TX2DEEMP_6P0 (2<<24) -#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16) -#define DPIO_PCS_TX1DEEMP_9P5 (0<<16) -#define DPIO_PCS_TX1DEEMP_6P0 (2<<16) +#define DPIO_PCS_SWING_CALC_TX0_TX2 (1 << 30) +#define DPIO_PCS_SWING_CALC_TX1_TX3 (1 << 31) +#define DPIO_PCS_TX2DEEMP_MASK (0xf << 24) +#define DPIO_PCS_TX2DEEMP_9P5 (0 << 24) +#define DPIO_PCS_TX2DEEMP_6P0 (2 << 24) +#define DPIO_PCS_TX1DEEMP_MASK (0xf << 16) +#define DPIO_PCS_TX1DEEMP_9P5 (0 << 16) +#define DPIO_PCS_TX1DEEMP_6P0 (2 << 16) #define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1) #define _VLV_PCS01_DW10_CH0 0x0228 @@ -1344,10 +1399,10 @@ enum i915_power_well_id { #define _VLV_PCS_DW11_CH0 0x822c #define _VLV_PCS_DW11_CH1 0x842c -#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24) -#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3) -#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1) -#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0) +#define DPIO_TX2_STAGGER_MASK(x) ((x) << 24) +#define DPIO_LANEDESKEW_STRAP_OVRD (1 << 3) +#define DPIO_LEFT_TXFIFO_RST_MASTER (1 << 1) +#define DPIO_RIGHT_TXFIFO_RST_MASTER (1 << 0) #define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) #define _VLV_PCS01_DW11_CH0 0x022c @@ -1366,11 +1421,11 @@ enum i915_power_well_id { #define _VLV_PCS_DW12_CH0 0x8230 #define _VLV_PCS_DW12_CH1 0x8430 -#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20) -#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16) -#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8) -#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6) -#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0) +#define DPIO_TX2_STAGGER_MULT(x) ((x) << 20) +#define DPIO_TX1_STAGGER_MULT(x) ((x) << 16) +#define DPIO_TX1_STAGGER_MASK(x) ((x) << 8) +#define DPIO_LANESTAGGER_STRAP_OVRD (1 << 6) +#define DPIO_LANESTAGGER_STRAP(x) ((x) << 0) #define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) #define _VLV_PCS_DW14_CH0 0x8238 @@ -1391,7 +1446,7 @@ enum i915_power_well_id { #define _VLV_TX_DW3_CH0 0x828c #define _VLV_TX_DW3_CH1 0x848c /* The following bit for CHV phy */ -#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27) +#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1 << 27) #define DPIO_SWING_MARGIN101_SHIFT 16 #define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT) #define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) @@ -1410,7 +1465,7 @@ enum i915_power_well_id { #define _VLV_TX_DW5_CH0 0x8294 #define _VLV_TX_DW5_CH1 0x8494 -#define DPIO_TX_OCALINIT_EN (1<<31) +#define DPIO_TX_OCALINIT_EN (1 << 31) #define VLV_TX_DW5(ch) _PORT(ch, _VLV_TX_DW5_CH0, _VLV_TX_DW5_CH1) #define _VLV_TX_DW11_CH0 0x82ac @@ -1640,10 +1695,10 @@ enum i915_power_well_id { #define PORT_PLL_LOCK_THRESHOLD_SHIFT 1 #define PORT_PLL_LOCK_THRESHOLD_MASK (0x7 << PORT_PLL_LOCK_THRESHOLD_SHIFT) /* PORT_PLL_10_A */ -#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) +#define PORT_PLL_DCO_AMP_OVR_EN_H (1 << 27) #define PORT_PLL_DCO_AMP_DEFAULT 15 #define PORT_PLL_DCO_AMP_MASK 0x3c00 -#define 
PORT_PLL_DCO_AMP(x) ((x)<<10) +#define PORT_PLL_DCO_AMP(x) ((x) << 10) #define _PORT_PLL_BASE(phy, ch) _BXT_PHY_CH(phy, ch, \ _PORT_PLL_0_B, \ _PORT_PLL_0_C) @@ -1666,6 +1721,26 @@ enum i915_power_well_id { #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ _ICL_PORT_CL_DW5_B) +#define _CNL_PORT_CL_DW10_A 0x162028 +#define _ICL_PORT_CL_DW10_B 0x6c028 +#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \ + _CNL_PORT_CL_DW10_A, \ + _ICL_PORT_CL_DW10_B) +#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) +#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 +#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) +#define PWR_UP_ALL_LANES (0x0 << 4) +#define PWR_DOWN_LN_3_2_1 (0xe << 4) +#define PWR_DOWN_LN_3_2 (0xc << 4) +#define PWR_DOWN_LN_3 (0x8 << 4) +#define PWR_DOWN_LN_2_1_0 (0x7 << 4) +#define PWR_DOWN_LN_1_0 (0x3 << 4) +#define PWR_DOWN_LN_1 (0x2 << 4) +#define PWR_DOWN_LN_3_1 (0xa << 4) +#define PWR_DOWN_LN_3_1_0 (0xb << 4) +#define PWR_DOWN_LN_MASK (0xf << 4) +#define PWR_DOWN_LN_SHIFT 4 + #define _PORT_CL1CM_DW9_A 0x162024 #define _PORT_CL1CM_DW9_BC 0x6C024 #define IREF0RC_OFFSET_SHIFT 8 @@ -1678,6 +1753,13 @@ enum i915_power_well_id { #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) +#define _ICL_PORT_CL_DW12_A 0x162030 +#define _ICL_PORT_CL_DW12_B 0x6C030 +#define ICL_LANE_ENABLE_AUX (1 << 0) +#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \ + _ICL_PORT_CL_DW12_A, \ + _ICL_PORT_CL_DW12_B) + #define _PORT_CL1CM_DW28_A 0x162070 #define _PORT_CL1CM_DW28_BC 0x6C070 #define OCL1_POWER_DOWN_EN (1 << 23) @@ -1715,16 +1797,22 @@ enum i915_power_well_id { _CNL_PORT_PCS_DW1_LN0_D, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_F)) + #define _ICL_PORT_PCS_DW1_GRP_A 0x162604 #define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 #define _ICL_PORT_PCS_DW1_LN0_A 0x162804 #define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 +#define _ICL_PORT_PCS_DW1_AUX_A 0x162304 +#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304 #define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ _ICL_PORT_PCS_DW1_GRP_A, \ _ICL_PORT_PCS_DW1_GRP_B) #define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_PCS_DW1_LN0_A, \ _ICL_PORT_PCS_DW1_LN0_B) +#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_PCS_DW1_AUX_A, \ + _ICL_PORT_PCS_DW1_AUX_B) #define COMMON_KEEPER_EN (1 << 26) /* CNL Port TX registers */ @@ -1745,7 +1833,7 @@ enum i915_power_well_id { _CNL_PORT_TX_D_GRP_OFFSET, \ _CNL_PORT_TX_AE_GRP_OFFSET, \ _CNL_PORT_TX_F_GRP_OFFSET) + \ - 4*(dw)) + 4 * (dw)) #define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_B_LN0_OFFSET, \ @@ -1753,7 +1841,7 @@ enum i915_power_well_id { _CNL_PORT_TX_D_LN0_OFFSET, \ _CNL_PORT_TX_AE_LN0_OFFSET, \ _CNL_PORT_TX_F_LN0_OFFSET) + \ - 4*(dw)) + 4 * (dw)) #define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2)) #define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2)) @@ -1761,16 +1849,23 @@ enum i915_power_well_id { #define _ICL_PORT_TX_DW2_GRP_B 0x6C688 #define _ICL_PORT_TX_DW2_LN0_A 0x162888 #define _ICL_PORT_TX_DW2_LN0_B 0x6C888 +#define _ICL_PORT_TX_DW2_AUX_A 0x162388 +#define _ICL_PORT_TX_DW2_AUX_B 0x6c388 #define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW2_GRP_A, \ _ICL_PORT_TX_DW2_GRP_B) #define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW2_LN0_A, \ _ICL_PORT_TX_DW2_LN0_B) +#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW2_AUX_A, \ + _ICL_PORT_TX_DW2_AUX_B) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) 
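(Editor's aside, not part of the patch.) The SWING_SEL_UPPER()/SWING_SEL_LOWER() helpers in this hunk split one 4-bit voltage-swing select value across two fields of the port TX DW2 register: bit 3 goes into the upper field at bit 15, bits 2:0 into the lower field at bits 13:11. A minimal standalone sketch of how the pair is meant to be combined; the macro values are mirrored from this hunk and the function name is invented for illustration:

#include <stdint.h>

/* Mirrored from the port TX DW2 swing-select definitions in this hunk. */
#define SWING_SEL_UPPER(x)	(((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK	(1 << 15)
#define SWING_SEL_LOWER(x)	(((x) & 0x7) << 11)
#define SWING_SEL_LOWER_MASK	(0x7 << 11)

/* Encode a 4-bit swing-select value: bit 3 lands in the upper field
 * (bit 15), bits 2:0 in the lower field (bits 13:11). */
static inline uint32_t dw2_set_swing_sel(uint32_t dw2, uint32_t swing)
{
	dw2 &= ~(SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK);
	return dw2 | SWING_SEL_UPPER(swing) | SWING_SEL_LOWER(swing);
}

For example, a swing level of 0xa ends up as bit 15 set (the upper bit) and 0x2 in bits 13:11 (the lower bits).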
#define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) #define SWING_SEL_LOWER_MASK (0x7 << 11) +#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) +#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) #define RCOMP_SCALAR(x) ((x) << 0) #define RCOMP_SCALAR_MASK (0xFF << 0) @@ -1779,21 +1874,26 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ - (ln * (_CNL_PORT_TX_DW4_LN1_AE - \ + ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define _ICL_PORT_TX_DW4_GRP_A 0x162690 #define _ICL_PORT_TX_DW4_GRP_B 0x6C690 #define _ICL_PORT_TX_DW4_LN0_A 0x162890 #define _ICL_PORT_TX_DW4_LN1_A 0x162990 #define _ICL_PORT_TX_DW4_LN0_B 0x6C890 +#define _ICL_PORT_TX_DW4_AUX_A 0x162390 +#define _ICL_PORT_TX_DW4_AUX_B 0x6c390 #define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW4_GRP_A, \ _ICL_PORT_TX_DW4_GRP_B) #define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \ _ICL_PORT_TX_DW4_LN0_A, \ _ICL_PORT_TX_DW4_LN0_B) + \ - (ln * (_ICL_PORT_TX_DW4_LN1_A - \ - _ICL_PORT_TX_DW4_LN0_A))) + ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ + _ICL_PORT_TX_DW4_LN0_A))) +#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW4_AUX_A, \ + _ICL_PORT_TX_DW4_AUX_B) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1808,12 +1908,17 @@ enum i915_power_well_id { #define _ICL_PORT_TX_DW5_GRP_B 0x6C694 #define _ICL_PORT_TX_DW5_LN0_A 0x162894 #define _ICL_PORT_TX_DW5_LN0_B 0x6C894 +#define _ICL_PORT_TX_DW5_AUX_A 0x162394 +#define _ICL_PORT_TX_DW5_AUX_B 0x6c394 #define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW5_GRP_A, \ _ICL_PORT_TX_DW5_GRP_B) #define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ _ICL_PORT_TX_DW5_LN0_A, \ _ICL_PORT_TX_DW5_LN0_B) +#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \ + _ICL_PORT_TX_DW5_AUX_A, \ + _ICL_PORT_TX_DW5_AUX_B) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -1990,6 +2095,11 @@ enum i915_power_well_id { _ICL_PORT_COMP_DW10_A, \ _ICL_PORT_COMP_DW10_B) +/* ICL PHY DFLEX registers */ +#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) +#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) +#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) + /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C #define _PORT_REF_DW3_BC 0x6C18C @@ -2134,8 +2244,8 @@ enum i915_power_well_id { /* SKL balance leg register */ #define DISPIO_CR_TX_BMU_CR0 _MMIO(0x6C00C) /* I_boost values */ -#define BALANCE_LEG_SHIFT(port) (8+3*(port)) -#define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) +#define BALANCE_LEG_SHIFT(port) (8 + 3 * (port)) +#define BALANCE_LEG_MASK(port) (7 << (8 + 3 * (port))) /* Balance leg disable bits */ #define BALANCE_LEG_DISABLE_SHIFT 23 #define BALANCE_LEG_DISABLE(port) (1 << (23 + (port))) @@ -2155,10 +2265,10 @@ enum i915_power_well_id { #define I830_FENCE_TILING_Y_SHIFT 12 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) #define I830_FENCE_PITCH_SHIFT 4 -#define I830_FENCE_REG_VALID (1<<0) +#define I830_FENCE_REG_VALID (1 << 0) #define I915_FENCE_MAX_PITCH_VAL 4 #define I830_FENCE_MAX_PITCH_VAL 6 -#define I830_FENCE_MAX_SIZE_VAL (1<<8) +#define I830_FENCE_MAX_SIZE_VAL (1 << 8) #define I915_FENCE_START_MASK 0x0ff00000 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) @@ -2167,7 +2277,7 @@ 
enum i915_power_well_id { #define FENCE_REG_965_HI(i) _MMIO(0x03000 + (i) * 8 + 4) #define I965_FENCE_PITCH_SHIFT 2 #define I965_FENCE_TILING_Y_SHIFT 1 -#define I965_FENCE_REG_VALID (1<<0) +#define I965_FENCE_REG_VALID (1 << 0) #define I965_FENCE_MAX_PITCH_VAL 0x0400 #define FENCE_REG_GEN6_LO(i) _MMIO(0x100000 + (i) * 8) @@ -2190,13 +2300,13 @@ enum i915_power_well_id { #define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ #define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ #define PGTBL_ER _MMIO(0x02024) -#define PRB0_BASE (0x2030-0x30) -#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */ -#define PRB2_BASE (0x2050-0x30) /* gen3 */ -#define SRB0_BASE (0x2100-0x30) /* gen2 */ -#define SRB1_BASE (0x2110-0x30) /* gen2 */ -#define SRB2_BASE (0x2120-0x30) /* 830 */ -#define SRB3_BASE (0x2130-0x30) /* 830 */ +#define PRB0_BASE (0x2030 - 0x30) +#define PRB1_BASE (0x2040 - 0x30) /* 830,gen3 */ +#define PRB2_BASE (0x2050 - 0x30) /* gen3 */ +#define SRB0_BASE (0x2100 - 0x30) /* gen2 */ +#define SRB1_BASE (0x2110 - 0x30) /* gen2 */ +#define SRB2_BASE (0x2120 - 0x30) /* 830 */ +#define SRB3_BASE (0x2130 - 0x30) /* 830 */ #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 @@ -2209,14 +2319,14 @@ enum i915_power_well_id { #define GEN11_VEBOX_RING_BASE 0x1c8000 #define GEN11_VEBOX2_RING_BASE 0x1d8000 #define BLT_RING_BASE 0x22000 -#define RING_TAIL(base) _MMIO((base)+0x30) -#define RING_HEAD(base) _MMIO((base)+0x34) -#define RING_START(base) _MMIO((base)+0x38) -#define RING_CTL(base) _MMIO((base)+0x3c) +#define RING_TAIL(base) _MMIO((base) + 0x30) +#define RING_HEAD(base) _MMIO((base) + 0x34) +#define RING_START(base) _MMIO((base) + 0x38) +#define RING_CTL(base) _MMIO((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ -#define RING_SYNC_0(base) _MMIO((base)+0x40) -#define RING_SYNC_1(base) _MMIO((base)+0x44) -#define RING_SYNC_2(base) _MMIO((base)+0x48) +#define RING_SYNC_0(base) _MMIO((base) + 0x40) +#define RING_SYNC_1(base) _MMIO((base) + 0x44) +#define RING_SYNC_2(base) _MMIO((base) + 0x48) #define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) #define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) #define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) @@ -2230,21 +2340,22 @@ enum i915_power_well_id { #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) #define GEN6_NOSYNC INVALID_MMIO_REG -#define RING_PSMI_CTL(base) _MMIO((base)+0x50) -#define RING_MAX_IDLE(base) _MMIO((base)+0x54) -#define RING_HWS_PGA(base) _MMIO((base)+0x80) -#define RING_HWS_PGA_GEN6(base) _MMIO((base)+0x2080) -#define RING_RESET_CTL(base) _MMIO((base)+0xd0) +#define RING_PSMI_CTL(base) _MMIO((base) + 0x50) +#define RING_MAX_IDLE(base) _MMIO((base) + 0x54) +#define RING_HWS_PGA(base) _MMIO((base) + 0x80) +#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) +#define RING_RESET_CTL(base) _MMIO((base) + 0xd0) #define RESET_CTL_REQUEST_RESET (1 << 0) #define RESET_CTL_READY_TO_RESET (1 << 1) +#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c) #define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF #define GEN7_WR_WATERMARK _MMIO(0x4028) #define GEN7_GFX_PRIO_CTRL _MMIO(0x402C) #define ARB_MODE _MMIO(0x4030) -#define ARB_MODE_SWIZZLE_SNB (1<<4) -#define ARB_MODE_SWIZZLE_IVB (1<<5) +#define ARB_MODE_SWIZZLE_SNB (1 << 4) +#define ARB_MODE_SWIZZLE_IVB (1 << 5) #define GEN7_GFX_PEND_TLB0 _MMIO(0x4034) #define GEN7_GFX_PEND_TLB1 _MMIO(0x4038) /* L3, CVS, ZTLB, RCC, CASC 
LRA min, max values */ @@ -2254,30 +2365,30 @@ enum i915_power_well_id { #define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074) #define GAMTARBMODE _MMIO(0x04a08) -#define ARB_MODE_BWGTLB_DISABLE (1<<9) -#define ARB_MODE_SWIZZLE_BDW (1<<1) +#define ARB_MODE_BWGTLB_DISABLE (1 << 9) +#define ARB_MODE_SWIZZLE_BDW (1 << 1) #define RENDER_HWS_PGA_GEN7 _MMIO(0x04080) -#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100*(engine)->hw_id) +#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id) #define GEN8_RING_FAULT_REG _MMIO(0x4094) #define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7) -#define RING_FAULT_GTTSEL_MASK (1<<11) +#define RING_FAULT_GTTSEL_MASK (1 << 11) #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) -#define RING_FAULT_VALID (1<<0) +#define RING_FAULT_VALID (1 << 0) #define DONE_REG _MMIO(0x40b0) #define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) -#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4) +#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) #define BSD_HWS_PGA_GEN7 _MMIO(0x04180) #define BLT_HWS_PGA_GEN7 _MMIO(0x04280) #define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) -#define RING_ACTHD(base) _MMIO((base)+0x74) -#define RING_ACTHD_UDW(base) _MMIO((base)+0x5c) -#define RING_NOPID(base) _MMIO((base)+0x94) -#define RING_IMR(base) _MMIO((base)+0xa8) -#define RING_HWSTAM(base) _MMIO((base)+0x98) -#define RING_TIMESTAMP(base) _MMIO((base)+0x358) -#define RING_TIMESTAMP_UDW(base) _MMIO((base)+0x358 + 4) +#define RING_ACTHD(base) _MMIO((base) + 0x74) +#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c) +#define RING_NOPID(base) _MMIO((base) + 0x94) +#define RING_IMR(base) _MMIO((base) + 0xa8) +#define RING_HWSTAM(base) _MMIO((base) + 0x98) +#define RING_TIMESTAMP(base) _MMIO((base) + 0x358) +#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -2290,24 +2401,25 @@ enum i915_power_well_id { #define RING_VALID_MASK 0x00000001 #define RING_VALID 0x00000001 #define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ -#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ -#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ +#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ -#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4) +#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) #define RING_MAX_NONPRIV_SLOTS 12 #define GEN7_TLB_RD_ADDR _MMIO(0x4700) #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) -#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18) +#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1 << 18) #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) #define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) -#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) -#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24) +#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) +#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28) +#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24) #if 0 #define PRB0_TAIL _MMIO(0x2030) @@ -2333,19 +2445,19 @@ enum i915_power_well_id { #define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) #define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) #define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) -#define RING_IPEIR(base) _MMIO((base)+0x64) -#define RING_IPEHR(base) 
_MMIO((base)+0x68) +#define RING_IPEIR(base) _MMIO((base) + 0x64) +#define RING_IPEHR(base) _MMIO((base) + 0x68) /* * On GEN4, only the render ring INSTDONE exists and has a different * layout than the GEN7+ version. * The GEN2 counterpart of this register is GEN2_INSTDONE. */ -#define RING_INSTDONE(base) _MMIO((base)+0x6c) -#define RING_INSTPS(base) _MMIO((base)+0x70) -#define RING_DMA_FADD(base) _MMIO((base)+0x78) -#define RING_DMA_FADD_UDW(base) _MMIO((base)+0x60) /* gen8+ */ -#define RING_INSTPM(base) _MMIO((base)+0xc0) -#define RING_MI_MODE(base) _MMIO((base)+0x9c) +#define RING_INSTDONE(base) _MMIO((base) + 0x6c) +#define RING_INSTPS(base) _MMIO((base) + 0x70) +#define RING_DMA_FADD(base) _MMIO((base) + 0x78) +#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ +#define RING_INSTPM(base) _MMIO((base) + 0xc0) +#define RING_MI_MODE(base) _MMIO((base) + 0x9c) #define INSTPS _MMIO(0x2070) /* 965+ only */ #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ #define ACTHD_I965 _MMIO(0x2074) @@ -2353,37 +2465,37 @@ enum i915_power_well_id { #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ -#define PWRCTX_EN (1<<0) +#define PWRCTX_EN (1 << 0) #define IPEIR _MMIO(0x2088) #define IPEHR _MMIO(0x208c) #define GEN2_INSTDONE _MMIO(0x2090) #define NOPID _MMIO(0x2094) #define HWSTAM _MMIO(0x2098) #define DMA_FADD_I8XX _MMIO(0x20d0) -#define RING_BBSTATE(base) _MMIO((base)+0x110) +#define RING_BBSTATE(base) _MMIO((base) + 0x110) #define RING_BB_PPGTT (1 << 5) -#define RING_SBBADDR(base) _MMIO((base)+0x114) /* hsw+ */ -#define RING_SBBSTATE(base) _MMIO((base)+0x118) /* hsw+ */ -#define RING_SBBADDR_UDW(base) _MMIO((base)+0x11c) /* gen8+ */ -#define RING_BBADDR(base) _MMIO((base)+0x140) -#define RING_BBADDR_UDW(base) _MMIO((base)+0x168) /* gen8+ */ -#define RING_BB_PER_CTX_PTR(base) _MMIO((base)+0x1c0) /* gen8+ */ -#define RING_INDIRECT_CTX(base) _MMIO((base)+0x1c4) /* gen8+ */ -#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base)+0x1c8) /* gen8+ */ -#define RING_CTX_TIMESTAMP(base) _MMIO((base)+0x3a8) /* gen8+ */ +#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ +#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ +#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ +#define RING_BBADDR(base) _MMIO((base) + 0x140) +#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ +#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ +#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ +#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ +#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ #define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) -#define ERR_INT_POISON (1<<31) -#define ERR_INT_MMIO_UNCLAIMED (1<<13) -#define ERR_INT_PIPE_CRC_DONE_C (1<<8) -#define ERR_INT_FIFO_UNDERRUN_C (1<<6) -#define ERR_INT_PIPE_CRC_DONE_B (1<<5) -#define ERR_INT_FIFO_UNDERRUN_B (1<<3) -#define ERR_INT_PIPE_CRC_DONE_A (1<<2) -#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3)) -#define ERR_INT_FIFO_UNDERRUN_A (1<<0) -#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) +#define ERR_INT_POISON (1 << 31) +#define ERR_INT_MMIO_UNCLAIMED (1 << 13) +#define ERR_INT_PIPE_CRC_DONE_C (1 << 8) +#define ERR_INT_FIFO_UNDERRUN_C (1 << 6) +#define ERR_INT_PIPE_CRC_DONE_B (1 << 5) +#define ERR_INT_FIFO_UNDERRUN_B (1 << 3) +#define ERR_INT_PIPE_CRC_DONE_A (1 << 2) +#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) 
* 3)) +#define ERR_INT_FIFO_UNDERRUN_A (1 << 0) +#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3)) #define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) #define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) @@ -2391,7 +2503,7 @@ enum i915_power_well_id { #define FAULT_GTT_SEL (1 << 4) #define FPGA_DBG _MMIO(0x42300) -#define FPGA_DBG_RM_NOCLAIM (1<<31) +#define FPGA_DBG_RM_NOCLAIM (1 << 31) #define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) #define CLAIM_ER_CLR (1 << 31) @@ -2400,22 +2512,22 @@ enum i915_power_well_id { #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ -#define DERRMR_PIPEA_SCANLINE (1<<0) -#define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) -#define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) -#define DERRMR_PIPEA_VBLANK (1<<3) -#define DERRMR_PIPEA_HBLANK (1<<5) -#define DERRMR_PIPEB_SCANLINE (1<<8) -#define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9) -#define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10) -#define DERRMR_PIPEB_VBLANK (1<<11) -#define DERRMR_PIPEB_HBLANK (1<<13) +#define DERRMR_PIPEA_SCANLINE (1 << 0) +#define DERRMR_PIPEA_PRI_FLIP_DONE (1 << 1) +#define DERRMR_PIPEA_SPR_FLIP_DONE (1 << 2) +#define DERRMR_PIPEA_VBLANK (1 << 3) +#define DERRMR_PIPEA_HBLANK (1 << 5) +#define DERRMR_PIPEB_SCANLINE (1 << 8) +#define DERRMR_PIPEB_PRI_FLIP_DONE (1 << 9) +#define DERRMR_PIPEB_SPR_FLIP_DONE (1 << 10) +#define DERRMR_PIPEB_VBLANK (1 << 11) +#define DERRMR_PIPEB_HBLANK (1 << 13) /* Note that PIPEC is not a simple translation of PIPEA/PIPEB */ -#define DERRMR_PIPEC_SCANLINE (1<<14) -#define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15) -#define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20) -#define DERRMR_PIPEC_VBLANK (1<<21) -#define DERRMR_PIPEC_HBLANK (1<<22) +#define DERRMR_PIPEC_SCANLINE (1 << 14) +#define DERRMR_PIPEC_PRI_FLIP_DONE (1 << 15) +#define DERRMR_PIPEC_SPR_FLIP_DONE (1 << 20) +#define DERRMR_PIPEC_VBLANK (1 << 21) +#define DERRMR_PIPEC_HBLANK (1 << 22) /* GM45+ chicken bits -- debug workaround bits that may be required @@ -2439,7 +2551,7 @@ enum i915_power_well_id { #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) -#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ +#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x) << 1) /* gen8+ */ #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ #define MI_MODE _MMIO(0x209c) @@ -2478,22 +2590,22 @@ enum i915_power_well_id { #define GFX_MODE _MMIO(0x2520) #define GFX_MODE_GEN7 _MMIO(0x229c) -#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base+0x29c) -#define GFX_RUN_LIST_ENABLE (1<<15) -#define GFX_INTERRUPT_STEERING (1<<14) -#define GFX_TLB_INVALIDATE_EXPLICIT (1<<13) -#define GFX_SURFACE_FAULT_ENABLE (1<<12) -#define GFX_REPLAY_MODE (1<<11) -#define GFX_PSMI_GRANULARITY (1<<10) -#define GFX_PPGTT_ENABLE (1<<9) -#define GEN8_GFX_PPGTT_48B (1<<7) - -#define GFX_FORWARD_VBLANK_MASK (3<<5) -#define GFX_FORWARD_VBLANK_NEVER (0<<5) -#define GFX_FORWARD_VBLANK_ALWAYS (1<<5) -#define GFX_FORWARD_VBLANK_COND (2<<5) - -#define GEN11_GFX_DISABLE_LEGACY_MODE (1<<3) +#define RING_MODE_GEN7(engine) _MMIO((engine)->mmio_base + 0x29c) +#define GFX_RUN_LIST_ENABLE (1 << 15) +#define GFX_INTERRUPT_STEERING (1 << 14) +#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) +#define GFX_SURFACE_FAULT_ENABLE (1 << 12) +#define GFX_REPLAY_MODE (1 << 11) +#define GFX_PSMI_GRANULARITY (1 << 10) +#define GFX_PPGTT_ENABLE (1 << 9) +#define GEN8_GFX_PPGTT_48B (1 << 7) + +#define GFX_FORWARD_VBLANK_MASK (3 << 5) +#define 
GFX_FORWARD_VBLANK_NEVER (0 << 5) +#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5) +#define GFX_FORWARD_VBLANK_COND (2 << 5) + +#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) #define VLV_DISPLAY_BASE 0x180000 #define VLV_MIPI_BASE VLV_DISPLAY_BASE @@ -2507,8 +2619,8 @@ enum i915_power_well_id { #define IMR _MMIO(0x20a8) #define ISR _MMIO(0x20ac) #define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060) -#define GINT_DIS (1<<22) -#define GCFG_DIS (1<<8) +#define GINT_DIS (1 << 22) +#define GCFG_DIS (1 << 8) #define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064) #define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084) #define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0) @@ -2518,35 +2630,35 @@ enum i915_power_well_id { #define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120) #define VLV_PCBR_ADDR_SHIFT 12 -#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ +#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */ #define EIR _MMIO(0x20b0) #define EMR _MMIO(0x20b4) #define ESR _MMIO(0x20b8) -#define GM45_ERROR_PAGE_TABLE (1<<5) -#define GM45_ERROR_MEM_PRIV (1<<4) -#define I915_ERROR_PAGE_TABLE (1<<4) -#define GM45_ERROR_CP_PRIV (1<<3) -#define I915_ERROR_MEMORY_REFRESH (1<<1) -#define I915_ERROR_INSTRUCTION (1<<0) +#define GM45_ERROR_PAGE_TABLE (1 << 5) +#define GM45_ERROR_MEM_PRIV (1 << 4) +#define I915_ERROR_PAGE_TABLE (1 << 4) +#define GM45_ERROR_CP_PRIV (1 << 3) +#define I915_ERROR_MEMORY_REFRESH (1 << 1) +#define I915_ERROR_INSTRUCTION (1 << 0) #define INSTPM _MMIO(0x20c0) -#define INSTPM_SELF_EN (1<<12) /* 915GM only */ -#define INSTPM_AGPBUSY_INT_EN (1<<11) /* gen3: when disabled, pending interrupts +#define INSTPM_SELF_EN (1 << 12) /* 915GM only */ +#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts will not assert AGPBUSY# and will only be delivered when out of C3. 
*/ -#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ -#define INSTPM_TLB_INVALIDATE (1<<9) -#define INSTPM_SYNC_FLUSH (1<<5) +#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */ +#define INSTPM_TLB_INVALIDATE (1 << 9) +#define INSTPM_SYNC_FLUSH (1 << 5) #define ACTHD _MMIO(0x20c8) #define MEM_MODE _MMIO(0x20cc) -#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */ -#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */ -#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */ +#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */ +#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */ +#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */ #define FW_BLC _MMIO(0x20d8) #define FW_BLC2 _MMIO(0x20dc) #define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */ -#define FW_BLC_SELF_EN_MASK (1<<31) -#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ -#define FW_BLC_SELF_EN (1<<15) /* 945 only */ +#define FW_BLC_SELF_EN_MASK (1 << 31) +#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */ +#define FW_BLC_SELF_EN (1 << 15) /* 945 only */ #define MM_BURST_LENGTH 0x00700000 #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 @@ -2645,37 +2757,37 @@ enum i915_power_well_id { #define MI_AGPBUSY_830_MODE (1 << 0) /* 85x only */ #define CACHE_MODE_0 _MMIO(0x2120) /* 915+ only */ -#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8) -#define CM0_IZ_OPT_DISABLE (1<<6) -#define CM0_ZR_OPT_DISABLE (1<<5) -#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) -#define CM0_DEPTH_EVICT_DISABLE (1<<4) -#define CM0_COLOR_EVICT_DISABLE (1<<3) -#define CM0_DEPTH_WRITE_DISABLE (1<<1) -#define CM0_RC_OP_FLUSH_DISABLE (1<<0) +#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1 << 8) +#define CM0_IZ_OPT_DISABLE (1 << 6) +#define CM0_ZR_OPT_DISABLE (1 << 5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1 << 5) +#define CM0_DEPTH_EVICT_DISABLE (1 << 4) +#define CM0_COLOR_EVICT_DISABLE (1 << 3) +#define CM0_DEPTH_WRITE_DISABLE (1 << 1) +#define CM0_RC_OP_FLUSH_DISABLE (1 << 0) #define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ #define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) -#define GFX_FLSH_CNTL_EN (1<<0) +#define GFX_FLSH_CNTL_EN (1 << 0) #define ECOSKPD _MMIO(0x21d0) -#define ECO_GATING_CX_ONLY (1<<3) -#define ECO_FLIP_DONE (1<<0) +#define ECO_GATING_CX_ONLY (1 << 3) +#define ECO_FLIP_DONE (1 << 0) #define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ -#define RC_OP_FLUSH_ENABLE (1<<0) -#define HIZ_RAW_STALL_OPT_DISABLE (1<<2) +#define RC_OP_FLUSH_ENABLE (1 << 0) +#define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) #define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ -#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) -#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) -#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1) +#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6) +#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) +#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) #define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0) #define GEN6_BLITTER_LOCK_SHIFT 16 -#define GEN6_BLITTER_FBC_NOTIFY (1<<3) +#define GEN6_BLITTER_FBC_NOTIFY (1 << 3) #define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) #define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) -#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) +#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10) #define GEN6_RCS_PWR_FSM _MMIO(0x22ac) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) @@ -2714,6 +2826,10 @@ enum i915_power_well_id { #define GEN10_F2_SS_DIS_SHIFT 18 #define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT) 
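(Editor's aside, not part of the patch.) The fuse definitions around this point follow the header's usual SHIFT/MASK pattern: a *_MASK selects the field in the raw register value and the matching *_SHIFT moves it down to bit 0. A minimal standalone sketch of decoding such a field, with the two GEN10_F2_SS_DIS values copied from the hunk above and a helper name invented for illustration:

#include <stdint.h>

/* Mirrored from the GEN10 fuse definitions above. */
#define GEN10_F2_SS_DIS_SHIFT	18
#define GEN10_F2_SS_DIS_MASK	(0xf << GEN10_F2_SS_DIS_SHIFT)

/* Extract the subslice-disable fuse field from a raw FUSE2 value:
 * mask out the field, then shift it down to bit 0. */
static inline uint32_t gen10_ss_dis(uint32_t fuse2)
{
	return (fuse2 & GEN10_F2_SS_DIS_MASK) >> GEN10_F2_SS_DIS_SHIFT;
}

The GEN10_MIRROR_FUSE3 / GEN10_L3BANK_MASK definitions added just below are meant to be consumed the same way.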
+#define GEN10_MIRROR_FUSE3 _MMIO(0x9118) +#define GEN10_L3BANK_PAIR_COUNT 4 +#define GEN10_L3BANK_MASK 0x0F + #define GEN8_EU_DISABLE0 _MMIO(0x9134) #define GEN8_EU_DIS0_S0_MASK 0xffffff #define GEN8_EU_DIS0_S1_SHIFT 24 @@ -2727,7 +2843,7 @@ enum i915_power_well_id { #define GEN8_EU_DISABLE2 _MMIO(0x913c) #define GEN8_EU_DIS2_S2_MASK 0xff -#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4) +#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4) #define GEN10_EU_DISABLE3 _MMIO(0x9140) #define GEN10_EU_DIS_SS_MASK 0xff @@ -2784,44 +2900,43 @@ enum i915_power_well_id { (IS_HASWELL(dev_priv) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0)) /* These are all the "old" interrupts */ -#define ILK_BSD_USER_INTERRUPT (1<<5) - -#define I915_PM_INTERRUPT (1<<31) -#define I915_ISP_INTERRUPT (1<<22) -#define I915_LPE_PIPE_B_INTERRUPT (1<<21) -#define I915_LPE_PIPE_A_INTERRUPT (1<<20) -#define I915_MIPIC_INTERRUPT (1<<19) -#define I915_MIPIA_INTERRUPT (1<<18) -#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) -#define I915_DISPLAY_PORT_INTERRUPT (1<<17) -#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1<<16) -#define I915_MASTER_ERROR_INTERRUPT (1<<15) -#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) -#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1<<14) -#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ -#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1<<13) -#define I915_HWB_OOM_INTERRUPT (1<<13) -#define I915_LPE_PIPE_C_INTERRUPT (1<<12) -#define I915_SYNC_STATUS_INTERRUPT (1<<12) -#define I915_MISC_INTERRUPT (1<<11) -#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) -#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1<<10) -#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) -#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1<<9) -#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) -#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1<<8) -#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) -#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) -#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) -#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) -#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) -#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1<<3) -#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1<<2) -#define I915_DEBUG_INTERRUPT (1<<2) -#define I915_WINVALID_INTERRUPT (1<<1) -#define I915_USER_INTERRUPT (1<<1) -#define I915_ASLE_INTERRUPT (1<<0) -#define I915_BSD_USER_INTERRUPT (1<<25) +#define ILK_BSD_USER_INTERRUPT (1 << 5) + +#define I915_PM_INTERRUPT (1 << 31) +#define I915_ISP_INTERRUPT (1 << 22) +#define I915_LPE_PIPE_B_INTERRUPT (1 << 21) +#define I915_LPE_PIPE_A_INTERRUPT (1 << 20) +#define I915_MIPIC_INTERRUPT (1 << 19) +#define I915_MIPIA_INTERRUPT (1 << 18) +#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18) +#define I915_DISPLAY_PORT_INTERRUPT (1 << 17) +#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16) +#define I915_MASTER_ERROR_INTERRUPT (1 << 15) +#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14) +#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */ +#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13) +#define I915_HWB_OOM_INTERRUPT (1 << 13) +#define I915_LPE_PIPE_C_INTERRUPT (1 << 12) +#define I915_SYNC_STATUS_INTERRUPT (1 << 12) +#define I915_MISC_INTERRUPT (1 << 11) +#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11) +#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10) +#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10) +#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 
9) +#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9) +#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8) +#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8) +#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7) +#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6) +#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5) +#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4) +#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3) +#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2) +#define I915_DEBUG_INTERRUPT (1 << 2) +#define I915_WINVALID_INTERRUPT (1 << 1) +#define I915_USER_INTERRUPT (1 << 1) +#define I915_ASLE_INTERRUPT (1 << 0) +#define I915_BSD_USER_INTERRUPT (1 << 25) #define I915_HDMI_LPE_AUDIO_BASE (VLV_DISPLAY_BASE + 0x65000) #define I915_HDMI_LPE_AUDIO_SIZE 0x1000 @@ -2844,19 +2959,19 @@ enum i915_power_well_id { #define GEN7_FF_THREAD_MODE _MMIO(0x20a0) #define GEN7_FF_SCHED_MASK 0x0077070 #define GEN8_FF_DS_REF_CNT_FFME (1 << 19) -#define GEN7_FF_TS_SCHED_HS1 (0x5<<16) -#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) -#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) -#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ +#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16) +#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16) +#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16) +#define GEN7_FF_TS_SCHED_HW (0x0 << 16) /* Default */ #define GEN7_FF_VS_REF_CNT_FFME (1 << 15) -#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) -#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) -#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ -#define GEN7_FF_VS_SCHED_HW (0x0<<12) -#define GEN7_FF_DS_SCHED_HS1 (0x5<<4) -#define GEN7_FF_DS_SCHED_HS0 (0x3<<4) -#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */ -#define GEN7_FF_DS_SCHED_HW (0x0<<4) +#define GEN7_FF_VS_SCHED_HS1 (0x5 << 12) +#define GEN7_FF_VS_SCHED_HS0 (0x3 << 12) +#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1 << 12) /* Default */ +#define GEN7_FF_VS_SCHED_HW (0x0 << 12) +#define GEN7_FF_DS_SCHED_HS1 (0x5 << 4) +#define GEN7_FF_DS_SCHED_HS0 (0x3 << 4) +#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1 << 4) /* Default */ +#define GEN7_FF_DS_SCHED_HW (0x0 << 4) /* * Framebuffer compression (915+ only) @@ -2865,51 +2980,51 @@ enum i915_power_well_id { #define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ #define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ #define FBC_CONTROL _MMIO(0x3208) -#define FBC_CTL_EN (1<<31) -#define FBC_CTL_PERIODIC (1<<30) +#define FBC_CTL_EN (1 << 31) +#define FBC_CTL_PERIODIC (1 << 30) #define FBC_CTL_INTERVAL_SHIFT (16) -#define FBC_CTL_UNCOMPRESSIBLE (1<<14) -#define FBC_CTL_C3_IDLE (1<<13) +#define FBC_CTL_UNCOMPRESSIBLE (1 << 14) +#define FBC_CTL_C3_IDLE (1 << 13) #define FBC_CTL_STRIDE_SHIFT (5) #define FBC_CTL_FENCENO_SHIFT (0) #define FBC_COMMAND _MMIO(0x320c) -#define FBC_CMD_COMPRESS (1<<0) +#define FBC_CMD_COMPRESS (1 << 0) #define FBC_STATUS _MMIO(0x3210) -#define FBC_STAT_COMPRESSING (1<<31) -#define FBC_STAT_COMPRESSED (1<<30) -#define FBC_STAT_MODIFIED (1<<29) +#define FBC_STAT_COMPRESSING (1 << 31) +#define FBC_STAT_COMPRESSED (1 << 30) +#define FBC_STAT_MODIFIED (1 << 29) #define FBC_STAT_CURRENT_LINE_SHIFT (0) #define FBC_CONTROL2 _MMIO(0x3214) -#define FBC_CTL_FENCE_DBL (0<<4) -#define FBC_CTL_IDLE_IMM (0<<2) -#define FBC_CTL_IDLE_FULL (1<<2) -#define FBC_CTL_IDLE_LINE (2<<2) -#define FBC_CTL_IDLE_DEBUG (3<<2) -#define FBC_CTL_CPU_FENCE (1<<1) -#define FBC_CTL_PLANE(plane) ((plane)<<0) +#define FBC_CTL_FENCE_DBL (0 << 4) +#define FBC_CTL_IDLE_IMM (0 << 2) +#define FBC_CTL_IDLE_FULL (1 << 2) +#define 
FBC_CTL_IDLE_LINE (2 << 2) +#define FBC_CTL_IDLE_DEBUG (3 << 2) +#define FBC_CTL_CPU_FENCE (1 << 1) +#define FBC_CTL_PLANE(plane) ((plane) << 0) #define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ #define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) #define FBC_LL_SIZE (1536) #define FBC_LLC_READ_CTRL _MMIO(0x9044) -#define FBC_LLC_FULLY_OPEN (1<<30) +#define FBC_LLC_FULLY_OPEN (1 << 30) /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) #define DPFC_CONTROL _MMIO(0x3208) -#define DPFC_CTL_EN (1<<31) -#define DPFC_CTL_PLANE(plane) ((plane)<<30) -#define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) -#define DPFC_CTL_FENCE_EN (1<<29) -#define IVB_DPFC_CTL_FENCE_EN (1<<28) -#define DPFC_CTL_PERSISTENT_MODE (1<<25) -#define DPFC_SR_EN (1<<10) -#define DPFC_CTL_LIMIT_1X (0<<6) -#define DPFC_CTL_LIMIT_2X (1<<6) -#define DPFC_CTL_LIMIT_4X (2<<6) +#define DPFC_CTL_EN (1 << 31) +#define DPFC_CTL_PLANE(plane) ((plane) << 30) +#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29) +#define DPFC_CTL_FENCE_EN (1 << 29) +#define IVB_DPFC_CTL_FENCE_EN (1 << 28) +#define DPFC_CTL_PERSISTENT_MODE (1 << 25) +#define DPFC_SR_EN (1 << 10) +#define DPFC_CTL_LIMIT_1X (0 << 6) +#define DPFC_CTL_LIMIT_2X (1 << 6) +#define DPFC_CTL_LIMIT_4X (2 << 6) #define DPFC_RECOMP_CTL _MMIO(0x320c) -#define DPFC_RECOMP_STALL_EN (1<<27) +#define DPFC_RECOMP_STALL_EN (1 << 27) #define DPFC_RECOMP_STALL_WM_SHIFT (16) #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) @@ -2922,12 +3037,12 @@ enum i915_power_well_id { #define DPFC_STATUS2 _MMIO(0x3214) #define DPFC_FENCE_YOFF _MMIO(0x3218) #define DPFC_CHICKEN _MMIO(0x3224) -#define DPFC_HT_MODIFY (1<<31) +#define DPFC_HT_MODIFY (1 << 31) /* Framebuffer compression for Ironlake */ #define ILK_DPFC_CB_BASE _MMIO(0x43200) #define ILK_DPFC_CONTROL _MMIO(0x43208) -#define FBC_CTL_FALSE_COLOR (1<<10) +#define FBC_CTL_FALSE_COLOR (1 << 10) /* The bit 28-8 is reserved */ #define DPFC_RESERVED (0x1FFFFF00) #define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) @@ -2938,15 +3053,15 @@ enum i915_power_well_id { #define BDW_FBC_COMP_SEG_MASK 0xfff #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) #define ILK_DPFC_CHICKEN _MMIO(0x43224) -#define ILK_DPFC_DISABLE_DUMMY0 (1<<8) -#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) +#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) +#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) #define ILK_FBC_RT_BASE _MMIO(0x2128) -#define ILK_FBC_RT_VALID (1<<0) -#define SNB_FBC_FRONT_BUFFER (1<<1) +#define ILK_FBC_RT_VALID (1 << 0) +#define SNB_FBC_FRONT_BUFFER (1 << 1) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) -#define ILK_FBCQ_DIS (1<<22) -#define ILK_PABSTRETCH_DIS (1<<21) +#define ILK_FBCQ_DIS (1 << 22) +#define ILK_PABSTRETCH_DIS (1 << 21) /* @@ -2955,7 +3070,7 @@ enum i915_power_well_id { * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA _MMIO(0x100100) -#define SNB_CPU_FENCE_ENABLE (1<<29) +#define SNB_CPU_FENCE_ENABLE (1 << 29) #define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) /* Framebuffer compression for Ivybridge */ @@ -2965,8 +3080,8 @@ enum i915_power_well_id { #define IPS_ENABLE (1 << 31) #define MSG_FBC_REND_STATE _MMIO(0x50380) -#define FBC_REND_NUKE (1<<2) -#define FBC_REND_CACHE_CLEAN (1<<1) +#define FBC_REND_NUKE (1 << 2) +#define FBC_REND_CACHE_CLEAN (1 << 1) /* * GPIO regs @@ -2979,6 +3094,10 @@ enum i915_power_well_id { #define GPIOF _MMIO(0x5024) #define GPIOG _MMIO(0x5028) #define GPIOH _MMIO(0x502c) +#define GPIOJ _MMIO(0x5034) +#define GPIOK _MMIO(0x5038) +#define GPIOL 
_MMIO(0x503C) +#define GPIOM _MMIO(0x5040) # define GPIO_CLOCK_DIR_MASK (1 << 0) # define GPIO_CLOCK_DIR_IN (0 << 1) # define GPIO_CLOCK_DIR_OUT (1 << 1) @@ -2995,12 +3114,13 @@ enum i915_power_well_id { # define GPIO_DATA_PULLUP_DISABLE (1 << 13) #define GMBUS0 _MMIO(dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ -#define GMBUS_AKSV_SELECT (1<<11) -#define GMBUS_RATE_100KHZ (0<<8) -#define GMBUS_RATE_50KHZ (1<<8) -#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ -#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */ -#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_AKSV_SELECT (1 << 11) +#define GMBUS_RATE_100KHZ (0 << 8) +#define GMBUS_RATE_50KHZ (1 << 8) +#define GMBUS_RATE_400KHZ (2 << 8) /* reserved on Pineview */ +#define GMBUS_RATE_1MHZ (3 << 8) /* reserved on Pineview */ +#define GMBUS_HOLD_EXT (1 << 7) /* 300ns hold time, rsvd on Pineview */ +#define GMBUS_BYTE_CNT_OVERRIDE (1 << 6) #define GMBUS_PIN_DISABLED 0 #define GMBUS_PIN_SSC 1 #define GMBUS_PIN_VGADDC 2 @@ -3021,36 +3141,37 @@ enum i915_power_well_id { #define GMBUS_NUM_PINS 13 /* including 0 */ #define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ -#define GMBUS_SW_CLR_INT (1<<31) -#define GMBUS_SW_RDY (1<<30) -#define GMBUS_ENT (1<<29) /* enable timeout */ -#define GMBUS_CYCLE_NONE (0<<25) -#define GMBUS_CYCLE_WAIT (1<<25) -#define GMBUS_CYCLE_INDEX (2<<25) -#define GMBUS_CYCLE_STOP (4<<25) +#define GMBUS_SW_CLR_INT (1 << 31) +#define GMBUS_SW_RDY (1 << 30) +#define GMBUS_ENT (1 << 29) /* enable timeout */ +#define GMBUS_CYCLE_NONE (0 << 25) +#define GMBUS_CYCLE_WAIT (1 << 25) +#define GMBUS_CYCLE_INDEX (2 << 25) +#define GMBUS_CYCLE_STOP (4 << 25) #define GMBUS_BYTE_COUNT_SHIFT 16 #define GMBUS_BYTE_COUNT_MAX 256U +#define GEN9_GMBUS_BYTE_COUNT_MAX 511U #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 -#define GMBUS_SLAVE_READ (1<<0) -#define GMBUS_SLAVE_WRITE (0<<0) +#define GMBUS_SLAVE_READ (1 << 0) +#define GMBUS_SLAVE_WRITE (0 << 0) #define GMBUS2 _MMIO(dev_priv->gpio_mmio_base + 0x5108) /* status */ -#define GMBUS_INUSE (1<<15) -#define GMBUS_HW_WAIT_PHASE (1<<14) -#define GMBUS_STALL_TIMEOUT (1<<13) -#define GMBUS_INT (1<<12) -#define GMBUS_HW_RDY (1<<11) -#define GMBUS_SATOER (1<<10) -#define GMBUS_ACTIVE (1<<9) +#define GMBUS_INUSE (1 << 15) +#define GMBUS_HW_WAIT_PHASE (1 << 14) +#define GMBUS_STALL_TIMEOUT (1 << 13) +#define GMBUS_INT (1 << 12) +#define GMBUS_HW_RDY (1 << 11) +#define GMBUS_SATOER (1 << 10) +#define GMBUS_ACTIVE (1 << 9) #define GMBUS3 _MMIO(dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ #define GMBUS4 _MMIO(dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ -#define GMBUS_SLAVE_TIMEOUT_EN (1<<4) -#define GMBUS_NAK_EN (1<<3) -#define GMBUS_IDLE_EN (1<<2) -#define GMBUS_HW_WAIT_EN (1<<1) -#define GMBUS_HW_RDY_EN (1<<0) +#define GMBUS_SLAVE_TIMEOUT_EN (1 << 4) +#define GMBUS_NAK_EN (1 << 3) +#define GMBUS_IDLE_EN (1 << 2) +#define GMBUS_HW_WAIT_EN (1 << 1) +#define GMBUS_HW_RDY_EN (1 << 0) #define GMBUS5 _MMIO(dev_priv->gpio_mmio_base + 0x5120) /* byte index */ -#define GMBUS_2BYTE_INDEX_EN (1<<31) +#define GMBUS_2BYTE_INDEX_EN (1 << 31) /* * Clock control & power management @@ -3088,10 +3209,10 @@ enum i915_power_well_id { #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ -#define DPLL_LOCK_VLV (1<<15) -#define 
DPLL_INTEGRATED_CRI_CLK_VLV (1<<14) -#define DPLL_INTEGRATED_REF_CLK_VLV (1<<13) -#define DPLL_SSC_REF_CLK_CHV (1<<13) +#define DPLL_LOCK_VLV (1 << 15) +#define DPLL_INTEGRATED_CRI_CLK_VLV (1 << 14) +#define DPLL_INTEGRATED_REF_CLK_VLV (1 << 13) +#define DPLL_SSC_REF_CLK_CHV (1 << 13) #define DPLL_PORTC_READY_MASK (0xf << 4) #define DPLL_PORTB_READY_MASK (0xf) @@ -3101,20 +3222,20 @@ enum i915_power_well_id { #define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240) #define DPLL_PORTD_READY_MASK (0xf) #define DISPLAY_PHY_CONTROL _MMIO(VLV_DISPLAY_BASE + 0x60100) -#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2*(phy)+(ch)+27)) +#define PHY_CH_POWER_DOWN_OVRD_EN(phy, ch) (1 << (2 * (phy) + (ch) + 27)) #define PHY_LDO_DELAY_0NS 0x0 #define PHY_LDO_DELAY_200NS 0x1 #define PHY_LDO_DELAY_600NS 0x2 -#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23)) -#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8*(phy)+4*(ch)+11)) +#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2 * (phy) + 23)) +#define PHY_CH_POWER_DOWN_OVRD(mask, phy, ch) ((mask) << (8 * (phy) + 4 * (ch) + 11)) #define PHY_CH_SU_PSR 0x1 #define PHY_CH_DEEP_PSR 0x7 -#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2)) +#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6 * (phy) + 3 * (ch) + 2)) #define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy)) #define DISPLAY_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x60104) -#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30)) -#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6-(6*(phy)+3*(ch)))) -#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8-(6*(phy)+3*(ch)+(spline)))) +#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1 << 31) : (1 << 30)) +#define PHY_STATUS_CMN_LDO(phy, ch) (1 << (6 - (6 * (phy) + 3 * (ch)))) +#define PHY_STATUS_SPLINE_LDO(phy, ch, spline) (1 << (8 - (6 * (phy) + 3 * (ch) + (spline)))) /* * The i830 generation, in LVDS mode, defines P1 as the bit number set within @@ -3135,7 +3256,7 @@ enum i915_power_well_id { /* Ironlake */ # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) -# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) +# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x) - 1) << 9) # define DPLL_FPA1_P1_POST_DIV_SHIFT 0 # define DPLL_FPA1_P1_POST_DIV_MASK 0xff @@ -3224,10 +3345,10 @@ enum i915_power_well_id { #define DPLLA_TEST_M_BYPASS (1 << 2) #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) #define D_STATE _MMIO(0x6104) -#define DSTATE_GFX_RESET_I830 (1<<6) -#define DSTATE_PLL_D3_OFF (1<<3) -#define DSTATE_GFX_CLOCK_GATING (1<<1) -#define DSTATE_DOT_CLOCK_GATING (1<<0) +#define DSTATE_GFX_RESET_I830 (1 << 6) +#define DSTATE_PLL_D3_OFF (1 << 3) +#define DSTATE_GFX_CLOCK_GATING (1 << 1) +#define DSTATE_DOT_CLOCK_GATING (1 << 0) #define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ @@ -3344,7 +3465,7 @@ enum i915_power_well_id { #define DEUC _MMIO(0x6214) /* CRL only */ #define FW_BLC_SELF_VLV _MMIO(VLV_DISPLAY_BASE + 0x6500) -#define FW_CSPWRDWNEN (1<<15) +#define FW_CSPWRDWNEN (1 << 15) #define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504) @@ -3469,7 +3590,7 @@ enum i915_power_well_id { #define HPLLVCO_MOBILE _MMIO(MCHBAR_MIRROR_BASE + 0xc0f) #define TSC1 _MMIO(0x11001) -#define TSE (1<<0) +#define TSE (1 << 0) #define TR1 _MMIO(0x11006) #define TSFS _MMIO(0x11020) #define TSFS_SLOPE_MASK 0x0000ff00 @@ -3515,23 +3636,23 @@ enum 
i915_power_well_id { #define MEMCTL_CMD_CHVID 3 #define MEMCTL_CMD_VMMOFF 4 #define MEMCTL_CMD_VMMON 5 -#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears +#define MEMCTL_CMD_STS (1 << 12) /* write 1 triggers command, clears when command complete */ #define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ #define MEMCTL_FREQ_SHIFT 8 -#define MEMCTL_SFCAVM (1<<7) +#define MEMCTL_SFCAVM (1 << 7) #define MEMCTL_TGT_VID_MASK 0x007f #define MEMIHYST _MMIO(0x1117c) #define MEMINTREN _MMIO(0x11180) /* 16 bits */ -#define MEMINT_RSEXIT_EN (1<<8) -#define MEMINT_CX_SUPR_EN (1<<7) -#define MEMINT_CONT_BUSY_EN (1<<6) -#define MEMINT_AVG_BUSY_EN (1<<5) -#define MEMINT_EVAL_CHG_EN (1<<4) -#define MEMINT_MON_IDLE_EN (1<<3) -#define MEMINT_UP_EVAL_EN (1<<2) -#define MEMINT_DOWN_EVAL_EN (1<<1) -#define MEMINT_SW_CMD_EN (1<<0) +#define MEMINT_RSEXIT_EN (1 << 8) +#define MEMINT_CX_SUPR_EN (1 << 7) +#define MEMINT_CONT_BUSY_EN (1 << 6) +#define MEMINT_AVG_BUSY_EN (1 << 5) +#define MEMINT_EVAL_CHG_EN (1 << 4) +#define MEMINT_MON_IDLE_EN (1 << 3) +#define MEMINT_UP_EVAL_EN (1 << 2) +#define MEMINT_DOWN_EVAL_EN (1 << 1) +#define MEMINT_SW_CMD_EN (1 << 0) #define MEMINTRSTR _MMIO(0x11182) /* 16 bits */ #define MEM_RSEXIT_MASK 0xc000 #define MEM_RSEXIT_SHIFT 14 @@ -3553,26 +3674,26 @@ enum i915_power_well_id { #define MEM_INT_STEER_SMI 2 #define MEM_INT_STEER_SCI 3 #define MEMINTRSTS _MMIO(0x11184) -#define MEMINT_RSEXIT (1<<7) -#define MEMINT_CONT_BUSY (1<<6) -#define MEMINT_AVG_BUSY (1<<5) -#define MEMINT_EVAL_CHG (1<<4) -#define MEMINT_MON_IDLE (1<<3) -#define MEMINT_UP_EVAL (1<<2) -#define MEMINT_DOWN_EVAL (1<<1) -#define MEMINT_SW_CMD (1<<0) +#define MEMINT_RSEXIT (1 << 7) +#define MEMINT_CONT_BUSY (1 << 6) +#define MEMINT_AVG_BUSY (1 << 5) +#define MEMINT_EVAL_CHG (1 << 4) +#define MEMINT_MON_IDLE (1 << 3) +#define MEMINT_UP_EVAL (1 << 2) +#define MEMINT_DOWN_EVAL (1 << 1) +#define MEMINT_SW_CMD (1 << 0) #define MEMMODECTL _MMIO(0x11190) -#define MEMMODE_BOOST_EN (1<<31) +#define MEMMODE_BOOST_EN (1 << 31) #define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ #define MEMMODE_BOOST_FREQ_SHIFT 24 #define MEMMODE_IDLE_MODE_MASK 0x00030000 #define MEMMODE_IDLE_MODE_SHIFT 16 #define MEMMODE_IDLE_MODE_EVAL 0 #define MEMMODE_IDLE_MODE_CONT 1 -#define MEMMODE_HWIDLE_EN (1<<15) -#define MEMMODE_SWMODE_EN (1<<14) -#define MEMMODE_RCLK_GATE (1<<13) -#define MEMMODE_HW_UPDATE (1<<12) +#define MEMMODE_HWIDLE_EN (1 << 15) +#define MEMMODE_SWMODE_EN (1 << 14) +#define MEMMODE_RCLK_GATE (1 << 13) +#define MEMMODE_HW_UPDATE (1 << 12) #define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ #define MEMMODE_FSTART_SHIFT 8 #define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */ @@ -3586,8 +3707,8 @@ enum i915_power_well_id { #define SWMEMCMD_TARVID (3 << 13) #define SWMEMCMD_VRM_OFF (4 << 13) #define SWMEMCMD_VRM_ON (5 << 13) -#define CMDSTS (1<<12) -#define SFCAVM (1<<11) +#define CMDSTS (1 << 12) +#define SFCAVM (1 << 11) #define SWFREQ_MASK 0x0380 /* P0-7 */ #define SWFREQ_SHIFT 7 #define TARVID_MASK 0x001f @@ -3596,49 +3717,49 @@ enum i915_power_well_id { #define RCUPEI _MMIO(0x111b0) #define RCDNEI _MMIO(0x111b4) #define RSTDBYCTL _MMIO(0x111b8) -#define RS1EN (1<<31) -#define RS2EN (1<<30) -#define RS3EN (1<<29) -#define D3RS3EN (1<<28) /* Display D3 imlies RS3 */ -#define SWPROMORSX (1<<27) /* RSx promotion timers ignored */ -#define RCWAKERW (1<<26) /* Resetwarn from PCH causes wakeup */ -#define DPRSLPVREN (1<<25) /* Fast voltage ramp enable */ -#define GFXTGHYST (1<<24) 
/* Hysteresis to allow trunk gating */ -#define RCX_SW_EXIT (1<<23) /* Leave RSx and prevent re-entry */ -#define RSX_STATUS_MASK (7<<20) -#define RSX_STATUS_ON (0<<20) -#define RSX_STATUS_RC1 (1<<20) -#define RSX_STATUS_RC1E (2<<20) -#define RSX_STATUS_RS1 (3<<20) -#define RSX_STATUS_RS2 (4<<20) /* aka rc6 */ -#define RSX_STATUS_RSVD (5<<20) /* deep rc6 unsupported on ilk */ -#define RSX_STATUS_RS3 (6<<20) /* rs3 unsupported on ilk */ -#define RSX_STATUS_RSVD2 (7<<20) -#define UWRCRSXE (1<<19) /* wake counter limit prevents rsx */ -#define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */ -#define JRSC (1<<17) /* rsx coupled to cpu c-state */ -#define RS2INC0 (1<<16) /* allow rs2 in cpu c0 */ -#define RS1CONTSAV_MASK (3<<14) -#define RS1CONTSAV_NO_RS1 (0<<14) /* rs1 doesn't save/restore context */ -#define RS1CONTSAV_RSVD (1<<14) -#define RS1CONTSAV_SAVE_RS1 (2<<14) /* rs1 saves context */ -#define RS1CONTSAV_FULL_RS1 (3<<14) /* rs1 saves and restores context */ -#define NORMSLEXLAT_MASK (3<<12) -#define SLOW_RS123 (0<<12) -#define SLOW_RS23 (1<<12) -#define SLOW_RS3 (2<<12) -#define NORMAL_RS123 (3<<12) -#define RCMODE_TIMEOUT (1<<11) /* 0 is eval interval method */ -#define IMPROMOEN (1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ -#define RCENTSYNC (1<<9) /* rs coupled to cpu c-state (3/6/7) */ -#define STATELOCK (1<<7) /* locked to rs_cstate if 0 */ -#define RS_CSTATE_MASK (3<<4) -#define RS_CSTATE_C367_RS1 (0<<4) -#define RS_CSTATE_C36_RS1_C7_RS2 (1<<4) -#define RS_CSTATE_RSVD (2<<4) -#define RS_CSTATE_C367_RS2 (3<<4) -#define REDSAVES (1<<3) /* no context save if was idle during rs0 */ -#define REDRESTORES (1<<2) /* no restore if was idle during rs0 */ +#define RS1EN (1 << 31) +#define RS2EN (1 << 30) +#define RS3EN (1 << 29) +#define D3RS3EN (1 << 28) /* Display D3 imlies RS3 */ +#define SWPROMORSX (1 << 27) /* RSx promotion timers ignored */ +#define RCWAKERW (1 << 26) /* Resetwarn from PCH causes wakeup */ +#define DPRSLPVREN (1 << 25) /* Fast voltage ramp enable */ +#define GFXTGHYST (1 << 24) /* Hysteresis to allow trunk gating */ +#define RCX_SW_EXIT (1 << 23) /* Leave RSx and prevent re-entry */ +#define RSX_STATUS_MASK (7 << 20) +#define RSX_STATUS_ON (0 << 20) +#define RSX_STATUS_RC1 (1 << 20) +#define RSX_STATUS_RC1E (2 << 20) +#define RSX_STATUS_RS1 (3 << 20) +#define RSX_STATUS_RS2 (4 << 20) /* aka rc6 */ +#define RSX_STATUS_RSVD (5 << 20) /* deep rc6 unsupported on ilk */ +#define RSX_STATUS_RS3 (6 << 20) /* rs3 unsupported on ilk */ +#define RSX_STATUS_RSVD2 (7 << 20) +#define UWRCRSXE (1 << 19) /* wake counter limit prevents rsx */ +#define RSCRP (1 << 18) /* rs requests control on rs1/2 reqs */ +#define JRSC (1 << 17) /* rsx coupled to cpu c-state */ +#define RS2INC0 (1 << 16) /* allow rs2 in cpu c0 */ +#define RS1CONTSAV_MASK (3 << 14) +#define RS1CONTSAV_NO_RS1 (0 << 14) /* rs1 doesn't save/restore context */ +#define RS1CONTSAV_RSVD (1 << 14) +#define RS1CONTSAV_SAVE_RS1 (2 << 14) /* rs1 saves context */ +#define RS1CONTSAV_FULL_RS1 (3 << 14) /* rs1 saves and restores context */ +#define NORMSLEXLAT_MASK (3 << 12) +#define SLOW_RS123 (0 << 12) +#define SLOW_RS23 (1 << 12) +#define SLOW_RS3 (2 << 12) +#define NORMAL_RS123 (3 << 12) +#define RCMODE_TIMEOUT (1 << 11) /* 0 is eval interval method */ +#define IMPROMOEN (1 << 10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */ +#define RCENTSYNC (1 << 9) /* rs coupled to cpu c-state (3/6/7) */ +#define STATELOCK (1 << 
7) /* locked to rs_cstate if 0 */ +#define RS_CSTATE_MASK (3 << 4) +#define RS_CSTATE_C367_RS1 (0 << 4) +#define RS_CSTATE_C36_RS1_C7_RS2 (1 << 4) +#define RS_CSTATE_RSVD (2 << 4) +#define RS_CSTATE_C367_RS2 (3 << 4) +#define REDSAVES (1 << 3) /* no context save if was idle during rs0 */ +#define REDRESTORES (1 << 2) /* no restore if was idle during rs0 */ #define VIDCTL _MMIO(0x111c0) #define VIDSTS _MMIO(0x111c8) #define VIDSTART _MMIO(0x111cc) /* 8 bits */ @@ -3647,7 +3768,7 @@ enum i915_power_well_id { #define MEMSTAT_VID_SHIFT 8 #define MEMSTAT_PSTATE_MASK 0x00f8 #define MEMSTAT_PSTATE_SHIFT 3 -#define MEMSTAT_MON_ACTV (1<<2) +#define MEMSTAT_MON_ACTV (1 << 2) #define MEMSTAT_SRC_CTL_MASK 0x0003 #define MEMSTAT_SRC_CTL_CORE 0 #define MEMSTAT_SRC_CTL_TRB 1 @@ -3656,7 +3777,7 @@ enum i915_power_well_id { #define RCPREVBSYTUPAVG _MMIO(0x113b8) #define RCPREVBSYTDNAVG _MMIO(0x113bc) #define PMMISC _MMIO(0x11214) -#define MCPPCE_EN (1<<0) /* enable PM_MSG from PCH->MPC */ +#define MCPPCE_EN (1 << 0) /* enable PM_MSG from PCH->MPC */ #define SDEW _MMIO(0x1124c) #define CSIEW0 _MMIO(0x11250) #define CSIEW1 _MMIO(0x11254) @@ -3673,8 +3794,8 @@ enum i915_power_well_id { #define RPPREVBSYTUPAVG _MMIO(0x113b8) #define RPPREVBSYTDNAVG _MMIO(0x113bc) #define ECR _MMIO(0x11600) -#define ECR_GPFE (1<<31) -#define ECR_IMONE (1<<30) +#define ECR_GPFE (1 << 31) +#define ECR_IMONE (1 << 30) #define ECR_CAP_MASK 0x0000001f /* Event range, 0-31 */ #define OGW0 _MMIO(0x11608) #define OGW1 _MMIO(0x1160c) @@ -3781,11 +3902,11 @@ enum { FAULT_AND_CONTINUE /* Unsupported */ }; -#define GEN8_CTX_VALID (1<<0) -#define GEN8_CTX_FORCE_PD_RESTORE (1<<1) -#define GEN8_CTX_FORCE_RESTORE (1<<2) -#define GEN8_CTX_L3LLC_COHERENT (1<<5) -#define GEN8_CTX_PRIVILEGE (1<<8) +#define GEN8_CTX_VALID (1 << 0) +#define GEN8_CTX_FORCE_PD_RESTORE (1 << 1) +#define GEN8_CTX_FORCE_RESTORE (1 << 2) +#define GEN8_CTX_L3LLC_COHERENT (1 << 5) +#define GEN8_CTX_PRIVILEGE (1 << 8) #define GEN8_CTX_ADDRESSING_MODE_SHIFT 3 #define GEN8_CTX_ID_SHIFT 32 @@ -3807,7 +3928,7 @@ enum { #define OVADD _MMIO(0x30000) #define DOVSTA _MMIO(0x30008) -#define OC_BUF (0x3<<20) +#define OC_BUF (0x3 << 20) #define OGAMC5 _MMIO(0x30010) #define OGAMC4 _MMIO(0x30014) #define OGAMC3 _MMIO(0x30018) @@ -3975,64 +4096,65 @@ enum { /* VLV eDP PSR registers */ #define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090) #define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090) -#define VLV_EDP_PSR_ENABLE (1<<0) -#define VLV_EDP_PSR_RESET (1<<1) -#define VLV_EDP_PSR_MODE_MASK (7<<2) -#define VLV_EDP_PSR_MODE_HW_TIMER (1<<3) -#define VLV_EDP_PSR_MODE_SW_TIMER (1<<2) -#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7) -#define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8) -#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9) -#define VLV_EDP_PSR_DBL_FRAME (1<<10) -#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16) +#define VLV_EDP_PSR_ENABLE (1 << 0) +#define VLV_EDP_PSR_RESET (1 << 1) +#define VLV_EDP_PSR_MODE_MASK (7 << 2) +#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3) +#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2) +#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7) +#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8) +#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9) +#define VLV_EDP_PSR_DBL_FRAME (1 << 10) +#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16) #define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16 #define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB) #define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0) #define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0) -#define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30) -#define VLV_EDP_PSR_SDP_FREQ_ONCE 
(1<<31) -#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30) +#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30) +#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31) +#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30) #define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB) #define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094) #define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094) -#define VLV_EDP_PSR_LAST_STATE_MASK (7<<3) +#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3) #define VLV_EDP_PSR_CURR_STATE_MASK 7 -#define VLV_EDP_PSR_DISABLED (0<<0) -#define VLV_EDP_PSR_INACTIVE (1<<0) -#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0) -#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0) -#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0) -#define VLV_EDP_PSR_EXIT (5<<0) -#define VLV_EDP_PSR_IN_TRANS (1<<7) +#define VLV_EDP_PSR_DISABLED (0 << 0) +#define VLV_EDP_PSR_INACTIVE (1 << 0) +#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0) +#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0) +#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0) +#define VLV_EDP_PSR_EXIT (5 << 0) +#define VLV_EDP_PSR_IN_TRANS (1 << 7) #define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB) /* HSW+ eDP PSR registers */ #define HSW_EDP_PSR_BASE 0x64800 #define BDW_EDP_PSR_BASE 0x6f800 #define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0) -#define EDP_PSR_ENABLE (1<<31) -#define BDW_PSR_SINGLE_FRAME (1<<30) -#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */ -#define EDP_PSR_LINK_STANDBY (1<<27) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) -#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) +#define EDP_PSR_ENABLE (1 << 31) +#define BDW_PSR_SINGLE_FRAME (1 << 30) +#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ +#define EDP_PSR_LINK_STANDBY (1 << 27) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25) #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 -#define EDP_PSR_SKIP_AUX_EXIT (1<<12) -#define EDP_PSR_TP1_TP2_SEL (0<<11) -#define EDP_PSR_TP1_TP3_SEL (1<<11) -#define EDP_PSR_TP2_TP3_TIME_500us (0<<8) -#define EDP_PSR_TP2_TP3_TIME_100us (1<<8) -#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) -#define EDP_PSR_TP2_TP3_TIME_0us (3<<8) -#define EDP_PSR_TP1_TIME_500us (0<<4) -#define EDP_PSR_TP1_TIME_100us (1<<4) -#define EDP_PSR_TP1_TIME_2500us (2<<4) -#define EDP_PSR_TP1_TIME_0us (3<<4) +#define EDP_PSR_SKIP_AUX_EXIT (1 << 12) +#define EDP_PSR_TP1_TP2_SEL (0 << 11) +#define EDP_PSR_TP1_TP3_SEL (1 << 11) +#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ +#define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) +#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) +#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) +#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) +#define EDP_PSR_TP1_TIME_500us (0 << 4) +#define EDP_PSR_TP1_TIME_100us (1 << 4) +#define EDP_PSR_TP1_TIME_2500us (2 << 4) +#define EDP_PSR_TP1_TIME_0us (3 << 4) #define EDP_PSR_IDLE_FRAME_SHIFT 0 /* Bspec claims those aren't shifted but stay at 0x64800 */ @@ -4052,55 +4174,56 @@ enum { #define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ #define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) -#define EDP_PSR_STATUS_STATE_MASK (7<<29) -#define EDP_PSR_STATUS_STATE_IDLE (0<<29) -#define 
EDP_PSR_STATUS_STATE_SRDONACK (1<<29) -#define EDP_PSR_STATUS_STATE_SRDENT (2<<29) -#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) -#define EDP_PSR_STATUS_STATE_BUFON (4<<29) -#define EDP_PSR_STATUS_STATE_AUXACK (5<<29) -#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) -#define EDP_PSR_STATUS_LINK_MASK (3<<26) -#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) -#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26) -#define EDP_PSR_STATUS_LINK_STANDBY (2<<26) +#define EDP_PSR_STATUS_STATE_MASK (7 << 29) +#define EDP_PSR_STATUS_STATE_SHIFT 29 +#define EDP_PSR_STATUS_STATE_IDLE (0 << 29) +#define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) +#define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) +#define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29) +#define EDP_PSR_STATUS_STATE_BUFON (4 << 29) +#define EDP_PSR_STATUS_STATE_AUXACK (5 << 29) +#define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29) +#define EDP_PSR_STATUS_LINK_MASK (3 << 26) +#define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26) +#define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26) +#define EDP_PSR_STATUS_LINK_STANDBY (2 << 26) #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f #define EDP_PSR_STATUS_COUNT_SHIFT 16 #define EDP_PSR_STATUS_COUNT_MASK 0xf -#define EDP_PSR_STATUS_AUX_ERROR (1<<15) -#define EDP_PSR_STATUS_AUX_SENDING (1<<12) -#define EDP_PSR_STATUS_SENDING_IDLE (1<<9) -#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) -#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) +#define EDP_PSR_STATUS_AUX_ERROR (1 << 15) +#define EDP_PSR_STATUS_AUX_SENDING (1 << 12) +#define EDP_PSR_STATUS_SENDING_IDLE (1 << 9) +#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8) +#define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) #define EDP_PSR_STATUS_IDLE_MASK 0xf #define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) #define EDP_PSR_PERF_CNT_MASK 0xffffff #define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */ -#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28) -#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) -#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) -#define EDP_PSR_DEBUG_MASK_HPD (1<<25) -#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16) -#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */ +#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) +#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) +#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) +#define EDP_PSR_DEBUG_MASK_HPD (1 << 25) +#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) +#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ #define EDP_PSR2_CTL _MMIO(0x6f900) -#define EDP_PSR2_ENABLE (1<<31) -#define EDP_SU_TRACK_ENABLE (1<<30) -#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */ -#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */ -#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20) -#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20) -#define EDP_PSR2_TP2_TIME_500 (0<<8) -#define EDP_PSR2_TP2_TIME_100 (1<<8) -#define EDP_PSR2_TP2_TIME_2500 (2<<8) -#define EDP_PSR2_TP2_TIME_50 (3<<8) -#define EDP_PSR2_TP2_TIME_MASK (3<<8) +#define EDP_PSR2_ENABLE (1 << 31) +#define EDP_SU_TRACK_ENABLE (1 << 30) +#define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */ +#define EDP_Y_COORDINATE_ENABLE (1 << 25) /* GLK and CNL+ */ +#define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20) +#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20) +#define EDP_PSR2_TP2_TIME_500us (0 << 8) +#define EDP_PSR2_TP2_TIME_100us (1 << 8) +#define EDP_PSR2_TP2_TIME_2500us (2 << 8) +#define EDP_PSR2_TP2_TIME_50us (3 << 8) +#define EDP_PSR2_TP2_TIME_MASK (3 << 8) #define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 -#define 
EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4) -#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4) +#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4) +#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4) #define EDP_PSR2_IDLE_FRAME_MASK 0xf #define EDP_PSR2_IDLE_FRAME_SHIFT 0 @@ -4128,7 +4251,7 @@ enum { #define PSR_EVENT_PSR_DISABLE (1 << 0) #define EDP_PSR2_STATUS _MMIO(0x6f940) -#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28) +#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) #define EDP_PSR2_STATUS_STATE_SHIFT 28 /* VGA port control */ @@ -4136,47 +4259,48 @@ enum { #define PCH_ADPA _MMIO(0xe1100) #define VLV_ADPA _MMIO(VLV_DISPLAY_BASE + 0x61100) -#define ADPA_DAC_ENABLE (1<<31) +#define ADPA_DAC_ENABLE (1 << 31) #define ADPA_DAC_DISABLE 0 -#define ADPA_PIPE_SELECT_MASK (1<<30) -#define ADPA_PIPE_A_SELECT 0 -#define ADPA_PIPE_B_SELECT (1<<30) -#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) -/* CPT uses bits 29:30 for pch transcoder select */ +#define ADPA_PIPE_SEL_SHIFT 30 +#define ADPA_PIPE_SEL_MASK (1 << 30) +#define ADPA_PIPE_SEL(pipe) ((pipe) << 30) +#define ADPA_PIPE_SEL_SHIFT_CPT 29 +#define ADPA_PIPE_SEL_MASK_CPT (3 << 29) +#define ADPA_PIPE_SEL_CPT(pipe) ((pipe) << 29) #define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */ -#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24) -#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24) -#define ADPA_CRT_HOTPLUG_ENABLE (1<<23) -#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22) -#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22) -#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21) -#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21) -#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20) -#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20) -#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18) -#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18) -#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17) -#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) -#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) -#define ADPA_USE_VGA_HVPOLARITY (1<<15) +#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0 << 24) +#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3 << 24) +#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3 << 24) +#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2 << 24) +#define ADPA_CRT_HOTPLUG_ENABLE (1 << 23) +#define ADPA_CRT_HOTPLUG_PERIOD_64 (0 << 22) +#define ADPA_CRT_HOTPLUG_PERIOD_128 (1 << 22) +#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0 << 21) +#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1 << 21) +#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0 << 20) +#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1 << 20) +#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0 << 18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1 << 18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2 << 18) +#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3 << 18) +#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0 << 17) +#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1 << 17) +#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1 << 16) +#define ADPA_USE_VGA_HVPOLARITY (1 << 15) #define ADPA_SETS_HVPOLARITY 0 -#define ADPA_VSYNC_CNTL_DISABLE (1<<10) +#define ADPA_VSYNC_CNTL_DISABLE (1 << 10) #define ADPA_VSYNC_CNTL_ENABLE 0 -#define ADPA_HSYNC_CNTL_DISABLE (1<<11) +#define ADPA_HSYNC_CNTL_DISABLE (1 << 11) #define ADPA_HSYNC_CNTL_ENABLE 0 -#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) +#define ADPA_VSYNC_ACTIVE_HIGH (1 << 4) #define ADPA_VSYNC_ACTIVE_LOW 0 -#define ADPA_HSYNC_ACTIVE_HIGH (1<<3) +#define ADPA_HSYNC_ACTIVE_HIGH (1 << 3) #define ADPA_HSYNC_ACTIVE_LOW 0 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define 
ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) +#define ADPA_DPMS_MASK (~(3 << 10)) +#define ADPA_DPMS_ON (0 << 10) +#define ADPA_DPMS_SUSPEND (1 << 10) +#define ADPA_DPMS_STANDBY (2 << 10) +#define ADPA_DPMS_OFF (3 << 10) /* Hotplug control (945+ only) */ @@ -4301,9 +4425,9 @@ enum { /* Gen 3 SDVO bits: */ #define SDVO_ENABLE (1 << 31) -#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) +#define SDVO_PIPE_SEL_SHIFT 30 #define SDVO_PIPE_SEL_MASK (1 << 30) -#define SDVO_PIPE_B_SELECT (1 << 30) +#define SDVO_PIPE_SEL(pipe) ((pipe) << 30) #define SDVO_STALL_SELECT (1 << 29) #define SDVO_INTERRUPT_ENABLE (1 << 26) /* @@ -4343,12 +4467,14 @@ enum { #define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */ /* Gen 6 (CPT) SDVO/HDMI bits: */ -#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) +#define SDVO_PIPE_SEL_SHIFT_CPT 29 #define SDVO_PIPE_SEL_MASK_CPT (3 << 29) +#define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) /* CHV SDVO/HDMI bits: */ -#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24) +#define SDVO_PIPE_SEL_SHIFT_CHV 24 #define SDVO_PIPE_SEL_MASK_CHV (3 << 24) +#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24) /* DVO port control */ @@ -4359,7 +4485,9 @@ enum { #define _DVOC 0x61160 #define DVOC _MMIO(_DVOC) #define DVO_ENABLE (1 << 31) -#define DVO_PIPE_B_SELECT (1 << 30) +#define DVO_PIPE_SEL_SHIFT 30 +#define DVO_PIPE_SEL_MASK (1 << 30) +#define DVO_PIPE_SEL(pipe) ((pipe) << 30) #define DVO_PIPE_STALL_UNUSED (0 << 28) #define DVO_PIPE_STALL (1 << 28) #define DVO_PIPE_STALL_TV (2 << 28) @@ -4381,7 +4509,7 @@ enum { #define DVO_BLANK_ACTIVE_HIGH (1 << 2) #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ -#define DVO_PRESERVE_MASK (0x7<<24) +#define DVO_PRESERVE_MASK (0x7 << 24) #define DVOA_SRCDIM _MMIO(0x61124) #define DVOB_SRCDIM _MMIO(0x61144) #define DVOC_SRCDIM _MMIO(0x61164) @@ -4396,9 +4524,12 @@ enum { */ #define LVDS_PORT_EN (1 << 31) /* Selects pipe B for LVDS data. Must be set on pre-965. */ -#define LVDS_PIPEB_SELECT (1 << 30) -#define LVDS_PIPE_MASK (1 << 30) -#define LVDS_PIPE(pipe) ((pipe) << 30) +#define LVDS_PIPE_SEL_SHIFT 30 +#define LVDS_PIPE_SEL_MASK (1 << 30) +#define LVDS_PIPE_SEL(pipe) ((pipe) << 30) +#define LVDS_PIPE_SEL_SHIFT_CPT 29 +#define LVDS_PIPE_SEL_MASK_CPT (3 << 29) +#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29) /* LVDS dithering flag on 965/g4x platform */ #define LVDS_ENABLE_DITHER (1 << 25) /* LVDS sync polarity flags. Set to invert (i.e. negative) */ @@ -4471,6 +4602,16 @@ enum { #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) +#define DRM_DIP_ENABLE (1 << 28) +#define PSR_VSC_BIT_7_SET (1 << 27) +#define VSC_SELECT_MASK (0x3 << 26) +#define VSC_SELECT_SHIFT 26 +#define VSC_DIP_HW_HEA_DATA (0 << 26) +#define VSC_DIP_HW_HEA_SW_DATA (1 << 26) +#define VSC_DIP_HW_DATA_SW_HEA (2 << 26) +#define VSC_DIP_SW_HEA_DATA (3 << 26) +#define VDIP_ENABLE_PPS (1 << 24) + /* Panel power sequencing */ #define PPS_BASE 0x61200 #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) @@ -4695,7 +4836,9 @@ enum { /* Enables the TV encoder */ # define TV_ENC_ENABLE (1 << 31) /* Sources the TV encoder input from pipe B instead of A. 
*/ -# define TV_ENC_PIPEB_SELECT (1 << 30) +# define TV_ENC_PIPE_SEL_SHIFT 30 +# define TV_ENC_PIPE_SEL_MASK (1 << 30) +# define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30) /* Outputs composite video (DAC A only) */ # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) /* Outputs SVideo video (DAC B/C) */ @@ -5177,10 +5320,15 @@ enum { #define CHV_DP_D _MMIO(VLV_DISPLAY_BASE + 0x64300) #define DP_PORT_EN (1 << 31) -#define DP_PIPEB_SELECT (1 << 30) -#define DP_PIPE_MASK (1 << 30) -#define DP_PIPE_SELECT_CHV(pipe) ((pipe) << 16) -#define DP_PIPE_MASK_CHV (3 << 16) +#define DP_PIPE_SEL_SHIFT 30 +#define DP_PIPE_SEL_MASK (1 << 30) +#define DP_PIPE_SEL(pipe) ((pipe) << 30) +#define DP_PIPE_SEL_SHIFT_IVB 29 +#define DP_PIPE_SEL_MASK_IVB (3 << 29) +#define DP_PIPE_SEL_IVB(pipe) ((pipe) << 29) +#define DP_PIPE_SEL_SHIFT_CHV 16 +#define DP_PIPE_SEL_MASK_CHV (3 << 16) +#define DP_PIPE_SEL_CHV(pipe) ((pipe) << 16) /* Link training mode - select a suitable mode for each stage */ #define DP_LINK_TRAIN_PAT_1 (0 << 28) @@ -5287,6 +5435,13 @@ enum { #define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320) #define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324) +#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410) +#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414) +#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418) +#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c) +#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420) +#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424) + #define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510) #define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514) #define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518) @@ -5342,7 +5497,7 @@ enum { #define _PIPEB_DATA_M_G4X 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ -#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ +#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */ #define TU_SIZE_SHIFT 25 #define TU_SIZE_MASK (0x3f << 25) @@ -5384,18 +5539,18 @@ enum { #define DSL_LINEMASK_GEN2 0x00000fff #define DSL_LINEMASK_GEN3 0x00001fff #define _PIPEACONF 0x70008 -#define PIPECONF_ENABLE (1<<31) +#define PIPECONF_ENABLE (1 << 31) #define PIPECONF_DISABLE 0 -#define PIPECONF_DOUBLE_WIDE (1<<30) -#define I965_PIPECONF_ACTIVE (1<<30) -#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK (3<<27) +#define PIPECONF_DOUBLE_WIDE (1 << 30) +#define I965_PIPECONF_ACTIVE (1 << 30) +#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */ +#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 -#define PIPECONF_PIPE_LOCKED (1<<25) +#define PIPECONF_PIPE_LOCKED (1 << 25) #define PIPECONF_PALETTE 0 -#define PIPECONF_GAMMA (1<<24) -#define PIPECONF_FORCE_BORDER (1<<25) +#define PIPECONF_GAMMA (1 << 24) +#define PIPECONF_FORCE_BORDER (1 << 25) #define PIPECONF_INTERLACE_MASK (7 << 21) #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) /* Note that pre-gen3 does not support interlaced display directly. 
Panel @@ -5414,67 +5569,67 @@ enum { #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ #define PIPECONF_INTERLACE_MODE_MASK (7 << 21) #define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20) -#define PIPECONF_CXSR_DOWNCLOCK (1<<16) +#define PIPECONF_CXSR_DOWNCLOCK (1 << 16) #define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) #define PIPECONF_BPC_MASK (0x7 << 5) -#define PIPECONF_8BPC (0<<5) -#define PIPECONF_10BPC (1<<5) -#define PIPECONF_6BPC (2<<5) -#define PIPECONF_12BPC (3<<5) -#define PIPECONF_DITHER_EN (1<<4) +#define PIPECONF_8BPC (0 << 5) +#define PIPECONF_10BPC (1 << 5) +#define PIPECONF_6BPC (2 << 5) +#define PIPECONF_12BPC (3 << 5) +#define PIPECONF_DITHER_EN (1 << 4) #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) -#define PIPECONF_DITHER_TYPE_SP (0<<2) -#define PIPECONF_DITHER_TYPE_ST1 (1<<2) -#define PIPECONF_DITHER_TYPE_ST2 (2<<2) -#define PIPECONF_DITHER_TYPE_TEMP (3<<2) +#define PIPECONF_DITHER_TYPE_SP (0 << 2) +#define PIPECONF_DITHER_TYPE_ST1 (1 << 2) +#define PIPECONF_DITHER_TYPE_ST2 (2 << 2) +#define PIPECONF_DITHER_TYPE_TEMP (3 << 2) #define _PIPEASTAT 0x70024 -#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) -#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) -#define PIPE_CRC_ERROR_ENABLE (1UL<<29) -#define PIPE_CRC_DONE_ENABLE (1UL<<28) -#define PERF_COUNTER2_INTERRUPT_EN (1UL<<27) -#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) -#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26) -#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) -#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) -#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) -#define PIPE_DPST_EVENT_ENABLE (1UL<<23) -#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) -#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) -#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) -#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) -#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL<<19) -#define PERF_COUNTER_INTERRUPT_EN (1UL<<19) -#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ -#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ -#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL<<17) -#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) -#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15) -#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) -#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) -#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) -#define PERF_COUNTER2_INTERRUPT_STATUS (1UL<<11) -#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) -#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) -#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) -#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) -#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) -#define PIPE_DPST_EVENT_STATUS (1UL<<7) -#define PIPE_A_PSR_STATUS_VLV (1UL<<6) -#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) -#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) -#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) -#define PIPE_B_PSR_STATUS_VLV (1UL<<3) -#define PERF_COUNTER_INTERRUPT_STATUS (1UL<<3) -#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ -#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ -#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL<<1) -#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) -#define PIPE_HBLANK_INT_STATUS (1UL<<0) -#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) +#define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) +#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) +#define PIPE_CRC_ERROR_ENABLE (1UL << 29) 
+#define PIPE_CRC_DONE_ENABLE (1UL << 28) +#define PERF_COUNTER2_INTERRUPT_EN (1UL << 27) +#define PIPE_GMBUS_EVENT_ENABLE (1UL << 27) +#define PLANE_FLIP_DONE_INT_EN_VLV (1UL << 26) +#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL << 26) +#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL << 25) +#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL << 24) +#define PIPE_DPST_EVENT_ENABLE (1UL << 23) +#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL << 22) +#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL << 22) +#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL << 21) +#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL << 20) +#define PIPE_B_PSR_INTERRUPT_ENABLE_VLV (1UL << 19) +#define PERF_COUNTER_INTERRUPT_EN (1UL << 19) +#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL << 18) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL << 18) /* 965 or later */ +#define PIPE_FRAMESTART_INTERRUPT_ENABLE (1UL << 17) +#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL << 17) +#define PIPEA_HBLANK_INT_EN_VLV (1UL << 16) +#define PIPE_OVERLAY_UPDATED_ENABLE (1UL << 16) +#define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL << 15) +#define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL << 14) +#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL << 13) +#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL << 12) +#define PERF_COUNTER2_INTERRUPT_STATUS (1UL << 11) +#define PIPE_GMBUS_INTERRUPT_STATUS (1UL << 11) +#define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL << 10) +#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL << 10) +#define PIPE_VSYNC_INTERRUPT_STATUS (1UL << 9) +#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL << 8) +#define PIPE_DPST_EVENT_STATUS (1UL << 7) +#define PIPE_A_PSR_STATUS_VLV (1UL << 6) +#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL << 6) +#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL << 5) +#define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL << 4) +#define PIPE_B_PSR_STATUS_VLV (1UL << 3) +#define PERF_COUNTER_INTERRUPT_STATUS (1UL << 3) +#define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL << 2) /* pre-965 */ +#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL << 2) /* 965 or later */ +#define PIPE_FRAMESTART_INTERRUPT_STATUS (1UL << 1) +#define PIPE_VBLANK_INTERRUPT_STATUS (1UL << 1) +#define PIPE_HBLANK_INT_STATUS (1UL << 0) +#define PIPE_OVERLAY_UPDATED_STATUS (1UL << 0) #define PIPESTAT_INT_ENABLE_MASK 0x7fff0000 #define PIPESTAT_INT_STATUS_MASK 0x0000ffff @@ -5503,67 +5658,67 @@ enum { #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE (1<<27) -#define PIPEMISC_YUV420_MODE_FULL_BLEND (1<<26) -#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1<<11) -#define PIPEMISC_DITHER_BPC_MASK (7<<5) -#define PIPEMISC_DITHER_8_BPC (0<<5) -#define PIPEMISC_DITHER_10_BPC (1<<5) -#define PIPEMISC_DITHER_6_BPC (2<<5) -#define PIPEMISC_DITHER_12_BPC (3<<5) -#define PIPEMISC_DITHER_ENABLE (1<<4) -#define PIPEMISC_DITHER_TYPE_MASK (3<<2) -#define PIPEMISC_DITHER_TYPE_SP (0<<2) +#define PIPEMISC_YUV420_ENABLE (1 << 27) +#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) +#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) +#define PIPEMISC_DITHER_BPC_MASK (7 << 5) +#define PIPEMISC_DITHER_8_BPC (0 << 5) +#define PIPEMISC_DITHER_10_BPC (1 << 5) +#define PIPEMISC_DITHER_6_BPC (2 << 5) +#define PIPEMISC_DITHER_12_BPC (3 << 5) +#define PIPEMISC_DITHER_ENABLE (1 << 4) +#define PIPEMISC_DITHER_TYPE_MASK (3 << 2) +#define PIPEMISC_DITHER_TYPE_SP (0 << 2) #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) -#define PIPEB_LINE_COMPARE_INT_EN (1<<29) -#define PIPEB_HLINE_INT_EN (1<<28) -#define PIPEB_VBLANK_INT_EN (1<<27) -#define 
SPRITED_FLIP_DONE_INT_EN (1<<26) -#define SPRITEC_FLIP_DONE_INT_EN (1<<25) -#define PLANEB_FLIP_DONE_INT_EN (1<<24) -#define PIPE_PSR_INT_EN (1<<22) -#define PIPEA_LINE_COMPARE_INT_EN (1<<21) -#define PIPEA_HLINE_INT_EN (1<<20) -#define PIPEA_VBLANK_INT_EN (1<<19) -#define SPRITEB_FLIP_DONE_INT_EN (1<<18) -#define SPRITEA_FLIP_DONE_INT_EN (1<<17) -#define PLANEA_FLIPDONE_INT_EN (1<<16) -#define PIPEC_LINE_COMPARE_INT_EN (1<<13) -#define PIPEC_HLINE_INT_EN (1<<12) -#define PIPEC_VBLANK_INT_EN (1<<11) -#define SPRITEF_FLIPDONE_INT_EN (1<<10) -#define SPRITEE_FLIPDONE_INT_EN (1<<9) -#define PLANEC_FLIPDONE_INT_EN (1<<8) +#define PIPEB_LINE_COMPARE_INT_EN (1 << 29) +#define PIPEB_HLINE_INT_EN (1 << 28) +#define PIPEB_VBLANK_INT_EN (1 << 27) +#define SPRITED_FLIP_DONE_INT_EN (1 << 26) +#define SPRITEC_FLIP_DONE_INT_EN (1 << 25) +#define PLANEB_FLIP_DONE_INT_EN (1 << 24) +#define PIPE_PSR_INT_EN (1 << 22) +#define PIPEA_LINE_COMPARE_INT_EN (1 << 21) +#define PIPEA_HLINE_INT_EN (1 << 20) +#define PIPEA_VBLANK_INT_EN (1 << 19) +#define SPRITEB_FLIP_DONE_INT_EN (1 << 18) +#define SPRITEA_FLIP_DONE_INT_EN (1 << 17) +#define PLANEA_FLIPDONE_INT_EN (1 << 16) +#define PIPEC_LINE_COMPARE_INT_EN (1 << 13) +#define PIPEC_HLINE_INT_EN (1 << 12) +#define PIPEC_VBLANK_INT_EN (1 << 11) +#define SPRITEF_FLIPDONE_INT_EN (1 << 10) +#define SPRITEE_FLIPDONE_INT_EN (1 << 9) +#define PLANEC_FLIPDONE_INT_EN (1 << 8) #define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ -#define SPRITEF_INVALID_GTT_INT_EN (1<<27) -#define SPRITEE_INVALID_GTT_INT_EN (1<<26) -#define PLANEC_INVALID_GTT_INT_EN (1<<25) -#define CURSORC_INVALID_GTT_INT_EN (1<<24) -#define CURSORB_INVALID_GTT_INT_EN (1<<23) -#define CURSORA_INVALID_GTT_INT_EN (1<<22) -#define SPRITED_INVALID_GTT_INT_EN (1<<21) -#define SPRITEC_INVALID_GTT_INT_EN (1<<20) -#define PLANEB_INVALID_GTT_INT_EN (1<<19) -#define SPRITEB_INVALID_GTT_INT_EN (1<<18) -#define SPRITEA_INVALID_GTT_INT_EN (1<<17) -#define PLANEA_INVALID_GTT_INT_EN (1<<16) +#define SPRITEF_INVALID_GTT_INT_EN (1 << 27) +#define SPRITEE_INVALID_GTT_INT_EN (1 << 26) +#define PLANEC_INVALID_GTT_INT_EN (1 << 25) +#define CURSORC_INVALID_GTT_INT_EN (1 << 24) +#define CURSORB_INVALID_GTT_INT_EN (1 << 23) +#define CURSORA_INVALID_GTT_INT_EN (1 << 22) +#define SPRITED_INVALID_GTT_INT_EN (1 << 21) +#define SPRITEC_INVALID_GTT_INT_EN (1 << 20) +#define PLANEB_INVALID_GTT_INT_EN (1 << 19) +#define SPRITEB_INVALID_GTT_INT_EN (1 << 18) +#define SPRITEA_INVALID_GTT_INT_EN (1 << 17) +#define PLANEA_INVALID_GTT_INT_EN (1 << 16) #define DPINVGTT_EN_MASK 0xff0000 #define DPINVGTT_EN_MASK_CHV 0xfff0000 -#define SPRITEF_INVALID_GTT_STATUS (1<<11) -#define SPRITEE_INVALID_GTT_STATUS (1<<10) -#define PLANEC_INVALID_GTT_STATUS (1<<9) -#define CURSORC_INVALID_GTT_STATUS (1<<8) -#define CURSORB_INVALID_GTT_STATUS (1<<7) -#define CURSORA_INVALID_GTT_STATUS (1<<6) -#define SPRITED_INVALID_GTT_STATUS (1<<5) -#define SPRITEC_INVALID_GTT_STATUS (1<<4) -#define PLANEB_INVALID_GTT_STATUS (1<<3) -#define SPRITEB_INVALID_GTT_STATUS (1<<2) -#define SPRITEA_INVALID_GTT_STATUS (1<<1) -#define PLANEA_INVALID_GTT_STATUS (1<<0) +#define SPRITEF_INVALID_GTT_STATUS (1 << 11) +#define SPRITEE_INVALID_GTT_STATUS (1 << 10) +#define PLANEC_INVALID_GTT_STATUS (1 << 9) +#define CURSORC_INVALID_GTT_STATUS (1 << 8) +#define CURSORB_INVALID_GTT_STATUS (1 << 7) +#define CURSORA_INVALID_GTT_STATUS (1 << 6) +#define SPRITED_INVALID_GTT_STATUS (1 << 5) +#define SPRITEC_INVALID_GTT_STATUS (1 << 4) +#define PLANEB_INVALID_GTT_STATUS (1 << 3) 
+#define SPRITEB_INVALID_GTT_STATUS (1 << 2) +#define SPRITEA_INVALID_GTT_STATUS (1 << 1) +#define PLANEA_INVALID_GTT_STATUS (1 << 0) #define DPINVGTT_STATUS_MASK 0xff #define DPINVGTT_STATUS_MASK_CHV 0xfff @@ -5604,149 +5759,149 @@ enum { /* pnv/gen4/g4x/vlv/chv */ #define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034) #define DSPFW_SR_SHIFT 23 -#define DSPFW_SR_MASK (0x1ff<<23) +#define DSPFW_SR_MASK (0x1ff << 23) #define DSPFW_CURSORB_SHIFT 16 -#define DSPFW_CURSORB_MASK (0x3f<<16) +#define DSPFW_CURSORB_MASK (0x3f << 16) #define DSPFW_PLANEB_SHIFT 8 -#define DSPFW_PLANEB_MASK (0x7f<<8) -#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */ +#define DSPFW_PLANEB_MASK (0x7f << 8) +#define DSPFW_PLANEB_MASK_VLV (0xff << 8) /* vlv/chv */ #define DSPFW_PLANEA_SHIFT 0 -#define DSPFW_PLANEA_MASK (0x7f<<0) -#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */ +#define DSPFW_PLANEA_MASK (0x7f << 0) +#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */ #define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038) -#define DSPFW_FBC_SR_EN (1<<31) /* g4x */ +#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */ #define DSPFW_FBC_SR_SHIFT 28 -#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */ +#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */ #define DSPFW_FBC_HPLL_SR_SHIFT 24 -#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */ +#define DSPFW_FBC_HPLL_SR_MASK (0xf << 24) /* g4x */ #define DSPFW_SPRITEB_SHIFT (16) -#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */ -#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ +#define DSPFW_SPRITEB_MASK (0x7f << 16) /* g4x */ +#define DSPFW_SPRITEB_MASK_VLV (0xff << 16) /* vlv/chv */ #define DSPFW_CURSORA_SHIFT 8 -#define DSPFW_CURSORA_MASK (0x3f<<8) +#define DSPFW_CURSORA_MASK (0x3f << 8) #define DSPFW_PLANEC_OLD_SHIFT 0 -#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */ +#define DSPFW_PLANEC_OLD_MASK (0x7f << 0) /* pre-gen4 sprite C */ #define DSPFW_SPRITEA_SHIFT 0 -#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ -#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ +#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */ +#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */ #define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c) -#define DSPFW_HPLL_SR_EN (1<<31) -#define PINEVIEW_SELF_REFRESH_EN (1<<30) +#define DSPFW_HPLL_SR_EN (1 << 31) +#define PINEVIEW_SELF_REFRESH_EN (1 << 30) #define DSPFW_CURSOR_SR_SHIFT 24 -#define DSPFW_CURSOR_SR_MASK (0x3f<<24) +#define DSPFW_CURSOR_SR_MASK (0x3f << 24) #define DSPFW_HPLL_CURSOR_SHIFT 16 -#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) +#define DSPFW_HPLL_CURSOR_MASK (0x3f << 16) #define DSPFW_HPLL_SR_SHIFT 0 -#define DSPFW_HPLL_SR_MASK (0x1ff<<0) +#define DSPFW_HPLL_SR_MASK (0x1ff << 0) /* vlv/chv */ #define DSPFW4 _MMIO(VLV_DISPLAY_BASE + 0x70070) #define DSPFW_SPRITEB_WM1_SHIFT 16 -#define DSPFW_SPRITEB_WM1_MASK (0xff<<16) +#define DSPFW_SPRITEB_WM1_MASK (0xff << 16) #define DSPFW_CURSORA_WM1_SHIFT 8 -#define DSPFW_CURSORA_WM1_MASK (0x3f<<8) +#define DSPFW_CURSORA_WM1_MASK (0x3f << 8) #define DSPFW_SPRITEA_WM1_SHIFT 0 -#define DSPFW_SPRITEA_WM1_MASK (0xff<<0) +#define DSPFW_SPRITEA_WM1_MASK (0xff << 0) #define DSPFW5 _MMIO(VLV_DISPLAY_BASE + 0x70074) #define DSPFW_PLANEB_WM1_SHIFT 24 -#define DSPFW_PLANEB_WM1_MASK (0xff<<24) +#define DSPFW_PLANEB_WM1_MASK (0xff << 24) #define DSPFW_PLANEA_WM1_SHIFT 16 -#define DSPFW_PLANEA_WM1_MASK (0xff<<16) +#define DSPFW_PLANEA_WM1_MASK (0xff << 16) #define DSPFW_CURSORB_WM1_SHIFT 8 -#define DSPFW_CURSORB_WM1_MASK (0x3f<<8) +#define 
DSPFW_CURSORB_WM1_MASK (0x3f << 8) #define DSPFW_CURSOR_SR_WM1_SHIFT 0 -#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0) +#define DSPFW_CURSOR_SR_WM1_MASK (0x3f << 0) #define DSPFW6 _MMIO(VLV_DISPLAY_BASE + 0x70078) #define DSPFW_SR_WM1_SHIFT 0 -#define DSPFW_SR_WM1_MASK (0x1ff<<0) +#define DSPFW_SR_WM1_MASK (0x1ff << 0) #define DSPFW7 _MMIO(VLV_DISPLAY_BASE + 0x7007c) #define DSPFW7_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */ #define DSPFW_SPRITED_WM1_SHIFT 24 -#define DSPFW_SPRITED_WM1_MASK (0xff<<24) +#define DSPFW_SPRITED_WM1_MASK (0xff << 24) #define DSPFW_SPRITED_SHIFT 16 -#define DSPFW_SPRITED_MASK_VLV (0xff<<16) +#define DSPFW_SPRITED_MASK_VLV (0xff << 16) #define DSPFW_SPRITEC_WM1_SHIFT 8 -#define DSPFW_SPRITEC_WM1_MASK (0xff<<8) +#define DSPFW_SPRITEC_WM1_MASK (0xff << 8) #define DSPFW_SPRITEC_SHIFT 0 -#define DSPFW_SPRITEC_MASK_VLV (0xff<<0) +#define DSPFW_SPRITEC_MASK_VLV (0xff << 0) #define DSPFW8_CHV _MMIO(VLV_DISPLAY_BASE + 0x700b8) #define DSPFW_SPRITEF_WM1_SHIFT 24 -#define DSPFW_SPRITEF_WM1_MASK (0xff<<24) +#define DSPFW_SPRITEF_WM1_MASK (0xff << 24) #define DSPFW_SPRITEF_SHIFT 16 -#define DSPFW_SPRITEF_MASK_VLV (0xff<<16) +#define DSPFW_SPRITEF_MASK_VLV (0xff << 16) #define DSPFW_SPRITEE_WM1_SHIFT 8 -#define DSPFW_SPRITEE_WM1_MASK (0xff<<8) +#define DSPFW_SPRITEE_WM1_MASK (0xff << 8) #define DSPFW_SPRITEE_SHIFT 0 -#define DSPFW_SPRITEE_MASK_VLV (0xff<<0) +#define DSPFW_SPRITEE_MASK_VLV (0xff << 0) #define DSPFW9_CHV _MMIO(VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */ #define DSPFW_PLANEC_WM1_SHIFT 24 -#define DSPFW_PLANEC_WM1_MASK (0xff<<24) +#define DSPFW_PLANEC_WM1_MASK (0xff << 24) #define DSPFW_PLANEC_SHIFT 16 -#define DSPFW_PLANEC_MASK_VLV (0xff<<16) +#define DSPFW_PLANEC_MASK_VLV (0xff << 16) #define DSPFW_CURSORC_WM1_SHIFT 8 -#define DSPFW_CURSORC_WM1_MASK (0x3f<<16) +#define DSPFW_CURSORC_WM1_MASK (0x3f << 16) #define DSPFW_CURSORC_SHIFT 0 -#define DSPFW_CURSORC_MASK (0x3f<<0) +#define DSPFW_CURSORC_MASK (0x3f << 0) /* vlv/chv high order bits */ #define DSPHOWM _MMIO(VLV_DISPLAY_BASE + 0x70064) #define DSPFW_SR_HI_SHIFT 24 -#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ +#define DSPFW_SR_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_HI_SHIFT 23 -#define DSPFW_SPRITEF_HI_MASK (1<<23) +#define DSPFW_SPRITEF_HI_MASK (1 << 23) #define DSPFW_SPRITEE_HI_SHIFT 22 -#define DSPFW_SPRITEE_HI_MASK (1<<22) +#define DSPFW_SPRITEE_HI_MASK (1 << 22) #define DSPFW_PLANEC_HI_SHIFT 21 -#define DSPFW_PLANEC_HI_MASK (1<<21) +#define DSPFW_PLANEC_HI_MASK (1 << 21) #define DSPFW_SPRITED_HI_SHIFT 20 -#define DSPFW_SPRITED_HI_MASK (1<<20) +#define DSPFW_SPRITED_HI_MASK (1 << 20) #define DSPFW_SPRITEC_HI_SHIFT 16 -#define DSPFW_SPRITEC_HI_MASK (1<<16) +#define DSPFW_SPRITEC_HI_MASK (1 << 16) #define DSPFW_PLANEB_HI_SHIFT 12 -#define DSPFW_PLANEB_HI_MASK (1<<12) +#define DSPFW_PLANEB_HI_MASK (1 << 12) #define DSPFW_SPRITEB_HI_SHIFT 8 -#define DSPFW_SPRITEB_HI_MASK (1<<8) +#define DSPFW_SPRITEB_HI_MASK (1 << 8) #define DSPFW_SPRITEA_HI_SHIFT 4 -#define DSPFW_SPRITEA_HI_MASK (1<<4) +#define DSPFW_SPRITEA_HI_MASK (1 << 4) #define DSPFW_PLANEA_HI_SHIFT 0 -#define DSPFW_PLANEA_HI_MASK (1<<0) +#define DSPFW_PLANEA_HI_MASK (1 << 0) #define DSPHOWM1 _MMIO(VLV_DISPLAY_BASE + 0x70068) #define DSPFW_SR_WM1_HI_SHIFT 24 -#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ +#define DSPFW_SR_WM1_HI_MASK (3 << 24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 -#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) +#define 
DSPFW_SPRITEF_WM1_HI_MASK (1 << 23) #define DSPFW_SPRITEE_WM1_HI_SHIFT 22 -#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22) +#define DSPFW_SPRITEE_WM1_HI_MASK (1 << 22) #define DSPFW_PLANEC_WM1_HI_SHIFT 21 -#define DSPFW_PLANEC_WM1_HI_MASK (1<<21) +#define DSPFW_PLANEC_WM1_HI_MASK (1 << 21) #define DSPFW_SPRITED_WM1_HI_SHIFT 20 -#define DSPFW_SPRITED_WM1_HI_MASK (1<<20) +#define DSPFW_SPRITED_WM1_HI_MASK (1 << 20) #define DSPFW_SPRITEC_WM1_HI_SHIFT 16 -#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16) +#define DSPFW_SPRITEC_WM1_HI_MASK (1 << 16) #define DSPFW_PLANEB_WM1_HI_SHIFT 12 -#define DSPFW_PLANEB_WM1_HI_MASK (1<<12) +#define DSPFW_PLANEB_WM1_HI_MASK (1 << 12) #define DSPFW_SPRITEB_WM1_HI_SHIFT 8 -#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8) +#define DSPFW_SPRITEB_WM1_HI_MASK (1 << 8) #define DSPFW_SPRITEA_WM1_HI_SHIFT 4 -#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4) +#define DSPFW_SPRITEA_WM1_HI_MASK (1 << 4) #define DSPFW_PLANEA_WM1_HI_SHIFT 0 -#define DSPFW_PLANEA_WM1_HI_MASK (1<<0) +#define DSPFW_PLANEA_WM1_HI_MASK (1 << 0) /* drain latency register values*/ #define VLV_DDL(pipe) _MMIO(VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) #define DDL_CURSOR_SHIFT 24 -#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) +#define DDL_SPRITE_SHIFT(sprite) (8 + 8 * (sprite)) #define DDL_PLANE_SHIFT 0 -#define DDL_PRECISION_HIGH (1<<7) -#define DDL_PRECISION_LOW (0<<7) +#define DDL_PRECISION_HIGH (1 << 7) +#define DDL_PRECISION_LOW (0 << 7) #define DRAIN_LATENCY_MASK 0x7f #define CBR1_VLV _MMIO(VLV_DISPLAY_BASE + 0x70400) -#define CBR_PND_DEADLINE_DISABLE (1<<31) -#define CBR_PWM_CLOCK_MUX_SELECT (1<<30) +#define CBR_PND_DEADLINE_DISABLE (1 << 31) +#define CBR_PWM_CLOCK_MUX_SELECT (1 << 30) #define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450) -#define CBR_DPLLBMD_PIPE(pipe) (1<<(7+(pipe)*11)) /* pipes B and C */ +#define CBR_DPLLBMD_PIPE(pipe) (1 << (7 + (pipe) * 11)) /* pipes B and C */ /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 @@ -5818,32 +5973,32 @@ enum { /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK _MMIO(0x45100) -#define WM0_PIPE_PLANE_MASK (0xffff<<16) +#define WM0_PIPE_PLANE_MASK (0xffff << 16) #define WM0_PIPE_PLANE_SHIFT 16 -#define WM0_PIPE_SPRITE_MASK (0xff<<8) +#define WM0_PIPE_SPRITE_MASK (0xff << 8) #define WM0_PIPE_SPRITE_SHIFT 8 #define WM0_PIPE_CURSOR_MASK (0xff) #define WM0_PIPEB_ILK _MMIO(0x45104) #define WM0_PIPEC_IVB _MMIO(0x45200) #define WM1_LP_ILK _MMIO(0x45108) -#define WM1_LP_SR_EN (1<<31) +#define WM1_LP_SR_EN (1 << 31) #define WM1_LP_LATENCY_SHIFT 24 -#define WM1_LP_LATENCY_MASK (0x7f<<24) -#define WM1_LP_FBC_MASK (0xf<<20) +#define WM1_LP_LATENCY_MASK (0x7f << 24) +#define WM1_LP_FBC_MASK (0xf << 20) #define WM1_LP_FBC_SHIFT 20 #define WM1_LP_FBC_SHIFT_BDW 19 -#define WM1_LP_SR_MASK (0x7ff<<8) +#define WM1_LP_SR_MASK (0x7ff << 8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0xff) #define WM2_LP_ILK _MMIO(0x4510c) -#define WM2_LP_EN (1<<31) +#define WM2_LP_EN (1 << 31) #define WM3_LP_ILK _MMIO(0x45110) -#define WM3_LP_EN (1<<31) +#define WM3_LP_EN (1 << 31) #define WM1S_LP_ILK _MMIO(0x45120) #define WM2S_LP_IVB _MMIO(0x45124) #define WM3S_LP_IVB _MMIO(0x45128) -#define WM1S_LP_EN (1<<31) +#define WM1S_LP_EN (1 << 31) #define HSW_WM_LP_VAL(lat, fbc, pri, cur) \ (WM3_LP_EN | ((lat) << WM1_LP_LATENCY_SHIFT) | \ @@ -5900,8 +6055,7 @@ enum { #define CURSOR_ENABLE 0x80000000 #define CURSOR_GAMMA_ENABLE 0x40000000 #define CURSOR_STRIDE_SHIFT 28 -#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */ -#define 
CURSOR_PIPE_CSC_ENABLE (1<<24) +#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */ #define CURSOR_FORMAT_SHIFT 24 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) @@ -5910,18 +6064,21 @@ enum { #define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) #define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) /* New style CUR*CNTR flags */ -#define CURSOR_MODE 0x27 -#define CURSOR_MODE_DISABLE 0x00 -#define CURSOR_MODE_128_32B_AX 0x02 -#define CURSOR_MODE_256_32B_AX 0x03 -#define CURSOR_MODE_64_32B_AX 0x07 -#define CURSOR_MODE_128_ARGB_AX ((1 << 5) | CURSOR_MODE_128_32B_AX) -#define CURSOR_MODE_256_ARGB_AX ((1 << 5) | CURSOR_MODE_256_32B_AX) -#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) +#define MCURSOR_MODE 0x27 +#define MCURSOR_MODE_DISABLE 0x00 +#define MCURSOR_MODE_128_32B_AX 0x02 +#define MCURSOR_MODE_256_32B_AX 0x03 +#define MCURSOR_MODE_64_32B_AX 0x07 +#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX) +#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX) +#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX) +#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28) +#define MCURSOR_PIPE_SELECT_SHIFT 28 #define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) -#define CURSOR_ROTATE_180 (1<<15) -#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) +#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) +#define MCURSOR_ROTATE_180 (1 << 15) +#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 #define CURSOR_POS_MASK 0x007FF @@ -5958,41 +6115,41 @@ enum { /* Display A control */ #define _DSPACNTR 0x70180 -#define DISPLAY_PLANE_ENABLE (1<<31) +#define DISPLAY_PLANE_ENABLE (1 << 31) #define DISPLAY_PLANE_DISABLE 0 -#define DISPPLANE_GAMMA_ENABLE (1<<30) +#define DISPPLANE_GAMMA_ENABLE (1 << 30) #define DISPPLANE_GAMMA_DISABLE 0 -#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) -#define DISPPLANE_YUV422 (0x0<<26) -#define DISPPLANE_8BPP (0x2<<26) -#define DISPPLANE_BGRA555 (0x3<<26) -#define DISPPLANE_BGRX555 (0x4<<26) -#define DISPPLANE_BGRX565 (0x5<<26) -#define DISPPLANE_BGRX888 (0x6<<26) -#define DISPPLANE_BGRA888 (0x7<<26) -#define DISPPLANE_RGBX101010 (0x8<<26) -#define DISPPLANE_RGBA101010 (0x9<<26) -#define DISPPLANE_BGRX101010 (0xa<<26) -#define DISPPLANE_RGBX161616 (0xc<<26) -#define DISPPLANE_RGBX888 (0xe<<26) -#define DISPPLANE_RGBA888 (0xf<<26) -#define DISPPLANE_STEREO_ENABLE (1<<25) +#define DISPPLANE_PIXFORMAT_MASK (0xf << 26) +#define DISPPLANE_YUV422 (0x0 << 26) +#define DISPPLANE_8BPP (0x2 << 26) +#define DISPPLANE_BGRA555 (0x3 << 26) +#define DISPPLANE_BGRX555 (0x4 << 26) +#define DISPPLANE_BGRX565 (0x5 << 26) +#define DISPPLANE_BGRX888 (0x6 << 26) +#define DISPPLANE_BGRA888 (0x7 << 26) +#define DISPPLANE_RGBX101010 (0x8 << 26) +#define DISPPLANE_RGBA101010 (0x9 << 26) +#define DISPPLANE_BGRX101010 (0xa << 26) +#define DISPPLANE_RGBX161616 (0xc << 26) +#define DISPPLANE_RGBX888 (0xe << 26) +#define DISPPLANE_RGBA888 (0xf << 26) +#define DISPPLANE_STEREO_ENABLE (1 << 25) #define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_PIPE_CSC_ENABLE (1<<24) +#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) #define DISPPLANE_SEL_PIPE_SHIFT 24 -#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SEL_PIPE(pipe) ((pipe)<<DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SRC_KEY_ENABLE (1<<22) +#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT) +#define 
DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT) +#define DISPPLANE_SRC_KEY_ENABLE (1 << 22) #define DISPPLANE_SRC_KEY_DISABLE 0 -#define DISPPLANE_LINE_DOUBLE (1<<20) +#define DISPPLANE_LINE_DOUBLE (1 << 20) #define DISPPLANE_NO_LINE_DOUBLE 0 #define DISPPLANE_STEREO_POLARITY_FIRST 0 -#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) -#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */ -#define DISPPLANE_ROTATE_180 (1<<15) -#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ -#define DISPPLANE_TILED (1<<10) -#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */ +#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18) +#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */ +#define DISPPLANE_ROTATE_180 (1 << 15) +#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */ +#define DISPPLANE_TILED (1 << 10) +#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */ #define _DSPAADDR 0x70184 #define _DSPASTRIDE 0x70188 #define _DSPAPOS 0x7018C /* reserved */ @@ -6015,15 +6172,15 @@ enum { /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 -#define CHV_BLEND_LEGACY (0<<30) -#define CHV_BLEND_ANDROID (1<<30) -#define CHV_BLEND_MPO (2<<30) -#define CHV_BLEND_MASK (3<<30) +#define CHV_BLEND_LEGACY (0 << 30) +#define CHV_BLEND_ANDROID (1 << 30) +#define CHV_BLEND_MPO (2 << 30) +#define CHV_BLEND_MASK (3 << 30) #define _CHV_CANVAS_A 0x60a04 #define _PRIMPOS_A 0x60a08 #define _PRIMSIZE_A 0x60a0c #define _PRIMCNSTALPHA_A 0x60a10 -#define PRIM_CONST_ALPHA_ENABLE (1<<31) +#define PRIM_CONST_ALPHA_ENABLE (1 << 31) #define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A) #define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A) @@ -6033,8 +6190,8 @@ enum { /* Display/Sprite base address macros */ #define DISP_BASEADDR_MASK (0xfffff000) -#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) -#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) +#define I915_LO_DISPBASE(val) ((val) & ~DISP_BASEADDR_MASK) +#define I915_HI_DISPBASE(val) ((val) & DISP_BASEADDR_MASK) /* * VBIOS flags @@ -6064,7 +6221,7 @@ enum { /* Display B control */ #define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180) -#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) +#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15) #define DISPPLANE_ALPHA_TRANS_DISABLE 0 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) @@ -6079,27 +6236,27 @@ enum { /* Sprite A control */ #define _DVSACNTR 0x72180 -#define DVS_ENABLE (1<<31) -#define DVS_GAMMA_ENABLE (1<<30) -#define DVS_YUV_RANGE_CORRECTION_DISABLE (1<<27) -#define DVS_PIXFORMAT_MASK (3<<25) -#define DVS_FORMAT_YUV422 (0<<25) -#define DVS_FORMAT_RGBX101010 (1<<25) -#define DVS_FORMAT_RGBX888 (2<<25) -#define DVS_FORMAT_RGBX161616 (3<<25) -#define DVS_PIPE_CSC_ENABLE (1<<24) -#define DVS_SOURCE_KEY (1<<22) -#define DVS_RGB_ORDER_XBGR (1<<20) -#define DVS_YUV_FORMAT_BT709 (1<<18) -#define DVS_YUV_BYTE_ORDER_MASK (3<<16) -#define DVS_YUV_ORDER_YUYV (0<<16) -#define DVS_YUV_ORDER_UYVY (1<<16) -#define DVS_YUV_ORDER_YVYU (2<<16) -#define DVS_YUV_ORDER_VYUY (3<<16) -#define DVS_ROTATE_180 (1<<15) -#define DVS_DEST_KEY (1<<2) -#define DVS_TRICKLE_FEED_DISABLE (1<<14) -#define DVS_TILED (1<<10) +#define DVS_ENABLE (1 << 31) +#define DVS_GAMMA_ENABLE (1 << 30) +#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27) +#define DVS_PIXFORMAT_MASK (3 << 25) +#define DVS_FORMAT_YUV422 (0 << 25) +#define DVS_FORMAT_RGBX101010 (1 << 25) +#define DVS_FORMAT_RGBX888 (2 << 25) +#define DVS_FORMAT_RGBX161616 (3 << 25) +#define 
DVS_PIPE_CSC_ENABLE (1 << 24) +#define DVS_SOURCE_KEY (1 << 22) +#define DVS_RGB_ORDER_XBGR (1 << 20) +#define DVS_YUV_FORMAT_BT709 (1 << 18) +#define DVS_YUV_BYTE_ORDER_MASK (3 << 16) +#define DVS_YUV_ORDER_YUYV (0 << 16) +#define DVS_YUV_ORDER_UYVY (1 << 16) +#define DVS_YUV_ORDER_YVYU (2 << 16) +#define DVS_YUV_ORDER_VYUY (3 << 16) +#define DVS_ROTATE_180 (1 << 15) +#define DVS_DEST_KEY (1 << 2) +#define DVS_TRICKLE_FEED_DISABLE (1 << 14) +#define DVS_TILED (1 << 10) #define _DVSALINOFF 0x72184 #define _DVSASTRIDE 0x72188 #define _DVSAPOS 0x7218c @@ -6111,13 +6268,13 @@ enum { #define _DVSATILEOFF 0x721a4 #define _DVSASURFLIVE 0x721ac #define _DVSASCALE 0x72204 -#define DVS_SCALE_ENABLE (1<<31) -#define DVS_FILTER_MASK (3<<29) -#define DVS_FILTER_MEDIUM (0<<29) -#define DVS_FILTER_ENHANCING (1<<29) -#define DVS_FILTER_SOFTENING (2<<29) -#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ -#define DVS_VERTICAL_OFFSET_ENABLE (1<<27) +#define DVS_SCALE_ENABLE (1 << 31) +#define DVS_FILTER_MASK (3 << 29) +#define DVS_FILTER_MEDIUM (0 << 29) +#define DVS_FILTER_ENHANCING (1 << 29) +#define DVS_FILTER_SOFTENING (2 << 29) +#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ +#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27) #define _DVSAGAMC 0x72300 #define _DVSBCNTR 0x73180 @@ -6148,31 +6305,31 @@ enum { #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) #define _SPRA_CTL 0x70280 -#define SPRITE_ENABLE (1<<31) -#define SPRITE_GAMMA_ENABLE (1<<30) -#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1<<28) -#define SPRITE_PIXFORMAT_MASK (7<<25) -#define SPRITE_FORMAT_YUV422 (0<<25) -#define SPRITE_FORMAT_RGBX101010 (1<<25) -#define SPRITE_FORMAT_RGBX888 (2<<25) -#define SPRITE_FORMAT_RGBX161616 (3<<25) -#define SPRITE_FORMAT_YUV444 (4<<25) -#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ -#define SPRITE_PIPE_CSC_ENABLE (1<<24) -#define SPRITE_SOURCE_KEY (1<<22) -#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ -#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) -#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */ -#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16) -#define SPRITE_YUV_ORDER_YUYV (0<<16) -#define SPRITE_YUV_ORDER_UYVY (1<<16) -#define SPRITE_YUV_ORDER_YVYU (2<<16) -#define SPRITE_YUV_ORDER_VYUY (3<<16) -#define SPRITE_ROTATE_180 (1<<15) -#define SPRITE_TRICKLE_FEED_DISABLE (1<<14) -#define SPRITE_INT_GAMMA_ENABLE (1<<13) -#define SPRITE_TILED (1<<10) -#define SPRITE_DEST_KEY (1<<2) +#define SPRITE_ENABLE (1 << 31) +#define SPRITE_GAMMA_ENABLE (1 << 30) +#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define SPRITE_PIXFORMAT_MASK (7 << 25) +#define SPRITE_FORMAT_YUV422 (0 << 25) +#define SPRITE_FORMAT_RGBX101010 (1 << 25) +#define SPRITE_FORMAT_RGBX888 (2 << 25) +#define SPRITE_FORMAT_RGBX161616 (3 << 25) +#define SPRITE_FORMAT_YUV444 (4 << 25) +#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */ +#define SPRITE_PIPE_CSC_ENABLE (1 << 24) +#define SPRITE_SOURCE_KEY (1 << 22) +#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ +#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) +#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ +#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16) +#define SPRITE_YUV_ORDER_YUYV (0 << 16) +#define SPRITE_YUV_ORDER_UYVY (1 << 16) +#define SPRITE_YUV_ORDER_YVYU (2 << 16) +#define SPRITE_YUV_ORDER_VYUY (3 << 16) +#define SPRITE_ROTATE_180 (1 << 15) +#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14) +#define 
SPRITE_INT_GAMMA_ENABLE (1 << 13) +#define SPRITE_TILED (1 << 10) +#define SPRITE_DEST_KEY (1 << 2) #define _SPRA_LINOFF 0x70284 #define _SPRA_STRIDE 0x70288 #define _SPRA_POS 0x7028c @@ -6185,13 +6342,13 @@ enum { #define _SPRA_OFFSET 0x702a4 #define _SPRA_SURFLIVE 0x702ac #define _SPRA_SCALE 0x70304 -#define SPRITE_SCALE_ENABLE (1<<31) -#define SPRITE_FILTER_MASK (3<<29) -#define SPRITE_FILTER_MEDIUM (0<<29) -#define SPRITE_FILTER_ENHANCING (1<<29) -#define SPRITE_FILTER_SOFTENING (2<<29) -#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */ -#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27) +#define SPRITE_SCALE_ENABLE (1 << 31) +#define SPRITE_FILTER_MASK (3 << 29) +#define SPRITE_FILTER_MEDIUM (0 << 29) +#define SPRITE_FILTER_ENHANCING (1 << 29) +#define SPRITE_FILTER_SOFTENING (2 << 29) +#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ +#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27) #define _SPRA_GAMC 0x70400 #define _SPRB_CTL 0x71280 @@ -6225,28 +6382,28 @@ enum { #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) -#define SP_ENABLE (1<<31) -#define SP_GAMMA_ENABLE (1<<30) -#define SP_PIXFORMAT_MASK (0xf<<26) -#define SP_FORMAT_YUV422 (0<<26) -#define SP_FORMAT_BGR565 (5<<26) -#define SP_FORMAT_BGRX8888 (6<<26) -#define SP_FORMAT_BGRA8888 (7<<26) -#define SP_FORMAT_RGBX1010102 (8<<26) -#define SP_FORMAT_RGBA1010102 (9<<26) -#define SP_FORMAT_RGBX8888 (0xe<<26) -#define SP_FORMAT_RGBA8888 (0xf<<26) -#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */ -#define SP_SOURCE_KEY (1<<22) -#define SP_YUV_FORMAT_BT709 (1<<18) -#define SP_YUV_BYTE_ORDER_MASK (3<<16) -#define SP_YUV_ORDER_YUYV (0<<16) -#define SP_YUV_ORDER_UYVY (1<<16) -#define SP_YUV_ORDER_YVYU (2<<16) -#define SP_YUV_ORDER_VYUY (3<<16) -#define SP_ROTATE_180 (1<<15) -#define SP_TILED (1<<10) -#define SP_MIRROR (1<<8) /* CHV pipe B */ +#define SP_ENABLE (1 << 31) +#define SP_GAMMA_ENABLE (1 << 30) +#define SP_PIXFORMAT_MASK (0xf << 26) +#define SP_FORMAT_YUV422 (0 << 26) +#define SP_FORMAT_BGR565 (5 << 26) +#define SP_FORMAT_BGRX8888 (6 << 26) +#define SP_FORMAT_BGRA8888 (7 << 26) +#define SP_FORMAT_RGBX1010102 (8 << 26) +#define SP_FORMAT_RGBA1010102 (9 << 26) +#define SP_FORMAT_RGBX8888 (0xe << 26) +#define SP_FORMAT_RGBA8888 (0xf << 26) +#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ +#define SP_SOURCE_KEY (1 << 22) +#define SP_YUV_FORMAT_BT709 (1 << 18) +#define SP_YUV_BYTE_ORDER_MASK (3 << 16) +#define SP_YUV_ORDER_YUYV (0 << 16) +#define SP_YUV_ORDER_UYVY (1 << 16) +#define SP_YUV_ORDER_YVYU (2 << 16) +#define SP_YUV_ORDER_VYUY (3 << 16) +#define SP_ROTATE_180 (1 << 15) +#define SP_TILED (1 << 10) +#define SP_MIRROR (1 << 8) /* CHV pipe B */ #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) @@ -6257,7 +6414,7 @@ enum { #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) -#define SP_CONST_ALPHA_ENABLE (1<<31) +#define SP_CONST_ALPHA_ENABLE (1 << 31) #define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) #define SP_CONTRAST(x) ((x) << 18) /* u3.6 */ #define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */ @@ -6349,40 +6506,40 @@ enum { * correctly map to the same formats in ICL, as long as bit 23 is set to 0 */ #define PLANE_CTL_FORMAT_MASK (0xf << 24) -#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24) -#define PLANE_CTL_FORMAT_NV12 ( 1 << 
24) -#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24) -#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24) -#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24) -#define PLANE_CTL_FORMAT_AYUV ( 8 << 24) -#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24) -#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24) +#define PLANE_CTL_FORMAT_YUV422 (0 << 24) +#define PLANE_CTL_FORMAT_NV12 (1 << 24) +#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) +#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) +#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) +#define PLANE_CTL_FORMAT_AYUV (8 << 24) +#define PLANE_CTL_FORMAT_INDEXED (12 << 24) +#define PLANE_CTL_FORMAT_RGB_565 (14 << 24) #define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) #define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ #define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) -#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21) -#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21) +#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) +#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) #define PLANE_CTL_ORDER_BGRX (0 << 20) #define PLANE_CTL_ORDER_RGBX (1 << 20) #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_YUYV ( 0 << 16) -#define PLANE_CTL_YUV422_UYVY ( 1 << 16) -#define PLANE_CTL_YUV422_YVYU ( 2 << 16) -#define PLANE_CTL_YUV422_VYUY ( 3 << 16) +#define PLANE_CTL_YUV422_YUYV (0 << 16) +#define PLANE_CTL_YUV422_UYVY (1 << 16) +#define PLANE_CTL_YUV422_YVYU (2 << 16) +#define PLANE_CTL_YUV422_VYUY (3 << 16) #define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15) #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ #define PLANE_CTL_TILED_MASK (0x7 << 10) -#define PLANE_CTL_TILED_LINEAR ( 0 << 10) -#define PLANE_CTL_TILED_X ( 1 << 10) -#define PLANE_CTL_TILED_Y ( 4 << 10) -#define PLANE_CTL_TILED_YF ( 5 << 10) -#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8) +#define PLANE_CTL_TILED_LINEAR (0 << 10) +#define PLANE_CTL_TILED_X (1 << 10) +#define PLANE_CTL_TILED_Y (4 << 10) +#define PLANE_CTL_TILED_YF (5 << 10) +#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) #define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ -#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4) -#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4) -#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4) +#define PLANE_CTL_ALPHA_DISABLE (0 << 4) +#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) +#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4) #define PLANE_CTL_ROTATE_MASK 0x3 #define PLANE_CTL_ROTATE_0 0x0 #define PLANE_CTL_ROTATE_90 0x1 @@ -6610,7 +6767,7 @@ enum { # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) #define FDI_PLL_FREQ_CTL _MMIO(0x46030) -#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24) +#define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24) #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff @@ -6659,14 +6816,14 @@ enum { /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ #define _PFA_CTL_1 0x68080 #define _PFB_CTL_1 0x68880 -#define PF_ENABLE (1<<31) -#define PF_PIPE_SEL_MASK_IVB (3<<29) -#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29) -#define PF_FILTER_MASK (3<<23) -#define PF_FILTER_PROGRAMMED (0<<23) -#define PF_FILTER_MED_3x3 (1<<23) -#define PF_FILTER_EDGE_ENHANCE (2<<23) -#define PF_FILTER_EDGE_SOFTEN (3<<23) +#define PF_ENABLE (1 << 31) +#define PF_PIPE_SEL_MASK_IVB (3 << 29) +#define PF_PIPE_SEL_IVB(pipe) ((pipe) << 29) +#define PF_FILTER_MASK (3 << 23) +#define PF_FILTER_PROGRAMMED (0 << 23) +#define PF_FILTER_MED_3x3 (1 << 23) +#define PF_FILTER_EDGE_ENHANCE 
(2 << 23) +#define PF_FILTER_EDGE_SOFTEN (3 << 23) #define _PFA_WIN_SZ 0x68074 #define _PFB_WIN_SZ 0x68874 #define _PFA_WIN_POS 0x68070 @@ -6684,7 +6841,7 @@ enum { #define _PSA_CTL 0x68180 #define _PSB_CTL 0x68980 -#define PS_ENABLE (1<<31) +#define PS_ENABLE (1 << 31) #define _PSA_WIN_SZ 0x68174 #define _PSB_WIN_SZ 0x68974 #define _PSA_WIN_POS 0x68170 @@ -6769,6 +6926,10 @@ enum { #define _PS_VPHASE_1B 0x68988 #define _PS_VPHASE_2B 0x68A88 #define _PS_VPHASE_1C 0x69188 +#define PS_Y_PHASE(x) ((x) << 16) +#define PS_UV_RGB_PHASE(x) ((x) << 0) +#define PS_PHASE_MASK (0x7fff << 1) /* u2.13 */ +#define PS_PHASE_TRIP (1 << 0) #define _PS_HPHASE_1A 0x68194 #define _PS_HPHASE_2A 0x68294 @@ -6782,7 +6943,7 @@ enum { #define _PS_ECC_STAT_2B 0x68AD0 #define _PS_ECC_STAT_1C 0x691D0 -#define _ID(id, a, b) ((a) + (id)*((b)-(a))) +#define _ID(id, a, b) _PICK_EVEN(id, a, b) #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe, \ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL)) @@ -6863,37 +7024,37 @@ enum { #define DE_PIPEB_CRC_DONE (1 << 10) #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) #define DE_PIPEA_VBLANK (1 << 7) -#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8*(pipe))) +#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe))) #define DE_PIPEA_EVEN_FIELD (1 << 6) #define DE_PIPEA_ODD_FIELD (1 << 5) #define DE_PIPEA_LINE_COMPARE (1 << 4) #define DE_PIPEA_VSYNC (1 << 3) #define DE_PIPEA_CRC_DONE (1 << 2) -#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8*(pipe))) +#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe))) #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) -#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8*(pipe))) +#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe))) /* More Ivybridge lolz */ -#define DE_ERR_INT_IVB (1<<30) -#define DE_GSE_IVB (1<<29) -#define DE_PCH_EVENT_IVB (1<<28) -#define DE_DP_A_HOTPLUG_IVB (1<<27) -#define DE_AUX_CHANNEL_A_IVB (1<<26) -#define DE_EDP_PSR_INT_HSW (1<<19) -#define DE_SPRITEC_FLIP_DONE_IVB (1<<14) -#define DE_PLANEC_FLIP_DONE_IVB (1<<13) -#define DE_PIPEC_VBLANK_IVB (1<<10) -#define DE_SPRITEB_FLIP_DONE_IVB (1<<9) -#define DE_PLANEB_FLIP_DONE_IVB (1<<8) -#define DE_PIPEB_VBLANK_IVB (1<<5) -#define DE_SPRITEA_FLIP_DONE_IVB (1<<4) -#define DE_PLANEA_FLIP_DONE_IVB (1<<3) -#define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) -#define DE_PIPEA_VBLANK_IVB (1<<0) +#define DE_ERR_INT_IVB (1 << 30) +#define DE_GSE_IVB (1 << 29) +#define DE_PCH_EVENT_IVB (1 << 28) +#define DE_DP_A_HOTPLUG_IVB (1 << 27) +#define DE_AUX_CHANNEL_A_IVB (1 << 26) +#define DE_EDP_PSR_INT_HSW (1 << 19) +#define DE_SPRITEC_FLIP_DONE_IVB (1 << 14) +#define DE_PLANEC_FLIP_DONE_IVB (1 << 13) +#define DE_PIPEC_VBLANK_IVB (1 << 10) +#define DE_SPRITEB_FLIP_DONE_IVB (1 << 9) +#define DE_PLANEB_FLIP_DONE_IVB (1 << 8) +#define DE_PIPEB_VBLANK_IVB (1 << 5) +#define DE_SPRITEA_FLIP_DONE_IVB (1 << 4) +#define DE_PLANEA_FLIP_DONE_IVB (1 << 3) +#define DE_PLANE_FLIP_DONE_IVB(plane) (1 << (3 + 5 * (plane))) +#define DE_PIPEA_VBLANK_IVB (1 << 0) #define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5)) #define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */ -#define MASTER_INTERRUPT_ENABLE (1<<31) +#define MASTER_INTERRUPT_ENABLE (1 << 31) #define DEISR _MMIO(0x44000) #define DEIMR _MMIO(0x44004) @@ -6906,37 +7067,37 @@ enum { #define GTIER _MMIO(0x4401c) #define GEN8_MASTER_IRQ _MMIO(0x44200) -#define GEN8_MASTER_IRQ_CONTROL (1<<31) -#define GEN8_PCU_IRQ (1<<30) -#define GEN8_DE_PCH_IRQ (1<<23) -#define GEN8_DE_MISC_IRQ (1<<22) -#define GEN8_DE_PORT_IRQ (1<<20) -#define GEN8_DE_PIPE_C_IRQ (1<<18) -#define 
GEN8_DE_PIPE_B_IRQ (1<<17) -#define GEN8_DE_PIPE_A_IRQ (1<<16) -#define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe))) -#define GEN8_GT_VECS_IRQ (1<<6) -#define GEN8_GT_GUC_IRQ (1<<5) -#define GEN8_GT_PM_IRQ (1<<4) -#define GEN8_GT_VCS2_IRQ (1<<3) -#define GEN8_GT_VCS1_IRQ (1<<2) -#define GEN8_GT_BCS_IRQ (1<<1) -#define GEN8_GT_RCS_IRQ (1<<0) +#define GEN8_MASTER_IRQ_CONTROL (1 << 31) +#define GEN8_PCU_IRQ (1 << 30) +#define GEN8_DE_PCH_IRQ (1 << 23) +#define GEN8_DE_MISC_IRQ (1 << 22) +#define GEN8_DE_PORT_IRQ (1 << 20) +#define GEN8_DE_PIPE_C_IRQ (1 << 18) +#define GEN8_DE_PIPE_B_IRQ (1 << 17) +#define GEN8_DE_PIPE_A_IRQ (1 << 16) +#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe))) +#define GEN8_GT_VECS_IRQ (1 << 6) +#define GEN8_GT_GUC_IRQ (1 << 5) +#define GEN8_GT_PM_IRQ (1 << 4) +#define GEN8_GT_VCS2_IRQ (1 << 3) +#define GEN8_GT_VCS1_IRQ (1 << 2) +#define GEN8_GT_BCS_IRQ (1 << 1) +#define GEN8_GT_RCS_IRQ (1 << 0) #define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which))) #define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which))) #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) -#define GEN9_GUC_TO_HOST_INT_EVENT (1<<31) -#define GEN9_GUC_EXEC_ERROR_EVENT (1<<30) -#define GEN9_GUC_DISPLAY_EVENT (1<<29) -#define GEN9_GUC_SEMA_SIGNAL_EVENT (1<<28) -#define GEN9_GUC_IOMMU_MSG_EVENT (1<<27) -#define GEN9_GUC_DB_RING_EVENT (1<<26) -#define GEN9_GUC_DMA_DONE_EVENT (1<<25) -#define GEN9_GUC_FATAL_ERROR_EVENT (1<<24) -#define GEN9_GUC_NOTIFICATION_EVENT (1<<23) +#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31) +#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30) +#define GEN9_GUC_DISPLAY_EVENT (1 << 29) +#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28) +#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27) +#define GEN9_GUC_DB_RING_EVENT (1 << 26) +#define GEN9_GUC_DMA_DONE_EVENT (1 << 25) +#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24) +#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23) #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 @@ -6985,6 +7146,7 @@ enum { #define GEN8_DE_PORT_IMR _MMIO(0x44444) #define GEN8_DE_PORT_IIR _MMIO(0x44448) #define GEN8_DE_PORT_IER _MMIO(0x4444c) +#define ICL_AUX_CHANNEL_E (1 << 29) #define CNL_AUX_CHANNEL_F (1 << 28) #define GEN9_AUX_CHANNEL_D (1 << 27) #define GEN9_AUX_CHANNEL_C (1 << 26) @@ -7011,9 +7173,16 @@ enum { #define GEN8_PCU_IIR _MMIO(0x444e8) #define GEN8_PCU_IER _MMIO(0x444ec) +#define GEN11_GU_MISC_ISR _MMIO(0x444f0) +#define GEN11_GU_MISC_IMR _MMIO(0x444f4) +#define GEN11_GU_MISC_IIR _MMIO(0x444f8) +#define GEN11_GU_MISC_IER _MMIO(0x444fc) +#define GEN11_GU_MISC_GSE (1 << 27) + #define GEN11_GFX_MSTR_IRQ _MMIO(0x190010) #define GEN11_MASTER_IRQ (1 << 31) #define GEN11_PCU_IRQ (1 << 30) +#define GEN11_GU_MISC_IRQ (1 << 29) #define GEN11_DISPLAY_IRQ (1 << 16) #define GEN11_GT_DW_IRQ(x) (1 << (x)) #define GEN11_GT_DW1_IRQ (1 << 1) @@ -7024,11 +7193,40 @@ enum { #define GEN11_AUDIO_CODEC_IRQ (1 << 24) #define GEN11_DE_PCH_IRQ (1 << 23) #define GEN11_DE_MISC_IRQ (1 << 22) +#define GEN11_DE_HPD_IRQ (1 << 21) #define GEN11_DE_PORT_IRQ (1 << 20) #define GEN11_DE_PIPE_C (1 << 18) #define GEN11_DE_PIPE_B (1 << 17) #define GEN11_DE_PIPE_A (1 << 16) +#define GEN11_DE_HPD_ISR _MMIO(0x44470) +#define GEN11_DE_HPD_IMR _MMIO(0x44474) +#define GEN11_DE_HPD_IIR _MMIO(0x44478) +#define GEN11_DE_HPD_IER _MMIO(0x4447c) +#define GEN11_TC4_HOTPLUG (1 << 19) +#define GEN11_TC3_HOTPLUG (1 << 18) +#define GEN11_TC2_HOTPLUG (1 << 17) +#define GEN11_TC1_HOTPLUG (1 << 16) +#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG 
| \ + GEN11_TC3_HOTPLUG | \ + GEN11_TC2_HOTPLUG | \ + GEN11_TC1_HOTPLUG) +#define GEN11_TBT4_HOTPLUG (1 << 3) +#define GEN11_TBT3_HOTPLUG (1 << 2) +#define GEN11_TBT2_HOTPLUG (1 << 1) +#define GEN11_TBT1_HOTPLUG (1 << 0) +#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \ + GEN11_TBT3_HOTPLUG | \ + GEN11_TBT2_HOTPLUG | \ + GEN11_TBT1_HOTPLUG) + +#define GEN11_TBT_HOTPLUG_CTL _MMIO(0x44030) +#define GEN11_TC_HOTPLUG_CTL _MMIO(0x44038) +#define GEN11_HOTPLUG_CTL_ENABLE(tc_port) (8 << (tc_port) * 4) +#define GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port) (2 << (tc_port) * 4) +#define GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) +#define GEN11_HOTPLUG_CTL_NO_DETECT(tc_port) (0 << (tc_port) * 4) + #define GEN11_GT_INTR_DW0 _MMIO(0x190018) #define GEN11_CSME (31) #define GEN11_GUNIT (28) @@ -7043,7 +7241,7 @@ enum { #define GEN11_VECS(x) (31 - (x)) #define GEN11_VCS(x) (x) -#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + (x * 4)) +#define GEN11_GT_INTR_DW(x) _MMIO(0x190018 + ((x) * 4)) #define GEN11_INTR_IDENTITY_REG0 _MMIO(0x190060) #define GEN11_INTR_IDENTITY_REG1 _MMIO(0x190064) @@ -7052,12 +7250,12 @@ enum { #define GEN11_INTR_ENGINE_INSTANCE(x) (((x) & GENMASK(25, 20)) >> 20) #define GEN11_INTR_ENGINE_INTR(x) ((x) & 0xffff) -#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + (x * 4)) +#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4)) #define GEN11_IIR_REG0_SELECTOR _MMIO(0x190070) #define GEN11_IIR_REG1_SELECTOR _MMIO(0x190074) -#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + (x * 4)) +#define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) #define GEN11_RENDER_COPY_INTR_ENABLE _MMIO(0x190030) #define GEN11_VCS_VECS_INTR_ENABLE _MMIO(0x190034) @@ -7079,8 +7277,8 @@ enum { #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. 
*/ #define ILK_ELPIN_409_SELECT (1 << 25) -#define ILK_DPARB_GATE (1<<22) -#define ILK_VSDPFD_FULL (1<<21) +#define ILK_DPARB_GATE (1 << 22) +#define ILK_VSDPFD_FULL (1 << 21) #define FUSE_STRAP _MMIO(0x42014) #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) @@ -7130,31 +7328,31 @@ enum { #define CHICKEN_TRANS_A 0x420c0 #define CHICKEN_TRANS_B 0x420c4 #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) -#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */ -#define DDI_TRAINING_OVERRIDE_ENABLE (1<<19) -#define DDI_TRAINING_OVERRIDE_VALUE (1<<18) -#define DDIE_TRAINING_OVERRIDE_ENABLE (1<<17) /* CHICKEN_TRANS_A only */ -#define DDIE_TRAINING_OVERRIDE_VALUE (1<<16) /* CHICKEN_TRANS_A only */ -#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15) -#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12) +#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ +#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) +#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) +#define DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */ +#define DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */ +#define PSR2_ADD_VERTICAL_LINE_COUNT (1 << 15) +#define PSR2_VSC_ENABLE_PROG_HEADER (1 << 12) #define DISP_ARB_CTL _MMIO(0x45000) -#define DISP_FBC_MEMORY_WAKE (1<<31) -#define DISP_TILE_SURFACE_SWIZZLING (1<<13) -#define DISP_FBC_WM_DIS (1<<15) +#define DISP_FBC_MEMORY_WAKE (1 << 31) +#define DISP_TILE_SURFACE_SWIZZLING (1 << 13) +#define DISP_FBC_WM_DIS (1 << 15) #define DISP_ARB_CTL2 _MMIO(0x45004) -#define DISP_DATA_PARTITION_5_6 (1<<6) -#define DISP_IPC_ENABLE (1<<3) +#define DISP_DATA_PARTITION_5_6 (1 << 6) +#define DISP_IPC_ENABLE (1 << 3) #define DBUF_CTL _MMIO(0x45008) #define DBUF_CTL_S1 _MMIO(0x45008) #define DBUF_CTL_S2 _MMIO(0x44FE8) -#define DBUF_POWER_REQUEST (1<<31) -#define DBUF_POWER_STATE (1<<30) +#define DBUF_POWER_REQUEST (1 << 31) +#define DBUF_POWER_STATE (1 << 30) #define GEN7_MSG_CTL _MMIO(0x45010) -#define WAIT_FOR_PCH_RESET_ACK (1<<1) -#define WAIT_FOR_PCH_FLR_ACK (1<<0) +#define WAIT_FOR_PCH_RESET_ACK (1 << 1) +#define WAIT_FOR_PCH_FLR_ACK (1 << 0) #define HSW_NDE_RSTWRN_OPT _MMIO(0x46408) -#define RESET_PCH_HANDSHAKE_ENABLE (1<<4) +#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) #define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) @@ -7179,16 +7377,16 @@ enum { #define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29) #define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) -#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) +#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1 << 14) #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) -#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) -#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10) +#define GEN9_TSG_BARRIER_ACK_DISABLE (1 << 8) +#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN8_CS_CHICKEN1 _MMIO(0x2580) -#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1<<0) +#define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) #define GEN9_PREEMPT_GPGPU_MID_THREAD_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 0) #define GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL GEN9_PREEMPT_GPGPU_LEVEL(0, 1) @@ -7197,22 +7395,27 @@ enum { /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) -# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) -# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) -#define 
COMMON_SLICE_CHICKEN2 _MMIO(0x7014) -# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13) -# define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) -# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) -# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) + #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26)) + #define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14) + +#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) + #define GEN9_PBE_COMPRESSED_HASH_SELECTION (1 << 13) + #define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1 << 12) + #define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8) + #define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0) + +#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) + #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11) #define HIZ_CHICKEN _MMIO(0x7018) -# define CHV_HZ_8X8_MODE_IN_1X (1<<15) -# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3) +# define CHV_HZ_8X8_MODE_IN_1X (1 << 15) +# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1 << 3) #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) -#define DISABLE_PIXEL_MASK_CAMMING (1<<14) +#define DISABLE_PIXEL_MASK_CAMMING (1 << 14) #define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) +#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) #define GEN7_L3SQCREG1 _MMIO(0xB010) #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 @@ -7230,7 +7433,7 @@ enum { #define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C -#define GEN7_L3AGDIS (1<<19) +#define GEN7_L3AGDIS (1 << 19) #define GEN7_L3CNTLREG2 _MMIO(0xB020) #define GEN7_L3CNTLREG3 _MMIO(0xB024) @@ -7240,7 +7443,7 @@ enum { #define GEN11_I2M_WRITE_DISABLE (1 << 28) #define GEN7_L3SQCREG4 _MMIO(0xb034) -#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) +#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27) #define GEN8_L3SQCREG4 _MMIO(0xb118) #define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) @@ -7251,12 +7454,12 @@ enum { #define HDC_CHICKEN0 _MMIO(0x7300) #define CNL_HDC_CHICKEN0 _MMIO(0xE5F0) #define ICL_HDC_MODE _MMIO(0xE5F4) -#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) -#define HDC_FENCE_DEST_SLM_DISABLE (1<<14) -#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) -#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5) -#define HDC_FORCE_NON_COHERENT (1<<4) -#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) +#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1 << 15) +#define HDC_FENCE_DEST_SLM_DISABLE (1 << 14) +#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1 << 11) +#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1 << 5) +#define HDC_FORCE_NON_COHERENT (1 << 4) +#define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) #define GEN8_HDC_CHICKEN1 _MMIO(0x7304) @@ -7269,13 +7472,21 @@ enum { /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG _MMIO(0x9030) -#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) +#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1 << 11) #define HSW_SCRATCH1 _MMIO(0xb038) -#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) +#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1 << 27) #define BDW_SCRATCH1 _MMIO(0xb11c) -#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2) +#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) + +/*GEN11 chicken */ +#define _PIPEA_CHICKEN 0x70038 +#define _PIPEB_CHICKEN 0x71038 +#define _PIPEC_CHICKEN 0x72038 +#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ + _PIPEB_CHICKEN) /* PCH */ @@ -7320,7 +7531,7 @@ enum { #define SDE_TRANSA_FIFO_UNDER (1 << 0) #define SDE_TRANS_MASK (0x3f) -/* south 
display engine interrupt: CPT/PPT */ +/* south display engine interrupt: CPT - CNP */ #define SDE_AUDIO_POWER_D_CPT (1 << 31) #define SDE_AUDIO_POWER_C_CPT (1 << 30) #define SDE_AUDIO_POWER_B_CPT (1 << 29) @@ -7368,14 +7579,29 @@ enum { SDE_FDI_RXB_CPT | \ SDE_FDI_RXA_CPT) +/* south display engine interrupt: ICP */ +#define SDE_TC4_HOTPLUG_ICP (1 << 27) +#define SDE_TC3_HOTPLUG_ICP (1 << 26) +#define SDE_TC2_HOTPLUG_ICP (1 << 25) +#define SDE_TC1_HOTPLUG_ICP (1 << 24) +#define SDE_GMBUS_ICP (1 << 23) +#define SDE_DDIB_HOTPLUG_ICP (1 << 17) +#define SDE_DDIA_HOTPLUG_ICP (1 << 16) +#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ + SDE_DDIA_HOTPLUG_ICP) +#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ + SDE_TC3_HOTPLUG_ICP | \ + SDE_TC2_HOTPLUG_ICP | \ + SDE_TC1_HOTPLUG_ICP) + #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) #define SDEIIR _MMIO(0xc4008) #define SDEIER _MMIO(0xc400c) #define SERR_INT _MMIO(0xc4040) -#define SERR_INT_POISON (1<<31) -#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) +#define SERR_INT_POISON (1 << 31) +#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3)) /* digital port hotplug */ #define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */ @@ -7428,6 +7654,134 @@ enum { #define PORTE_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTE_HOTPLUG_LONG_DETECT (2 << 0) +/* This register is a reuse of PCH_PORT_HOTPLUG register. The + * functionality covered in PCH_PORT_HOTPLUG is split into + * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC. + */ + +#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) +#define ICP_DDIB_HPD_ENABLE (1 << 7) +#define ICP_DDIB_HPD_STATUS_MASK (3 << 4) +#define ICP_DDIB_HPD_NO_DETECT (0 << 4) +#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4) +#define ICP_DDIB_HPD_LONG_DETECT (2 << 4) +#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) +#define ICP_DDIA_HPD_ENABLE (1 << 3) +#define ICP_DDIA_HPD_STATUS_MASK (3 << 0) +#define ICP_DDIA_HPD_NO_DETECT (0 << 0) +#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) +#define ICP_DDIA_HPD_LONG_DETECT (2 << 0) +#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0) + +#define SHOTPLUG_CTL_TC _MMIO(0xc4034) +#define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) +/* Icelake DSC Rate Control Range Parameter Registers */ +#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) +#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) +#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) +#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) 
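(Editor's note, not part of the patch: a minimal sketch of how the DSC RC range parameter dwords above might be assembled, using the RC_BPG_OFFSET_SHIFT/RC_MAX_QP_SHIFT/RC_MIN_QP_SHIFT values defined immediately below. The rc_range struct, the helper name and the "two 16-bit ranges per 32-bit register" packing are assumptions for illustration, not the driver's actual code.)

	/*
	 * Illustrative only: pack two DSC RC ranges into one 32-bit register
	 * value. Assumes each range occupies 16 bits, with min_qp at bit 0,
	 * max_qp at bit 5 and bpg_offset at bit 10, mirroring the RC_*_SHIFT
	 * definitions below. Struct and function names are hypothetical.
	 */
	struct rc_range {
		u8 min_qp;	/* 5 bits */
		u8 max_qp;	/* 5 bits */
		u8 bpg_offset;	/* 6 bits */
	};

	static u32 pack_rc_range_pair(const struct rc_range *lo,
				      const struct rc_range *hi)
	{
		u32 val;

		val  = (lo->bpg_offset << RC_BPG_OFFSET_SHIFT) |
		       (lo->max_qp << RC_MAX_QP_SHIFT) |
		       (lo->min_qp << RC_MIN_QP_SHIFT);
		val |= ((hi->bpg_offset << RC_BPG_OFFSET_SHIFT) |
			(hi->max_qp << RC_MAX_QP_SHIFT) |
			(hi->min_qp << RC_MIN_QP_SHIFT)) << 16;

		return val;
	}

(End of editor's note; the patch continues with the RC_*_SHIFT definitions.)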
+#define RC_BPG_OFFSET_SHIFT 10 +#define RC_MAX_QP_SHIFT 5 +#define RC_MIN_QP_SHIFT 0 + +#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) +#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) +#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) +#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) +#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) +#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) +#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) +#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) +#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) +#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ + 
_ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) + +#define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) +#define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) + #define PCH_GPIOA _MMIO(0xc5010) #define PCH_GPIOB _MMIO(0xc5014) #define PCH_GPIOC _MMIO(0xc5018) @@ -7444,46 +7798,46 @@ enum { #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 -#define PCH_DPLL(pll) _MMIO(pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) +#define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) #define _PCH_FPA0 0xc6040 -#define FP_CB_TUNE (0x3<<22) +#define FP_CB_TUNE (0x3 << 22) #define _PCH_FPA1 0xc6044 #define _PCH_FPB0 0xc6048 #define _PCH_FPB1 0xc604c -#define PCH_FP0(pll) _MMIO(pll == 0 ? _PCH_FPA0 : _PCH_FPB0) -#define PCH_FP1(pll) _MMIO(pll == 0 ? _PCH_FPA1 : _PCH_FPB1) +#define PCH_FP0(pll) _MMIO((pll) == 0 ? _PCH_FPA0 : _PCH_FPB0) +#define PCH_FP1(pll) _MMIO((pll) == 0 ? _PCH_FPA1 : _PCH_FPB1) #define PCH_DPLL_TEST _MMIO(0xc606c) #define PCH_DREF_CONTROL _MMIO(0xC6200) #define DREF_CONTROL_MASK 0x7fc3 -#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13) -#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13) -#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13) -#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13) -#define DREF_SSC_SOURCE_DISABLE (0<<11) -#define DREF_SSC_SOURCE_ENABLE (2<<11) -#define DREF_SSC_SOURCE_MASK (3<<11) -#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9) -#define DREF_NONSPREAD_CK505_ENABLE (1<<9) -#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9) -#define DREF_NONSPREAD_SOURCE_MASK (3<<9) -#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7) -#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7) -#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7) -#define DREF_SSC4_DOWNSPREAD (0<<6) -#define DREF_SSC4_CENTERSPREAD (1<<6) -#define DREF_SSC1_DISABLE (0<<1) -#define DREF_SSC1_ENABLE (1<<1) +#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0 << 13) +#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2 << 13) +#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3 << 13) +#define DREF_CPU_SOURCE_OUTPUT_MASK (3 << 13) +#define DREF_SSC_SOURCE_DISABLE (0 << 11) +#define DREF_SSC_SOURCE_ENABLE (2 << 11) +#define DREF_SSC_SOURCE_MASK (3 << 11) +#define DREF_NONSPREAD_SOURCE_DISABLE (0 << 9) +#define DREF_NONSPREAD_CK505_ENABLE (1 << 9) +#define DREF_NONSPREAD_SOURCE_ENABLE (2 << 9) +#define DREF_NONSPREAD_SOURCE_MASK (3 << 9) +#define DREF_SUPERSPREAD_SOURCE_DISABLE (0 << 7) +#define DREF_SUPERSPREAD_SOURCE_ENABLE (2 << 7) +#define DREF_SUPERSPREAD_SOURCE_MASK (3 << 7) +#define DREF_SSC4_DOWNSPREAD (0 << 6) +#define DREF_SSC4_CENTERSPREAD (1 << 6) +#define DREF_SSC1_DISABLE (0 << 1) +#define DREF_SSC1_ENABLE (1 << 1) #define DREF_SSC4_DISABLE (0) #define DREF_SSC4_ENABLE (1) #define PCH_RAWCLK_FREQ _MMIO(0xc6204) #define FDL_TP1_TIMER_SHIFT 12 -#define FDL_TP1_TIMER_MASK (3<<12) +#define FDL_TP1_TIMER_MASK (3 << 12) #define FDL_TP2_TIMER_SHIFT 10 -#define FDL_TP2_TIMER_MASK (3<<10) +#define FDL_TP2_TIMER_MASK (3 << 10) #define RAWCLK_FREQ_MASK 0x3ff #define CNP_RAWCLK_DIV_MASK (0x3ff << 16) #define CNP_RAWCLK_DIV(div) ((div) << 16) @@ -7520,7 +7874,7 @@ enum { #define 
TRANS_VBLANK_END_SHIFT 16 #define TRANS_VBLANK_START_SHIFT 0 #define _PCH_TRANS_VSYNC_A 0xe0014 -#define TRANS_VSYNC_END_SHIFT 16 +#define TRANS_VSYNC_END_SHIFT 16 #define TRANS_VSYNC_START_SHIFT 0 #define _PCH_TRANS_VSYNCSHIFT_A 0xe0028 @@ -7600,15 +7954,28 @@ enum { #define _HSW_VIDEO_DIP_VSC_ECC_B 0x61344 #define _HSW_VIDEO_DIP_GCP_B 0x61210 +/* Icelake PPS_DATA and _ECC DIP Registers. + * These are available for transcoders B,C and eDP. + * Adding the _A so as to reuse the _MMIO_TRANS2 + * definition, with which it offsets to the right location. + */ + +#define _ICL_VIDEO_DIP_PPS_DATA_A 0x60350 +#define _ICL_VIDEO_DIP_PPS_DATA_B 0x61350 +#define _ICL_VIDEO_DIP_PPS_ECC_A 0x603D4 +#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 + #define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) #define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) +#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) +#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) #define _HSW_STEREO_3D_CTL_A 0x70020 -#define S3D_ENABLE (1<<31) +#define S3D_ENABLE (1 << 31) #define _HSW_STEREO_3D_CTL_B 0x71020 #define HSW_STEREO_3D_CTL(trans) _MMIO_PIPE2(trans, _HSW_STEREO_3D_CTL_A) @@ -7651,156 +8018,156 @@ enum { #define _PCH_TRANSBCONF 0xf1008 #define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) #define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */ -#define TRANS_DISABLE (0<<31) -#define TRANS_ENABLE (1<<31) -#define TRANS_STATE_MASK (1<<30) -#define TRANS_STATE_DISABLE (0<<30) -#define TRANS_STATE_ENABLE (1<<30) -#define TRANS_FSYNC_DELAY_HB1 (0<<27) -#define TRANS_FSYNC_DELAY_HB2 (1<<27) -#define TRANS_FSYNC_DELAY_HB3 (2<<27) -#define TRANS_FSYNC_DELAY_HB4 (3<<27) -#define TRANS_INTERLACE_MASK (7<<21) -#define TRANS_PROGRESSIVE (0<<21) -#define TRANS_INTERLACED (3<<21) -#define TRANS_LEGACY_INTERLACED_ILK (2<<21) -#define TRANS_8BPC (0<<5) -#define TRANS_10BPC (1<<5) -#define TRANS_6BPC (2<<5) -#define TRANS_12BPC (3<<5) +#define TRANS_DISABLE (0 << 31) +#define TRANS_ENABLE (1 << 31) +#define TRANS_STATE_MASK (1 << 30) +#define TRANS_STATE_DISABLE (0 << 30) +#define TRANS_STATE_ENABLE (1 << 30) +#define TRANS_FSYNC_DELAY_HB1 (0 << 27) +#define TRANS_FSYNC_DELAY_HB2 (1 << 27) +#define TRANS_FSYNC_DELAY_HB3 (2 << 27) +#define TRANS_FSYNC_DELAY_HB4 (3 << 27) +#define TRANS_INTERLACE_MASK (7 << 21) +#define TRANS_PROGRESSIVE (0 << 21) +#define TRANS_INTERLACED (3 << 21) +#define TRANS_LEGACY_INTERLACED_ILK (2 << 21) +#define TRANS_8BPC (0 << 5) +#define TRANS_10BPC (1 << 5) +#define TRANS_6BPC (2 << 5) +#define TRANS_12BPC (3 << 5) #define _TRANSA_CHICKEN1 0xf0060 #define _TRANSB_CHICKEN1 0xf1060 #define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) -#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1<<10) -#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4) +#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE (1 << 10) +#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1 << 4) #define _TRANSA_CHICKEN2 0xf0064 #define _TRANSB_CHICKEN2 0xf1064 #define 
TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) -#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) -#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) -#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27) -#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26) -#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25) +#define TRANS_CHICKEN2_TIMING_OVERRIDE (1 << 31) +#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1 << 29) +#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3 << 27) +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1 << 26) +#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1 << 25) #define SOUTH_CHICKEN1 _MMIO(0xc2000) #define FDIA_PHASE_SYNC_SHIFT_OVR 19 #define FDIA_PHASE_SYNC_SHIFT_EN 18 -#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) -#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) +#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) +#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define FDI_BC_BIFURCATION_SELECT (1 << 12) #define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8) #define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8) -#define SPT_PWM_GRANULARITY (1<<0) +#define SPT_PWM_GRANULARITY (1 << 0) #define SOUTH_CHICKEN2 _MMIO(0xc2004) -#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) -#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12) -#define LPT_PWM_GRANULARITY (1<<5) -#define DPLS_EDP_PPS_FIX_DIS (1<<0) +#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13) +#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12) +#define LPT_PWM_GRANULARITY (1 << 5) +#define DPLS_EDP_PPS_FIX_DIS (1 << 0) #define _FDI_RXA_CHICKEN 0xc200c #define _FDI_RXB_CHICKEN 0xc2010 -#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1) -#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0) +#define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1) +#define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0) #define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) #define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020) -#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31) -#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) -#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) -#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) -#define CNP_PWM_CGE_GATING_DISABLE (1<<13) -#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) +#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31) +#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30) +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29) +#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14) +#define CNP_PWM_CGE_GATING_DISABLE (1 << 13) +#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12) /* CPU: FDI_TX */ #define _FDI_TXA_CTL 0x60100 #define _FDI_TXB_CTL 0x61100 #define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) -#define FDI_TX_DISABLE (0<<31) -#define FDI_TX_ENABLE (1<<31) -#define FDI_LINK_TRAIN_PATTERN_1 (0<<28) -#define FDI_LINK_TRAIN_PATTERN_2 (1<<28) -#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28) -#define FDI_LINK_TRAIN_NONE (3<<28) -#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25) -#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25) -#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25) -#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25) -#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22) -#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22) -#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22) -#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22) +#define FDI_TX_DISABLE (0 << 31) +#define FDI_TX_ENABLE (1 << 31) +#define FDI_LINK_TRAIN_PATTERN_1 (0 << 28) +#define FDI_LINK_TRAIN_PATTERN_2 (1 << 28) +#define 
FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28) +#define FDI_LINK_TRAIN_NONE (3 << 28) +#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25) +#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25) +#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25) +#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22) +#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22) /* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. SNB has different settings. */ /* SNB A-stepping */ -#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) -#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) -#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) -#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +#define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22) /* SNB B-stepping */ -#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0<<22) -#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a<<22) -#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39<<22) -#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) -#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f<<22) +#define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22) +#define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22) +#define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22) +#define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22) +#define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22) #define FDI_DP_PORT_WIDTH_SHIFT 19 #define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT) #define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT) -#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) +#define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18) /* Ironlake: hardwired to 1 */ -#define FDI_TX_PLL_ENABLE (1<<14) +#define FDI_TX_PLL_ENABLE (1 << 14) /* Ivybridge has different bits for lolz */ -#define FDI_LINK_TRAIN_PATTERN_1_IVB (0<<8) -#define FDI_LINK_TRAIN_PATTERN_2_IVB (1<<8) -#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2<<8) -#define FDI_LINK_TRAIN_NONE_IVB (3<<8) +#define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8) +#define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8) +#define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8) +#define FDI_LINK_TRAIN_NONE_IVB (3 << 8) /* both Tx and Rx */ -#define FDI_COMPOSITE_SYNC (1<<11) -#define FDI_LINK_TRAIN_AUTO (1<<10) -#define FDI_SCRAMBLING_ENABLE (0<<7) -#define FDI_SCRAMBLING_DISABLE (1<<7) +#define FDI_COMPOSITE_SYNC (1 << 11) +#define FDI_LINK_TRAIN_AUTO (1 << 10) +#define FDI_SCRAMBLING_ENABLE (0 << 7) +#define FDI_SCRAMBLING_DISABLE (1 << 7) /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ #define _FDI_RXA_CTL 0xf000c #define _FDI_RXB_CTL 0xf100c #define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) -#define FDI_RX_ENABLE (1<<31) +#define FDI_RX_ENABLE (1 << 31) /* train, dp width same as FDI_TX */ -#define FDI_FS_ERRC_ENABLE (1<<27) -#define FDI_FE_ERRC_ENABLE (1<<26) -#define FDI_RX_POLARITY_REVERSED_LPT (1<<16) -#define FDI_8BPC (0<<16) -#define FDI_10BPC (1<<16) -#define FDI_6BPC (2<<16) -#define FDI_12BPC (3<<16) -#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15) -#define FDI_DMI_LINK_REVERSE_MASK (1<<14) -#define FDI_RX_PLL_ENABLE (1<<13) -#define FDI_FS_ERR_CORRECT_ENABLE (1<<11) -#define FDI_FE_ERR_CORRECT_ENABLE (1<<10) -#define FDI_FS_ERR_REPORT_ENABLE (1<<9) -#define FDI_FE_ERR_REPORT_ENABLE (1<<8) -#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6) -#define FDI_PCDCLK (1<<4) +#define FDI_FS_ERRC_ENABLE (1 << 
27) +#define FDI_FE_ERRC_ENABLE (1 << 26) +#define FDI_RX_POLARITY_REVERSED_LPT (1 << 16) +#define FDI_8BPC (0 << 16) +#define FDI_10BPC (1 << 16) +#define FDI_6BPC (2 << 16) +#define FDI_12BPC (3 << 16) +#define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15) +#define FDI_DMI_LINK_REVERSE_MASK (1 << 14) +#define FDI_RX_PLL_ENABLE (1 << 13) +#define FDI_FS_ERR_CORRECT_ENABLE (1 << 11) +#define FDI_FE_ERR_CORRECT_ENABLE (1 << 10) +#define FDI_FS_ERR_REPORT_ENABLE (1 << 9) +#define FDI_FE_ERR_REPORT_ENABLE (1 << 8) +#define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6) +#define FDI_PCDCLK (1 << 4) /* CPT */ -#define FDI_AUTO_TRAINING (1<<10) -#define FDI_LINK_TRAIN_PATTERN_1_CPT (0<<8) -#define FDI_LINK_TRAIN_PATTERN_2_CPT (1<<8) -#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8) -#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8) -#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8) +#define FDI_AUTO_TRAINING (1 << 10) +#define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8) +#define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8) +#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8) +#define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8) +#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8) #define _FDI_RXA_MISC 0xf0010 #define _FDI_RXB_MISC 0xf1010 -#define FDI_RX_PWRDN_LANE1_MASK (3<<26) -#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26) -#define FDI_RX_PWRDN_LANE0_MASK (3<<24) -#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24) -#define FDI_RX_TP1_TO_TP2_48 (2<<20) -#define FDI_RX_TP1_TO_TP2_64 (3<<20) -#define FDI_RX_FDI_DELAY_90 (0x90<<0) +#define FDI_RX_PWRDN_LANE1_MASK (3 << 26) +#define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26) +#define FDI_RX_PWRDN_LANE0_MASK (3 << 24) +#define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24) +#define FDI_RX_TP1_TO_TP2_48 (2 << 20) +#define FDI_RX_TP1_TO_TP2_64 (3 << 20) +#define FDI_RX_FDI_DELAY_90 (0x90 << 0) #define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) #define _FDI_RXA_TUSIZE1 0xf0030 @@ -7811,17 +8178,17 @@ enum { #define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) /* FDI_RX interrupt register format */ -#define FDI_RX_INTER_LANE_ALIGN (1<<10) -#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */ -#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */ -#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7) -#define FDI_RX_FS_CODE_ERR (1<<6) -#define FDI_RX_FE_CODE_ERR (1<<5) -#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4) -#define FDI_RX_HDCP_LINK_FAIL (1<<3) -#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2) -#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1) -#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0) +#define FDI_RX_INTER_LANE_ALIGN (1 << 10) +#define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */ +#define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */ +#define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7) +#define FDI_RX_FS_CODE_ERR (1 << 6) +#define FDI_RX_FE_CODE_ERR (1 << 5) +#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4) +#define FDI_RX_HDCP_LINK_FAIL (1 << 3) +#define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2) +#define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1) +#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0) #define _FDI_RXA_IIR 0xf0014 #define _FDI_RXA_IMR 0xf0018 @@ -7867,71 +8234,58 @@ enum { #define PCH_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT((aux_ch) - AUX_CH_B, _PCH_DPB_AUX_CH_DATA1, _PCH_DPC_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ /* CPT */ -#define PORT_TRANS_A_SEL_CPT 0 -#define PORT_TRANS_B_SEL_CPT (1<<29) -#define PORT_TRANS_C_SEL_CPT (2<<29) -#define PORT_TRANS_SEL_MASK (3<<29) -#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) -#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30) -#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29) 
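Note (editor, illustrative only, not part of this patch): the FDI RX control bits above are meant to be OR'd into a single FDI_RX_CTL value. A minimal hedged sketch, assuming the driver's usual I915_WRITE() MMIO accessor; the helper name is hypothetical:

	/* Hypothetical helper: enable FDI RX for a 2-lane, 8 bpc link with
	 * frame-start/frame-end symbol error reporting turned on. */
	static void fdi_rx_enable_example(struct drm_i915_private *dev_priv,
					  enum pipe pipe)
	{
		u32 val = FDI_RX_ENABLE |
			  FDI_DP_PORT_WIDTH(2) |	/* lane count encoded as (lanes - 1) << 19 */
			  FDI_8BPC |
			  FDI_FS_ERR_REPORT_ENABLE |
			  FDI_FE_ERR_REPORT_ENABLE;

		I915_WRITE(FDI_RX_CTL(pipe), val);
	}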
-#define SDVO_PORT_TO_PIPE_CHV(val) (((val) & (3<<24)) >> 24) -#define DP_PORT_TO_PIPE_CHV(val) (((val) & (3<<16)) >> 16) - #define _TRANS_DP_CTL_A 0xe0300 #define _TRANS_DP_CTL_B 0xe1300 #define _TRANS_DP_CTL_C 0xe2300 #define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B) -#define TRANS_DP_OUTPUT_ENABLE (1<<31) -#define TRANS_DP_PORT_SEL_B (0<<29) -#define TRANS_DP_PORT_SEL_C (1<<29) -#define TRANS_DP_PORT_SEL_D (2<<29) -#define TRANS_DP_PORT_SEL_NONE (3<<29) -#define TRANS_DP_PORT_SEL_MASK (3<<29) -#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B) -#define TRANS_DP_AUDIO_ONLY (1<<26) -#define TRANS_DP_ENH_FRAMING (1<<18) -#define TRANS_DP_8BPC (0<<9) -#define TRANS_DP_10BPC (1<<9) -#define TRANS_DP_6BPC (2<<9) -#define TRANS_DP_12BPC (3<<9) -#define TRANS_DP_BPC_MASK (3<<9) -#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) +#define TRANS_DP_OUTPUT_ENABLE (1 << 31) +#define TRANS_DP_PORT_SEL_MASK (3 << 29) +#define TRANS_DP_PORT_SEL_NONE (3 << 29) +#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29) +#define TRANS_DP_AUDIO_ONLY (1 << 26) +#define TRANS_DP_ENH_FRAMING (1 << 18) +#define TRANS_DP_8BPC (0 << 9) +#define TRANS_DP_10BPC (1 << 9) +#define TRANS_DP_6BPC (2 << 9) +#define TRANS_DP_12BPC (3 << 9) +#define TRANS_DP_BPC_MASK (3 << 9) +#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4) #define TRANS_DP_VSYNC_ACTIVE_LOW 0 -#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) +#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3) #define TRANS_DP_HSYNC_ACTIVE_LOW 0 -#define TRANS_DP_SYNC_MASK (3<<3) +#define TRANS_DP_SYNC_MASK (3 << 3) /* SNB eDP training params */ /* SNB A-stepping */ -#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38<<22) -#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02<<22) -#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01<<22) -#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0<<22) +#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22) +#define EDP_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22) +#define EDP_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22) +#define EDP_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22) /* SNB B-stepping */ -#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0<<22) -#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1<<22) -#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a<<22) -#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39<<22) -#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22) -#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) +#define EDP_LINK_TRAIN_400_600MV_0DB_SNB_B (0x0 << 22) +#define EDP_LINK_TRAIN_400MV_3_5DB_SNB_B (0x1 << 22) +#define EDP_LINK_TRAIN_400_600MV_6DB_SNB_B (0x3a << 22) +#define EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B (0x39 << 22) +#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38 << 22) +#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f << 22) /* IVB */ -#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22) -#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22) -#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22) -#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) -#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) -#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) -#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22) +#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 << 22) +#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a << 22) +#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f << 22) +#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 << 22) +#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 << 22) +#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 << 22) +#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e << 22) /* legacy values */ -#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) 
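Note (editor, illustrative only): the fixed TRANS_DP_PORT_SEL_B/C/D values are folded into the single parameterized TRANS_DP_PORT_SEL(port) above. A hedged sketch of recovering the port from a TRANS_DP_CTL value, mirroring the removed TRANS_DP_PIPE_TO_PORT() helper; the function name is hypothetical and assumes PORT_B..PORT_D are consecutive enum values:

	/* Illustrative only: decode the port selection from a TRANS_DP_CTL value. */
	static enum port trans_dp_port_example(u32 trans_dp_ctl)
	{
		return PORT_B + ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) >> 29);
	}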
-#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22) -#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22) -#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22) -#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22) +#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 << 22) +#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 << 22) +#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 << 22) +#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 << 22) +#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 << 22) -#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) +#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f << 22) #define VLV_PMWGICZ _MMIO(0x1300a4) @@ -7978,7 +8332,7 @@ enum { #define FORCEWAKE_KERNEL_FALLBACK BIT(15) #define FORCEWAKE_MT_ACK _MMIO(0x130040) #define ECOBUS _MMIO(0xa180) -#define FORCEWAKE_MT_ENABLE (1<<5) +#define FORCEWAKE_MT_ENABLE (1 << 5) #define VLV_SPAREG2H _MMIO(0xA194) #define GEN9_PWRGT_DOMAIN_STATUS _MMIO(0xA2A0) #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) @@ -7987,13 +8341,13 @@ enum { #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) -#define GT_FIFO_SBDROPERR (1<<6) -#define GT_FIFO_BLOBDROPERR (1<<5) -#define GT_FIFO_SB_READ_ABORTERR (1<<4) -#define GT_FIFO_DROPERR (1<<3) -#define GT_FIFO_OVFERR (1<<2) -#define GT_FIFO_IAWRERR (1<<1) -#define GT_FIFO_IARDERR (1<<0) +#define GT_FIFO_SBDROPERR (1 << 6) +#define GT_FIFO_BLOBDROPERR (1 << 5) +#define GT_FIFO_SB_READ_ABORTERR (1 << 4) +#define GT_FIFO_DROPERR (1 << 3) +#define GT_FIFO_OVFERR (1 << 2) +#define GT_FIFO_IAWRERR (1 << 1) +#define GT_FIFO_IARDERR (1 << 0) #define GTFIFOCTL _MMIO(0x120008) #define GT_FIFO_FREE_ENTRIES_MASK 0x7f @@ -8027,37 +8381,37 @@ enum { # define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20) #define GEN7_UCGCTL4 _MMIO(0x940c) -#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25) -#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14) +#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1 << 25) +#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1 << 14) #define GEN6_RCGCTL1 _MMIO(0x9410) #define GEN6_RCGCTL2 _MMIO(0x9414) #define GEN6_RSTCTL _MMIO(0x9420) #define GEN8_UCGCTL6 _MMIO(0x9430) -#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24) -#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) -#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28) +#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1 << 24) +#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) +#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) #define GEN6_GFXPAUSE _MMIO(0xA000) #define GEN6_RPNSWREQ _MMIO(0xA008) -#define GEN6_TURBO_DISABLE (1<<31) -#define GEN6_FREQUENCY(x) ((x)<<25) -#define HSW_FREQUENCY(x) ((x)<<24) -#define GEN9_FREQUENCY(x) ((x)<<23) -#define GEN6_OFFSET(x) ((x)<<19) -#define GEN6_AGGRESSIVE_TURBO (0<<15) +#define GEN6_TURBO_DISABLE (1 << 31) +#define GEN6_FREQUENCY(x) ((x) << 25) +#define HSW_FREQUENCY(x) ((x) << 24) +#define GEN9_FREQUENCY(x) ((x) << 23) +#define GEN6_OFFSET(x) ((x) << 19) +#define GEN6_AGGRESSIVE_TURBO (0 << 15) #define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C) #define GEN6_RC_CONTROL _MMIO(0xA090) -#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) -#define GEN6_RC_CTL_RC6p_ENABLE (1<<17) -#define GEN6_RC_CTL_RC6_ENABLE (1<<18) -#define GEN6_RC_CTL_RC1e_ENABLE (1<<20) -#define GEN6_RC_CTL_RC7_ENABLE (1<<22) -#define VLV_RC_CTL_CTX_RST_PARALLEL (1<<24) -#define GEN7_RC_CTL_TO_MODE (1<<28) -#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) -#define GEN6_RC_CTL_HW_ENABLE (1<<31) +#define GEN6_RC_CTL_RC6pp_ENABLE (1 << 16) +#define GEN6_RC_CTL_RC6p_ENABLE (1 << 17) +#define GEN6_RC_CTL_RC6_ENABLE (1 << 
18) +#define GEN6_RC_CTL_RC1e_ENABLE (1 << 20) +#define GEN6_RC_CTL_RC7_ENABLE (1 << 22) +#define VLV_RC_CTL_CTX_RST_PARALLEL (1 << 24) +#define GEN7_RC_CTL_TO_MODE (1 << 28) +#define GEN6_RC_CTL_EI_MODE(x) ((x) << 27) +#define GEN6_RC_CTL_HW_ENABLE (1 << 31) #define GEN6_RP_DOWN_TIMEOUT _MMIO(0xA010) #define GEN6_RP_INTERRUPT_LIMITS _MMIO(0xA014) #define GEN6_RPSTAT1 _MMIO(0xA01C) @@ -8068,19 +8422,19 @@ enum { #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) #define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) #define GEN6_RP_CONTROL _MMIO(0xA024) -#define GEN6_RP_MEDIA_TURBO (1<<11) -#define GEN6_RP_MEDIA_MODE_MASK (3<<9) -#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9) -#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9) -#define GEN6_RP_MEDIA_HW_MODE (1<<9) -#define GEN6_RP_MEDIA_SW_MODE (0<<9) -#define GEN6_RP_MEDIA_IS_GFX (1<<8) -#define GEN6_RP_ENABLE (1<<7) -#define GEN6_RP_UP_IDLE_MIN (0x1<<3) -#define GEN6_RP_UP_BUSY_AVG (0x2<<3) -#define GEN6_RP_UP_BUSY_CONT (0x4<<3) -#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0) -#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) +#define GEN6_RP_MEDIA_TURBO (1 << 11) +#define GEN6_RP_MEDIA_MODE_MASK (3 << 9) +#define GEN6_RP_MEDIA_HW_TURBO_MODE (3 << 9) +#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2 << 9) +#define GEN6_RP_MEDIA_HW_MODE (1 << 9) +#define GEN6_RP_MEDIA_SW_MODE (0 << 9) +#define GEN6_RP_MEDIA_IS_GFX (1 << 8) +#define GEN6_RP_ENABLE (1 << 7) +#define GEN6_RP_UP_IDLE_MIN (0x1 << 3) +#define GEN6_RP_UP_BUSY_AVG (0x2 << 3) +#define GEN6_RP_UP_BUSY_CONT (0x4 << 3) +#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0) +#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0) #define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C) #define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030) #define GEN6_RP_CUR_UP_EI _MMIO(0xA050) @@ -8116,15 +8470,15 @@ enum { #define VLV_RCEDATA _MMIO(0xA0BC) #define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) #define GEN6_PMINTRMSK _MMIO(0xA168) -#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31) -#define ARAT_EXPIRED_INTRMSK (1<<9) +#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1 << 31) +#define ARAT_EXPIRED_INTRMSK (1 << 9) #define GEN8_MISC_CTRL0 _MMIO(0xA180) #define VLV_PWRDWNUPCTL _MMIO(0xA294) #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) #define GEN9_PG_ENABLE _MMIO(0xA210) -#define GEN9_RENDER_PG_ENABLE (1<<0) -#define GEN9_MEDIA_PG_ENABLE (1<<1) +#define GEN9_RENDER_PG_ENABLE (1 << 0) +#define GEN9_MEDIA_PG_ENABLE (1 << 1) #define GEN8_PUSHBUS_CONTROL _MMIO(0xA248) #define GEN8_PUSHBUS_ENABLE _MMIO(0xA250) #define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C) @@ -8137,13 +8491,13 @@ enum { #define GEN6_PMIMR _MMIO(0x44024) /* rps_lock */ #define GEN6_PMIIR _MMIO(0x44028) #define GEN6_PMIER _MMIO(0x4402C) -#define GEN6_PM_MBOX_EVENT (1<<25) -#define GEN6_PM_THERMAL_EVENT (1<<24) -#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) -#define GEN6_PM_RP_UP_THRESHOLD (1<<5) -#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) -#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) -#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) +#define GEN6_PM_MBOX_EVENT (1 << 25) +#define GEN6_PM_THERMAL_EVENT (1 << 24) +#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6) +#define GEN6_PM_RP_UP_THRESHOLD (1 << 5) +#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4) +#define GEN6_PM_RP_UP_EI_EXPIRED (1 << 2) +#define GEN6_PM_RP_DOWN_EI_EXPIRED (1 << 1) #define GEN6_PM_RPS_EVENTS (GEN6_PM_RP_UP_THRESHOLD | \ GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_TIMEOUT) @@ -8152,16 +8506,16 @@ enum { #define GEN7_GT_SCRATCH_REG_NUM 8 #define VLV_GTLC_SURVIVABILITY_REG _MMIO(0x130098) -#define VLV_GFX_CLK_STATUS_BIT (1<<3) -#define 
VLV_GFX_CLK_FORCE_ON_BIT (1<<2) +#define VLV_GFX_CLK_STATUS_BIT (1 << 3) +#define VLV_GFX_CLK_FORCE_ON_BIT (1 << 2) #define GEN6_GT_GFX_RC6_LOCKED _MMIO(0x138104) #define VLV_COUNTER_CONTROL _MMIO(0x138104) -#define VLV_COUNT_RANGE_HIGH (1<<15) -#define VLV_MEDIA_RC0_COUNT_EN (1<<5) -#define VLV_RENDER_RC0_COUNT_EN (1<<4) -#define VLV_MEDIA_RC6_COUNT_EN (1<<1) -#define VLV_RENDER_RC6_COUNT_EN (1<<0) +#define VLV_COUNT_RANGE_HIGH (1 << 15) +#define VLV_MEDIA_RC0_COUNT_EN (1 << 5) +#define VLV_RENDER_RC0_COUNT_EN (1 << 4) +#define VLV_MEDIA_RC6_COUNT_EN (1 << 1) +#define VLV_RENDER_RC6_COUNT_EN (1 << 0) #define GEN6_GT_GFX_RC6 _MMIO(0x138108) #define VLV_GT_RENDER_RC6 _MMIO(0x138108) #define VLV_GT_MEDIA_RC6 _MMIO(0x13810C) @@ -8172,7 +8526,7 @@ enum { #define VLV_MEDIA_C0_COUNT _MMIO(0x13811C) #define GEN6_PCODE_MAILBOX _MMIO(0x138124) -#define GEN6_PCODE_READY (1<<31) +#define GEN6_PCODE_READY (1 << 31) #define GEN6_PCODE_ERROR_MASK 0xFF #define GEN6_PCODE_SUCCESS 0x0 #define GEN6_PCODE_ILLEGAL_CMD 0x1 @@ -8216,7 +8570,7 @@ enum { #define GEN6_PCODE_DATA1 _MMIO(0x13812C) #define GEN6_GT_CORE_STATUS _MMIO(0x138060) -#define GEN6_CORE_CPD_STATE_MASK (7<<4) +#define GEN6_CORE_CPD_STATE_MASK (7 << 4) #define GEN6_RCn_MASK 7 #define GEN6_RC0 0 #define GEN6_RC3 2 @@ -8228,26 +8582,26 @@ enum { #define CHV_POWER_SS0_SIG1 _MMIO(0xa720) #define CHV_POWER_SS1_SIG1 _MMIO(0xa728) -#define CHV_SS_PG_ENABLE (1<<1) -#define CHV_EU08_PG_ENABLE (1<<9) -#define CHV_EU19_PG_ENABLE (1<<17) -#define CHV_EU210_PG_ENABLE (1<<25) +#define CHV_SS_PG_ENABLE (1 << 1) +#define CHV_EU08_PG_ENABLE (1 << 9) +#define CHV_EU19_PG_ENABLE (1 << 17) +#define CHV_EU210_PG_ENABLE (1 << 25) #define CHV_POWER_SS0_SIG2 _MMIO(0xa724) #define CHV_POWER_SS1_SIG2 _MMIO(0xa72c) -#define CHV_EU311_PG_ENABLE (1<<1) +#define CHV_EU311_PG_ENABLE (1 << 1) -#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice)*0x4) +#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4) #define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \ ((slice) % 3) * 0x4) #define GEN9_PGCTL_SLICE_ACK (1 << 0) -#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2)) +#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice) * 2)) #define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? 
0x7F : 0x1F) -#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice)*0x8) +#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8) #define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \ ((slice) % 3) * 0x8) -#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice)*0x8) +#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8) #define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \ ((slice) % 3) * 0x8) #define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) @@ -8260,10 +8614,10 @@ enum { #define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) #define GEN7_MISCCPCTL _MMIO(0x9424) -#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) -#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2) -#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4) -#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6) +#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0) +#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2) +#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4) +#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1 << 6) #define GEN8_GARBCNTL _MMIO(0xB004) #define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7) @@ -8292,61 +8646,62 @@ enum { /* IVYBRIDGE DPF */ #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ -#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) -#define GEN7_PARITY_ERROR_VALID (1<<13) -#define GEN7_L3CDERRST1_BANK_MASK (3<<11) -#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8) +#define GEN7_L3CDERRST1_ROW_MASK (0x7ff << 14) +#define GEN7_PARITY_ERROR_VALID (1 << 13) +#define GEN7_L3CDERRST1_BANK_MASK (3 << 11) +#define GEN7_L3CDERRST1_SUBBANK_MASK (7 << 8) #define GEN7_PARITY_ERROR_ROW(reg) \ - ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14) + (((reg) & GEN7_L3CDERRST1_ROW_MASK) >> 14) #define GEN7_PARITY_ERROR_BANK(reg) \ - ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11) + (((reg) & GEN7_L3CDERRST1_BANK_MASK) >> 11) #define GEN7_PARITY_ERROR_SUBBANK(reg) \ - ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) -#define GEN7_L3CDERRST1_ENABLE (1<<7) + (((reg) & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8) +#define GEN7_L3CDERRST1_ENABLE (1 << 7) #define GEN7_L3LOG(slice, i) _MMIO(0xB070 + (slice) * 0x200 + (i) * 4) #define GEN7_L3LOG_SIZE 0x80 #define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */ #define GEN7_HALF_SLICE_CHICKEN1_GT2 _MMIO(0xf100) -#define GEN7_MAX_PS_THREAD_DEP (8<<12) -#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) -#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4) -#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) +#define GEN7_MAX_PS_THREAD_DEP (8 << 12) +#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1 << 10) +#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1 << 4) +#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1 << 3) #define GEN9_HALF_SLICE_CHICKEN5 _MMIO(0xe188) -#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) -#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3) +#define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5) +#define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) -#define FLOW_CONTROL_ENABLE (1<<15) -#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) -#define STALL_DOP_GATING_DISABLE (1<<5) -#define THROTTLE_12_5 (7<<2) -#define DISABLE_EARLY_EOT (1<<1) +#define FLOW_CONTROL_ENABLE (1 << 15) +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) +#define STALL_DOP_GATING_DISABLE (1 << 5) +#define THROTTLE_12_5 (7 << 2) +#define DISABLE_EARLY_EOT (1 << 1) #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) -#define DOP_CLOCK_GATING_DISABLE (1<<0) -#define PUSH_CONSTANT_DEREF_DISABLE (1<<8) 
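Note (editor, illustrative only): the GEN7_PARITY_ERROR_ROW/BANK/SUBBANK accessors above gain parentheses around (reg), which keeps them safe when handed a compound expression. A short hedged decode sketch; the helper name is hypothetical and not from this patch:

	/* Illustrative only: decode a GEN7_L3CDERRST1 snapshot. */
	static void gen7_decode_l3_parity_example(u32 reg)
	{
		if (!(reg & GEN7_PARITY_ERROR_VALID))
			return;

		DRM_DEBUG("L3 parity error: row %d, bank %d, subbank %d\n",
			  GEN7_PARITY_ERROR_ROW(reg),
			  GEN7_PARITY_ERROR_BANK(reg),
			  GEN7_PARITY_ERROR_SUBBANK(reg));
	}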
+#define DOP_CLOCK_GATING_DISABLE (1 << 0) +#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) +#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) #define HALF_SLICE_CHICKEN2 _MMIO(0xe180) -#define GEN8_ST_PO_DISABLE (1<<13) +#define GEN8_ST_PO_DISABLE (1 << 13) #define HALF_SLICE_CHICKEN3 _MMIO(0xe184) -#define HSW_SAMPLE_C_PERFORMANCE (1<<9) -#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) -#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) -#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4) -#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) +#define HSW_SAMPLE_C_PERFORMANCE (1 << 9) +#define GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8) +#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5) +#define CNL_FAST_ANISO_L1_BANKING_FIX (1 << 4) +#define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) -#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8) -#define GEN9_ENABLE_YV12_BUGFIX (1<<4) -#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8) +#define GEN9_ENABLE_YV12_BUGFIX (1 << 4) +#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2) /* Audio */ #define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020) @@ -8478,6 +8833,14 @@ enum { #define _HSW_PWR_WELL_CTL3 0x45408 #define _HSW_PWR_WELL_CTL4 0x4540C +#define _ICL_PWR_WELL_CTL_AUX1 0x45440 +#define _ICL_PWR_WELL_CTL_AUX2 0x45444 +#define _ICL_PWR_WELL_CTL_AUX4 0x4544C + +#define _ICL_PWR_WELL_CTL_DDI1 0x45450 +#define _ICL_PWR_WELL_CTL_DDI2 0x45454 +#define _ICL_PWR_WELL_CTL_DDI4 0x4545C + /* * Each power well control register contains up to 16 (request, status) HW * flag tuples. The register index and HW flag shift is determined by the @@ -8487,21 +8850,27 @@ enum { */ #define _HSW_PW_REG_IDX(pw) ((pw) >> 4) #define _HSW_PW_SHIFT(pw) (((pw) & 0xf) * 2) -/* TODO: Add all PWR_WELL_CTL registers below for new platforms */ #define HSW_PWR_WELL_CTL_BIOS(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL1)) + _HSW_PWR_WELL_CTL1, \ + _ICL_PWR_WELL_CTL_AUX1, \ + _ICL_PWR_WELL_CTL_DDI1)) #define HSW_PWR_WELL_CTL_DRIVER(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL2)) + _HSW_PWR_WELL_CTL2, \ + _ICL_PWR_WELL_CTL_AUX2, \ + _ICL_PWR_WELL_CTL_DDI2)) +/* KVMR doesn't have a reg for AUX or DDI power well control */ #define HSW_PWR_WELL_CTL_KVMR _MMIO(_HSW_PWR_WELL_CTL3) #define HSW_PWR_WELL_CTL_DEBUG(pw) _MMIO(_PICK(_HSW_PW_REG_IDX(pw), \ - _HSW_PWR_WELL_CTL4)) + _HSW_PWR_WELL_CTL4, \ + _ICL_PWR_WELL_CTL_AUX4, \ + _ICL_PWR_WELL_CTL_DDI4)) #define HSW_PWR_WELL_CTL_REQ(pw) (1 << (_HSW_PW_SHIFT(pw) + 1)) #define HSW_PWR_WELL_CTL_STATE(pw) (1 << _HSW_PW_SHIFT(pw)) #define HSW_PWR_WELL_CTL5 _MMIO(0x45410) -#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) -#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) -#define HSW_PWR_WELL_FORCE_ON (1<<19) +#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1 << 31) +#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1 << 20) +#define HSW_PWR_WELL_FORCE_ON (1 << 19) #define HSW_PWR_WELL_CTL6 _MMIO(0x45414) /* SKL Fuse Status */ @@ -8512,9 +8881,11 @@ enum skl_power_gate { }; #define SKL_FUSE_STATUS _MMIO(0x42000) -#define SKL_FUSE_DOWNLOAD_STATUS (1<<31) +#define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) /* PG0 (HW control->no power well ID), PG1..PG2 (SKL_DISP_PW1..SKL_DISP_PW2) */ #define SKL_PW_TO_PG(pw) ((pw) - SKL_DISP_PW_1 + SKL_PG1) +/* PG0 (HW control->no power well ID), PG1..PG4 (ICL_DISP_PW1..ICL_DISP_PW4) */ +#define ICL_PW_TO_PG(pw) ((pw) - ICL_DISP_PW_1 + 
SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) #define _CNL_AUX_REG_IDX(pw) ((pw) - 9) @@ -8527,8 +8898,8 @@ enum skl_power_gate { _CNL_AUX_ANAOVRD1_C, \ _CNL_AUX_ANAOVRD1_D, \ _CNL_AUX_ANAOVRD1_F)) -#define CNL_AUX_ANAOVRD1_ENABLE (1<<16) -#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1<<23) +#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) +#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) /* HDCP Key Registers */ #define HDCP_KEY_CONF _MMIO(0x66c00) @@ -8573,7 +8944,7 @@ enum skl_power_gate { #define HDCP_SHA_V_PRIME_H2 _MMIO(0x66d0C) #define HDCP_SHA_V_PRIME_H3 _MMIO(0x66d10) #define HDCP_SHA_V_PRIME_H4 _MMIO(0x66d14) -#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + h * 4)) +#define HDCP_SHA_V_PRIME(h) _MMIO((0x66d04 + (h) * 4)) #define HDCP_SHA_TEXT _MMIO(0x66d18) /* HDCP Auth Registers */ @@ -8589,7 +8960,7 @@ enum skl_power_gate { _PORTC_HDCP_AUTHENC, \ _PORTD_HDCP_AUTHENC, \ _PORTE_HDCP_AUTHENC, \ - _PORTF_HDCP_AUTHENC) + x) + _PORTF_HDCP_AUTHENC) + (x)) #define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) #define HDCP_CONF_CAPTURE_AN BIT(0) #define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) @@ -8610,7 +8981,7 @@ enum skl_power_gate { #define HDCP_STATUS_R0_READY BIT(18) #define HDCP_STATUS_AN_READY BIT(17) #define HDCP_STATUS_CIPHER BIT(16) -#define HDCP_STATUS_FRAME_CNT(x) ((x >> 8) & 0xff) +#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 @@ -8619,37 +8990,37 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) -#define TRANS_DDI_FUNC_ENABLE (1<<31) +#define TRANS_DDI_FUNC_ENABLE (1 << 31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define TRANS_DDI_PORT_MASK (7<<28) +#define TRANS_DDI_PORT_MASK (7 << 28) #define TRANS_DDI_PORT_SHIFT 28 -#define TRANS_DDI_SELECT_PORT(x) ((x)<<28) -#define TRANS_DDI_PORT_NONE (0<<28) -#define TRANS_DDI_MODE_SELECT_MASK (7<<24) -#define TRANS_DDI_MODE_SELECT_HDMI (0<<24) -#define TRANS_DDI_MODE_SELECT_DVI (1<<24) -#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24) -#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24) -#define TRANS_DDI_MODE_SELECT_FDI (4<<24) -#define TRANS_DDI_BPC_MASK (7<<20) -#define TRANS_DDI_BPC_8 (0<<20) -#define TRANS_DDI_BPC_10 (1<<20) -#define TRANS_DDI_BPC_6 (2<<20) -#define TRANS_DDI_BPC_12 (3<<20) -#define TRANS_DDI_PVSYNC (1<<17) -#define TRANS_DDI_PHSYNC (1<<16) -#define TRANS_DDI_EDP_INPUT_MASK (7<<12) -#define TRANS_DDI_EDP_INPUT_A_ON (0<<12) -#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12) -#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12) -#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12) -#define TRANS_DDI_HDCP_SIGNALLING (1<<9) -#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8) -#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7) -#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6) -#define TRANS_DDI_BFI_ENABLE (1<<4) -#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4) -#define TRANS_DDI_HDMI_SCRAMBLING (1<<0) +#define TRANS_DDI_SELECT_PORT(x) ((x) << 28) +#define TRANS_DDI_PORT_NONE (0 << 28) +#define TRANS_DDI_MODE_SELECT_MASK (7 << 24) +#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) +#define TRANS_DDI_MODE_SELECT_DVI (1 << 24) +#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24) +#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24) +#define TRANS_DDI_MODE_SELECT_FDI (4 << 24) +#define TRANS_DDI_BPC_MASK (7 << 20) +#define TRANS_DDI_BPC_8 (0 << 20) +#define TRANS_DDI_BPC_10 (1 << 20) +#define TRANS_DDI_BPC_6 (2 << 20) +#define TRANS_DDI_BPC_12 (3 << 20) +#define 
TRANS_DDI_PVSYNC (1 << 17) +#define TRANS_DDI_PHSYNC (1 << 16) +#define TRANS_DDI_EDP_INPUT_MASK (7 << 12) +#define TRANS_DDI_EDP_INPUT_A_ON (0 << 12) +#define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12) +#define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12) +#define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12) +#define TRANS_DDI_HDCP_SIGNALLING (1 << 9) +#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8) +#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7) +#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1 << 6) +#define TRANS_DDI_BFI_ENABLE (1 << 4) +#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1 << 4) +#define TRANS_DDI_HDMI_SCRAMBLING (1 << 0) #define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ | TRANS_DDI_HDMI_SCRAMBLING) @@ -8658,28 +9029,29 @@ enum skl_power_gate { #define _DP_TP_CTL_A 0x64040 #define _DP_TP_CTL_B 0x64140 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) -#define DP_TP_CTL_ENABLE (1<<31) -#define DP_TP_CTL_MODE_SST (0<<27) -#define DP_TP_CTL_MODE_MST (1<<27) -#define DP_TP_CTL_FORCE_ACT (1<<25) -#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) -#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) -#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) -#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) -#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) -#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8) -#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8) -#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) -#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7) +#define DP_TP_CTL_ENABLE (1 << 31) +#define DP_TP_CTL_MODE_SST (0 << 27) +#define DP_TP_CTL_MODE_MST (1 << 27) +#define DP_TP_CTL_FORCE_ACT (1 << 25) +#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1 << 18) +#define DP_TP_CTL_FDI_AUTOTRAIN (1 << 15) +#define DP_TP_CTL_LINK_TRAIN_MASK (7 << 8) +#define DP_TP_CTL_LINK_TRAIN_PAT1 (0 << 8) +#define DP_TP_CTL_LINK_TRAIN_PAT2 (1 << 8) +#define DP_TP_CTL_LINK_TRAIN_PAT3 (4 << 8) +#define DP_TP_CTL_LINK_TRAIN_PAT4 (5 << 8) +#define DP_TP_CTL_LINK_TRAIN_IDLE (2 << 8) +#define DP_TP_CTL_LINK_TRAIN_NORMAL (3 << 8) +#define DP_TP_CTL_SCRAMBLE_DISABLE (1 << 7) /* DisplayPort Transport Status */ #define _DP_TP_STATUS_A 0x64044 #define _DP_TP_STATUS_B 0x64144 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) -#define DP_TP_STATUS_IDLE_DONE (1<<25) -#define DP_TP_STATUS_ACT_SENT (1<<24) -#define DP_TP_STATUS_MODE_STATUS_MST (1<<23) -#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) +#define DP_TP_STATUS_IDLE_DONE (1 << 25) +#define DP_TP_STATUS_ACT_SENT (1 << 24) +#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23) +#define DP_TP_STATUS_AUTOTRAIN_DONE (1 << 12) #define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8) #define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4) #define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0) @@ -8688,16 +9060,16 @@ enum skl_power_gate { #define _DDI_BUF_CTL_A 0x64000 #define _DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL(port) _MMIO_PORT(port, _DDI_BUF_CTL_A, _DDI_BUF_CTL_B) -#define DDI_BUF_CTL_ENABLE (1<<31) +#define DDI_BUF_CTL_ENABLE (1 << 31) #define DDI_BUF_TRANS_SELECT(n) ((n) << 24) -#define DDI_BUF_EMP_MASK (0xf<<24) -#define DDI_BUF_PORT_REVERSAL (1<<16) -#define DDI_BUF_IS_IDLE (1<<7) -#define DDI_A_4_LANES (1<<4) +#define DDI_BUF_EMP_MASK (0xf << 24) +#define DDI_BUF_PORT_REVERSAL (1 << 16) +#define DDI_BUF_IS_IDLE (1 << 7) +#define DDI_A_4_LANES (1 << 4) #define DDI_PORT_WIDTH(width) (((width) - 1) << 1) #define DDI_PORT_WIDTH_MASK (7 << 1) #define DDI_PORT_WIDTH_SHIFT 1 -#define DDI_INIT_DISPLAY_DETECTED (1<<0) +#define DDI_INIT_DISPLAY_DETECTED (1 << 0) /* DDI Buffer 
Translations */ #define _DDI_BUF_TRANS_A 0x64E00 @@ -8712,95 +9084,99 @@ enum skl_power_gate { #define SBI_ADDR _MMIO(0xC6000) #define SBI_DATA _MMIO(0xC6004) #define SBI_CTL_STAT _MMIO(0xC6008) -#define SBI_CTL_DEST_ICLK (0x0<<16) -#define SBI_CTL_DEST_MPHY (0x1<<16) -#define SBI_CTL_OP_IORD (0x2<<8) -#define SBI_CTL_OP_IOWR (0x3<<8) -#define SBI_CTL_OP_CRRD (0x6<<8) -#define SBI_CTL_OP_CRWR (0x7<<8) -#define SBI_RESPONSE_FAIL (0x1<<1) -#define SBI_RESPONSE_SUCCESS (0x0<<1) -#define SBI_BUSY (0x1<<0) -#define SBI_READY (0x0<<0) +#define SBI_CTL_DEST_ICLK (0x0 << 16) +#define SBI_CTL_DEST_MPHY (0x1 << 16) +#define SBI_CTL_OP_IORD (0x2 << 8) +#define SBI_CTL_OP_IOWR (0x3 << 8) +#define SBI_CTL_OP_CRRD (0x6 << 8) +#define SBI_CTL_OP_CRWR (0x7 << 8) +#define SBI_RESPONSE_FAIL (0x1 << 1) +#define SBI_RESPONSE_SUCCESS (0x0 << 1) +#define SBI_BUSY (0x1 << 0) +#define SBI_READY (0x0 << 0) /* SBI offsets */ #define SBI_SSCDIVINTPHASE 0x0200 #define SBI_SSCDIVINTPHASE6 0x0600 #define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1 -#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1) -#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) +#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1) +#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1) #define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8 -#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8) -#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) -#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) -#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) +#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f << 8) +#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8) +#define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15) +#define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0) #define SBI_SSCDITHPHASE 0x0204 #define SBI_SSCCTL 0x020c #define SBI_SSCCTL6 0x060C -#define SBI_SSCCTL_PATHALT (1<<3) -#define SBI_SSCCTL_DISABLE (1<<0) +#define SBI_SSCCTL_PATHALT (1 << 3) +#define SBI_SSCCTL_DISABLE (1 << 0) #define SBI_SSCAUXDIV6 0x0610 #define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4 -#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4) -#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) +#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4) +#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4) #define SBI_DBUFF0 0x2a00 #define SBI_GEN0 0x1f00 -#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) +#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0) /* LPT PIXCLK_GATE */ #define PIXCLK_GATE _MMIO(0xC6020) -#define PIXCLK_GATE_UNGATE (1<<0) -#define PIXCLK_GATE_GATE (0<<0) +#define PIXCLK_GATE_UNGATE (1 << 0) +#define PIXCLK_GATE_GATE (0 << 0) /* SPLL */ #define SPLL_CTL _MMIO(0x46020) -#define SPLL_PLL_ENABLE (1<<31) -#define SPLL_PLL_SSC (1<<28) -#define SPLL_PLL_NON_SSC (2<<28) -#define SPLL_PLL_LCPLL (3<<28) -#define SPLL_PLL_REF_MASK (3<<28) -#define SPLL_PLL_FREQ_810MHz (0<<26) -#define SPLL_PLL_FREQ_1350MHz (1<<26) -#define SPLL_PLL_FREQ_2700MHz (2<<26) -#define SPLL_PLL_FREQ_MASK (3<<26) +#define SPLL_PLL_ENABLE (1 << 31) +#define SPLL_PLL_SSC (1 << 28) +#define SPLL_PLL_NON_SSC (2 << 28) +#define SPLL_PLL_LCPLL (3 << 28) +#define SPLL_PLL_REF_MASK (3 << 28) +#define SPLL_PLL_FREQ_810MHz (0 << 26) +#define SPLL_PLL_FREQ_1350MHz (1 << 26) +#define SPLL_PLL_FREQ_2700MHz (2 << 26) +#define SPLL_PLL_FREQ_MASK (3 << 26) /* WRPLL */ #define _WRPLL_CTL1 0x46040 #define _WRPLL_CTL2 0x46060 #define WRPLL_CTL(pll) _MMIO_PIPE(pll, _WRPLL_CTL1, _WRPLL_CTL2) -#define WRPLL_PLL_ENABLE (1<<31) -#define WRPLL_PLL_SSC (1<<28) -#define WRPLL_PLL_NON_SSC (2<<28) -#define WRPLL_PLL_LCPLL (3<<28) -#define WRPLL_PLL_REF_MASK (3<<28) +#define WRPLL_PLL_ENABLE (1 << 31) +#define WRPLL_PLL_SSC (1 << 28) 
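Note (editor, illustrative only): the SBI (sideband interface) opcode and status bits above form a small command protocol. A minimal hedged sketch of a read over the ICLK destination, with error handling elided; wait_for() and I915_READ/I915_WRITE are the driver's usual helpers, and the function name is hypothetical:

	/* Hypothetical helper: read one sideband register via SBI. */
	static u32 sbi_read_iclk_example(struct drm_i915_private *dev_priv, u16 reg)
	{
		I915_WRITE(SBI_ADDR, (u32)reg << 16);
		I915_WRITE(SBI_CTL_STAT,
			   SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD | SBI_BUSY);

		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
			return 0;	/* timed out */

		return I915_READ(SBI_DATA);
	}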
+#define WRPLL_PLL_NON_SSC (2 << 28) +#define WRPLL_PLL_LCPLL (3 << 28) +#define WRPLL_PLL_REF_MASK (3 << 28) /* WRPLL divider programming */ -#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) +#define WRPLL_DIVIDER_REFERENCE(x) ((x) << 0) #define WRPLL_DIVIDER_REF_MASK (0xff) -#define WRPLL_DIVIDER_POST(x) ((x)<<8) -#define WRPLL_DIVIDER_POST_MASK (0x3f<<8) +#define WRPLL_DIVIDER_POST(x) ((x) << 8) +#define WRPLL_DIVIDER_POST_MASK (0x3f << 8) #define WRPLL_DIVIDER_POST_SHIFT 8 -#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) +#define WRPLL_DIVIDER_FEEDBACK(x) ((x) << 16) #define WRPLL_DIVIDER_FB_SHIFT 16 -#define WRPLL_DIVIDER_FB_MASK (0xff<<16) +#define WRPLL_DIVIDER_FB_MASK (0xff << 16) /* Port clock selection */ #define _PORT_CLK_SEL_A 0x46100 #define _PORT_CLK_SEL_B 0x46104 #define PORT_CLK_SEL(port) _MMIO_PORT(port, _PORT_CLK_SEL_A, _PORT_CLK_SEL_B) -#define PORT_CLK_SEL_LCPLL_2700 (0<<29) -#define PORT_CLK_SEL_LCPLL_1350 (1<<29) -#define PORT_CLK_SEL_LCPLL_810 (2<<29) -#define PORT_CLK_SEL_SPLL (3<<29) -#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29) -#define PORT_CLK_SEL_WRPLL1 (4<<29) -#define PORT_CLK_SEL_WRPLL2 (5<<29) -#define PORT_CLK_SEL_NONE (7<<29) -#define PORT_CLK_SEL_MASK (7<<29) +#define PORT_CLK_SEL_LCPLL_2700 (0 << 29) +#define PORT_CLK_SEL_LCPLL_1350 (1 << 29) +#define PORT_CLK_SEL_LCPLL_810 (2 << 29) +#define PORT_CLK_SEL_SPLL (3 << 29) +#define PORT_CLK_SEL_WRPLL(pll) (((pll) + 4) << 29) +#define PORT_CLK_SEL_WRPLL1 (4 << 29) +#define PORT_CLK_SEL_WRPLL2 (5 << 29) +#define PORT_CLK_SEL_NONE (7 << 29) +#define PORT_CLK_SEL_MASK (7 << 29) /* On ICL+ this is the same as PORT_CLK_SEL, but all bits change. */ #define DDI_CLK_SEL(port) PORT_CLK_SEL(port) #define DDI_CLK_SEL_NONE (0x0 << 28) #define DDI_CLK_SEL_MG (0x8 << 28) +#define DDI_CLK_SEL_TBT_162 (0xC << 28) +#define DDI_CLK_SEL_TBT_270 (0xD << 28) +#define DDI_CLK_SEL_TBT_540 (0xE << 28) +#define DDI_CLK_SEL_TBT_810 (0xF << 28) #define DDI_CLK_SEL_MASK (0xF << 28) /* Transcoder clock selection */ @@ -8808,8 +9184,8 @@ enum skl_power_gate { #define _TRANS_CLK_SEL_B 0x46144 #define TRANS_CLK_SEL(tran) _MMIO_TRANS(tran, _TRANS_CLK_SEL_A, _TRANS_CLK_SEL_B) /* For each transcoder, we need to select the corresponding port clock */ -#define TRANS_CLK_SEL_DISABLED (0x0<<29) -#define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) +#define TRANS_CLK_SEL_DISABLED (0x0 << 29) +#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29) #define CDCLK_FREQ _MMIO(0x46200) @@ -8819,28 +9195,28 @@ enum skl_power_gate { #define _TRANS_EDP_MSA_MISC 0x6f410 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) -#define TRANS_MSA_SYNC_CLK (1<<0) -#define TRANS_MSA_6_BPC (0<<5) -#define TRANS_MSA_8_BPC (1<<5) -#define TRANS_MSA_10_BPC (2<<5) -#define TRANS_MSA_12_BPC (3<<5) -#define TRANS_MSA_16_BPC (4<<5) +#define TRANS_MSA_SYNC_CLK (1 << 0) +#define TRANS_MSA_6_BPC (0 << 5) +#define TRANS_MSA_8_BPC (1 << 5) +#define TRANS_MSA_10_BPC (2 << 5) +#define TRANS_MSA_12_BPC (3 << 5) +#define TRANS_MSA_16_BPC (4 << 5) /* LCPLL Control */ #define LCPLL_CTL _MMIO(0x130040) -#define LCPLL_PLL_DISABLE (1<<31) -#define LCPLL_PLL_LOCK (1<<30) -#define LCPLL_CLK_FREQ_MASK (3<<26) -#define LCPLL_CLK_FREQ_450 (0<<26) -#define LCPLL_CLK_FREQ_54O_BDW (1<<26) -#define LCPLL_CLK_FREQ_337_5_BDW (2<<26) -#define LCPLL_CLK_FREQ_675_BDW (3<<26) -#define LCPLL_CD_CLOCK_DISABLE (1<<25) -#define LCPLL_ROOT_CD_CLOCK_DISABLE (1<<24) -#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) -#define LCPLL_POWER_DOWN_ALLOW (1<<22) -#define LCPLL_CD_SOURCE_FCLK (1<<21) -#define 
LCPLL_CD_SOURCE_FCLK_DONE (1<<19) +#define LCPLL_PLL_DISABLE (1 << 31) +#define LCPLL_PLL_LOCK (1 << 30) +#define LCPLL_CLK_FREQ_MASK (3 << 26) +#define LCPLL_CLK_FREQ_450 (0 << 26) +#define LCPLL_CLK_FREQ_54O_BDW (1 << 26) +#define LCPLL_CLK_FREQ_337_5_BDW (2 << 26) +#define LCPLL_CLK_FREQ_675_BDW (3 << 26) +#define LCPLL_CD_CLOCK_DISABLE (1 << 25) +#define LCPLL_ROOT_CD_CLOCK_DISABLE (1 << 24) +#define LCPLL_CD2X_CLOCK_DISABLE (1 << 23) +#define LCPLL_POWER_DOWN_ALLOW (1 << 22) +#define LCPLL_CD_SOURCE_FCLK (1 << 21) +#define LCPLL_CD_SOURCE_FCLK_DONE (1 << 19) /* * SKL Clocks @@ -8868,16 +9244,16 @@ enum skl_power_gate { /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) #define LCPLL2_CTL _MMIO(0x46014) -#define LCPLL_PLL_ENABLE (1<<31) +#define LCPLL_PLL_ENABLE (1 << 31) /* DPLL control1 */ #define DPLL_CTRL1 _MMIO(0x6C058) -#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5)) -#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4)) -#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1)) -#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1) -#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1)) -#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6)) +#define DPLL_CTRL1_HDMI_MODE(id) (1 << ((id) * 6 + 5)) +#define DPLL_CTRL1_SSC(id) (1 << ((id) * 6 + 4)) +#define DPLL_CTRL1_LINK_RATE_MASK(id) (7 << ((id) * 6 + 1)) +#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id) * 6 + 1) +#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate) << ((id) * 6 + 1)) +#define DPLL_CTRL1_OVERRIDE(id) (1 << ((id) * 6)) #define DPLL_CTRL1_LINK_RATE_2700 0 #define DPLL_CTRL1_LINK_RATE_1350 1 #define DPLL_CTRL1_LINK_RATE_810 2 @@ -8887,43 +9263,43 @@ enum skl_power_gate { /* DPLL control2 */ #define DPLL_CTRL2 _MMIO(0x6C05C) -#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15)) -#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) -#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) -#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1)) -#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) +#define DPLL_CTRL2_DDI_CLK_OFF(port) (1 << ((port) + 15)) +#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3 << ((port) * 3 + 1)) +#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port) * 3 + 1) +#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk) << ((port) * 3 + 1)) +#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1 << ((port) * 3)) /* DPLL Status */ #define DPLL_STATUS _MMIO(0x6C060) -#define DPLL_LOCK(id) (1<<((id)*8)) +#define DPLL_LOCK(id) (1 << ((id) * 8)) /* DPLL cfg */ #define _DPLL1_CFGCR1 0x6C040 #define _DPLL2_CFGCR1 0x6C048 #define _DPLL3_CFGCR1 0x6C050 -#define DPLL_CFGCR1_FREQ_ENABLE (1<<31) -#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) -#define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9) +#define DPLL_CFGCR1_FREQ_ENABLE (1 << 31) +#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff << 9) +#define DPLL_CFGCR1_DCO_FRACTION(x) ((x) << 9) #define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) #define _DPLL1_CFGCR2 0x6C044 #define _DPLL2_CFGCR2 0x6C04C #define _DPLL3_CFGCR2 0x6C054 -#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) -#define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8) -#define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7) -#define DPLL_CFGCR2_KDIV_MASK (3<<5) -#define DPLL_CFGCR2_KDIV(x) ((x)<<5) -#define DPLL_CFGCR2_KDIV_5 (0<<5) -#define DPLL_CFGCR2_KDIV_2 (1<<5) -#define DPLL_CFGCR2_KDIV_3 (2<<5) -#define DPLL_CFGCR2_KDIV_1 (3<<5) -#define DPLL_CFGCR2_PDIV_MASK (7<<2) -#define DPLL_CFGCR2_PDIV(x) ((x)<<2) -#define DPLL_CFGCR2_PDIV_1 (0<<2) -#define DPLL_CFGCR2_PDIV_2 (1<<2) -#define DPLL_CFGCR2_PDIV_3 (2<<2) -#define DPLL_CFGCR2_PDIV_7 (4<<2) 
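Note (editor, illustrative only): DPLL_CTRL1 packs a 6-bit field per DPLL id, which the (id)-parameterized macros above index into. A hedged sketch of selecting the 2700 link rate for one DPLL, roughly what the driver's DPLL code does; the helper name is hypothetical:

	/* Illustrative only: program the per-id link rate field of DPLL_CTRL1. */
	static void skl_dpll_ctrl1_example(struct drm_i915_private *dev_priv, int id)
	{
		u32 val = I915_READ(DPLL_CTRL1);

		val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
			 DPLL_CTRL1_SSC(id) |
			 DPLL_CTRL1_LINK_RATE_MASK(id));
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, id) |
		       DPLL_CTRL1_OVERRIDE(id);

		I915_WRITE(DPLL_CTRL1, val);
	}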
+#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff << 8) +#define DPLL_CFGCR2_QDIV_RATIO(x) ((x) << 8) +#define DPLL_CFGCR2_QDIV_MODE(x) ((x) << 7) +#define DPLL_CFGCR2_KDIV_MASK (3 << 5) +#define DPLL_CFGCR2_KDIV(x) ((x) << 5) +#define DPLL_CFGCR2_KDIV_5 (0 << 5) +#define DPLL_CFGCR2_KDIV_2 (1 << 5) +#define DPLL_CFGCR2_KDIV_3 (2 << 5) +#define DPLL_CFGCR2_KDIV_1 (3 << 5) +#define DPLL_CFGCR2_PDIV_MASK (7 << 2) +#define DPLL_CFGCR2_PDIV(x) ((x) << 2) +#define DPLL_CFGCR2_PDIV_1 (0 << 2) +#define DPLL_CFGCR2_PDIV_2 (1 << 2) +#define DPLL_CFGCR2_PDIV_3 (2 << 2) +#define DPLL_CFGCR2_PDIV_7 (4 << 2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) #define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) @@ -8935,9 +9311,9 @@ enum skl_power_gate { #define DPCLKA_CFGCR0 _MMIO(0x6C200) #define DPCLKA_CFGCR0_ICL _MMIO(0x164280) #define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \ - (port)+10)) + (port) + 10)) #define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \ - (port)*2) + (port) * 2) #define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) #define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) @@ -8950,6 +9326,8 @@ enum skl_power_gate { #define PLL_POWER_STATE (1 << 26) #define CNL_DPLL_ENABLE(pll) _MMIO_PLL(pll, DPLL0_ENABLE, DPLL1_ENABLE) +#define TBT_PLL_ENABLE _MMIO(0x46020) + #define _MG_PLL1_ENABLE 0x46030 #define _MG_PLL2_ENABLE 0x46034 #define _MG_PLL3_ENABLE 0x46038 @@ -8963,6 +9341,7 @@ enum skl_power_gate { #define _MG_REFCLKIN_CTL_PORT3 0x16A92C #define _MG_REFCLKIN_CTL_PORT4 0x16B92C #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) +#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) #define MG_REFCLKIN_CTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_REFCLKIN_CTL_PORT1, \ _MG_REFCLKIN_CTL_PORT2) @@ -8972,7 +9351,9 @@ enum skl_power_gate { #define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 #define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) +#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) #define MG_CLKTOP2_CORECLKCTL1(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_CORECLKCTL1_PORT1, \ _MG_CLKTOP2_CORECLKCTL1_PORT2) @@ -8982,9 +9363,13 @@ enum skl_power_gate { #define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 #define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) #define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO(x) ((x) << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) #define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) #define MG_CLKTOP2_HSCLKCTL(port) _MMIO_PORT((port) - PORT_C, \ _MG_CLKTOP2_HSCLKCTL_PORT1, \ _MG_CLKTOP2_HSCLKCTL_PORT2) @@ -9058,12 +9443,18 @@ enum skl_power_gate { #define _MG_PLL_BIAS_PORT3 0x16AA14 #define _MG_PLL_BIAS_PORT4 0x16BA14 #define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) +#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) #define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) +#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) #define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) +#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) #define MG_PLL_BIAS_BIASCAL_EN (1 << 15) #define MG_PLL_BIAS_CTRIM(x) ((x) << 
8) +#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) #define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) +#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) #define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) +#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) #define MG_PLL_BIAS(port) _MMIO_PORT((port) - PORT_C, _MG_PLL_BIAS_PORT1, \ _MG_PLL_BIAS_PORT2) @@ -9105,13 +9496,16 @@ enum skl_power_gate { #define DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10) #define DPLL_CFGCR1_QDIV_RATIO_SHIFT (10) #define DPLL_CFGCR1_QDIV_RATIO(x) ((x) << 10) +#define DPLL_CFGCR1_QDIV_MODE_SHIFT (9) #define DPLL_CFGCR1_QDIV_MODE(x) ((x) << 9) #define DPLL_CFGCR1_KDIV_MASK (7 << 6) +#define DPLL_CFGCR1_KDIV_SHIFT (6) #define DPLL_CFGCR1_KDIV(x) ((x) << 6) #define DPLL_CFGCR1_KDIV_1 (1 << 6) #define DPLL_CFGCR1_KDIV_2 (2 << 6) #define DPLL_CFGCR1_KDIV_4 (4 << 6) #define DPLL_CFGCR1_PDIV_MASK (0xf << 2) +#define DPLL_CFGCR1_PDIV_SHIFT (2) #define DPLL_CFGCR1_PDIV(x) ((x) << 2) #define DPLL_CFGCR1_PDIV_2 (1 << 2) #define DPLL_CFGCR1_PDIV_3 (2 << 2) @@ -9145,22 +9539,22 @@ enum skl_power_gate { /* GEN9 DC */ #define DC_STATE_EN _MMIO(0x45504) #define DC_STATE_DISABLE 0 -#define DC_STATE_EN_UPTO_DC5 (1<<0) -#define DC_STATE_EN_DC9 (1<<3) -#define DC_STATE_EN_UPTO_DC6 (2<<0) +#define DC_STATE_EN_UPTO_DC5 (1 << 0) +#define DC_STATE_EN_DC9 (1 << 3) +#define DC_STATE_EN_UPTO_DC6 (2 << 0) #define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 #define DC_STATE_DEBUG _MMIO(0x45520) -#define DC_STATE_DEBUG_MASK_CORES (1<<0) -#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1) +#define DC_STATE_DEBUG_MASK_CORES (1 << 0) +#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, * since on HSW we can't write to it using I915_WRITE. */ #define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C) #define D_COMP_BDW _MMIO(0x138144) -#define D_COMP_RCOMP_IN_PROGRESS (1<<9) -#define D_COMP_COMP_FORCE (1<<8) -#define D_COMP_COMP_DISABLE (1<<0) +#define D_COMP_RCOMP_IN_PROGRESS (1 << 9) +#define D_COMP_COMP_FORCE (1 << 8) +#define D_COMP_COMP_DISABLE (1 << 0) /* Pipe WM_LINETIME - watermark line time */ #define _PIPE_WM_LINETIME_A 0x45270 @@ -9168,27 +9562,27 @@ enum skl_power_gate { #define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B) #define PIPE_WM_LINETIME_MASK (0x1ff) #define PIPE_WM_LINETIME_TIME(x) ((x)) -#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) -#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) +#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16) +#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16) /* SFUSE_STRAP */ #define SFUSE_STRAP _MMIO(0xc2014) -#define SFUSE_STRAP_FUSE_LOCK (1<<13) -#define SFUSE_STRAP_RAW_FREQUENCY (1<<8) -#define SFUSE_STRAP_DISPLAY_DISABLED (1<<7) -#define SFUSE_STRAP_CRT_DISABLED (1<<6) -#define SFUSE_STRAP_DDIF_DETECTED (1<<3) -#define SFUSE_STRAP_DDIB_DETECTED (1<<2) -#define SFUSE_STRAP_DDIC_DETECTED (1<<1) -#define SFUSE_STRAP_DDID_DETECTED (1<<0) +#define SFUSE_STRAP_FUSE_LOCK (1 << 13) +#define SFUSE_STRAP_RAW_FREQUENCY (1 << 8) +#define SFUSE_STRAP_DISPLAY_DISABLED (1 << 7) +#define SFUSE_STRAP_CRT_DISABLED (1 << 6) +#define SFUSE_STRAP_DDIF_DETECTED (1 << 3) +#define SFUSE_STRAP_DDIB_DETECTED (1 << 2) +#define SFUSE_STRAP_DDIC_DETECTED (1 << 1) +#define SFUSE_STRAP_DDID_DETECTED (1 << 0) #define WM_MISC _MMIO(0x45260) #define WM_MISC_DATA_PARTITION_5_6 (1 << 0) #define WM_DBG _MMIO(0x45280) -#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) -#define WM_DBG_DISALLOW_MAXFIFO (1<<1) -#define WM_DBG_DISALLOW_SPRITE (1<<2) +#define 
WM_DBG_DISALLOW_MULTIPLE_LP (1 << 0) +#define WM_DBG_DISALLOW_MAXFIFO (1 << 1) +#define WM_DBG_DISALLOW_SPRITE (1 << 2) /* pipe CSC */ #define _PIPE_A_CSC_COEFF_RY_GY 0x49010 @@ -9314,6 +9708,22 @@ enum skl_power_gate { #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) #define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF +#define _ICL_DSI_ESC_CLK_DIV0 0x6b090 +#define _ICL_DSI_ESC_CLK_DIV1 0x6b890 +#define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DSI_ESC_CLK_DIV0, \ + _ICL_DSI_ESC_CLK_DIV1) +#define _ICL_DPHY_ESC_CLK_DIV0 0x162190 +#define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 +#define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ + _ICL_DPHY_ESC_CLK_DIV0, \ + _ICL_DPHY_ESC_CLK_DIV1) +#define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) +#define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 +#define ICL_ESC_CLK_DIV_MASK 0x1ff +#define ICL_ESC_CLK_DIV_SHIFT 0 +#define DSI_MAX_ESC_CLK 20000 /* in KHz */ + /* Gen4+ Timestamp and Pipe Frame time stamp registers */ #define GEN4_TIMESTAMP _MMIO(0x2358) #define ILK_TIMESTAMP_HI _MMIO(0x70070) @@ -9351,7 +9761,7 @@ enum skl_power_gate { _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \ BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) #define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \ - ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port)) + (((val) & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port)) /* RX upper control divider to select actual RX clock output from 8x */ #define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21 #define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5 @@ -9364,7 +9774,7 @@ enum skl_power_gate { _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \ BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK) #define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \ - ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)) + (((val) & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)) /* 8/3X divider to select the actual 8/3X clock output from 8x */ #define BXT_MIPI1_8X_BY3_SHIFT 19 #define BXT_MIPI2_8X_BY3_SHIFT 3 @@ -9377,7 +9787,7 @@ enum skl_power_gate { _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \ BXT_MIPI2_8X_BY3_DIVIDER_MASK) #define BXT_MIPI_8X_BY3_DIVIDER(port, val) \ - ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port)) + (((val) & 3) << BXT_MIPI_8X_BY3_SHIFT(port)) /* RX lower control divider to select actual RX clock output from 8x */ #define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16 #define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0 @@ -9390,7 +9800,7 @@ enum skl_power_gate { _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \ BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK) #define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \ - ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)) + (((val) & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)) #define RX_DIVIDER_BIT_1_2 0x3 #define RX_DIVIDER_BIT_3_4 0xC @@ -9448,6 +9858,14 @@ enum skl_power_gate { #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 #define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL) +/* ICL DSI MODE control */ +#define _ICL_DSI_IO_MODECTL_0 0x6B094 +#define _ICL_DSI_IO_MODECTL_1 0x6B894 +#define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ + _ICL_DSI_IO_MODECTL_0, \ + _ICL_DSI_IO_MODECTL_1) +#define COMBO_PHY_MODE_DSI (1 << 0) + #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) #define STAP_SELECT (1 << 0) @@ -9927,4 +10345,310 @@ enum skl_power_gate { _ICL_PHY_MISC_B) #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) +/* Icelake Display Stream Compression Registers */ +#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200 +#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 +#define 
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 +#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_VBR_ENABLE (1 << 19) +#define DSC_422_ENABLE (1 << 18) +#define DSC_COLOR_SPACE_CONVERSION (1 << 17) +#define DSC_BLOCK_PREDICTION (1 << 16) +#define DSC_LINE_BUF_DEPTH_SHIFT 12 +#define DSC_BPC_SHIFT 8 +#define DSC_VER_MIN_SHIFT 4 +#define DSC_VER_MAJ (0x1 << 0) + +#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204 +#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 +#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) +#define DSC_BPP(bpp) ((bpp) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208 +#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 +#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) +#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) +#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C +#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C +#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) +#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) +#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210 +#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 +#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) +#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) +#define 
DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214 +#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 +#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) +#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218 +#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 +#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) +#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) +#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C +#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C +#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) +#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) +#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220 +#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 +#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) +#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) +#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224 +#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 +#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + 
_ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) +#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) +#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228 +#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 +#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) +#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) +#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) +#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) +#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C +#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C +#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) + +#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260 +#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) + +#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264 +#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 +#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) + +#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268 +#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 +#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) 
_MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) + +#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C +#define DSCC_PICTURE_PARAMETER_SET_15 0x6BA6C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC +#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) + +#define DSCA_PICTURE_PARAMETER_SET_16 0x6B270 +#define DSCC_PICTURE_PARAMETER_SET_16 0x6BA70 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) +#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) + +/* Icelake Rate Control Buffer Threshold Registers */ +#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) +#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) +#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) +#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) +#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_PC) +#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_PC) +#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) + +#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) +#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) +#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) +#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) 
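The per-pipe DSC definitions above all follow one pattern: a pair of raw _PB/_PC offsets plus a wrapper that selects between them with _MMIO_PIPE((pipe) - PIPE_B, ...), while the DSC_* field macros shift a value into its bit position within a picture parameter set dword. A rough usage sketch, not part of this patch (the helper name and the choice of PPS word are hypothetical):

/* Hypothetical sketch: pack and write PPS2 for the first DSC engine of a pipe. */
static void example_icl_dsc_write_pps2(struct drm_i915_private *dev_priv,
				       enum pipe pipe, u32 pic_width, u32 pic_height)
{
	u32 pps2 = DSC_PIC_WIDTH(pic_width) | DSC_PIC_HEIGHT(pic_height);

	/*
	 * ICL_DSC0_PICTURE_PARAMETER_SET_2() resolves to the _PB or _PC
	 * register instance depending on whether the pipe is PIPE_B or PIPE_C.
	 */
	I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps2);
}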
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_PC) +#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_PC) +#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 8928894dd9c7..5c2c93cbab12 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -206,7 +206,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) /* Carefully retire all requests without writing to the rings */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED); + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -320,6 +321,7 @@ static void advance_ring(struct i915_request *request) * is just about to be. Either works, if we miss the last two * noops - they are safe to be replayed on a reset. */ + GEM_TRACE("marking %s as inactive\n", ring->timeline->name); tail = READ_ONCE(request->tail); list_del(&ring->active_link); } else { @@ -383,8 +385,8 @@ static void __retire_engine_request(struct intel_engine_cs *engine, * the subsequent request. */ if (engine->last_retired_context) - intel_context_unpin(engine->last_retired_context, engine); - engine->last_retired_context = rq->ctx; + intel_context_unpin(engine->last_retired_context); + engine->last_retired_context = rq->hw_context; } static void __retire_engine_upto(struct intel_engine_cs *engine, @@ -455,8 +457,8 @@ static void i915_request_retire(struct i915_request *request) i915_request_remove_from_client(request); /* Retirement decays the ban score as it is a sign of ctx progress */ - atomic_dec_if_positive(&request->ctx->ban_score); - intel_context_unpin(request->ctx, request->engine); + atomic_dec_if_positive(&request->gem_context->ban_score); + intel_context_unpin(request->hw_context); __retire_engine_upto(request->engine, request); @@ -502,7 +504,7 @@ static void move_to_timeline(struct i915_request *request, GEM_BUG_ON(request->timeline == &request->engine->timeline); lockdep_assert_held(&request->engine->timeline.lock); - spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING); + spin_lock(&request->timeline->lock); list_move_tail(&request->link, &timeline->requests); spin_unlock(&request->timeline->lock); } @@ -657,7 +659,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct drm_i915_private *i915 = engine->i915; struct i915_request *rq; - struct intel_ring *ring; + struct intel_context *ce; int ret; lockdep_assert_held(&i915->drm.struct_mutex); @@ -681,22 +683,21 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * GGTT space, so do this first before we reserve a seqno for * ourselves. 
*/ - ring = intel_context_pin(ctx, engine); - if (IS_ERR(ring)) - return ERR_CAST(ring); - GEM_BUG_ON(!ring); + ce = intel_context_pin(ctx, engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); ret = reserve_gt(i915); if (ret) goto err_unpin; - ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST); + ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST); if (ret) goto err_unreserve; /* Move our oldest request to the slab-cache (if not in use!) */ - rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link); - if (!list_is_last(&rq->ring_link, &ring->request_list) && + rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); + if (!list_is_last(&rq->ring_link, &ce->ring->request_list) && i915_request_completed(rq)) i915_request_retire(rq); @@ -735,7 +736,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) /* Ratelimit ourselves to prevent oom from malicious clients */ ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED | - I915_WAIT_INTERRUPTIBLE); + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); if (ret) goto err_unreserve; @@ -760,9 +762,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) INIT_LIST_HEAD(&rq->active_list); rq->i915 = i915; rq->engine = engine; - rq->ctx = ctx; - rq->ring = ring; - rq->timeline = ring->timeline; + rq->gem_context = ctx; + rq->hw_context = ce; + rq->ring = ce->ring; + rq->timeline = ce->ring->timeline; GEM_BUG_ON(rq->timeline == &engine->timeline); spin_lock_init(&rq->lock); @@ -814,14 +817,16 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) goto err_unwind; /* Keep a second pin for the dual retirement along engine and ring */ - __intel_context_pin(rq->ctx, engine); + __intel_context_pin(ce); + + rq->infix = rq->ring->emit; /* end of header; start of user payload */ /* Check that we didn't interrupt ourselves with a new request */ GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); return rq; err_unwind: - rq->ring->emit = rq->head; + ce->ring->emit = rq->head; /* Make sure we didn't add ourselves to external state before freeing */ GEM_BUG_ON(!list_empty(&rq->active_list)); @@ -832,7 +837,7 @@ err_unwind: err_unreserve: unreserve_gt(i915); err_unpin: - intel_context_unpin(ctx, engine); + intel_context_unpin(ce); return ERR_PTR(ret); } @@ -1010,19 +1015,39 @@ i915_request_await_object(struct i915_request *to, return ret; } +void i915_request_skip(struct i915_request *rq, int error) +{ + void *vaddr = rq->ring->vaddr; + u32 head; + + GEM_BUG_ON(!IS_ERR_VALUE((long)error)); + dma_fence_set_error(&rq->fence, error); + + /* + * As this request likely depends on state from the lost + * context, clear out all the user operations leaving the + * breadcrumb at the end (so we get the fence notifications). + */ + head = rq->infix; + if (rq->postfix < head) { + memset(vaddr + head, 0, rq->ring->size - head); + head = 0; + } + memset(vaddr + head, 0, rq->postfix - head); +} + /* * NB: This function is not allowed to fail. Doing so would mean the the * request is not being tracked for completion but the work itself is * going to happen on the hardware. This would be a Bad Thing(tm). 
*/ -void __i915_request_add(struct i915_request *request, bool flush_caches) +void i915_request_add(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - struct intel_ring *ring = request->ring; struct i915_timeline *timeline = request->timeline; + struct intel_ring *ring = request->ring; struct i915_request *prev; u32 *cs; - int err; GEM_TRACE("%s fence %llx:%d\n", engine->name, request->fence.context, request->fence.seqno); @@ -1043,20 +1068,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) * know that it is time to use that space up. */ request->reserved_space = 0; - - /* - * Emit any outstanding flushes - execbuf can fail to emit the flush - * after having emitted the batchbuffer command. Hence we need to fix - * things up similar to emitting the lazy request. The difference here - * is that the flush _must_ happen before the next request, no matter - * what. - */ - if (flush_caches) { - err = engine->emit_flush(request, EMIT_FLUSH); - - /* Not allowed to fail! */ - WARN(err, "engine->emit_flush() failed: %d!\n", err); - } + engine->emit_flush(request, EMIT_FLUSH); /* * Record the position of the start of the breadcrumb so that @@ -1095,8 +1107,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) i915_gem_active_set(&timeline->last_request, request); list_add_tail(&request->ring_link, &ring->request_list); - if (list_is_first(&request->ring_link, &ring->request_list)) + if (list_is_first(&request->ring_link, &ring->request_list)) { + GEM_TRACE("marking %s as active\n", ring->timeline->name); list_add(&ring->active_link, &request->i915->gt.active_rings); + } request->emitted_jiffies = jiffies; /* @@ -1113,7 +1127,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches) local_bh_disable(); rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->schedule) - engine->schedule(request, &request->ctx->sched); + engine->schedule(request, &request->gem_context->sched); rcu_read_unlock(); i915_sw_fence_commit(&request->submit); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ @@ -1205,7 +1219,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * takes to sleep on a request, on the order of a microsecond. */ - irq = atomic_read(&engine->irq_count); + irq = READ_ONCE(engine->breadcrumbs.irq_count); timeout_us += local_clock_us(&cpu); do { if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) @@ -1217,7 +1231,7 @@ static bool __i915_spin_request(const struct i915_request *rq, * assume we won't see one in the near future but require * the engine->seqno_barrier() to fixup coherency. */ - if (atomic_read(&engine->irq_count) != irq) + if (READ_ONCE(engine->breadcrumbs.irq_count) != irq) break; if (signal_pending_state(state, current)) @@ -1294,7 +1308,7 @@ long i915_request_wait(struct i915_request *rq, if (flags & I915_WAIT_LOCKED) add_wait_queue(errq, &reset); - intel_wait_init(&wait, rq); + intel_wait_init(&wait); restart: do { diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index eddbd4245cb3..e1c9365dfefb 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -93,8 +93,9 @@ struct i915_request { * i915_request_free() will then decrement the refcount on the * context. 
*/ - struct i915_gem_context *ctx; + struct i915_gem_context *gem_context; struct intel_engine_cs *engine; + struct intel_context *hw_context; struct intel_ring *ring; struct i915_timeline *timeline; struct intel_signal_node signaling; @@ -133,6 +134,9 @@ struct i915_request { /** Position in the ring of the start of the request */ u32 head; + /** Position in the ring of the start of the user packets */ + u32 infix; + /** * Position in the ring of the start of the postfix. * This is required to calculate the maximum available ring space @@ -249,13 +253,13 @@ int i915_request_await_object(struct i915_request *to, int i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence); -void __i915_request_add(struct i915_request *rq, bool flush_caches); -#define i915_request_add(rq) \ - __i915_request_add(rq, false) +void i915_request_add(struct i915_request *rq); void __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); +void i915_request_skip(struct i915_request *request, int error); + void __i915_request_unsubmit(struct i915_request *request); void i915_request_unsubmit(struct i915_request *request); @@ -266,6 +270,7 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_INTERRUPTIBLE BIT(0) #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ +#define I915_WAIT_FOR_IDLE_BOOST BIT(3) static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine); @@ -375,6 +380,7 @@ static inline void init_request_active(struct i915_gem_active *active, i915_gem_retire_fn retire) { + RCU_INIT_POINTER(active->request, NULL); INIT_LIST_HEAD(&active->link); active->retire = retire ?: i915_gem_retire_noop; } diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index 9766e806dce6..a73472dd12fd 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -99,6 +99,6 @@ __printf(2, 3) bool __igt_timeout(unsigned long timeout, const char *fmt, ...); #define igt_timeout(t, fmt, ...) 
\ - __igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) + __igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #endif /* !__I915_SELFTEST_H__ */ diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index dc2a4632faa7..a2c2c3ab5fb0 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -37,6 +37,8 @@ struct i915_timeline { u32 seqno; spinlock_t lock; +#define TIMELINE_CLIENT 0 /* default subclass */ +#define TIMELINE_ENGINE 1 /** * List of breadcrumbs associated with GPU requests currently diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 8cc3a256f29d..b50c6b829715 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -591,21 +591,26 @@ TRACE_EVENT(i915_gem_ring_sync_to, TP_STRUCT__entry( __field(u32, dev) - __field(u32, sync_from) - __field(u32, sync_to) + __field(u32, from_class) + __field(u32, from_instance) + __field(u32, to_class) + __field(u32, to_instance) __field(u32, seqno) ), TP_fast_assign( __entry->dev = from->i915->drm.primary->index; - __entry->sync_from = from->engine->id; - __entry->sync_to = to->engine->id; + __entry->from_class = from->engine->uabi_class; + __entry->from_instance = from->engine->instance; + __entry->to_class = to->engine->uabi_class; + __entry->to_instance = to->engine->instance; __entry->seqno = from->global_seqno; ), - TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", + TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u", __entry->dev, - __entry->sync_from, __entry->sync_to, + __entry->from_class, __entry->from_instance, + __entry->to_class, __entry->to_instance, __entry->seqno) ); @@ -616,24 +621,27 @@ TRACE_EVENT(i915_request_queue, TP_STRUCT__entry( __field(u32, dev) __field(u32, hw_id) - __field(u32, ring) - __field(u32, ctx) + __field(u64, ctx) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(u32, flags) ), TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; - __entry->ring = rq->engine->id; + __entry->hw_id = rq->gem_context->hw_id; + __entry->class = rq->engine->uabi_class; + __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->flags = flags; ), - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x", - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, - __entry->seqno, __entry->flags) + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x", + __entry->dev, __entry->class, __entry->instance, + __entry->hw_id, __entry->ctx, __entry->seqno, + __entry->flags) ); DECLARE_EVENT_CLASS(i915_request, @@ -643,24 +651,27 @@ DECLARE_EVENT_CLASS(i915_request, TP_STRUCT__entry( __field(u32, dev) __field(u32, hw_id) - __field(u32, ring) - __field(u32, ctx) + __field(u64, ctx) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(u32, global) ), TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; - __entry->ring = rq->engine->id; + __entry->hw_id = rq->gem_context->hw_id; + __entry->class = rq->engine->uabi_class; + __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->global = rq->global_seqno; ), - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u", - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, - __entry->seqno, __entry->global) + TP_printk("dev=%u, 
engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u", + __entry->dev, __entry->class, __entry->instance, + __entry->hw_id, __entry->ctx, __entry->seqno, + __entry->global) ); DEFINE_EVENT(i915_request, i915_request_add, @@ -686,8 +697,9 @@ TRACE_EVENT(i915_request_in, TP_STRUCT__entry( __field(u32, dev) __field(u32, hw_id) - __field(u32, ring) - __field(u32, ctx) + __field(u64, ctx) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(u32, global_seqno) __field(u32, port) @@ -696,8 +708,9 @@ TRACE_EVENT(i915_request_in, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; - __entry->ring = rq->engine->id; + __entry->hw_id = rq->gem_context->hw_id; + __entry->class = rq->engine->uabi_class; + __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->global_seqno = rq->global_seqno; @@ -705,10 +718,10 @@ TRACE_EVENT(i915_request_in, __entry->port = port; ), - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, prio=%u, global=%u, port=%u", - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, - __entry->seqno, __entry->prio, __entry->global_seqno, - __entry->port) + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u", + __entry->dev, __entry->class, __entry->instance, + __entry->hw_id, __entry->ctx, __entry->seqno, + __entry->prio, __entry->global_seqno, __entry->port) ); TRACE_EVENT(i915_request_out, @@ -718,8 +731,9 @@ TRACE_EVENT(i915_request_out, TP_STRUCT__entry( __field(u32, dev) __field(u32, hw_id) - __field(u32, ring) - __field(u32, ctx) + __field(u64, ctx) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(u32, global_seqno) __field(u32, completed) @@ -727,17 +741,18 @@ TRACE_EVENT(i915_request_out, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; - __entry->ring = rq->engine->id; + __entry->hw_id = rq->gem_context->hw_id; + __entry->class = rq->engine->uabi_class; + __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->global_seqno = rq->global_seqno; __entry->completed = i915_request_completed(rq); ), - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, completed?=%u", - __entry->dev, __entry->hw_id, __entry->ring, - __entry->ctx, __entry->seqno, + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u", + __entry->dev, __entry->class, __entry->instance, + __entry->hw_id, __entry->ctx, __entry->seqno, __entry->global_seqno, __entry->completed) ); @@ -771,21 +786,23 @@ TRACE_EVENT(intel_engine_notify, TP_STRUCT__entry( __field(u32, dev) - __field(u32, ring) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(bool, waiters) ), TP_fast_assign( __entry->dev = engine->i915->drm.primary->index; - __entry->ring = engine->id; + __entry->class = engine->uabi_class; + __entry->instance = engine->instance; __entry->seqno = intel_engine_get_seqno(engine); __entry->waiters = waiters; ), - TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u", - __entry->dev, __entry->ring, __entry->seqno, - __entry->waiters) + TP_printk("dev=%u, engine=%u:%u, seqno=%u, waiters=%u", + __entry->dev, __entry->class, __entry->instance, + __entry->seqno, __entry->waiters) ); DEFINE_EVENT(i915_request, i915_request_retire, @@ -800,8 +817,9 @@ TRACE_EVENT(i915_request_wait_begin, TP_STRUCT__entry( __field(u32, dev) __field(u32, hw_id) - 
__field(u32, ring) - __field(u32, ctx) + __field(u64, ctx) + __field(u16, class) + __field(u16, instance) __field(u32, seqno) __field(u32, global) __field(unsigned int, flags) @@ -815,18 +833,20 @@ TRACE_EVENT(i915_request_wait_begin, */ TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->ctx->hw_id; - __entry->ring = rq->engine->id; + __entry->hw_id = rq->gem_context->hw_id; + __entry->class = rq->engine->uabi_class; + __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->global = rq->global_seqno; __entry->flags = flags; ), - TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x", - __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx, - __entry->seqno, __entry->global, - !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x", + __entry->dev, __entry->class, __entry->instance, + __entry->hw_id, __entry->ctx, __entry->seqno, + __entry->global, !!(__entry->flags & I915_WAIT_LOCKED), + __entry->flags) ); DEFINE_EVENT(i915_request, i915_request_wait_end, @@ -936,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context, __entry->dev = ctx->i915->drm.primary->index; __entry->ctx = ctx; __entry->hw_id = ctx->hw_id; - __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; + __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL; ), TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u", @@ -953,36 +973,6 @@ DEFINE_EVENT(i915_context, i915_context_free, TP_ARGS(ctx) ); -/** - * DOC: switch_mm tracepoint - * - * This tracepoint allows tracking of the mm switch, which is an important point - * in the lifetime of the vm in the legacy submission path. This tracepoint is - * called only if full ppgtt is enabled. - */ -TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to), - - TP_ARGS(engine, to), - - TP_STRUCT__entry( - __field(u32, ring) - __field(struct i915_gem_context *, to) - __field(struct i915_address_space *, vm) - __field(u32, dev) - ), - - TP_fast_assign( - __entry->ring = engine->id; - __entry->to = to; - __entry->vm = to->ppgtt? 
&to->ppgtt->base : NULL; - __entry->dev = engine->i915->drm.primary->index; - ), - - TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", - __entry->dev, __entry->ring, __entry->to, __entry->vm) -); - #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 5fe9f3f39467..869cf4a3b6de 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, node->start + node->size, node->size / 1024); - ggtt->base.reserved -= node->size; + ggtt->vm.reserved -= node->size; drm_mm_remove_node(node); } @@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", start, end, size / 1024); - ret = i915_gem_gtt_reserve(&ggtt->base, node, + ret = i915_gem_gtt_reserve(&ggtt->vm, node, size, start, I915_COLOR_UNEVICTABLE, 0); if (!ret) - ggtt->base.reserved += size; + ggtt->vm.reserved += size; return ret; } @@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, int intel_vgt_balloon(struct drm_i915_private *dev_priv) { struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned long ggtt_end = ggtt->base.total; + unsigned long ggtt_end = ggtt->vm.total; unsigned long mappable_base, mappable_size, mappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end; diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index bb8338450dc1..551acc390046 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -36,6 +36,12 @@ intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION; } +static inline bool +intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv) +{ + return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT; +} + int intel_vgt_balloon(struct drm_i915_private *dev_priv); void intel_vgt_deballoon(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 0531c01c3604..11d834f94220 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -21,7 +21,7 @@ * IN THE SOFTWARE. 
* */ - + #include "i915_vma.h" #include "i915_drv.h" @@ -30,18 +30,53 @@ #include <drm/drm_gem.h> +#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) + +#include <linux/stackdepot.h> + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ + unsigned long entries[12]; + struct stack_trace trace = { + .entries = entries, + .max_entries = ARRAY_SIZE(entries), + }; + char buf[512]; + + if (!vma->node.stack) { + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n", + vma->node.start, vma->node.size, reason); + return; + } + + depot_fetch_stack(vma->node.stack, &trace); + snprint_stack_trace(buf, sizeof(buf), &trace, 0); + DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n", + vma->node.start, vma->node.size, reason, buf); +} + +#else + +static void vma_print_allocator(struct i915_vma *vma, const char *reason) +{ +} + +#endif + +struct i915_vma_active { + struct i915_gem_active base; + struct i915_vma *vma; + struct rb_node node; + u64 timeline; +}; + static void -i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) +__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq) { - const unsigned int idx = rq->engine->id; - struct i915_vma *vma = - container_of(active, struct i915_vma, last_read[idx]); struct drm_i915_gem_object *obj = vma->obj; - GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx)); - - i915_vma_clear_active(vma, idx); - if (i915_vma_is_active(vma)) + GEM_BUG_ON(!i915_vma_is_active(vma)); + if (--vma->active_count) return; GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); @@ -75,6 +110,21 @@ i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) } } +static void +i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq) +{ + struct i915_vma_active *active = + container_of(base, typeof(*active), base); + + __i915_vma_retire(active->vma, rq); +} + +static void +i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq) +{ + __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq); +} + static struct i915_vma * vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, @@ -82,19 +132,20 @@ vma_create(struct drm_i915_gem_object *obj, { struct i915_vma *vma; struct rb_node *rb, **p; - int i; /* The aliasing_ppgtt should never be used directly! 
*/ - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); + GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); if (vma == NULL) return ERR_PTR(-ENOMEM); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - init_request_active(&vma->last_read[i], i915_vma_retire); + vma->active = RB_ROOT; + + init_request_active(&vma->last_active, i915_vma_last_retire); init_request_active(&vma->last_fence, NULL); vma->vm = vm; + vma->ops = &vm->vma_ops; vma->obj = obj; vma->resv = obj->resv; vma->size = obj->base.size; @@ -280,7 +331,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, GEM_BUG_ON(!vma->pages); trace_i915_vma_bind(vma, bind_flags); - ret = vma->vm->bind_vma(vma, cache_level, bind_flags); + ret = vma->ops->bind_vma(vma, cache_level, bind_flags); if (ret) return ret; @@ -345,7 +396,7 @@ void i915_vma_flush_writes(struct i915_vma *vma) void i915_vma_unpin_iomap(struct i915_vma *vma) { - lockdep_assert_held(&vma->obj->base.dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); GEM_BUG_ON(vma->iomap == NULL); @@ -365,6 +416,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma) return; obj = vma->obj; + GEM_BUG_ON(!obj); i915_vma_unpin(vma); i915_vma_close(vma); @@ -459,6 +511,18 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) return true; } +static void assert_bind_count(const struct drm_i915_gem_object *obj) +{ + /* + * Combine the assertion that the object is bound and that we have + * pinned its pages. But we should never have bound the object + * more than we have pinned its pages. (For complete accuracy, we + * assume that no else is pinning the pages, but as a rough assertion + * that we will not run into problems later, this will do!) + */ + GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); +} + /** * i915_vma_insert - finds a slot for the vma in its address space * @vma: the vma @@ -477,7 +541,7 @@ static int i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { struct drm_i915_private *dev_priv = vma->vm->i915; - struct drm_i915_gem_object *obj = vma->obj; + unsigned int cache_level; u64 start, end; int ret; @@ -512,20 +576,25 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) * attempt to find space. */ if (size > end) { - DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n", - size, obj->base.size, - flags & PIN_MAPPABLE ? "mappable" : "total", + DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n", + size, flags & PIN_MAPPABLE ? 
"mappable" : "total", end); return -ENOSPC; } - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ret; + if (vma->obj) { + ret = i915_gem_object_pin_pages(vma->obj); + if (ret) + return ret; + + cache_level = vma->obj->cache_level; + } else { + cache_level = 0; + } GEM_BUG_ON(vma->pages); - ret = vma->vm->set_pages(vma); + ret = vma->ops->set_pages(vma); if (ret) goto err_unpin; @@ -538,7 +607,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } ret = i915_gem_gtt_reserve(vma->vm, &vma->node, - size, offset, obj->cache_level, + size, offset, cache_level, flags); if (ret) goto err_clear; @@ -577,7 +646,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } ret = i915_gem_gtt_insert(vma->vm, &vma->node, - size, alignment, obj->cache_level, + size, alignment, cache_level, start, end, flags); if (ret) goto err_clear; @@ -586,23 +655,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(vma->node.start + vma->node.size > end); } GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level)); + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level)); list_move_tail(&vma->vm_link, &vma->vm->inactive_list); - spin_lock(&dev_priv->mm.obj_lock); - list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); - obj->bind_count++; - spin_unlock(&dev_priv->mm.obj_lock); + if (vma->obj) { + struct drm_i915_gem_object *obj = vma->obj; - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + spin_lock(&dev_priv->mm.obj_lock); + list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); + obj->bind_count++; + spin_unlock(&dev_priv->mm.obj_lock); + + assert_bind_count(obj); + } return 0; err_clear: - vma->vm->clear_pages(vma); + vma->ops->clear_pages(vma); err_unpin: - i915_gem_object_unpin_pages(obj); + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); return ret; } @@ -610,30 +684,35 @@ static void i915_vma_remove(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - struct drm_i915_gem_object *obj = vma->obj; GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); - vma->vm->clear_pages(vma); + vma->ops->clear_pages(vma); drm_mm_remove_node(&vma->node); list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - /* Since the unbound list is global, only move to that list if + /* + * Since the unbound list is global, only move to that list if * no more VMAs exist. */ - spin_lock(&i915->mm.obj_lock); - if (--obj->bind_count == 0) - list_move_tail(&obj->mm.link, &i915->mm.unbound_list); - spin_unlock(&i915->mm.obj_lock); - - /* And finally now the object is completely decoupled from this vma, - * we can drop its hold on the backing storage and allow it to be - * reaped by the shrinker. - */ - i915_gem_object_unpin_pages(obj); - GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); + if (vma->obj) { + struct drm_i915_gem_object *obj = vma->obj; + + spin_lock(&i915->mm.obj_lock); + if (--obj->bind_count == 0) + list_move_tail(&obj->mm.link, &i915->mm.unbound_list); + spin_unlock(&i915->mm.obj_lock); + + /* + * And finally now the object is completely decoupled from this + * vma, we can drop its hold on the backing storage and allow + * it to be reaped by the shrinker. 
+ */ + i915_gem_object_unpin_pages(obj); + assert_bind_count(obj); + } } int __i915_vma_do_pin(struct i915_vma *vma, @@ -658,7 +737,7 @@ int __i915_vma_do_pin(struct i915_vma *vma, } GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - ret = i915_vma_bind(vma, vma->obj->cache_level, flags); + ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags); if (ret) goto err_remove; @@ -715,23 +794,28 @@ void i915_vma_reopen(struct i915_vma *vma) static void __i915_vma_destroy(struct i915_vma *vma) { - int i; + struct drm_i915_private *i915 = vma->vm->i915; + struct i915_vma_active *iter, *n; GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->fence); - for (i = 0; i < ARRAY_SIZE(vma->last_read); i++) - GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i])); GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence)); list_del(&vma->obj_link); list_del(&vma->vm_link); - rb_erase(&vma->obj_node, &vma->obj->vma_tree); + if (vma->obj) + rb_erase(&vma->obj_node, &vma->obj->vma_tree); if (!i915_vma_is_ggtt(vma)) i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); + rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) { + GEM_BUG_ON(i915_gem_active_isset(&iter->base)); + kfree(iter); + } + + kmem_cache_free(i915->vmas, vma); } void i915_vma_destroy(struct i915_vma *vma) @@ -795,23 +879,173 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) list_del(&vma->obj->userfault_link); } -int i915_vma_unbind(struct i915_vma *vma) +static void export_fence(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) +{ + struct reservation_object *resv = vma->resv; + + /* + * Ignore errors from failing to allocate the new fence, we can't + * handle an error right now. Worst case should be missed + * synchronisation leading to rendering corruption. + */ + reservation_object_lock(resv, NULL); + if (flags & EXEC_OBJECT_WRITE) + reservation_object_add_excl_fence(resv, &rq->fence); + else if (reservation_object_reserve_shared(resv) == 0) + reservation_object_add_shared_fence(resv, &rq->fence); + reservation_object_unlock(resv); +} + +static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx) +{ + struct i915_vma_active *active; + struct rb_node **p, *parent; + struct i915_request *old; + + /* + * We track the most recently used timeline to skip a rbtree search + * for the common case, under typical loads we never need the rbtree + * at all. We can reuse the last_active slot if it is empty, that is + * after the previous activity has been retired, or if the active + * matches the current timeline. + * + * Note that we allow the timeline to be active simultaneously in + * the rbtree and the last_active cache. We do this to avoid having + * to search and replace the rbtree element for a new timeline, with + * the cost being that we must be aware that the vma may be retired + * twice for the same timeline (as the older rbtree element will be + * retired before the new request added to last_active). 
+ */ + old = i915_gem_active_raw(&vma->last_active, + &vma->vm->i915->drm.struct_mutex); + if (!old || old->fence.context == idx) + goto out; + + /* Move the currently active fence into the rbtree */ + idx = old->fence.context; + + parent = NULL; + p = &vma->active.rb_node; + while (*p) { + parent = *p; + + active = rb_entry(parent, struct i915_vma_active, node); + if (active->timeline == idx) + goto replace; + + if (active->timeline < idx) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + + active = kmalloc(sizeof(*active), GFP_KERNEL); + + /* kmalloc may retire the vma->last_active request (thanks shrinker)! */ + if (unlikely(!i915_gem_active_raw(&vma->last_active, + &vma->vm->i915->drm.struct_mutex))) { + kfree(active); + goto out; + } + + if (unlikely(!active)) + return ERR_PTR(-ENOMEM); + + init_request_active(&active->base, i915_vma_retire); + active->vma = vma; + active->timeline = idx; + + rb_link_node(&active->node, parent, p); + rb_insert_color(&active->node, &vma->active); + +replace: + /* + * Overwrite the previous active slot in the rbtree with last_active, + * leaving last_active zeroed. If the previous slot is still active, + * we must be careful as we now only expect to receive one retire + * callback not two, and so much undo the active counting for the + * overwritten slot. + */ + if (i915_gem_active_isset(&active->base)) { + /* Retire ourselves from the old rq->active_list */ + __list_del_entry(&active->base.link); + vma->active_count--; + GEM_BUG_ON(!vma->active_count); + } + GEM_BUG_ON(list_empty(&vma->last_active.link)); + list_replace_init(&vma->last_active.link, &active->base.link); + active->base.request = fetch_and_zero(&vma->last_active.request); + +out: + return &vma->last_active; +} + +int i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; - unsigned long active; + struct i915_gem_active *active; + + lockdep_assert_held(&rq->i915->drm.struct_mutex); + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + + active = active_instance(vma, rq->fence.context); + if (IS_ERR(active)) + return PTR_ERR(active); + + /* + * Add a reference if we're newly entering the active list. + * The order in which we add operations to the retirement queue is + * vital here: mark_active adds to the start of the callback list, + * such that subsequent callbacks are called first. Therefore we + * add the active reference first and queue for it to be dropped + * *last*. + */ + if (!i915_gem_active_isset(active) && !vma->active_count++) { + list_move_tail(&vma->vm_link, &vma->vm->active_list); + obj->active_count++; + } + i915_gem_active_set(active, rq); + GEM_BUG_ON(!i915_vma_is_active(vma)); + GEM_BUG_ON(!obj->active_count); + + obj->write_domain = 0; + if (flags & EXEC_OBJECT_WRITE) { + obj->write_domain = I915_GEM_DOMAIN_RENDER; + + if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) + i915_gem_active_set(&obj->frontbuffer_write, rq); + + obj->read_domains = 0; + } + obj->read_domains |= I915_GEM_GPU_DOMAINS; + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + i915_gem_active_set(&vma->last_fence, rq); + + export_fence(vma, rq, flags); + return 0; +} + +int i915_vma_unbind(struct i915_vma *vma) +{ int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); + lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - /* First wait upon any activity as retiring the request may + /* + * First wait upon any activity as retiring the request may * have side-effects such as unpinning or even unbinding this vma. 
*/ might_sleep(); - active = i915_vma_get_active(vma); - if (active) { - int idx; + if (i915_vma_is_active(vma)) { + struct i915_vma_active *active, *n; - /* When a closed VMA is retired, it is unbound - eek. + /* + * When a closed VMA is retired, it is unbound - eek. * In order to prevent it from being recursively closed, * take a pin on the vma so that the second unbind is * aborted. @@ -825,33 +1059,36 @@ int i915_vma_unbind(struct i915_vma *vma) */ __i915_vma_pin(vma); - for_each_active(active, idx) { - ret = i915_gem_active_retire(&vma->last_read[idx], - &vma->vm->i915->drm.struct_mutex); - if (ret) - break; - } + ret = i915_gem_active_retire(&vma->last_active, + &vma->vm->i915->drm.struct_mutex); + if (ret) + goto unpin; - if (!ret) { - ret = i915_gem_active_retire(&vma->last_fence, + rbtree_postorder_for_each_entry_safe(active, n, + &vma->active, node) { + ret = i915_gem_active_retire(&active->base, &vma->vm->i915->drm.struct_mutex); + if (ret) + goto unpin; } + ret = i915_gem_active_retire(&vma->last_fence, + &vma->vm->i915->drm.struct_mutex); +unpin: __i915_vma_unpin(vma); if (ret) return ret; } GEM_BUG_ON(i915_vma_is_active(vma)); - if (i915_vma_is_pinned(vma)) + if (i915_vma_is_pinned(vma)) { + vma_print_allocator(vma, "is pinned"); return -EBUSY; + } if (!drm_mm_node_allocated(&vma->node)) return 0; - GEM_BUG_ON(obj->bind_count == 0); - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - if (i915_vma_is_map_and_fenceable(vma)) { /* * Check that we have flushed all writes through the GGTT @@ -878,7 +1115,7 @@ int i915_vma_unbind(struct i915_vma *vma) if (likely(!vma->vm->closed)) { trace_i915_vma_unbind(vma); - vma->vm->unbind_vma(vma); + vma->ops->unbind_vma(vma); } vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index fc4294cfaa91..f06d66377107 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -26,6 +26,7 @@ #define __I915_VMA_H__ #include <linux/io-mapping.h> +#include <linux/rbtree.h> #include <drm/drm_mm.h> @@ -49,10 +50,12 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + const struct i915_vma_ops *ops; struct drm_i915_fence_reg *fence; struct reservation_object *resv; /** Alias of obj->resv */ struct sg_table *pages; void __iomem *iomap; + void *private; /* owned by creator */ u64 size; u64 display_alignment; struct i915_page_sizes page_sizes; @@ -92,8 +95,9 @@ struct i915_vma { #define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) #define I915_VMA_GGTT_WRITE BIT(12) - unsigned int active; - struct i915_gem_active last_read[I915_NUM_ENGINES]; + unsigned int active_count; + struct rb_root active; + struct i915_gem_active last_active; struct i915_gem_active last_fence; /** @@ -136,6 +140,15 @@ i915_vma_instance(struct drm_i915_gem_object *obj, void i915_vma_unpin_and_release(struct i915_vma **p_vma); +static inline bool i915_vma_is_active(struct i915_vma *vma) +{ + return vma->active_count; +} + +int __must_check i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq, + unsigned int flags); + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { return vma->flags & I915_VMA_GGTT; @@ -185,34 +198,6 @@ static inline bool i915_vma_has_userfault(const struct i915_vma *vma) return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); } -static inline unsigned int i915_vma_get_active(const struct i915_vma *vma) -{ - return vma->active; -} - -static inline bool 
i915_vma_is_active(const struct i915_vma *vma) -{ - return i915_vma_get_active(vma); -} - -static inline void i915_vma_set_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active |= BIT(engine); -} - -static inline void i915_vma_clear_active(struct i915_vma *vma, - unsigned int engine) -{ - vma->active &= ~BIT(engine); -} - -static inline bool i915_vma_has_active_engine(const struct i915_vma *vma, - unsigned int engine) -{ - return vma->active & BIT(engine); -} - static inline u32 i915_ggtt_offset(const struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); @@ -339,6 +324,12 @@ static inline void i915_vma_unpin(struct i915_vma *vma) __i915_vma_unpin(vma); } +static inline bool i915_vma_is_bound(const struct i915_vma *vma, + unsigned int where) +{ + return vma->flags & where; +} + /** * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture * @vma: VMA to iomap @@ -407,7 +398,7 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma) static inline void i915_vma_unpin_fence(struct i915_vma *vma) { - lockdep_assert_held(&vma->obj->base.dev->struct_mutex); + /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */ if (vma->fence) __i915_vma_unpin_fence(vma); } diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c new file mode 100644 index 000000000000..13830e43a4d1 --- /dev/null +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -0,0 +1,127 @@ +/* + * Copyright © 2018 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * Madhav Chauhan <madhav.chauhan@intel.com> + * Jani Nikula <jani.nikula@intel.com> + */ + +#include "intel_dsi.h" + +static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + u32 afe_clk_khz; /* 8X Clock */ + u32 esc_clk_div_m; + + afe_clk_khz = DIV_ROUND_CLOSEST(intel_dsi->pclk * bpp, + intel_dsi->lane_count); + + esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK); + + for_each_dsi_port(port, intel_dsi->ports) { + I915_WRITE(ICL_DSI_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DSI_ESC_CLK_DIV(port)); + } + + for_each_dsi_port(port, intel_dsi->ports) { + I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port), + esc_clk_div_m & ICL_ESC_CLK_DIV_MASK); + POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port)); + } +} + +static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); + tmp |= COMBO_PHY_MODE_DSI; + I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); + } + + for_each_dsi_port(port, intel_dsi->ports) { + intel_display_power_get(dev_priv, port == PORT_A ? + POWER_DOMAIN_PORT_DDI_A_IO : + POWER_DOMAIN_PORT_DDI_B_IO); + } +} + +static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + enum port port; + u32 tmp; + u32 lane_mask; + + switch (intel_dsi->lane_count) { + case 1: + lane_mask = PWR_DOWN_LN_3_1_0; + break; + case 2: + lane_mask = PWR_DOWN_LN_3_1; + break; + case 3: + lane_mask = PWR_DOWN_LN_3; + break; + case 4: + default: + lane_mask = PWR_UP_ALL_LANES; + break; + } + + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_PORT_CL_DW10(port)); + tmp &= ~PWR_DOWN_LN_MASK; + I915_WRITE(ICL_PORT_CL_DW10(port), tmp | lane_mask); + } +} + +static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) +{ + /* step 4a: power up all lanes of the DDI used by DSI */ + gen11_dsi_power_up_lanes(encoder); +} + +static void __attribute__((unused)) +gen11_dsi_pre_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) +{ + /* step2: enable IO power */ + gen11_dsi_enable_io_power(encoder); + + /* step3: enable DSI PLL */ + gen11_dsi_program_esc_clk_div(encoder); + + /* step4: enable DSI port and DPHY */ + gen11_dsi_enable_port_and_phy(encoder); +} diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index d1abf4bb7c81..6ba478e57b9b 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -12,10 +12,6 @@ #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... 
*/ #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ -static struct intel_dsm_priv { - acpi_handle dhandle; -} intel_dsm_priv; - static const guid_t intel_dsm_guid = GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f, 0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c); @@ -72,12 +68,12 @@ static char *intel_dsm_mux_type(u8 type) } } -static void intel_dsm_platform_mux_info(void) +static void intel_dsm_platform_mux_info(acpi_handle dhandle) { int i; union acpi_object *pkg, *connector_count; - pkg = acpi_evaluate_dsm_typed(intel_dsm_priv.dhandle, &intel_dsm_guid, + pkg = acpi_evaluate_dsm_typed(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, INTEL_DSM_FN_PLATFORM_MUX_INFO, NULL, ACPI_TYPE_PACKAGE); if (!pkg) { @@ -107,41 +103,40 @@ static void intel_dsm_platform_mux_info(void) ACPI_FREE(pkg); } -static bool intel_dsm_pci_probe(struct pci_dev *pdev) +static acpi_handle intel_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) - return false; + return NULL; if (!acpi_check_dsm(dhandle, &intel_dsm_guid, INTEL_DSM_REVISION_ID, 1 << INTEL_DSM_FN_PLATFORM_MUX_INFO)) { DRM_DEBUG_KMS("no _DSM method for intel device\n"); - return false; + return NULL; } - intel_dsm_priv.dhandle = dhandle; - intel_dsm_platform_mux_info(); + intel_dsm_platform_mux_info(dhandle); - return true; + return dhandle; } static bool intel_dsm_detect(void) { + acpi_handle dhandle = NULL; char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; - bool has_dsm = false; int vga_count = 0; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; - has_dsm |= intel_dsm_pci_probe(pdev); + dhandle = intel_dsm_pci_probe(pdev) ?: dhandle; } - if (vga_count == 2 && has_dsm) { - acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + if (vga_count == 2 && dhandle) { + acpi_get_name(dhandle, ACPI_FULL_PATHNAME, &buffer); DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n", acpi_method_name); return true; diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 40285d1b91b7..b04952bacf77 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -59,7 +59,8 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector, else if (property == dev_priv->broadcast_rgb_property) *val = intel_conn_state->broadcast_rgb; else { - DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name); + DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } @@ -95,7 +96,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector, return 0; } - DRM_DEBUG_ATOMIC("Unknown property %s\n", property->name); + DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } @@ -124,6 +126,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn, if (new_conn_state->force_audio != old_conn_state->force_audio || new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb || new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio || + new_conn_state->base.content_type != old_conn_state->base.content_type || new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode) crtc_state->mode_changed = true; diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index 
6d068786eb41..dcba645cabb8 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -120,12 +120,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ &crtc_state->base.adjusted_mode; int ret; - /* - * Both crtc and plane->crtc could be NULL if we're updating a - * property while the plane is disabled. We don't actually have - * anything driver-specific we need to test in that case, so - * just return success. - */ if (!intel_state->base.crtc && !old_plane_state->base.crtc) return 0; @@ -209,12 +203,6 @@ static int intel_plane_atomic_check(struct drm_plane *plane, const struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; - /* - * Both crtc and plane->crtc could be NULL if we're updating a - * property while the plane is disabled. We don't actually have - * anything driver-specific we need to test in that case, so - * just return success. - */ if (!crtc) return 0; @@ -277,7 +265,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane, struct drm_property *property, uint64_t *val) { - DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); + DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } @@ -299,6 +288,7 @@ intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t val) { - DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); + DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n", + property->base.id, property->name); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 3ea566f99450..b725835b47ef 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -59,6 +59,7 @@ */ /* DP N/M table */ +#define LC_810M 810000 #define LC_540M 540000 #define LC_270M 270000 #define LC_162M 162000 @@ -99,6 +100,15 @@ static const struct dp_aud_n_m dp_aud_n_m[] = { { 128000, LC_540M, 4096, 33750 }, { 176400, LC_540M, 3136, 18750 }, { 192000, LC_540M, 2048, 11250 }, + { 32000, LC_810M, 1024, 50625 }, + { 44100, LC_810M, 784, 28125 }, + { 48000, LC_810M, 512, 16875 }, + { 64000, LC_810M, 2048, 50625 }, + { 88200, LC_810M, 1568, 28125 }, + { 96000, LC_810M, 1024, 16875 }, + { 128000, LC_810M, 4096, 50625 }, + { 176400, LC_810M, 3136, 28125 }, + { 192000, LC_810M, 2048, 16875 }, }; static const struct dp_aud_n_m * @@ -198,13 +208,13 @@ static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state, } static bool intel_eld_uptodate(struct drm_connector *connector, - i915_reg_t reg_eldv, uint32_t bits_eldv, - i915_reg_t reg_elda, uint32_t bits_elda, + i915_reg_t reg_eldv, u32 bits_eldv, + i915_reg_t reg_elda, u32 bits_elda, i915_reg_t reg_edid) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - uint8_t *eld = connector->eld; - uint32_t tmp; + const u8 *eld = connector->eld; + u32 tmp; int i; tmp = I915_READ(reg_eldv); @@ -218,7 +228,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector, I915_WRITE(reg_elda, tmp); for (i = 0; i < drm_eld_size(eld) / 4; i++) - if (I915_READ(reg_edid) != *((uint32_t *)eld + i)) + if (I915_READ(reg_edid) != *((const u32 *)eld + i)) return false; return true; @@ -229,7 +239,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - uint32_t eldv, tmp; + u32 eldv, tmp; DRM_DEBUG_KMS("Disable audio codec\n"); @@ 
-251,12 +261,12 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct drm_connector *connector = conn_state->connector; - uint8_t *eld = connector->eld; - uint32_t eldv; - uint32_t tmp; + const u8 *eld = connector->eld; + u32 eldv; + u32 tmp; int len, i; - DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]); + DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld)); tmp = I915_READ(G4X_AUD_VID_DID); if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL) @@ -278,7 +288,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder, len = min(drm_eld_size(eld) / 4, len); DRM_DEBUG_DRIVER("ELD size %d\n", len); for (i = 0; i < len; i++) - I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); + I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i)); tmp = I915_READ(G4X_AUD_CNTL_ST); tmp |= eldv; @@ -393,7 +403,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); enum pipe pipe = crtc->pipe; - uint32_t tmp; + u32 tmp; DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe)); @@ -426,8 +436,8 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_connector *connector = conn_state->connector; enum pipe pipe = crtc->pipe; - const uint8_t *eld = connector->eld; - uint32_t tmp; + const u8 *eld = connector->eld; + u32 tmp; int len, i; DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n", @@ -456,7 +466,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Up to 84 bytes of hw ELD buffer */ len = min(drm_eld_size(eld), 84); for (i = 0; i < len / 4; i++) - I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i)); + I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i)); /* ELD valid */ tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); @@ -477,7 +487,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); enum pipe pipe = crtc->pipe; enum port port = encoder->port; - uint32_t tmp, eldv; + u32 tmp, eldv; i915_reg_t aud_config, aud_cntrl_st2; DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", @@ -524,8 +534,8 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, struct drm_connector *connector = conn_state->connector; enum pipe pipe = crtc->pipe; enum port port = encoder->port; - uint8_t *eld = connector->eld; - uint32_t tmp, eldv; + const u8 *eld = connector->eld; + u32 tmp, eldv; int len, i; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; @@ -575,7 +585,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, /* Up to 84 bytes of hw ELD buffer */ len = min(drm_eld_size(eld), 84); for (i = 0; i < len / 4; i++) - I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); + I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i)); /* ELD valid */ tmp = I915_READ(aud_cntrl_st2); @@ -639,11 +649,12 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, dev_priv->av_enc_map[pipe] = encoder; mutex_unlock(&dev_priv->av_mutex); - if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { + if (acomp && acomp->base.audio_ops && + acomp->base.audio_ops->pin_eld_notify) { /* audio drivers expect pipe = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(crtc_state, 
INTEL_OUTPUT_DP_MST)) pipe = -1; - acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, + acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, (int) port, (int) pipe); } @@ -681,11 +692,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, dev_priv->av_enc_map[pipe] = NULL; mutex_unlock(&dev_priv->av_mutex); - if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { + if (acomp && acomp->base.audio_ops && + acomp->base.audio_ops->pin_eld_notify) { /* audio drivers expect pipe = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) pipe = -1; - acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, + acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, (int) port, (int) pipe); } @@ -880,7 +892,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, return ret; } -static const struct i915_audio_component_ops i915_audio_component_ops = { +static const struct drm_audio_component_ops i915_audio_component_ops = { .owner = THIS_MODULE, .get_power = i915_audio_component_get_power, .put_power = i915_audio_component_put_power, @@ -897,12 +909,12 @@ static int i915_audio_component_bind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); int i; - if (WARN_ON(acomp->ops || acomp->dev)) + if (WARN_ON(acomp->base.ops || acomp->base.dev)) return -EEXIST; drm_modeset_lock_all(&dev_priv->drm); - acomp->ops = &i915_audio_component_ops; - acomp->dev = i915_kdev; + acomp->base.ops = &i915_audio_component_ops; + acomp->base.dev = i915_kdev; BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; @@ -919,8 +931,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev, struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); drm_modeset_lock_all(&dev_priv->drm); - acomp->ops = NULL; - acomp->dev = NULL; + acomp->base.ops = NULL; + acomp->base.dev = NULL; dev_priv->audio_component = NULL; drm_modeset_unlock_all(&dev_priv->drm); } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 54270bdde100..1faa494e2bc9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -267,8 +267,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, if (!lvds_lfp_data_ptrs) return; - dev_priv->vbt.lvds_vbt = 1; - panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, panel_type); @@ -518,8 +516,31 @@ parse_driver_features(struct drm_i915_private *dev_priv, if (!driver) return; - if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) - dev_priv->vbt.edp.support = 1; + if (INTEL_GEN(dev_priv) >= 5) { + /* + * Note that we consider BDB_DRIVER_FEATURE_INT_SDVO_LVDS + * to mean "eDP". The VBT spec doesn't agree with that + * interpretation, but real world VBTs seem to. + */ + if (driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS) + dev_priv->vbt.int_lvds_support = 0; + } else { + /* + * FIXME it's not clear which BDB version has the LVDS config + * bits defined. Revision history in the VBT spec says: + * "0.92 | Add two definitions for VBT value of LVDS Active + * Config (00b and 11b values defined) | 06/13/2005" + * but does not specify the BDB version. + * + * So far version 134 (on i945gm) is the oldest VBT observed + * in the wild with the bits correctly populated. Version + * 108 (on i85x) does not have the bits correctly populated. 
+ */ + if (bdb->version >= 134 && + driver->lvds_config != BDB_DRIVER_FEATURE_INT_LVDS && + driver->lvds_config != BDB_DRIVER_FEATURE_INT_SDVO_LVDS) + dev_priv->vbt.int_lvds_support = 0; + } DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); /* @@ -542,11 +563,8 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) int panel_type = dev_priv->vbt.panel_type; edp = find_section(bdb, BDB_EDP); - if (!edp) { - if (dev_priv->vbt.edp.support) - DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); + if (!edp) return; - } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: @@ -634,7 +652,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) } if (bdb->version >= 173) { - uint8_t vswing; + u8 vswing; /* Don't read from VBT if module parameter has valid value*/ if (i915_modparams.edp_vswing) { @@ -688,8 +706,54 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) break; } - dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time; - dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; + /* + * New psr options 0=500us, 1=100us, 2=2500us, 3=0us + * Old decimal value is wake up time in multiples of 100 us. + */ + if (bdb->version >= 205 && + (IS_GEN9_BC(dev_priv) || IS_GEMINILAKE(dev_priv) || + INTEL_GEN(dev_priv) >= 10)) { + switch (psr_table->tp1_wakeup_time) { + case 0: + dev_priv->vbt.psr.tp1_wakeup_time_us = 500; + break; + case 1: + dev_priv->vbt.psr.tp1_wakeup_time_us = 100; + break; + case 3: + dev_priv->vbt.psr.tp1_wakeup_time_us = 0; + break; + default: + DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", + psr_table->tp1_wakeup_time); + /* fallthrough */ + case 2: + dev_priv->vbt.psr.tp1_wakeup_time_us = 2500; + break; + } + + switch (psr_table->tp2_tp3_wakeup_time) { + case 0: + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 500; + break; + case 1: + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100; + break; + case 3: + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0; + break; + default: + DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n", + psr_table->tp2_tp3_wakeup_time); + /* fallthrough */ + case 2: + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500; + break; + } + } else { + dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100; + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; + } } static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv, @@ -902,7 +966,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END * byte. 
*/ - size_of_sequence = *((const uint32_t *)(data + index)); + size_of_sequence = *((const u32 *)(data + index)); index += 4; seq_end = index + size_of_sequence; @@ -1197,18 +1261,37 @@ static const u8 cnp_ddc_pin_map[] = { [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */ }; +static const u8 icp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, + [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, + [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, + [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, +}; + static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { - if (HAS_PCH_CNP(dev_priv)) { - if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) { - return cnp_ddc_pin_map[vbt_pin]; - } else { - DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin); - return 0; - } + const u8 *ddc_pin_map; + int n_entries; + + if (HAS_PCH_ICP(dev_priv)) { + ddc_pin_map = icp_ddc_pin_map; + n_entries = ARRAY_SIZE(icp_ddc_pin_map); + } else if (HAS_PCH_CNP(dev_priv)) { + ddc_pin_map = cnp_ddc_pin_map; + n_entries = ARRAY_SIZE(cnp_ddc_pin_map); + } else { + /* Assuming direct map */ + return vbt_pin; } - return vbt_pin; + if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0) + return ddc_pin_map[vbt_pin]; + + DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", + vbt_pin); + return 0; } static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, @@ -1504,7 +1587,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) /* LFP panel data */ dev_priv->vbt.lvds_dither = 1; - dev_priv->vbt.lvds_vbt = 0; /* SDVO panel data */ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; @@ -1513,6 +1595,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) dev_priv->vbt.int_tv_support = 1; dev_priv->vbt.int_crt_support = 1; + /* driver features */ + dev_priv->vbt.int_lvds_support = 1; + /* Default to using SSC */ dev_priv->vbt.lvds_use_ssc = 1; /* @@ -1636,7 +1721,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) const struct bdb_header *bdb; u8 __iomem *bios = NULL; - if (HAS_PCH_NOP(dev_priv)) { + if (INTEL_INFO(dev_priv)->num_pipes == 0) { DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); return; } diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 18e643df523e..1db6ba7d926e 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -98,12 +98,14 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t) struct intel_engine_cs *engine = from_timer(engine, t, breadcrumbs.hangcheck); struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned int irq_count; if (!b->irq_armed) return; - if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) { - b->hangcheck_interrupts = atomic_read(&engine->irq_count); + irq_count = READ_ONCE(b->irq_count); + if (b->hangcheck_interrupts != irq_count) { + b->hangcheck_interrupts = irq_count; mod_timer(&b->hangcheck, wait_timeout()); return; } @@ -272,13 +274,14 @@ static bool use_fake_irq(const struct intel_breadcrumbs *b) if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) return false; - /* Only start with the heavy weight fake irq timer if we have not + /* + * Only start with the heavy weight fake irq timer if we have not * seen any interrupts since enabling it the first time. 
If the * interrupts are still arriving, it means we made a mistake in our * engine->seqno_barrier(), a timing error that should be transient * and unlikely to reoccur. */ - return atomic_read(&engine->irq_count) == b->hangcheck_interrupts; + return READ_ONCE(b->irq_count) == b->hangcheck_interrupts; } static void enable_fake_irq(struct intel_breadcrumbs *b) @@ -846,8 +849,9 @@ static void cancel_fake_irq(struct intel_engine_cs *engine) void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) { struct intel_breadcrumbs *b = &engine->breadcrumbs; + unsigned long flags; - spin_lock_irq(&b->irq_lock); + spin_lock_irqsave(&b->irq_lock, flags); /* * Leave the fake_irq timer enabled (if it is running), but clear the @@ -871,7 +875,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) */ clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); - spin_unlock_irq(&b->irq_lock); + spin_unlock_irqrestore(&b->irq_lock, flags); } void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 704ddb4d3ca7..29075c763428 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -316,6 +316,7 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv, break; default: DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); + /* fall through */ case GC_DISPLAY_CLOCK_133_MHZ_PNV: cdclk_state->cdclk = 133333; break; @@ -991,6 +992,16 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, u32 freq_select, cdclk_ctl; int ret; + /* + * Based on WA#1183, CDCLK rates 308 and 617MHz are + * unsupported on SKL. In theory this should never happen since only + * the eDP1.4 2.16 and 4.32Gbps rates require it, but eDP1.4 is not + * supported on SKL either, see the above WA. WARN whenever trying to + * use the corresponding VCO freq as that always leads to using the + * minimum 308MHz CDCLK. + */ + WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000); + mutex_lock(&dev_priv->pcu_lock); ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, SKL_CDCLK_PREPARE_FOR_CHANGE, @@ -1787,6 +1798,7 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref) switch (ref) { default: MISSING_CASE(ref); + /* fall through */ case 24000: ranges = ranges_24; break; @@ -1814,6 +1826,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) switch (cdclk) { default: MISSING_CASE(cdclk); + /* fall through */ case 307200: case 556800: case 652800: @@ -1861,11 +1874,36 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv, skl_cdclk_decimal(cdclk)); mutex_lock(&dev_priv->pcu_lock); - /* TODO: add proper DVFS support. */ - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2); + sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_state->voltage_level); mutex_unlock(&dev_priv->pcu_lock); intel_update_cdclk(dev_priv); + + /* + * Can't read out the voltage level :( + * Let's just assume everything is as expected. 
+ */ + dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; +} + +static u8 icl_calc_voltage_level(int cdclk) +{ + switch (cdclk) { + case 50000: + case 307200: + case 312000: + return 0; + case 556800: + case 552000: + return 1; + default: + MISSING_CASE(cdclk); + /* fall through */ + case 652800: + case 648000: + return 2; + } } static void icl_get_cdclk(struct drm_i915_private *dev_priv, @@ -1879,6 +1917,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { default: MISSING_CASE(val); + /* fall through */ case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: cdclk_state->ref = 24000; break; @@ -1899,7 +1938,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, */ cdclk_state->vco = 0; cdclk_state->cdclk = cdclk_state->bypass; - return; + goto out; } cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; @@ -1908,6 +1947,14 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0); cdclk_state->cdclk = cdclk_state->vco / 2; + +out: + /* + * Can't read this out :( Let's assume it's + * at least what the CDCLK frequency requires. + */ + cdclk_state->voltage_level = + icl_calc_voltage_level(cdclk_state->cdclk); } /** @@ -1950,6 +1997,8 @@ sanitize: sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref); sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); + sanitized_state.voltage_level = + icl_calc_voltage_level(sanitized_state.cdclk); icl_set_cdclk(dev_priv, &sanitized_state); } @@ -1967,6 +2016,7 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; + cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk); icl_set_cdclk(dev_priv, &cdclk_state); } @@ -2470,6 +2520,9 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk.logical.vco = vco; intel_state->cdclk.logical.cdclk = cdclk; + intel_state->cdclk.logical.voltage_level = + max(icl_calc_voltage_level(cdclk), + cnl_compute_min_voltage_level(intel_state)); if (!intel_state->active_crtcs) { cdclk = icl_calc_cdclk(0, ref); @@ -2477,6 +2530,8 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk.actual.vco = vco; intel_state->cdclk.actual.cdclk = cdclk; + intel_state->cdclk.actual.voltage_level = + icl_calc_voltage_level(cdclk); } else { intel_state->cdclk.actual = intel_state->cdclk.logical; } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 072b326d5ee0..0c6bf82bb059 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -63,33 +63,35 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector) return intel_encoder_to_crt(intel_attached_encoder(connector)); } +bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t adpa_reg, enum pipe *pipe) +{ + u32 val; + + val = I915_READ(adpa_reg); + + /* asserts want to know the pipe even if the port is disabled */ + if (HAS_PCH_CPT(dev_priv)) + *pipe = (val & ADPA_PIPE_SEL_MASK_CPT) >> ADPA_PIPE_SEL_SHIFT_CPT; + else + *pipe = (val & ADPA_PIPE_SEL_MASK) >> ADPA_PIPE_SEL_SHIFT; + + return val & ADPA_DAC_ENABLE; +} + static bool intel_crt_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crt 
*crt = intel_encoder_to_crt(encoder); - u32 tmp; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, encoder->power_domain)) return false; - ret = false; - - tmp = I915_READ(crt->adpa_reg); + ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe); - if (!(tmp & ADPA_DAC_ENABLE)) - goto out; - - if (HAS_PCH_CPT(dev_priv)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - ret = true; -out: intel_display_power_put(dev_priv, encoder->power_domain); return ret; @@ -168,11 +170,9 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, if (HAS_PCH_LPT(dev_priv)) ; /* Those bits don't exist here */ else if (HAS_PCH_CPT(dev_priv)) - adpa |= PORT_TRANS_SEL_CPT(crtc->pipe); - else if (crtc->pipe == 0) - adpa |= ADPA_PIPE_A_SELECT; + adpa |= ADPA_PIPE_SEL_CPT(crtc->pipe); else - adpa |= ADPA_PIPE_B_SELECT; + adpa |= ADPA_PIPE_SEL(crtc->pipe); if (!HAS_PCH_SPLIT(dev_priv)) I915_WRITE(BCLRPAT(crtc->pipe), 0); @@ -232,6 +232,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + intel_ddi_disable_pipe_clock(old_crtc_state); + pch_post_disable_crt(encoder, old_crtc_state, old_conn_state); lpt_disable_pch_transcoder(dev_priv); @@ -268,6 +270,8 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); dev_priv->display.fdi_link_train(crtc, crtc_state); + + intel_ddi_enable_pipe_clock(crtc_state); } static void hsw_enable_crt(struct intel_encoder *encoder, @@ -333,6 +337,10 @@ intel_crt_mode_valid(struct drm_connector *connector, (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) return MODE_CLOCK_HIGH; + /* HSW/BDW FDI limited to 4k */ + if (mode->hdisplay > 4096) + return MODE_H_ILLEGAL; + return MODE_OK; } @@ -375,6 +383,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return false; + /* HSW/BDW FDI limited to 4k */ + if (adjusted_mode->crtc_hdisplay > 4096 || + adjusted_mode->crtc_hblank_start > 4096) + return false; + pipe_config->has_pch_encoder = true; /* LPT FDI RX only supports 8bpc. */ @@ -513,7 +526,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) * to get a reliable result. 
*/ - if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) + if (IS_G45(dev_priv)) tries = 2; else tries = 1; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index fed26d6e4e27..39d66f8493fa 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -915,7 +915,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (IS_CANNONLAKE(dev_priv)) { + if (IS_ICELAKE(dev_priv)) { + if (port == PORT_A || port == PORT_B) + icl_get_combo_buf_trans(dev_priv, port, + INTEL_OUTPUT_HDMI, &n_entries); + else + n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); + default_entry = n_entries - 1; + } else if (IS_CANNONLAKE(dev_priv)) { cnl_get_buf_trans_hdmi(dev_priv, &n_entries); default_entry = n_entries - 1; } else if (IS_GEN9_LP(dev_priv)) { @@ -1055,14 +1062,31 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, const struct intel_shared_dpll *pll) { + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + int clock = crtc->config->port_clock; const enum intel_dpll_id id = pll->info->id; switch (id) { default: MISSING_CASE(id); + /* fall through */ case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: return DDI_CLK_SEL_NONE; + case DPLL_ID_ICL_TBTPLL: + switch (clock) { + case 162000: + return DDI_CLK_SEL_TBT_162; + case 270000: + return DDI_CLK_SEL_TBT_270; + case 540000: + return DDI_CLK_SEL_TBT_540; + case 810000: + return DDI_CLK_SEL_TBT_810; + default: + MISSING_CASE(clock); + break; + } case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: case DPLL_ID_ICL_MGPLL3: @@ -1243,35 +1267,6 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc) return ret; } -/* Finds the only possible encoder associated with the given CRTC. 
*/ -struct intel_encoder * -intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct intel_encoder *ret = NULL; - struct drm_atomic_state *state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int num_encoders = 0; - int i; - - state = crtc_state->base.state; - - for_each_new_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - ret = to_intel_encoder(connector_state->best_encoder); - num_encoders++; - } - - WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders, - pipe_name(crtc->pipe)); - - BUG_ON(ret == NULL); - return ret; -} - #define LC_FREQ 2700 static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, @@ -1374,8 +1369,13 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, uint32_t cfgcr0, cfgcr1; uint32_t p0, p1, p2, dco_freq, ref_clock; - cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); + if (INTEL_GEN(dev_priv) >= 11) { + cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); + cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); + } else { + cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); + cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); + } p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK; p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK; @@ -1451,6 +1451,30 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) pipe_config->base.adjusted_mode.crtc_clock = dotclock; } +static void icl_ddi_clock_get(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum port port = encoder->port; + int link_clock = 0; + uint32_t pll_id; + + pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); + if (port == PORT_A || port == PORT_B) { + if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) + link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + else + link_clock = icl_calc_dp_combo_pll_link(dev_priv, + pll_id); + } else { + /* FIXME - Add for MG PLL */ + WARN(1, "MG PLL clock_get code not implemented yet\n"); + } + + pipe_config->port_clock = link_clock; + ddi_dotclock_get(pipe_config); +} + static void cnl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -1644,6 +1668,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, bxt_ddi_clock_get(encoder, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_clock_get(encoder, pipe_config); + else if (IS_ICELAKE(dev_priv)) + icl_ddi_clock_get(encoder, pipe_config); } void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) @@ -1967,15 +1993,50 @@ out: return ret; } -static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder) +static inline enum intel_display_power_domain +intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) +{ + /* CNL HW requires corresponding AUX IOs to be powered up for PSR with + * DC states enabled at the same time, while for driver initiated AUX + * transfers we need the same AUX IOs to be powered but with DC states + * disabled. Accordingly use the AUX power domain here which leaves DC + * states enabled. + * However, for non-A AUX ports the corresponding non-EDP transcoders + * would have already enabled power well 2 and DC_OFF. 
This means we can + * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a + * specific AUX_IO reference without powering up any extra wells. + * Note that PSR is enabled only on Port A even though this function + * returns the correct domain for other ports too. + */ + return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : + intel_dp->aux_power_domain; +} + +static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum pipe pipe; + struct intel_digital_port *dig_port; + u64 domains; - if (intel_ddi_get_hw_state(encoder, &pipe)) - return BIT_ULL(dig_port->ddi_io_power_domain); + /* + * TODO: Add support for MST encoders. Atm, the following should never + * happen since fake-MST encoders don't set their get_power_domains() + * hook. + */ + if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))) + return 0; - return 0; + dig_port = enc_to_dig_port(&encoder->base); + domains = BIT_ULL(dig_port->ddi_io_power_domain); + + /* AUX power is only needed for (e)DP mode, not for HDMI. */ + if (intel_crtc_has_dp_encoder(crtc_state)) { + struct intel_dp *intel_dp = &dig_port->dp; + + domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); + } + + return domains; } void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) @@ -2124,6 +2185,26 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) DP_TRAIN_VOLTAGE_SWING_MASK; } +/* + * We assume that the full set of pre-emphasis values can be + * used on all DDI platforms. Should that change we need to + * rethink this code. + */ +u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, u8 voltage_swing) +{ + switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + return DP_TRAIN_PRE_EMPH_LEVEL_3; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + return DP_TRAIN_PRE_EMPH_LEVEL_2; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + return DP_TRAIN_PRE_EMPH_LEVEL_1; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + default: + return DP_TRAIN_PRE_EMPH_LEVEL_0; + } +} + static void cnl_ddi_vswing_program(struct intel_encoder *encoder, int level, enum intel_output_type type) { @@ -2595,6 +2676,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(intel_dp)); + intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); @@ -2619,6 +2703,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dp_start_link_train(intel_dp); if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) intel_dp_stop_link_train(intel_dp); + + intel_ddi_enable_pipe_clock(crtc_state); } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, @@ -2649,6 +2735,8 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); + intel_ddi_enable_pipe_clock(crtc_state); + intel_dig_port->set_infoframes(&encoder->base, crtc_state->has_infoframe, crtc_state, conn_state); @@ -2718,6 +2806,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); + intel_ddi_disable_pipe_clock(old_crtc_state); + /* * Power down sink before disabling the port, otherwise we end * up getting interrupts from the sink on detecting link loss. 
@@ -2733,6 +2823,9 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); + + intel_display_power_put(dev_priv, + intel_ddi_main_link_aux_domain(intel_dp)); } static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, @@ -2743,11 +2836,13 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct intel_hdmi *intel_hdmi = &dig_port->hdmi; - intel_disable_ddi_buf(encoder); - dig_port->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state); + intel_ddi_disable_pipe_clock(old_crtc_state); + + intel_disable_ddi_buf(encoder); + intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); @@ -3034,6 +3129,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, { if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; + else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) + crtc_state->min_voltage_level = 1; } void intel_ddi_get_config(struct intel_encoder *encoder, @@ -3542,7 +3639,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) goto err; intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; - dev_priv->hotplug.irq_port[port] = intel_dig_port; } /* In theory we don't need the encoder->type check, but leave it just in diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0fd13df424cf..0ef0c6448d53 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -858,6 +858,8 @@ void intel_device_info_runtime_init(struct intel_device_info *info) void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p) { + drm_printf(p, "Has logical contexts? %s\n", + yesno(caps->has_logical_contexts)); drm_printf(p, "scheduler: %x\n", caps->scheduler); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 933e31669557..633f9fbf72ea 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -186,6 +186,7 @@ struct intel_device_info { struct intel_driver_caps { unsigned int scheduler; + bool has_logical_contexts:1; }; static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dec0d60921bf..ed3fa1c8a983 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1022,7 +1022,7 @@ bool intel_crtc_active(struct intel_crtc *crtc) * We can ditch the adjusted_mode.crtc_clock check as soon * as Haswell has gained clock readout/fastboot support. * - * We can ditch the crtc->primary->fb check as soon as we can + * We can ditch the crtc->primary->state->fb check as soon as we can * properly reconstruct framebuffers. 
* * FIXME: The intel_crtc->active here should be switched to @@ -1202,7 +1202,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { i915_reg_t pp_reg; u32 val; - enum pipe panel_pipe = PIPE_A; + enum pipe panel_pipe = INVALID_PIPE; bool locked = true; if (WARN_ON(HAS_DDI(dev_priv))) @@ -1214,18 +1214,35 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) pp_reg = PP_CONTROL(0); port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; - if (port_sel == PANEL_PORT_SELECT_LVDS && - I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) - panel_pipe = PIPE_B; - /* XXX: else fix for eDP */ + switch (port_sel) { + case PANEL_PORT_SELECT_LVDS: + intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPA: + intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPC: + intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe); + break; + case PANEL_PORT_SELECT_DPD: + intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe); + break; + default: + MISSING_CASE(port_sel); + break; + } } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* presumably write lock depends on pipe, not port select */ pp_reg = PP_CONTROL(pipe); panel_pipe = pipe; } else { + u32 port_sel; + pp_reg = PP_CONTROL(0); - if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) - panel_pipe = PIPE_B; + port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; + + WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS); + intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe); } val = I915_READ(pp_reg); @@ -1267,7 +1284,10 @@ void assert_pipe(struct drm_i915_private *dev_priv, static void assert_plane(struct intel_plane *plane, bool state) { - bool cur_state = plane->get_hw_state(plane); + enum pipe pipe; + bool cur_state; + + cur_state = plane->get_hw_state(plane, &pipe); I915_STATE_WARN(cur_state != state, "%s assertion failure (expected %s, current %s)\n", @@ -1305,125 +1325,64 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } -static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 port_sel, u32 val) -{ - if ((val & DP_PORT_EN) == 0) - return false; - - if (HAS_PCH_CPT(dev_priv)) { - u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); - if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) - return false; - } else if (IS_CHERRYVIEW(dev_priv)) { - if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) - return false; - } else { - if ((val & DP_PIPE_MASK) != (pipe << 30)) - return false; - } - return true; -} - -static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 val) +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t dp_reg) { - if ((val & SDVO_ENABLE) == 0) - return false; - - if (HAS_PCH_CPT(dev_priv)) { - if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) - return false; - } else if (IS_CHERRYVIEW(dev_priv)) { - if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) - return false; - } else { - if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) - return false; - } - return true; -} + enum pipe port_pipe; + bool state; -static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 val) -{ - if ((val & LVDS_PORT_EN) == 0) - return false; + state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - if (HAS_PCH_CPT(dev_priv)) { - if ((val & PORT_TRANS_SEL_MASK) != 
PORT_TRANS_SEL_CPT(pipe)) - return false; - } else { - if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) - return false; - } - return true; -} + I915_STATE_WARN(state && port_pipe == pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); -static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe, u32 val) -{ - if ((val & ADPA_DAC_ENABLE) == 0) - return false; - if (HAS_PCH_CPT(dev_priv)) { - if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) - return false; - } else { - if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) - return false; - } - return true; + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); } -static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, i915_reg_t reg, - u32 port_sel) +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t hdmi_reg) { - u32 val = I915_READ(reg); - I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), - "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", - i915_mmio_reg_offset(reg), pipe_name(pipe)); + enum pipe port_pipe; + bool state; - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 - && (val & DP_PIPEB_SELECT), - "IBX PCH dp port still using transcoder B\n"); -} + state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); -static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, i915_reg_t reg) -{ - u32 val = I915_READ(reg); - I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), - "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", - i915_mmio_reg_offset(reg), pipe_name(pipe)); + I915_STATE_WARN(state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 - && (val & SDVO_PIPE_B_SELECT), - "IBX PCH hdmi port still using transcoder B\n"); + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); } static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - u32 val; + enum pipe port_pipe; - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); + assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); + assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); + assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - val = I915_READ(PCH_ADPA); - I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && + port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); - val = I915_READ(PCH_LVDS); - I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); + I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && + port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); - assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); - 
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); - assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); } static void _vlv_enable_pll(struct intel_crtc *crtc, @@ -2521,6 +2480,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_rotation_info *rot_info = &intel_fb->rot_info; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); u32 gtt_offset_rotated = 0; unsigned int max_size = 0; int i, num_planes = fb->format->num_planes; @@ -2585,7 +2545,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * fb layout agrees with the fence layout. We already check that the * fb stride matches the fence stride elsewhere. */ - if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) && + if (i == 0 && i915_gem_object_is_tiled(obj) && (x + width) * cpp > fb->pitches[i]) { DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", i, fb->offsets[i]); @@ -2670,9 +2630,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, max_size = max(max_size, offset + size); } - if (max_size * tile_size > intel_fb->obj->base.size) { + if (max_size * tile_size > obj->base.size) { DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n", - max_size * tile_size, intel_fb->obj->base.size); + max_size * tile_size, obj->base.size); return -EINVAL; } @@ -2796,10 +2756,10 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, /* FIXME pre-g4x don't work like this */ if (visible) { - crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base)); + crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); crtc_state->active_planes |= BIT(plane->id); } else { - crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base)); + crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); crtc_state->active_planes &= ~BIT(plane->id); } @@ -2922,9 +2882,8 @@ valid_fb: if (i915_gem_object_is_tiled(obj)) dev_priv->preserve_bios_swizzle = true; - drm_framebuffer_get(fb); - primary->fb = primary->state->fb = fb; - primary->crtc = primary->state->crtc = &intel_crtc->base; + plane_state->fb = fb; + plane_state->crtc = &intel_crtc->base; intel_set_plane_visible(to_intel_crtc_state(crtc_state), to_intel_plane_state(plane_state), @@ -3430,24 +3389,33 @@ static void i9xx_disable_plane(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static bool i9xx_plane_get_hw_state(struct intel_plane *plane) +static bool i9xx_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - enum pipe pipe = plane->pipe; bool ret; + u32 val; /* * Not 100% correct for planes that can move between pipes, * but that's only the case for gen2-4 which don't have any * display power wells. 
*/ - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(DSPCNTR(i9xx_plane)) & DISPLAY_PLANE_ENABLE; + val = I915_READ(DSPCNTR(i9xx_plane)); + + ret = val & DISPLAY_PLANE_ENABLE; + + if (INTEL_GEN(dev_priv) >= 5) + *pipe = plane->pipe; + else + *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> + DISPPLANE_SEL_PIPE_SHIFT; intel_display_power_put(dev_priv, power_domain); @@ -3689,7 +3657,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); - if (intel_format_is_yuv(fb->format->format)) { + if (fb->format->is_yuv) { if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; else @@ -4631,20 +4599,33 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) } } -/* Return which DP Port should be selected for Transcoder DP control */ -static enum port -intel_trans_dp_port_sel(struct intel_crtc *crtc) +/* + * Finds the encoder associated with the given CRTC. This can only be + * used when we know that the CRTC isn't feeding multiple encoders! + */ +static struct intel_encoder * +intel_get_crtc_new_encoder(const struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *encoder; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + const struct drm_connector_state *connector_state; + const struct drm_connector *connector; + struct intel_encoder *encoder = NULL; + int num_encoders = 0; + int i; - for_each_encoder_on_crtc(dev, &crtc->base, encoder) { - if (encoder->type == INTEL_OUTPUT_DP || - encoder->type == INTEL_OUTPUT_EDP) - return encoder->port; + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { + if (connector_state->crtc != &crtc->base) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + num_encoders++; } - return -1; + WARN(num_encoders != 1, "%d encoders for pipe %c\n", + num_encoders, pipe_name(crtc->pipe)); + + return encoder; } /* @@ -4655,7 +4636,8 @@ intel_trans_dp_port_sel(struct intel_crtc *crtc) * - DP transcoding bits * - transcoder */ -static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) +static void ironlake_pch_enable(const struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; @@ -4714,6 +4696,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) &crtc_state->base.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); + enum port port; + temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | TRANS_DP_SYNC_MASK | @@ -4726,19 +4710,9 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; - switch (intel_trans_dp_port_sel(crtc)) { - case PORT_B: - temp |= TRANS_DP_PORT_SEL_B; - break; - case PORT_C: - temp |= TRANS_DP_PORT_SEL_C; - break; - case PORT_D: - temp |= TRANS_DP_PORT_SEL_D; - break; - default: - BUG(); - } + port = intel_get_crtc_new_encoder(state, crtc_state)->port; + WARN_ON(port < PORT_B || port > PORT_D); + temp |= 
TRANS_DP_PORT_SEL(port); I915_WRITE(reg, temp); } @@ -4746,7 +4720,8 @@ static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) ironlake_enable_pch_transcoder(dev_priv, pipe); } -static void lpt_pch_enable(const struct intel_crtc_state *crtc_state) +static void lpt_pch_enable(const struct intel_atomic_state *state, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -4776,6 +4751,39 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) } } +/* + * The hardware phase 0.0 refers to the center of the pixel. + * We want to start from the top/left edge which is phase + * -0.5. That matches how the hardware calculates the scaling + * factors (from top-left of the first pixel to bottom-right + * of the last pixel, as opposed to the pixel centers). + * + * For 4:2:0 subsampled chroma planes we obviously have to + * adjust that so that the chroma sample position lands in + * the right spot. + * + * Note that for packed YCbCr 4:2:2 formats there is no way to + * control chroma siting. The hardware simply replicates the + * chroma samples for both of the luma samples, and thus we don't + * actually get the expected MPEG2 chroma siting convention :( + * The same behaviour is observed on pre-SKL platforms as well. + */ +u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) +{ + int phase = -0x8000; + u16 trip = 0; + + if (chroma_cosited) + phase += (sub - 1) * 0x8000 / sub; + + if (phase < 0) + phase = 0x10000 + phase; + else + trip = PS_PHASE_TRIP; + + return ((phase >> 2) & PS_PHASE_MASK) | trip; +} + static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, unsigned int scaler_user, int *scaler_id, @@ -4975,14 +4983,22 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) &crtc->config->scaler_state; if (crtc->config->pch_pfit.enabled) { + u16 uv_rgb_hphase, uv_rgb_vphase; int id; if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) return; + uv_rgb_hphase = skl_scaler_calc_phase(1, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, false); + id = scaler_state->scaler_id; I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); + I915_WRITE_FW(SKL_PS_VPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), + PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); } @@ -5501,10 +5517,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, * * Spurious PCH underruns also occur during PCH enabling. 
*/ - if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - if (intel_crtc->config->has_pch_encoder) - intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); if (intel_crtc->config->has_pch_encoder) intel_prepare_shared_dpll(intel_crtc); @@ -5549,7 +5563,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_enable_pipe(pipe_config); if (intel_crtc->config->has_pch_encoder) - ironlake_pch_enable(pipe_config); + ironlake_pch_enable(old_intel_state, pipe_config); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); @@ -5559,9 +5573,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, if (HAS_PCH_CPT(dev_priv)) cpt_verify_modeset(dev, intel_crtc->pipe); - /* Must wait for vblank to avoid spurious PCH FIFO underruns */ - if (intel_crtc->config->has_pch_encoder) + /* + * Must wait for vblank to avoid spurious PCH FIFO underruns. + * And a second vblank wait is needed at least on ILK with + * some interlaced HDMI modes. Let's do the double wait always + * in case there are more corner cases we don't know about. + */ + if (intel_crtc->config->has_pch_encoder) { intel_wait_for_vblank(dev_priv, pipe); + intel_wait_for_vblank(dev_priv, pipe); + } intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } @@ -5611,6 +5632,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); bool psl_clkgate_wa; + u32 pipe_chicken; if (WARN_ON(intel_crtc->active)) return; @@ -5623,6 +5645,8 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (INTEL_GEN(dev_priv) >= 11) icl_map_plls_to_ports(crtc, pipe_config, old_state); + intel_encoders_pre_enable(crtc, pipe_config, old_state); + if (intel_crtc_has_dp_encoder(intel_crtc->config)) intel_dp_set_m_n(intel_crtc, M1_N1); @@ -5651,11 +5675,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_crtc->active = true; - intel_encoders_pre_enable(crtc, pipe_config, old_state); - - if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_enable_pipe_clock(pipe_config); - /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && intel_crtc->config->pch_pfit.enabled; @@ -5673,6 +5692,17 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, */ intel_color_load_luts(&pipe_config->base); + /* + * Display WA #1153: enable hardware to bypass the alpha math + * and rounding for per-pixel values 00 and 0xff + */ + if (INTEL_GEN(dev_priv) >= 11) { + pipe_chicken = I915_READ(PIPE_CHICKEN(pipe)); + if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN)) + I915_WRITE_FW(PIPE_CHICKEN(pipe), + pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN); + } + intel_ddi_set_pipe_settings(pipe_config); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_transcoder_func(pipe_config); @@ -5688,7 +5718,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_enable_pipe(pipe_config); if (intel_crtc->config->has_pch_encoder) - lpt_pch_enable(pipe_config); + lpt_pch_enable(old_intel_state, pipe_config); if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(pipe_config, true); @@ -5741,10 +5771,8 
@@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, * pipe is already disabled, but FDI RX/TX is still enabled. * Happens at least with VGA+HDMI cloning. Suppress them. */ - if (intel_crtc->config->has_pch_encoder) { - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - } + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); intel_encoders_disable(crtc, old_crtc_state, old_state); @@ -5794,7 +5822,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; intel_encoders_disable(crtc, old_crtc_state, old_state); @@ -5805,8 +5833,8 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, if (!transcoder_is_dsi(cpu_transcoder)) intel_disable_pipe(old_crtc_state); - if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) - intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); + if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) + intel_ddi_set_vc_payload_alloc(old_crtc_state, false); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(old_crtc_state); @@ -5816,9 +5844,6 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, else ironlake_pfit_disable(intel_crtc, false); - if (!transcoder_is_dsi(cpu_transcoder)) - intel_ddi_disable_pipe_clock(intel_crtc->config); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); if (INTEL_GEN(dev_priv) >= 11) @@ -5849,6 +5874,22 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) I915_WRITE(BCLRPAT(crtc->pipe), 0); } +bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) +{ + if (IS_ICELAKE(dev_priv)) + return port >= PORT_C && port <= PORT_F; + + return false; +} + +enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) +{ + if (!intel_port_is_tc(dev_priv, port)) + return PORT_TC_NONE; + + return port - PORT_C; +} + enum intel_display_power_domain intel_port_to_power_domain(enum port port) { switch (port) { @@ -7675,16 +7716,18 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - enum pipe pipe = crtc->pipe; + enum pipe pipe; u32 val, base, offset; int fourcc, pixel_format; unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; - if (!plane->get_hw_state(plane)) + if (!plane->get_hw_state(plane, &pipe)) return; + WARN_ON(pipe != crtc->pipe); + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -8705,16 +8748,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); enum plane_id plane_id = plane->id; - enum pipe pipe = crtc->pipe; + enum pipe pipe; u32 val, base, offset, stride_mult, tiling, alpha; int fourcc, pixel_format; unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; - if 
(!plane->get_hw_state(plane)) + if (!plane->get_hw_state(plane, &pipe)) return; + WARN_ON(pipe != crtc->pipe); + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -9142,9 +9187,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { + struct intel_atomic_state *state = + to_intel_atomic_state(crtc_state->base.state); + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { struct intel_encoder *encoder = - intel_ddi_get_crtc_new_encoder(crtc_state); + intel_get_crtc_new_encoder(state, crtc_state); if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", @@ -9172,6 +9220,44 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } +static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, + enum port port, + struct intel_crtc_state *pipe_config) +{ + enum intel_dpll_id id; + u32 temp; + + /* TODO: TBT pll not implemented. */ + switch (port) { + case PORT_A: + case PORT_B: + temp = I915_READ(DPCLKA_CFGCR0_ICL) & + DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); + id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); + + if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1)) + return; + break; + case PORT_C: + id = DPLL_ID_ICL_MGPLL1; + break; + case PORT_D: + id = DPLL_ID_ICL_MGPLL2; + break; + case PORT_E: + id = DPLL_ID_ICL_MGPLL3; + break; + case PORT_F: + id = DPLL_ID_ICL_MGPLL4; + break; + default: + MISSING_CASE(port); + return; + } + + pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); +} + static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) @@ -9273,6 +9359,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { default: WARN(1, "unknown pipe linked to edp transcoder\n"); + /* fall through */ case TRANS_DDI_EDP_INPUT_A_ONOFF: case TRANS_DDI_EDP_INPUT_A_ON: trans_edp_pipe = PIPE_A; @@ -9328,7 +9415,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, * registers/MIPI[BXT]. We can break out here early, since we * need the same DSI PLL to be enabled for both DSI ports. 
*/ - if (!intel_dsi_pll_is_enabled(dev_priv)) + if (!bxt_dsi_pll_is_enabled(dev_priv)) break; /* XXX: this works for video mode only */ @@ -9359,7 +9446,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - if (IS_CANNONLAKE(dev_priv)) + if (IS_ICELAKE(dev_priv)) + icelake_get_ddi_pll(dev_priv, port, pipe_config); + else if (IS_CANNONLAKE(dev_priv)) cannonlake_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_GEN9_BC(dev_priv)) skylake_get_ddi_pll(dev_priv, port, pipe_config); @@ -9692,7 +9781,8 @@ static void i845_disable_cursor(struct intel_plane *plane, i845_update_cursor(plane, NULL, NULL); } -static bool i845_cursor_get_hw_state(struct intel_plane *plane) +static bool i845_cursor_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -9704,6 +9794,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane) ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; + *pipe = PIPE_A; + intel_display_power_put(dev_priv, power_domain); return ret; @@ -9715,25 +9807,30 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - u32 cntl; + u32 cntl = 0; - cntl = MCURSOR_GAMMA_ENABLE; + if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) + cntl |= MCURSOR_TRICKLE_FEED_DISABLE; - if (HAS_DDI(dev_priv)) - cntl |= CURSOR_PIPE_CSC_ENABLE; + if (INTEL_GEN(dev_priv) <= 10) { + cntl |= MCURSOR_GAMMA_ENABLE; + + if (HAS_DDI(dev_priv)) + cntl |= MCURSOR_PIPE_CSC_ENABLE; + } if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); switch (plane_state->base.crtc_w) { case 64: - cntl |= CURSOR_MODE_64_ARGB_AX; + cntl |= MCURSOR_MODE_64_ARGB_AX; break; case 128: - cntl |= CURSOR_MODE_128_ARGB_AX; + cntl |= MCURSOR_MODE_128_ARGB_AX; break; case 256: - cntl |= CURSOR_MODE_256_ARGB_AX; + cntl |= MCURSOR_MODE_256_ARGB_AX; break; default: MISSING_CASE(plane_state->base.crtc_w); @@ -9741,7 +9838,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, } if (plane_state->base.rotation & DRM_MODE_ROTATE_180) - cntl |= CURSOR_ROTATE_180; + cntl |= MCURSOR_ROTATE_180; return cntl; } @@ -9903,23 +10000,32 @@ static void i9xx_disable_cursor(struct intel_plane *plane, i9xx_update_cursor(plane, NULL, NULL); } -static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; - enum pipe pipe = plane->pipe; bool ret; + u32 val; /* * Not 100% correct for planes that can move between pipes, * but that's only the case for gen2-3 which don't have any * display power wells. 
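Editor's note: the plane->get_hw_state() hook now also reports which pipe the plane is attached to; for the old movable cursors (gen2/3) the readout just below recovers the pipe from the pipe-select bits of the cursor control register. A rough stand-alone illustration of that decode, with invented mask/shift values rather than the real CURCNTR layout:

/* Sketch of reading a plane's pipe back from its control register.
 * MASK/SHIFT values are invented for the example, not the real CURCNTR layout. */
#include <stdio.h>
#include <stdint.h>

#define CURSOR_MODE_MASK	0x27		/* illustrative "enabled" field */
#define CURSOR_PIPE_SELECT_MASK	(0x3u << 28)	/* illustrative */
#define CURSOR_PIPE_SELECT_SHIFT 28		/* illustrative */

static int cursor_get_hw_state(uint32_t curcntr, int *pipe)
{
	int enabled = (curcntr & CURSOR_MODE_MASK) != 0;

	/* Report the pipe even when disabled, so the caller's asserts have data. */
	*pipe = (curcntr & CURSOR_PIPE_SELECT_MASK) >> CURSOR_PIPE_SELECT_SHIFT;

	return enabled;
}

int main(void)
{
	int pipe;
	int on = cursor_get_hw_state(0x27 | (1u << 28), &pipe);

	printf("enabled=%d pipe=%c\n", on, 'A' + pipe);	/* enabled=1 pipe=B */
	return 0;
}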
*/ - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + val = I915_READ(CURCNTR(plane->pipe)); + + ret = val & MCURSOR_MODE; + + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + *pipe = plane->pipe; + else + *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> + MCURSOR_PIPE_SELECT_SHIFT; intel_display_power_put(dev_priv, power_domain); @@ -10631,7 +10737,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->base.state->crtc) - drm_connector_unreference(&connector->base); + drm_connector_put(&connector->base); if (connector->base.encoder) { connector->base.state->best_encoder = @@ -10639,7 +10745,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) connector->base.state->crtc = connector->base.encoder->crtc; - drm_connector_reference(&connector->base); + drm_connector_get(&connector->base); } else { connector->base.state->best_encoder = NULL; connector->base.state->crtc = NULL; @@ -10918,6 +11024,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) case INTEL_OUTPUT_DDI: if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; + /* else: fall through */ case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: @@ -11791,7 +11898,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, struct drm_crtc_state *new_state) { struct intel_dpll_hw_state dpll_hw_state; - unsigned crtc_mask; + unsigned int crtc_mask; bool active; memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); @@ -11818,7 +11925,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, return; } - crtc_mask = 1 << drm_crtc_index(crtc); + crtc_mask = drm_crtc_mask(crtc); if (new_state->active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), @@ -11853,7 +11960,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, if (old_state->shared_dpll && old_state->shared_dpll != new_state->shared_dpll) { - unsigned crtc_mask = 1 << drm_crtc_index(crtc); + unsigned int crtc_mask = drm_crtc_mask(crtc); struct intel_shared_dpll *pll = old_state->shared_dpll; I915_STATE_WARN(pll->active_mask & crtc_mask, @@ -12449,6 +12556,19 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); } +static void intel_atomic_cleanup_work(struct work_struct *work) +{ + struct drm_atomic_state *state = + container_of(work, struct drm_atomic_state, commit_work); + struct drm_i915_private *i915 = to_i915(state->dev); + + drm_atomic_helper_cleanup_planes(&i915->drm, state); + drm_atomic_helper_commit_cleanup_done(state); + drm_atomic_state_put(state); + + intel_atomic_helper_free_state(i915); +} + static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -12609,13 +12729,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); } - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_helper_commit_cleanup_done(state); - - drm_atomic_state_put(state); - - intel_atomic_helper_free_state(dev_priv); + /* + * Defer the cleanup of the old state to a separate worker to not + * impede the current task (userspace for blocking modesets) that + * are executed 
inline. For out-of-line asynchronous modesets/flips, + * deferring to a new worker seems overkill, but we would place a + * schedule point (cond_resched()) here anyway to keep latencies + * down. + */ + INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); + schedule_work(&state->commit_work); } static void intel_atomic_commit_work(struct work_struct *work) @@ -12981,6 +13104,19 @@ intel_prepare_plane_fb(struct drm_plane *plane, add_rps_boost_after_vblank(new_state->crtc, new_state->fence); } + /* + * We declare pageflips to be interactive and so merit a small bias + * towards upclocking to deliver the frame on time. By only changing + * the RPS thresholds to sample more regularly and aim for higher + * clocks we can hopefully deliver low power workloads (like kodi) + * that are not quite steady state without resorting to forcing + * maximum clocks following a vblank miss (see do_rps_boost()). + */ + if (!intel_state->rps_interactive) { + intel_rps_mark_interactive(dev_priv, true); + intel_state->rps_interactive = true; + } + return 0; } @@ -12997,8 +13133,15 @@ void intel_cleanup_plane_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { + struct intel_atomic_state *intel_state = + to_intel_atomic_state(old_state->state); struct drm_i915_private *dev_priv = to_i915(plane->dev); + if (intel_state->rps_interactive) { + intel_rps_mark_interactive(dev_priv, false); + intel_state->rps_interactive = false; + } + /* Should only be called after a successful intel_prepare_plane_fb()! */ mutex_lock(&dev_priv->drm.struct_mutex); intel_plane_unpin_fb(to_intel_plane_state(old_state)); @@ -13181,8 +13324,17 @@ void intel_plane_destroy(struct drm_plane *plane) kfree(to_intel_plane(plane)); } -static bool i8xx_mod_supported(uint32_t format, uint64_t modifier) +static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: @@ -13195,8 +13347,17 @@ static bool i8xx_mod_supported(uint32_t format, uint64_t modifier) } } -static bool i965_mod_supported(uint32_t format, uint64_t modifier) +static bool i965_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: @@ -13211,8 +13372,26 @@ static bool i965_mod_supported(uint32_t format, uint64_t modifier) } } -static bool skl_mod_supported(uint32_t format, uint64_t modifier) +static bool skl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + struct intel_plane *plane = to_intel_plane(_plane); + + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + break; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + if (!plane->has_ccs) + return false; + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: @@ -13244,38 +13423,36 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier) } } -static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) +static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { - 
struct drm_i915_private *dev_priv = to_i915(plane->dev); - - if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) - return false; - - if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL && - modifier != DRM_FORMAT_MOD_LINEAR) - return false; - - if (INTEL_GEN(dev_priv) >= 9) - return skl_mod_supported(format, modifier); - else if (INTEL_GEN(dev_priv) >= 4) - return i965_mod_supported(format, modifier); - else - return i8xx_mod_supported(format, modifier); + return modifier == DRM_FORMAT_MOD_LINEAR && + format == DRM_FORMAT_ARGB8888; } -static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) -{ - if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) - return false; +static struct drm_plane_funcs skl_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = skl_plane_format_mod_supported, +}; - return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888; -} +static struct drm_plane_funcs i965_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = i965_plane_format_mod_supported, +}; -static struct drm_plane_funcs intel_plane_funcs = { +static struct drm_plane_funcs i8xx_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = intel_plane_destroy, @@ -13283,7 +13460,7 @@ static struct drm_plane_funcs intel_plane_funcs = { .atomic_set_property = intel_plane_atomic_set_property, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = intel_primary_plane_format_mod_supported, + .format_mod_supported = i8xx_plane_format_mod_supported, }; static int @@ -13408,7 +13585,7 @@ static const struct drm_plane_funcs intel_cursor_plane_funcs = { .atomic_set_property = intel_plane_atomic_set_property, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = intel_cursor_plane_format_mod_supported, + .format_mod_supported = intel_cursor_format_mod_supported, }; static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, @@ -13466,6 +13643,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *primary = NULL; struct intel_plane_state *state = NULL; + const struct drm_plane_funcs *plane_funcs; const uint32_t *intel_primary_formats; unsigned int supported_rotations; unsigned int num_formats; @@ -13521,6 +13699,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->check_plane = intel_check_primary_plane; if (INTEL_GEN(dev_priv) >= 9) { + primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe, + PLANE_PRIMARY); + if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) { intel_primary_formats = skl_pri_planar_formats; num_formats = 
ARRAY_SIZE(skl_pri_planar_formats); @@ -13529,7 +13710,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) num_formats = ARRAY_SIZE(skl_primary_formats); } - if (skl_plane_has_ccs(dev_priv, pipe, PLANE_PRIMARY)) + if (primary->has_ccs) modifiers = skl_format_modifiers_ccs; else modifiers = skl_format_modifiers_noccs; @@ -13537,6 +13718,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = skl_update_plane; primary->disable_plane = skl_disable_plane; primary->get_hw_state = skl_plane_get_hw_state; + + plane_funcs = &skl_plane_funcs; } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -13545,6 +13728,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_plane; primary->disable_plane = i9xx_disable_plane; primary->get_hw_state = i9xx_plane_get_hw_state; + + plane_funcs = &i965_plane_funcs; } else { intel_primary_formats = i8xx_primary_formats; num_formats = ARRAY_SIZE(i8xx_primary_formats); @@ -13553,25 +13738,27 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->update_plane = i9xx_update_plane; primary->disable_plane = i9xx_disable_plane; primary->get_hw_state = i9xx_plane_get_hw_state; + + plane_funcs = &i8xx_plane_funcs; } if (INTEL_GEN(dev_priv) >= 9) ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, &intel_plane_funcs, + 0, plane_funcs, intel_primary_formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "plane 1%c", pipe_name(pipe)); else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, &intel_plane_funcs, + 0, plane_funcs, intel_primary_formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, - 0, &intel_plane_funcs, + 0, plane_funcs, intel_primary_formats, num_formats, modifiers, DRM_PLANE_TYPE_PRIMARY, @@ -13951,7 +14138,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (intel_crt_present(dev_priv)) intel_crt_init(dev_priv); - if (IS_GEN9_LP(dev_priv)) { + if (IS_ICELAKE(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + intel_ddi_init(dev_priv, PORT_D); + intel_ddi_init(dev_priv, PORT_E); + intel_ddi_init(dev_priv, PORT_F); + } else if (IS_GEN9_LP(dev_priv)) { /* * FIXME: Broxton doesn't support port detection via the * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to @@ -13961,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); - intel_dsi_init(dev_priv); + vlv_dsi_init(dev_priv); } else if (HAS_DDI(dev_priv)) { int found; @@ -14067,7 +14261,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); } - intel_dsi_init(dev_priv); + vlv_dsi_init(dev_priv); } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { bool found = false; @@ -14124,14 +14318,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); drm_framebuffer_cleanup(fb); - 
i915_gem_object_lock(intel_fb->obj); - WARN_ON(!intel_fb->obj->framebuffer_references--); - i915_gem_object_unlock(intel_fb->obj); + i915_gem_object_lock(obj); + WARN_ON(!obj->framebuffer_references--); + i915_gem_object_unlock(obj); - i915_gem_object_put(intel_fb->obj); + i915_gem_object_put(obj); kfree(intel_fb); } @@ -14140,8 +14335,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct drm_file *file, unsigned int *handle) { - struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); if (obj->userptr.mm) { DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); @@ -14349,11 +14543,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, } break; case DRM_FORMAT_NV12: - if (mode_cmd->modifier[0] == I915_FORMAT_MOD_Y_TILED_CCS || - mode_cmd->modifier[0] == I915_FORMAT_MOD_Yf_TILED_CCS) { - DRM_DEBUG_KMS("RC not to be enabled with NV12\n"); - goto err; - } if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) { DRM_DEBUG_KMS("unsupported pixel format: %s\n", @@ -14411,9 +14600,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, i, fb->pitches[i], stride_alignment); goto err; } - } - intel_fb->obj = obj; + fb->obj[i] = &obj->base; + } ret = intel_fill_fb_info(dev_priv, fb); if (ret) @@ -14469,6 +14658,10 @@ static enum drm_mode_status intel_mode_valid(struct drm_device *dev, const struct drm_display_mode *mode) { + struct drm_i915_private *dev_priv = to_i915(dev); + int hdisplay_max, htotal_max; + int vdisplay_max, vtotal_max; + /* * Can't reject DBLSCAN here because Xorg ddxen can add piles * of DBLSCAN modes to the output's mode list when they detect @@ -14498,6 +14691,36 @@ intel_mode_valid(struct drm_device *dev, DRM_MODE_FLAG_CLKDIV2)) return MODE_BAD; + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ + vdisplay_max = 4096; + htotal_max = 8192; + vtotal_max = 8192; + } else if (INTEL_GEN(dev_priv) >= 3) { + hdisplay_max = 4096; + vdisplay_max = 4096; + htotal_max = 8192; + vtotal_max = 8192; + } else { + hdisplay_max = 2048; + vdisplay_max = 2048; + htotal_max = 4096; + vtotal_max = 4096; + } + + if (mode->hdisplay > hdisplay_max || + mode->hsync_start > htotal_max || + mode->hsync_end > htotal_max || + mode->htotal > htotal_max) + return MODE_H_ILLEGAL; + + if (mode->vdisplay > vdisplay_max || + mode->vsync_start > vtotal_max || + mode->vsync_end > vtotal_max || + mode->vtotal > vtotal_max) + return MODE_V_ILLEGAL; + return MODE_OK; } @@ -14955,6 +15178,7 @@ int intel_modeset_init(struct drm_device *dev) } } + /* maximum framebuffer dimensions */ if (IS_GEN2(dev_priv)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; @@ -14970,11 +15194,11 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 
64 : 512; dev->mode_config.cursor_height = 1023; } else if (IS_GEN2(dev_priv)) { - dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; - dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; + dev->mode_config.cursor_width = 64; + dev->mode_config.cursor_height = 64; } else { - dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; - dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; + dev->mode_config.cursor_width = 256; + dev->mode_config.cursor_height = 256; } dev->mode_config.fb_base = ggtt->gmadr.start; @@ -15124,8 +15348,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); - WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE); - WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE); + WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE); + WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE); I915_WRITE(PIPECONF(pipe), 0); POSTING_READ(PIPECONF(pipe)); @@ -15139,12 +15363,12 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) static bool intel_plane_mapping_ok(struct intel_crtc *crtc, struct intel_plane *plane) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 val = I915_READ(DSPCNTR(i9xx_plane)); + enum pipe pipe; + + if (!plane->get_hw_state(plane, &pipe)) + return true; - return (val & DISPLAY_PLANE_ENABLE) == 0 || - (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); + return pipe == crtc->pipe; } static void @@ -15303,6 +15527,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; } + + /* notify opregion of the sanitized encoder state */ + intel_opregion_notify_encoder(encoder, connector && has_active_crtc); } void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) @@ -15343,7 +15570,10 @@ static void readout_plane_state(struct intel_crtc *crtc) for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - bool visible = plane->get_hw_state(plane); + enum pipe pipe; + bool visible; + + visible = plane->get_hw_state(plane, &pipe); intel_set_plane_visible(crtc_state, plane_state, visible); } @@ -15442,9 +15672,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) * rely on the connector_mask being accurate. */ encoder->base.crtc->state->connector_mask |= - 1 << drm_connector_index(&connector->base); + drm_connector_mask(&connector->base); encoder->base.crtc->state->encoder_mask |= - 1 << drm_encoder_index(&encoder->base); + drm_encoder_mask(&encoder->base); } } else { @@ -15510,11 +15740,20 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv) for_each_intel_encoder(&dev_priv->drm, encoder) { u64 get_domains; enum intel_display_power_domain domain; + struct intel_crtc_state *crtc_state; if (!encoder->get_power_domains) continue; - get_domains = encoder->get_power_domains(encoder); + /* + * MST-primary and inactive encoders don't have a crtc state + * and neither of these require any power domain references. 
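Editor's note: the intel_mode_valid() bounds added above are easy to exercise in isolation. This is a reduced sketch of the per-generation timing limits and the check, not the full helper; the platform selection is collapsed into a table index and the mode struct is a local stand-in.

/* Reduced sketch of the per-generation mode timing limits from intel_mode_valid().
 * Index 0: gen9+/HSW/BDW, 1: gen3+, 2: gen2, as listed in the patch. */
#include <stdio.h>

struct mode {
	int hdisplay, hsync_start, hsync_end, htotal;
	int vdisplay, vsync_start, vsync_end, vtotal;
};

static const struct { int hdisp, vdisp, htot, vtot; } limits[] = {
	{ 8192, 4096, 8192, 8192 },
	{ 4096, 4096, 8192, 8192 },
	{ 2048, 2048, 4096, 4096 },
};

static const char *mode_valid(const struct mode *m, int gen_idx)
{
	if (m->hdisplay > limits[gen_idx].hdisp ||
	    m->hsync_start > limits[gen_idx].htot ||
	    m->hsync_end > limits[gen_idx].htot ||
	    m->htotal > limits[gen_idx].htot)
		return "MODE_H_ILLEGAL";

	if (m->vdisplay > limits[gen_idx].vdisp ||
	    m->vsync_start > limits[gen_idx].vtot ||
	    m->vsync_end > limits[gen_idx].vtot ||
	    m->vtotal > limits[gen_idx].vtot)
		return "MODE_V_ILLEGAL";

	return "MODE_OK";
}

int main(void)
{
	/* a 4096x2160 mode: fine on gen9+, too wide for gen2 */
	struct mode m = { 4096, 4120, 4160, 4400, 2160, 2168, 2178, 2250 };

	printf("gen9: %s, gen2: %s\n", mode_valid(&m, 0), mode_valid(&m, 2));
	return 0;
}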
+ */ + if (!encoder->base.crtc) + continue; + + crtc_state = to_intel_crtc_state(encoder->base.crtc->state); + get_domains = encoder->get_power_domains(encoder, crtc_state); for_each_power_domain(domain, get_domains) intel_display_power_get(dev_priv, domain); } @@ -15690,6 +15929,8 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_workqueue(dev_priv->modeset_wq); + flush_work(&dev_priv->atomic_helper.free_work); WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); @@ -15733,8 +15974,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { connector->encoder = encoder; - drm_mode_connector_attach_encoder(&connector->base, - &encoder->base); + drm_connector_attach_encoder(&connector->base, &encoder->base); } /* diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 2ef31617614a..9292001cdd14 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -126,6 +126,17 @@ enum port { #define port_name(p) ((p) + 'A') +enum tc_port { + PORT_TC_NONE = -1, + + PORT_TC1 = 0, + PORT_TC2, + PORT_TC3, + PORT_TC4, + + I915_MAX_TC_PORTS +}; + enum dpio_channel { DPIO_CH0, DPIO_CH1 @@ -144,7 +155,7 @@ enum aux_ch { AUX_CH_B, AUX_CH_C, AUX_CH_D, - _AUX_CH_E, /* does not exist */ + AUX_CH_E, /* ICL+ */ AUX_CH_F, }; @@ -185,8 +196,13 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, + POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, POWER_DOMAIN_AUX_IO_A, + POWER_DOMAIN_AUX_TBT1, + POWER_DOMAIN_AUX_TBT2, + POWER_DOMAIN_AUX_TBT3, + POWER_DOMAIN_AUX_TBT4, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, @@ -249,7 +265,7 @@ struct intel_link_m_n { &(dev)->mode_config.plane_list, \ base.head) \ for_each_if((plane_mask) & \ - BIT(drm_plane_index(&intel_plane->base))) + drm_plane_mask(&intel_plane->base))) #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ @@ -266,13 +282,17 @@ struct intel_link_m_n { list_for_each_entry(intel_crtc, \ &(dev)->mode_config.crtc_list, \ base.head) \ - for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base))) + for_each_if((crtc_mask) & drm_crtc_mask(&intel_crtc->base)) #define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_dp(dev, intel_encoder) \ + for_each_intel_encoder(dev, intel_encoder) \ + for_each_if(intel_encoder_is_dp(intel_encoder)) + #define for_each_intel_connector_iter(intel_connector, iter) \ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter)))) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 16faea30114a..cd0f649b57a5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -56,7 +56,7 @@ struct dp_link_dpll { struct dpll dpll; }; -static const struct dp_link_dpll gen4_dpll[] = { +static const struct dp_link_dpll g4x_dpll[] = { { 162000, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, { 270000, @@ -256,6 +256,17 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) return 810000; } +static int icl_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum port port = dig_port->base.port; + + if (port == PORT_B) + return 540000; + + return 810000; +} + static void intel_dp_set_source_rates(struct 
intel_dp *intel_dp) { @@ -285,10 +296,13 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) /* This should only be done once */ WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates); - if (IS_CANNONLAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 10) { source_rates = cnl_rates; size = ARRAY_SIZE(cnl_rates); - max_rate = cnl_max_source_rate(intel_dp); + if (INTEL_GEN(dev_priv) == 10) + max_rate = cnl_max_source_rate(intel_dp); + else + max_rate = icl_max_source_rate(intel_dp); } else if (IS_GEN9_LP(dev_priv)) { source_rates = bxt_rates; size = ARRAY_SIZE(bxt_rates); @@ -516,7 +530,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) uint32_t DP; if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, - "skipping pipe %c power seqeuncer kick due to port %c being active\n", + "skipping pipe %c power sequencer kick due to port %c being active\n", pipe_name(pipe), port_name(intel_dig_port->base.port))) return; @@ -532,9 +546,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) DP |= DP_LINK_TRAIN_PAT_1; if (IS_CHERRYVIEW(dev_priv)) - DP |= DP_PIPE_SELECT_CHV(pipe); - else if (pipe == PIPE_B) - DP |= DP_PIPEB_SELECT; + DP |= DP_PIPE_SEL_CHV(pipe); + else + DP |= DP_PIPE_SEL(pipe); pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE; @@ -557,7 +571,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) /* * Similar magic as in intel_dp_enable_port(). * We _must_ do this port enable + disable trick - * to make this power seqeuencer lock onto the port. + * to make this power sequencer lock onto the port. * Otherwise even VDD force bit won't work. */ I915_WRITE(intel_dp->output_reg, DP); @@ -586,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv) * We don't have power sequencer currently. * Pick one that's not used by other ports. */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); if (encoder->type == INTEL_OUTPUT_EDP) { WARN_ON(intel_dp->active_pipe != INVALID_PIPE && @@ -785,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv) * should use them always. 
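Editor's note: the source-rate selection above now shares the CNL rate table for gen10+ and only differs in the per-port cap: on ICL, port B is limited to 540000 (HBR2) while the other ports allow 810000 (HBR3); CNL keeps its own cap helper, which is not modelled here. A minimal sketch of the ICL rule:

/* Sketch of the ICL max source rate rule added in this patch. */
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

static int icl_max_source_rate(enum port port)
{
	if (port == PORT_B)
		return 540000;	/* HBR2 */

	return 810000;		/* HBR3 */
}

int main(void)
{
	printf("ICL port B cap: %d, port A cap: %d\n",
	       icl_max_source_rate(PORT_B), icl_max_source_rate(PORT_A));
	return 0;
}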
*/ - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP && - encoder->type != INTEL_OUTPUT_DDI) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); - - /* Skip pure DVI/HDMI DDI encoders */ - if (!i915_mmio_reg_valid(intel_dp->output_reg)) - continue; + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); WARN_ON(intel_dp->active_pipe != INVALID_PIPE); @@ -939,7 +936,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) } static uint32_t -intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) +intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); @@ -947,14 +944,10 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) bool done; #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) - if (has_aux_irq) - done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); - else - done = wait_for(C, 10) == 0; + done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, + msecs_to_jiffies_timeout(10)); if (!done) - DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", - has_aux_irq); + DRM_ERROR("dp aux hw did not signal timeout!\n"); #undef C return status; @@ -1019,7 +1012,6 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) } static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, - bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider) { @@ -1040,7 +1032,7 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, return DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | - (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | timeout | DP_AUX_CH_CTL_RECEIVE_ERROR | @@ -1050,13 +1042,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, } static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, - bool has_aux_irq, int send_bytes, uint32_t unused) { return DP_AUX_CH_CTL_SEND_BUSY | DP_AUX_CH_CTL_DONE | - (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_INTERRUPT | DP_AUX_CH_CTL_TIME_OUT_ERROR | DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | @@ -1079,7 +1070,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, int i, ret, recv_bytes; uint32_t status; int try, clock = 0; - bool has_aux_irq = HAS_AUX_IRQ(dev_priv); bool vdd; ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); @@ -1134,7 +1124,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, - has_aux_irq, send_bytes, aux_clock_divider); @@ -1151,7 +1140,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, /* Send the command and wait for it to complete */ I915_WRITE(ch_ctl, send_ctl); - status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); + status = intel_dp_aux_wait_done(intel_dp); /* Clear done status and any errors */ I915_WRITE(ch_ctl, @@ -1350,6 +1339,9 @@ static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp) case DP_AUX_D: aux_ch = AUX_CH_D; break; + case DP_AUX_E: + aux_ch = AUX_CH_E; + break; case DP_AUX_F: aux_ch = AUX_CH_F; break; @@ -1377,6 +1369,8 @@ intel_aux_power_domain(struct intel_dp *intel_dp) return POWER_DOMAIN_AUX_C; case AUX_CH_D: return POWER_DOMAIN_AUX_D; + case AUX_CH_E: + return POWER_DOMAIN_AUX_E; case AUX_CH_F: return POWER_DOMAIN_AUX_F; default: @@ -1463,6 +1457,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: + case AUX_CH_E: case AUX_CH_F: return DP_AUX_CH_CTL(aux_ch); default: @@ -1481,6 +1476,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) case AUX_CH_B: case AUX_CH_C: case AUX_CH_D: + case AUX_CH_E: case AUX_CH_F: return DP_AUX_CH_DATA(aux_ch, index); default: @@ -1544,6 +1540,13 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) return max_rate >= 540000; } +bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp) +{ + int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; + + return max_rate >= 810000; +} + static void intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) @@ -1553,8 +1556,8 @@ intel_dp_set_clock(struct intel_encoder *encoder, int i, count = 0; if (IS_G4X(dev_priv)) { - divisor = gen4_dpll; - count = ARRAY_SIZE(gen4_dpll); + divisor = g4x_dpll; + count = ARRAY_SIZE(g4x_dpll); } else if (HAS_PCH_SPLIT(dev_priv)) { divisor = pch_dpll; count = ARRAY_SIZE(pch_dpll); @@ -1970,7 +1973,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, /* Split out the IBX/CPU vs CPT settings */ - if (IS_GEN7(dev_priv) && port == PORT_A) { + if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) intel_dp->DP |= DP_SYNC_HS_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -1980,7 +1983,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder, if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) intel_dp->DP |= DP_ENHANCED_FRAMING; - intel_dp->DP |= crtc->pipe << 29; + intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { u32 trans_dp; @@ -2006,9 +2009,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_dp->DP |= DP_ENHANCED_FRAMING; if (IS_CHERRYVIEW(dev_priv)) - intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe); - else if (crtc->pipe == PIPE_B) - intel_dp->DP |= DP_PIPEB_SELECT; + intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe); + else + intel_dp->DP |= DP_PIPE_SEL(crtc->pipe); } } @@ -2630,52 
+2633,66 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) mode == DRM_MODE_DPMS_ON ? "enable" : "disable"); } +static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv, + enum port port, enum pipe *pipe) +{ + enum pipe p; + + for_each_pipe(dev_priv, p) { + u32 val = I915_READ(TRANS_DP_CTL(p)); + + if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) { + *pipe = p; + return true; + } + } + + DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port)); + + /* must initialize pipe to something for the asserts */ + *pipe = PIPE_A; + + return false; +} + +bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t dp_reg, enum port port, + enum pipe *pipe) +{ + bool ret; + u32 val; + + val = I915_READ(dp_reg); + + ret = val & DP_PORT_EN; + + /* asserts want to know the pipe even if the port is disabled */ + if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) + *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB; + else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) + ret &= cpt_dp_port_selected(dev_priv, port, pipe); + else if (IS_CHERRYVIEW(dev_priv)) + *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV; + else + *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT; + + return ret; +} + static bool intel_dp_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - enum port port = encoder->port; - u32 tmp; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, encoder->power_domain)) return false; - ret = false; + ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, pipe); - tmp = I915_READ(intel_dp->output_reg); - - if (!(tmp & DP_PORT_EN)) - goto out; - - if (IS_GEN7(dev_priv) && port == PORT_A) { - *pipe = PORT_TO_PIPE_CPT(tmp); - } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { - enum pipe p; - - for_each_pipe(dev_priv, p) { - u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); - if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { - *pipe = p; - ret = true; - - goto out; - } - } - - DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", - i915_mmio_reg_offset(intel_dp->output_reg)); - } else if (IS_CHERRYVIEW(dev_priv)) { - *pipe = DP_PORT_TO_PIPE_CHV(tmp); - } else { - *pipe = PORT_TO_PIPE(tmp); - } - - ret = true; - -out: intel_display_power_put(dev_priv, encoder->power_domain); return ret; @@ -2796,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - - intel_psr_disable(intel_dp, old_crtc_state); - intel_disable_dp(encoder, old_crtc_state, old_conn_state); } @@ -2854,10 +2867,11 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); enum port port = intel_dig_port->base.port; + uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd); - if (dp_train_pat & DP_TRAINING_PATTERN_MASK) + if (dp_train_pat & train_pat_mask) DRM_DEBUG_KMS("Using DP training pattern TPS%d\n", - dp_train_pat & DP_TRAINING_PATTERN_MASK); + dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { uint32_t temp = I915_READ(DP_TP_CTL(port)); @@ -2868,7 +2882,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; temp &= 
~DP_TP_CTL_LINK_TRAIN_MASK; - switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { + switch (dp_train_pat & train_pat_mask) { case DP_TRAINING_PATTERN_DISABLE: temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; @@ -2882,10 +2896,13 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, case DP_TRAINING_PATTERN_3: temp |= DP_TP_CTL_LINK_TRAIN_PAT3; break; + case DP_TRAINING_PATTERN_4: + temp |= DP_TP_CTL_LINK_TRAIN_PAT4; + break; } I915_WRITE(DP_TP_CTL(port), temp); - } else if ((IS_GEN7(dev_priv) && port == PORT_A) || + } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { *DP &= ~DP_LINK_TRAIN_MASK_CPT; @@ -3008,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_edp_backlight_on(pipe_config, conn_state); - intel_psr_enable(intel_dp, pipe_config); } static void g4x_pre_enable_dp(struct intel_encoder *encoder, @@ -3043,11 +3057,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) edp_panel_vdd_off_sync(intel_dp); /* - * VLV seems to get confused when multiple power seqeuencers + * VLV seems to get confused when multiple power sequencers * have the same port selected (even if only one has power/vdd * enabled). The failure manifests as vlv_wait_port_ready() failing * CHV on the other hand doesn't seem to mind having the same port - * selected in multiple power seqeuencers, but let's clear the + * selected in multiple power sequencers, but let's clear the * port select always when logically disconnecting a power sequencer * from a port. */ @@ -3066,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->pps_mutex); - for_each_intel_encoder(&dev_priv->drm, encoder) { - struct intel_dp *intel_dp; - enum port port; - - if (encoder->type != INTEL_OUTPUT_DP && - encoder->type != INTEL_OUTPUT_EDP) - continue; - - intel_dp = enc_to_intel_dp(&encoder->base); - port = dp_to_dig_port(intel_dp)->base.port; + for_each_intel_dp(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + enum port port = encoder->port; WARN(intel_dp->active_pipe == pipe, "stealing pipe %c power sequencer from active (e)DP port %c\n", @@ -3197,14 +3204,14 @@ uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); - enum port port = dp_to_dig_port(intel_dp)->base.port; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum port port = encoder->port; - if (INTEL_GEN(dev_priv) >= 9) { - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + if (HAS_DDI(dev_priv)) return intel_ddi_dp_voltage_max(encoder); - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; - else if (IS_GEN7(dev_priv) && port == PORT_A) + else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; @@ -3216,33 +3223,11 @@ uint8_t intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); - enum port port = dp_to_dig_port(intel_dp)->base.port; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum port 
port = encoder->port; - if (INTEL_GEN(dev_priv) >= 9) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_3; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { - case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: - return DP_TRAIN_PRE_EMPH_LEVEL_3; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: - return DP_TRAIN_PRE_EMPH_LEVEL_2; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: - return DP_TRAIN_PRE_EMPH_LEVEL_1; - case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: - default: - return DP_TRAIN_PRE_EMPH_LEVEL_0; - } + if (HAS_DDI(dev_priv)) { + return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing); } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: @@ -3255,7 +3240,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) default: return DP_TRAIN_PRE_EMPH_LEVEL_0; } - } else if (IS_GEN7(dev_priv) && port == PORT_A) { + } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: return DP_TRAIN_PRE_EMPH_LEVEL_2; @@ -3450,7 +3435,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) } static uint32_t -gen4_signal_levels(uint8_t train_set) +g4x_signal_levels(uint8_t train_set) { uint32_t signal_levels = 0; @@ -3487,9 +3472,9 @@ gen4_signal_levels(uint8_t train_set) return signal_levels; } -/* Gen6's DP voltage swing and pre-emphasis control */ +/* SNB CPU eDP voltage swing and pre-emphasis control */ static uint32_t -gen6_edp_signal_levels(uint8_t train_set) +snb_cpu_edp_signal_levels(uint8_t train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3515,9 +3500,9 @@ gen6_edp_signal_levels(uint8_t train_set) } } -/* Gen7's DP voltage swing and pre-emphasis control */ +/* IVB CPU eDP voltage swing and pre-emphasis control */ static uint32_t -gen7_edp_signal_levels(uint8_t train_set) +ivb_cpu_edp_signal_levels(uint8_t train_set) { int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); @@ -3564,14 +3549,14 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) signal_levels = chv_signal_levels(intel_dp); } else if (IS_VALLEYVIEW(dev_priv)) { signal_levels = vlv_signal_levels(intel_dp); - } else if (IS_GEN7(dev_priv) && port == PORT_A) { - signal_levels = gen7_edp_signal_levels(train_set); + } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) { + signal_levels = ivb_cpu_edp_signal_levels(train_set); mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; } else if (IS_GEN6(dev_priv) && port == PORT_A) { - signal_levels = gen6_edp_signal_levels(train_set); + signal_levels = snb_cpu_edp_signal_levels(train_set); mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; } else { - signal_levels = gen4_signal_levels(train_set); + signal_levels = g4x_signal_levels(train_set); mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; } @@ -3654,7 +3639,7 @@ intel_dp_link_down(struct intel_encoder *encoder, DRM_DEBUG_KMS("\n"); - if ((IS_GEN7(dev_priv) && port == PORT_A) || + if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != 
PORT_A)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; @@ -3683,8 +3668,9 @@ intel_dp_link_down(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); /* always enable with pattern 1 (as per spec) */ - DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK); - DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1; + DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK); + DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) | + DP_LINK_TRAIN_PAT_1; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); @@ -3739,8 +3725,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING; - intel_psr_init_dpcd(intel_dp); - /* * Read the eDP display control registers. * @@ -3756,6 +3740,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); + /* + * This has to be called after intel_dp->edp_dpcd is filled, PSR checks + * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] + */ + intel_psr_init_dpcd(intel_dp); + /* Read the eDP 1.4+ supported link rates. */ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -3884,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) intel_dp->is_mst); } -static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, bool disable_wa) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int ret = 0; - int count = 0; - int attempts = 10; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) { - DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); - ret = -EIO; - goto out; - } - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, - buf & ~DP_TEST_SINK_START) < 0) { - DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n"); - ret = -EIO; - goto out; - } - - do { - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_TEST_SINK_MISC, &buf) < 0) { - ret = -EIO; - goto out; - } - count = buf & DP_TEST_COUNT_MASK; - } while (--attempts && count); - - if (attempts == 0) { - DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n"); - ret = -ETIMEDOUT; - } - - out: - if (disable_wa) - hsw_enable_ips(crtc_state); - return ret; -} - -static int intel_dp_sink_crc_start(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int ret; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) - return -EIO; - - if (!(buf & DP_TEST_CRC_SUPPORTED)) - return -ENOTTY; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) - return -EIO; - - if (buf & DP_TEST_SINK_START) { - ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false); - if (ret) - return ret; - } - - hsw_disable_ips(crtc_state); - - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, - buf | DP_TEST_SINK_START) < 0) { - hsw_enable_ips(crtc_state); - return -EIO; - } - - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - return 0; -} - -int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state 
*crtc_state, u8 *crc) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - u8 buf; - int count, ret; - int attempts = 6; - - ret = intel_dp_sink_crc_start(intel_dp, crtc_state); - if (ret) - return ret; - - do { - intel_wait_for_vblank(dev_priv, intel_crtc->pipe); - - if (drm_dp_dpcd_readb(&intel_dp->aux, - DP_TEST_SINK_MISC, &buf) < 0) { - ret = -EIO; - goto stop; - } - count = buf & DP_TEST_COUNT_MASK; - - } while (--attempts && count == 0); - - if (attempts == 0) { - DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n"); - ret = -ETIMEDOUT; - goto stop; - } - - if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) { - ret = -EIO; - goto stop; - } - -stop: - intel_dp_sink_crc_stop(intel_dp, crtc_state, true); - return ret; -} - static bool intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) { @@ -4466,10 +4333,15 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } + /* Handle CEC interrupts, if any */ + drm_dp_cec_irq(&intel_dp->aux); + /* defer to the hotplug work for link retraining if needed */ if (intel_dp_needs_link_retrain(intel_dp)) return false; + intel_psr_short_pulse(intel_dp); + if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { DRM_DEBUG_KMS("Link Training Compliance Test requested\n"); /* Send a Hotplug Uevent to userspace to start modeset */ @@ -4537,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) static enum drm_connector_status edp_detect(struct intel_dp *intel_dp) { - struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); - enum drm_connector_status status; - - status = intel_panel_detect(dev_priv); - if (status == connector_status_unknown) - status = connector_status_connected; - - return status; + return connector_status_connected; } static bool ibx_digital_port_connected(struct intel_encoder *encoder) @@ -4780,6 +4645,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) intel_connector->detect_edid = edid; intel_dp->has_audio = drm_detect_monitor_audio(edid); + drm_dp_cec_set_edid(&intel_dp->aux, edid); } static void @@ -4787,6 +4653,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) { struct intel_connector *intel_connector = intel_dp->attached_connector; + drm_dp_cec_unset_edid(&intel_dp->aux); kfree(intel_connector->detect_edid); intel_connector->detect_edid = NULL; @@ -4805,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector) intel_display_power_get(dev_priv, intel_dp->aux_power_domain); - /* Can't disconnect eDP, but you can close the lid... 
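Editor's note: the intel_dp_port_enabled() helper added earlier folds the various pipe-select encodings into one place; the CPT case is the odd one out, since the port register has no pipe-select field and the pipe has to be found by scanning TRANS_DP_CTL on each transcoder. The sketch below only illustrates that lookup shape; register access is faked with an array and the values are invented.

/* Sketch of the CPT-style lookup from cpt_dp_port_selected(): each transcoder
 * records which port it drives, so we scan the pipes for a match. */
#include <stdio.h>

#define NUM_PIPES 3

/* placeholder "TRANS_DP_CTL" port-select contents, indexed by pipe */
static int trans_dp_port_sel[NUM_PIPES] = { -1, 3 /* PORT_D */, -1 };

static int cpt_dp_port_selected(int port, int *pipe)
{
	int p;

	for (p = 0; p < NUM_PIPES; p++) {
		if (trans_dp_port_sel[p] == port) {
			*pipe = p;
			return 1;
		}
	}

	*pipe = 0;	/* must still hand back something for the asserts */
	return 0;
}

int main(void)
{
	int pipe;

	if (cpt_dp_port_selected(3, &pipe))
		printf("PORT_D is on pipe %c\n", 'A' + pipe);	/* pipe B */
	return 0;
}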
*/ + /* Can't disconnect eDP */ if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) @@ -4975,6 +4842,7 @@ static int intel_dp_connector_register(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = connector->dev; int ret; ret = intel_connector_register(connector); @@ -4987,13 +4855,20 @@ intel_dp_connector_register(struct drm_connector *connector) intel_dp->aux.name, connector->kdev->kobj.name); intel_dp->aux.dev = connector->kdev; - return drm_dp_aux_register(&intel_dp->aux); + ret = drm_dp_aux_register(&intel_dp->aux); + if (!ret) + drm_dp_cec_register_connector(&intel_dp->aux, + connector->name, dev->dev); + return ret; } static void intel_dp_connector_unregister(struct drm_connector *connector) { - drm_dp_aux_unregister(&intel_attached_dp(connector)->aux); + struct intel_dp *intel_dp = intel_attached_dp(connector); + + drm_dp_cec_unregister_connector(&intel_dp->aux); + drm_dp_aux_unregister(&intel_dp->aux); intel_connector_unregister(connector); } @@ -5319,14 +5194,14 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) static enum pipe vlv_active_pipe(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + enum pipe pipe; - if ((intel_dp->DP & DP_PORT_EN) == 0) - return INVALID_PIPE; + if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg, + encoder->port, &pipe)) + return pipe; - if (IS_CHERRYVIEW(dev_priv)) - return DP_PORT_TO_PIPE_CHV(intel_dp->DP); - else - return PORT_TO_PIPE(intel_dp->DP); + return INVALID_PIPE; } void intel_dp_encoder_reset(struct drm_encoder *encoder) @@ -5675,7 +5550,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, /* * On some VLV machines the BIOS can leave the VDD - * enabled even on power seqeuencers which aren't + * enabled even on power sequencers which aren't * hooked up to any port. This would mess up the * power domain tracking the first time we pick * one of these power sequencers for use since @@ -5683,7 +5558,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, * already on and therefore wouldn't grab the power * domain reference. Disable VDD first to avoid this. * This also avoids spuriously turning the VDD on as - * soon as the new power seqeuencer gets initialized. + * soon as the new power sequencer gets initialized. 
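The new vlv_active_pipe() above leans on an intel_dp_port_enabled() style helper that both answers whether the port is enabled and reports, through an out-parameter, which pipe its pipe-select field points at. A self-contained sketch of that helper shape; the register bit layout here is invented for illustration and is not the hardware encoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, INVALID_PIPE = -1 };

#define PORT_EN        (1u << 31)       /* invented enable bit */
#define PIPE_SEL_SHIFT 30
#define PIPE_SEL_MASK  (1u << PIPE_SEL_SHIFT)

/*
 * Returns whether the port is enabled; always decodes the pipe-select
 * field into @pipe, so callers see the pipe even for a disabled port.
 */
static bool port_enabled(uint32_t reg_val, enum pipe *pipe)
{
	*pipe = (reg_val & PIPE_SEL_MASK) >> PIPE_SEL_SHIFT;
	return reg_val & PORT_EN;
}

static enum pipe active_pipe(uint32_t reg_val)
{
	enum pipe pipe;

	if (port_enabled(reg_val, &pipe))
		return pipe;

	return INVALID_PIPE;
}

int main(void)
{
	printf("%d\n", active_pipe(PORT_EN | PIPE_SEL_MASK)); /* 1 (PIPE_B) */
	printf("%d\n", active_pipe(0));                        /* -1 (INVALID_PIPE) */
	return 0;
}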
*/ if (force_disable_vdd) { u32 pp = ironlake_get_pp_control(intel_dp); @@ -5721,10 +5596,20 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { port_sel = PANEL_PORT_SELECT_VLV(port); } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { - if (port == PORT_A) + switch (port) { + case PORT_A: port_sel = PANEL_PORT_SELECT_DPA; - else + break; + case PORT_C: + port_sel = PANEL_PORT_SELECT_DPC; + break; + case PORT_D: port_sel = PANEL_PORT_SELECT_DPD; + break; + default: + MISSING_CASE(port); + break; + } } pp_on |= port_sel; @@ -6179,7 +6064,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, edid = drm_get_edid(connector, &intel_dp->aux.ddc); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, edid); } else { kfree(edid); @@ -6268,8 +6153,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) /* Set connector link status to BAD and send a Uevent to notify * userspace to do a modeset. */ - drm_mode_connector_set_link_status_property(connector, - DRM_MODE_LINK_STATUS_BAD); + drm_connector_set_link_status_property(connector, + DRM_MODE_LINK_STATUS_BAD); mutex_unlock(&connector->dev->mode_config.mutex); /* Send Hotplug uevent so userspace can reprobe */ drm_kms_helper_hotplug_event(connector->dev); @@ -6382,7 +6267,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ - if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) { + if (IS_G45(dev_priv)) { u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } @@ -6462,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->port = port; intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; - dev_priv->hotplug.irq_port[port] = intel_dig_port; if (port != PORT_A) intel_infoframe_init(intel_dig_port); @@ -6481,37 +6365,44 @@ err_connector_alloc: return false; } -void intel_dp_mst_suspend(struct drm_device *dev) +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - int i; + struct intel_encoder *encoder; - /* disable MST */ - for (i = 0; i < I915_MAX_PORTS; i++) { - struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; - if (!intel_dig_port || !intel_dig_port->dp.can_mst) + if (encoder->type != INTEL_OUTPUT_DDI) continue; - if (intel_dig_port->dp.is_mst) - drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) + continue; + + if (intel_dp->is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); } } -void intel_dp_mst_resume(struct drm_device *dev) +void intel_dp_mst_resume(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - int i; + struct intel_encoder *encoder; - for (i = 0; i < I915_MAX_PORTS; i++) { - struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_dp *intel_dp; int ret; - if (!intel_dig_port || !intel_dig_port->dp.can_mst) + if (encoder->type != INTEL_OUTPUT_DDI) + continue; + + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!intel_dp->can_mst) continue; 
- ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); if (ret) - intel_dp_check_mst_status(&intel_dig_port->dp); + intel_dp_check_mst_status(intel_dp); } } diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index 2bb2ceb9d463..357136f17f85 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -26,7 +26,7 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) { - uint8_t reg_val = 0; + u8 reg_val = 0; /* Early return when display use other mechanism to enable backlight. */ if (!(intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) @@ -54,11 +54,11 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) * Read the current backlight value from DPCD register(s) based * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported */ -static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector) +static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) { struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); - uint8_t read_val[2] = { 0x0 }; - uint16_t level = 0; + u8 read_val[2] = { 0x0 }; + u16 level = 0; if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, &read_val, sizeof(read_val)) < 0) { @@ -82,7 +82,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); - uint8_t vals[2] = { 0x0 }; + u8 vals[2] = { 0x0 }; vals[0] = level; @@ -178,7 +178,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); - uint8_t dpcd_buf, new_dpcd_buf, edp_backlight_mode; + u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode; if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 3fcaa98b9055..4da6e33c7fa1 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -219,32 +219,47 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) } /* - * Pick training pattern for channel equalization. Training Pattern 3 for HBR2 + * Pick training pattern for channel equalization. Training pattern 4 for HBR3 + * or for 1.4 devices that support it, training Pattern 3 for HBR2 * or 1.2 devices that support it, Training Pattern 2 otherwise. */ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) { - u32 training_pattern = DP_TRAINING_PATTERN_2; - bool source_tps3, sink_tps3; + bool source_tps3, sink_tps3, source_tps4, sink_tps4; /* + * Intel platforms that support HBR3 also support TPS4. It is mandatory + * for all downstream devices that support HBR3. There are no known eDP + * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 + * specification. 
+ */ + source_tps4 = intel_dp_source_supports_hbr3(intel_dp); + sink_tps4 = drm_dp_tps4_supported(intel_dp->dpcd); + if (source_tps4 && sink_tps4) { + return DP_TRAINING_PATTERN_4; + } else if (intel_dp->link_rate == 810000) { + if (!source_tps4) + DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n"); + if (!sink_tps4) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + } + /* * Intel platforms that support HBR2 also support TPS3. TPS3 support is * also mandatory for downstream devices that support HBR2. However, not * all sinks follow the spec. */ source_tps3 = intel_dp_source_supports_hbr2(intel_dp); sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd); - if (source_tps3 && sink_tps3) { - training_pattern = DP_TRAINING_PATTERN_3; - } else if (intel_dp->link_rate == 540000) { + return DP_TRAINING_PATTERN_3; + } else if (intel_dp->link_rate >= 540000) { if (!source_tps3) - DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n"); + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n"); if (!sink_tps3) - DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n"); + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); } - return training_pattern; + return DP_TRAINING_PATTERN_2; } static bool @@ -256,11 +271,13 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) bool channel_eq = false; training_pattern = intel_dp_training_pattern(intel_dp); + /* Scrambling is disabled for TPS2/3 and enabled for TPS4 */ + if (training_pattern != DP_TRAINING_PATTERN_4) + training_pattern |= DP_LINK_SCRAMBLING_DISABLE; /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, - training_pattern | - DP_LINK_SCRAMBLING_DISABLE)) { + training_pattern)) { DRM_ERROR("failed to start channel equalization\n"); return false; } diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 5890500a3a8b..7e3e01607643 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -403,20 +403,10 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } -static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - if (!intel_dp) - return NULL; - return &intel_dp->mst_encoders[0]->base.base; -} - static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, - .best_encoder = intel_mst_best_encoder, .atomic_check = intel_dp_mst_atomic_check, }; @@ -476,8 +466,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo struct drm_encoder *enc = &intel_dp->mst_encoders[pipe]->base.base; - ret = drm_mode_connector_attach_encoder(&intel_connector->base, - enc); + ret = drm_connector_attach_encoder(&intel_connector->base, enc); if (ret) goto err; } @@ -485,7 +474,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); - ret = drm_mode_connector_set_path_property(connector, pathprop); + ret = drm_connector_set_path_property(connector, pathprop); 
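The selection policy spelled out in the comments above (TPS4 when both source and sink support HBR3/TPS4, TPS3 when both support HBR2/TPS3, TPS2 otherwise, with scrambling left enabled only for TPS4) can be summarised as a small standalone model. The capability flags below are illustrative stand-ins, not the DPCD encodings the driver actually parses.

#include <stdbool.h>
#include <stdio.h>

enum training_pattern { TPS2 = 2, TPS3 = 3, TPS4 = 4 };

struct dp_caps {
	bool source_hbr2, sink_tps3;
	bool source_hbr3, sink_tps4;
	int link_rate_khz;
};

/* Mirrors the decision order above: prefer TPS4, then TPS3, else TPS2. */
static enum training_pattern pick_pattern(const struct dp_caps *c)
{
	if (c->source_hbr3 && c->sink_tps4)
		return TPS4;
	if (c->link_rate_khz == 810000)
		fprintf(stderr, "8.1 Gbps link without full TPS4 support\n");

	if (c->source_hbr2 && c->sink_tps3)
		return TPS3;
	if (c->link_rate_khz >= 540000)
		fprintf(stderr, ">=5.4 Gbps link without full TPS3 support\n");

	return TPS2;
}

/* Scrambling stays enabled only for TPS4. */
static bool scrambling_disabled(enum training_pattern p)
{
	return p != TPS4;
}

int main(void)
{
	struct dp_caps c = { .source_hbr2 = true, .sink_tps3 = true,
			     .source_hbr3 = false, .sink_tps4 = false,
			     .link_rate_khz = 540000 };
	enum training_pattern p = pick_pattern(&c);

	printf("pattern TPS%d, scrambling %s\n",
	       p, scrambling_disabled(p) ? "disabled" : "enabled");
	return 0;
}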
if (ret) goto err; @@ -524,7 +513,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, intel_connector->mst_port = NULL; drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); - drm_connector_unreference(connector); + drm_connector_put(connector); } static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 383fbc15113d..b51ad2917dbe 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -163,8 +163,8 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; - unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); - unsigned old_mask; + unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + unsigned int old_mask; if (WARN_ON(pll == NULL)) return; @@ -207,7 +207,7 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll = crtc->config->shared_dpll; - unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); + unsigned int crtc_mask = drm_crtc_mask(&crtc->base); /* PCH only available on ILK+ */ if (INTEL_GEN(dev_priv) < 5) @@ -2525,6 +2525,77 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, return true; } +int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, + uint32_t pll_id) +{ + uint32_t cfgcr0, cfgcr1; + uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction; + const struct skl_wrpll_params *params; + int index, n_entries, link_clock; + + /* Read back values from DPLL CFGCR registers */ + cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); + cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); + + dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK; + dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + DPLL_CFGCR0_DCO_FRACTION_SHIFT; + pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT; + kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT; + qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >> + DPLL_CFGCR1_QDIV_MODE_SHIFT; + qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + DPLL_CFGCR1_QDIV_RATIO_SHIFT; + + params = dev_priv->cdclk.hw.ref == 24000 ? 
+ icl_dp_combo_pll_24MHz_values : + icl_dp_combo_pll_19_2MHz_values; + n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); + + for (index = 0; index < n_entries; index++) { + if (dco_integer == params[index].dco_integer && + dco_fraction == params[index].dco_fraction && + pdiv == params[index].pdiv && + kdiv == params[index].kdiv && + qdiv_mode == params[index].qdiv_mode && + qdiv_ratio == params[index].qdiv_ratio) + break; + } + + /* Map PLL Index to Link Clock */ + switch (index) { + default: + MISSING_CASE(index); + /* fall through */ + case 0: + link_clock = 540000; + break; + case 1: + link_clock = 270000; + break; + case 2: + link_clock = 162000; + break; + case 3: + link_clock = 324000; + break; + case 4: + link_clock = 216000; + break; + case 5: + link_clock = 432000; + break; + case 6: + link_clock = 648000; + break; + case 7: + link_clock = 810000; + break; + } + + return link_clock; +} + static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id) { return id - DPLL_ID_ICL_MGPLL1 + PORT_C; @@ -2569,6 +2640,7 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, switch (div1) { default: MISSING_CASE(div1); + /* fall through */ case 2: hsdiv = 0; break; @@ -2742,25 +2814,31 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, MG_PLL_SSC_FLLEN | MG_PLL_SSC_STEPSIZE(ssc_stepsize); - pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART; - - if (refclk_khz != 38400) { - pll_state->mg_pll_tdc_coldst_bias |= - MG_PLL_TDC_COLDST_IREFINT_EN | - MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | - MG_PLL_TDC_COLDST_COLDSTART | - MG_PLL_TDC_TDCOVCCORR_EN | - MG_PLL_TDC_TDCSEL(3); - - pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | - MG_PLL_BIAS_INIT_DCOAMP(0x3F) | - MG_PLL_BIAS_BIAS_BONUS(10) | - MG_PLL_BIAS_BIASCAL_EN | - MG_PLL_BIAS_CTRIM(12) | - MG_PLL_BIAS_VREF_RDAC(4) | - MG_PLL_BIAS_IREFTRIM(iref_trim); + pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | + MG_PLL_TDC_COLDST_IREFINT_EN | + MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | + MG_PLL_TDC_TDCOVCCORR_EN | + MG_PLL_TDC_TDCSEL(3); + + pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | + MG_PLL_BIAS_INIT_DCOAMP(0x3F) | + MG_PLL_BIAS_BIAS_BONUS(10) | + MG_PLL_BIAS_BIASCAL_EN | + MG_PLL_BIAS_CTRIM(12) | + MG_PLL_BIAS_VREF_RDAC(4) | + MG_PLL_BIAS_IREFTRIM(iref_trim); + + if (refclk_khz == 38400) { + pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + pll_state->mg_pll_bias_mask = 0; + } else { + pll_state->mg_pll_tdc_coldst_bias_mask = -1U; + pll_state->mg_pll_bias_mask = -1U; } + pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; + pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + return true; } @@ -2787,10 +2865,17 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case PORT_D: case PORT_E: case PORT_F: - min = icl_port_to_mg_pll_id(port); - max = min; - ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, - &pll_state); + if (0 /* TODO: TBT PLLs */) { + min = DPLL_ID_ICL_TBTPLL; + max = min; + ret = icl_calc_dpll_state(crtc_state, encoder, clock, + &pll_state); + } else { + min = icl_port_to_mg_pll_id(port); + max = min; + ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, + &pll_state); + } break; default: MISSING_CASE(port); @@ -2820,9 +2905,12 @@ static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) switch (id) { default: MISSING_CASE(id); + /* fall through */ case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: return 
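icl_calc_dp_combo_pll_link() above recovers the link rate by matching the read-back divider settings against the same parameter table used when programming the PLL, then translating the matching table index into a link clock. The reverse-lookup shape in isolation; the divider values in this toy table are invented, while the index-to-clock mapping mirrors the switch statement above.

#include <stdio.h>

struct wrpll_params {
	unsigned int dco_integer;
	unsigned int pdiv, kdiv;
};

/* Toy stand-in for the per-refclk parameter tables (values invented). */
static const struct wrpll_params table[] = {
	{ .dco_integer = 0x151, .pdiv = 2, .kdiv = 1 },   /* index 0 */
	{ .dco_integer = 0x151, .pdiv = 2, .kdiv = 2 },   /* index 1 */
};

/* Link clock in kHz, keyed by table index. */
static const int link_clock_khz[] = { 540000, 270000 };

static int params_to_link_clock(const struct wrpll_params *hw)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (hw->dco_integer == table[i].dco_integer &&
		    hw->pdiv == table[i].pdiv &&
		    hw->kdiv == table[i].kdiv)
			return link_clock_khz[i];
	}

	return -1;  /* no match: readout disagrees with the programming table */
}

int main(void)
{
	struct wrpll_params hw = { .dco_integer = 0x151, .pdiv = 2, .kdiv = 2 };

	printf("link clock: %d kHz\n", params_to_link_clock(&hw));  /* 270000 */
	return 0;
}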
CNL_DPLL_ENABLE(id); + case DPLL_ID_ICL_TBTPLL: + return TBT_PLL_ENABLE; case DPLL_ID_ICL_MGPLL1: case DPLL_ID_ICL_MGPLL2: case DPLL_ID_ICL_MGPLL3: @@ -2850,6 +2938,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, switch (id) { case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: + case DPLL_ID_ICL_TBTPLL: hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); break; @@ -2859,18 +2948,41 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, case DPLL_ID_ICL_MGPLL4: port = icl_mg_pll_id_to_port(id); hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + hw_state->mg_clktop2_coreclkctl1 = I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + hw_state->mg_clktop2_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(port)); hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(port)); hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(port)); hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(port)); hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(port)); + hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(port)); hw_state->mg_pll_tdc_coldst_bias = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); + + if (dev_priv->cdclk.hw.ref == 38400) { + hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + hw_state->mg_pll_bias_mask = 0; + } else { + hw_state->mg_pll_tdc_coldst_bias_mask = -1U; + hw_state->mg_pll_bias_mask = -1U; + } + + hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; + hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; break; default: MISSING_CASE(id); @@ -2898,19 +3010,48 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum port port = icl_mg_pll_id_to_port(pll->info->id); + u32 val; + + /* + * Some of the following registers have reserved fields, so program + * these with RMW based on a mask. The mask can be fixed or generated + * during the calc/readout phase if the mask depends on some other HW + * state like refclk, see icl_calc_mg_pll_state(). 
+ */ + val = I915_READ(MG_REFCLKIN_CTL(port)); + val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; + val |= hw_state->mg_refclkin_ctl; + I915_WRITE(MG_REFCLKIN_CTL(port), val); + + val = I915_READ(MG_CLKTOP2_CORECLKCTL1(port)); + val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + val |= hw_state->mg_clktop2_coreclkctl1; + I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), val); + + val = I915_READ(MG_CLKTOP2_HSCLKCTL(port)); + val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); + val |= hw_state->mg_clktop2_hsclkctl; + I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), val); - I915_WRITE(MG_REFCLKIN_CTL(port), hw_state->mg_refclkin_ctl); - I915_WRITE(MG_CLKTOP2_CORECLKCTL1(port), - hw_state->mg_clktop2_coreclkctl1); - I915_WRITE(MG_CLKTOP2_HSCLKCTL(port), hw_state->mg_clktop2_hsclkctl); I915_WRITE(MG_PLL_DIV0(port), hw_state->mg_pll_div0); I915_WRITE(MG_PLL_DIV1(port), hw_state->mg_pll_div1); I915_WRITE(MG_PLL_LF(port), hw_state->mg_pll_lf); I915_WRITE(MG_PLL_FRAC_LOCK(port), hw_state->mg_pll_frac_lock); I915_WRITE(MG_PLL_SSC(port), hw_state->mg_pll_ssc); - I915_WRITE(MG_PLL_BIAS(port), hw_state->mg_pll_bias); - I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), - hw_state->mg_pll_tdc_coldst_bias); + + val = I915_READ(MG_PLL_BIAS(port)); + val &= ~hw_state->mg_pll_bias_mask; + val |= hw_state->mg_pll_bias; + I915_WRITE(MG_PLL_BIAS(port), val); + + val = I915_READ(MG_PLL_TDC_COLDST_BIAS(port)); + val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; + val |= hw_state->mg_pll_tdc_coldst_bias; + I915_WRITE(MG_PLL_TDC_COLDST_BIAS(port), val); + POSTING_READ(MG_PLL_TDC_COLDST_BIAS(port)); } @@ -2936,6 +3077,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, switch (id) { case DPLL_ID_ICL_DPLL0: case DPLL_ID_ICL_DPLL1: + case DPLL_ID_ICL_TBTPLL: icl_dpll_write(dev_priv, pll); break; case DPLL_ID_ICL_MGPLL1: @@ -3034,6 +3176,7 @@ static const struct intel_shared_dpll_funcs icl_pll_funcs = { static const struct dpll_info icl_plls[] = { { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index 7a0cd564a9ee..7e522cf4f13f 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -114,23 +114,27 @@ enum intel_dpll_id { */ DPLL_ID_ICL_DPLL1 = 1, /** + * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL + */ + DPLL_ID_ICL_TBTPLL = 2, + /** * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C) */ - DPLL_ID_ICL_MGPLL1 = 2, + DPLL_ID_ICL_MGPLL1 = 3, /** * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D) */ - DPLL_ID_ICL_MGPLL2 = 3, + DPLL_ID_ICL_MGPLL2 = 4, /** * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E) */ - DPLL_ID_ICL_MGPLL3 = 4, + DPLL_ID_ICL_MGPLL3 = 5, /** * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F) */ - DPLL_ID_ICL_MGPLL4 = 5, + DPLL_ID_ICL_MGPLL4 = 6, }; -#define I915_NUM_PLLS 6 +#define I915_NUM_PLLS 7 struct intel_dpll_hw_state { /* i9xx, pch plls */ @@ -176,6 +180,8 @@ struct intel_dpll_hw_state { uint32_t mg_pll_ssc; uint32_t mg_pll_bias; uint32_t mg_pll_tdc_coldst_bias; + uint32_t mg_pll_bias_mask; + uint32_t mg_pll_tdc_coldst_bias_mask; }; /** @@ -336,5 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev); void 
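The read-modify-write pattern introduced above keeps reserved or refclk-dependent fields intact by only touching the bits named in a mask, where the mask itself may be computed during the calc/readout phase. A compact model of that idiom, with a fake register backing store standing in for MMIO (register and field names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Fake "hardware" register standing in for MMIO. */
static uint32_t fake_reg = 0xdeadbeef;

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

/*
 * Program only the bits covered by @mask, preserving everything else
 * (reserved fields, bits owned by other state such as the refclk setup).
 */
static void rmw(uint32_t mask, uint32_t value)
{
	uint32_t v = reg_read();

	v &= ~mask;          /* clear the field we own */
	v |= value & mask;   /* install the new value, clipped to the field */
	reg_write(v);
}

int main(void)
{
	/* e.g. a 4-bit divider field at bits 11:8, as in an HSCLKCTL-style register */
	const uint32_t DIV_MASK = 0xf << 8;

	rmw(DIV_MASK, 0x5 << 8);
	printf("reg = 0x%08x\n", fake_reg);  /* 0xdeadb5ef: only bits 11:8 changed */
	return 0;
}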
intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); +int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, + uint32_t pll_id); #endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b8eefbffc77d..17af06d8a43e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -158,12 +158,6 @@ #define MAX_OUTPUTS 6 /* maximum connectors per crtcs in the mode set */ -/* Maximum cursor sizes */ -#define GEN2_CURSOR_WIDTH 64 -#define GEN2_CURSOR_HEIGHT 64 -#define MAX_CURSOR_WIDTH 256 -#define MAX_CURSOR_HEIGHT 256 - #define INTEL_I2C_BUS_DVO 1 #define INTEL_I2C_BUS_SDVO 2 @@ -194,7 +188,6 @@ enum intel_output_type { struct intel_framebuffer { struct drm_framebuffer base; - struct drm_i915_gem_object *obj; struct intel_rotation_info rot_info; /* for each plane in the normal GTT view */ @@ -261,7 +254,8 @@ struct intel_encoder { struct intel_crtc_state *pipe_config); /* Returns a mask of power domains that need to be referenced as part * of the hardware state readout code. */ - u64 (*get_power_domains)(struct intel_encoder *encoder); + u64 (*get_power_domains)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); /* * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and @@ -310,6 +304,8 @@ struct intel_panel { } backlight; }; +struct intel_digital_port; + /* * This structure serves as a translation layer between the generic HDCP code * and the bus-specific code. What that means is that HDCP over HDMI differs @@ -488,6 +484,8 @@ struct intel_atomic_state { */ bool skip_intermediate_wm; + bool rps_interactive; + /* Gen9+ only */ struct skl_ddb_values wm_results; @@ -953,6 +951,7 @@ struct intel_plane { enum pipe pipe; bool can_scale; bool has_fbc; + bool has_ccs; int max_downscale; uint32_t frontbuffer_bit; @@ -971,7 +970,7 @@ struct intel_plane { const struct intel_plane_state *plane_state); void (*disable_plane)(struct intel_plane *plane, struct intel_crtc *crtc); - bool (*get_hw_state)(struct intel_plane *plane); + bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state); @@ -1004,7 +1003,7 @@ struct cxsr_latency { #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) #define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base) -#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL) +#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL) struct intel_hdmi { i915_reg_t hdmi_reg; @@ -1139,7 +1138,6 @@ struct intel_dp { * register with to kick off an AUX transaction. 
*/ uint32_t (*get_aux_send_ctl)(struct intel_dp *dp, - bool has_aux_irq, int send_bytes, uint32_t aux_clock_divider); @@ -1252,22 +1250,29 @@ intel_attached_encoder(struct drm_connector *connector) return to_intel_connector(connector)->encoder; } -static inline struct intel_digital_port * -enc_to_dig_port(struct drm_encoder *encoder) +static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - - switch (intel_encoder->type) { + switch (encoder->type) { case INTEL_OUTPUT_DDI: - WARN_ON(!HAS_DDI(to_i915(encoder->dev))); case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: + return true; + default: + return false; + } +} + +static inline struct intel_digital_port * +enc_to_dig_port(struct drm_encoder *encoder) +{ + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + + if (intel_encoder_is_dig_port(intel_encoder)) return container_of(encoder, struct intel_digital_port, base.base); - default: + else return NULL; - } } static inline struct intel_dp_mst_encoder * @@ -1281,6 +1286,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) return &enc_to_dig_port(encoder)->dp; } +static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) +{ + switch (encoder->type) { + case INTEL_OUTPUT_DP: + case INTEL_OUTPUT_EDP: + return true; + case INTEL_OUTPUT_DDI: + /* Skip pure HDMI/DVI DDI encoders */ + return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg); + default: + return false; + } +} + static inline struct intel_digital_port * dp_to_dig_port(struct intel_dp *intel_dp) { @@ -1337,9 +1356,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv); void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ -bool gen11_reset_one_iir(struct drm_i915_private * const i915, - const unsigned int bank, - const unsigned int bit); void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); @@ -1376,6 +1392,8 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); /* intel_crt.c */ +bool intel_crt_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t adpa_reg, enum pipe *pipe); void intel_crt_init(struct drm_i915_private *dev_priv); void intel_crt_reset(struct drm_encoder *encoder); @@ -1391,8 +1409,6 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); -struct intel_encoder * -intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state); void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); @@ -1406,6 +1422,8 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, u32 bxt_signal_levels(struct intel_dp *intel_dp); uint32_t ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); +u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder, + u8 
voltage_swing); int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_map_plls_to_ports(struct drm_crtc *crtc, @@ -1487,6 +1505,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); +bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); +enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, + enum port port); enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, @@ -1614,6 +1635,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); +u16 skl_scaler_calc_phase(int sub, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, uint32_t pixel_format); @@ -1643,6 +1665,9 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ +bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t dp_reg, enum port port, + enum pipe *pipe); bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, @@ -1660,8 +1685,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); void intel_dp_encoder_reset(struct drm_encoder *encoder); void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); void intel_dp_encoder_destroy(struct drm_encoder *encoder); -int intel_dp_sink_crc(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); @@ -1675,8 +1698,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state); void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); void intel_edp_panel_on(struct intel_dp *intel_dp); void intel_edp_panel_off(struct intel_dp *intel_dp); -void intel_dp_mst_suspend(struct drm_device *dev); -void intel_dp_mst_resume(struct drm_device *dev); +void intel_dp_mst_suspend(struct drm_i915_private *dev_priv); +void intel_dp_mst_resume(struct drm_i915_private *dev_priv); int intel_dp_max_link_rate(struct intel_dp *intel_dp); int intel_dp_max_lane_count(struct intel_dp *intel_dp); int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); @@ -1706,6 +1729,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, uint8_t *link_bw, uint8_t *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); +bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); @@ -1725,8 +1749,8 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); -/* intel_dsi.c */ -void intel_dsi_init(struct drm_i915_private *dev_priv); +/* vlv_dsi.c */ +void vlv_dsi_init(struct drm_i915_private 
*dev_priv); /* intel_dsi_dcs_backlight.c */ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); @@ -1820,6 +1844,8 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port); /* intel_lvds.c */ +bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t lvds_reg, enum pipe *pipe); void intel_lvds_init(struct drm_i915_private *dev_priv); struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); bool intel_is_dual_link_lvds(struct drm_device *dev); @@ -1866,7 +1892,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); void intel_panel_destroy_backlight(struct drm_connector *connector); -enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv); extern struct drm_display_mode *intel_find_panel_downclock( struct drm_i915_private *dev_priv, struct drm_display_mode *fixed_mode, @@ -1910,12 +1935,12 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); void intel_psr_init(struct drm_i915_private *dev_priv); -void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, - unsigned frontbuffer_bits); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); +void intel_psr_short_pulse(struct intel_dp *intel_dp); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state); /* intel_runtime_pm.c */ int intel_power_domains_init(struct drm_i915_private *); @@ -2057,12 +2082,13 @@ void intel_init_ipc(struct drm_i915_private *dev_priv); void intel_enable_ipc(struct drm_i915_private *dev_priv); /* intel_sdvo.c */ +bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t sdvo_reg, enum pipe *pipe); bool intel_sdvo_init(struct drm_i915_private *dev_priv, i915_reg_t reg, enum port port); /* intel_sprite.c */ -bool intel_format_is_yuv(u32 format); int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, @@ -2075,10 +2101,9 @@ void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); -bool skl_plane_get_hw_state(struct intel_plane *plane); +bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe); bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id); -bool intel_format_is_yuv(uint32_t format); bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id); @@ -2144,7 +2169,6 @@ void lspcon_resume(struct intel_lspcon *lspcon); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); /* intel_pipe_crc.c */ -int intel_pipe_crc_create(struct drm_minor *minor); #ifdef CONFIG_DEBUG_FS int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, size_t *values_cnt); @@ -2160,5 +2184,4 @@ static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc) { } #endif -extern const struct file_operations i915_display_crc_ctl_fops; #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dsi.h 
b/drivers/gpu/drm/i915/intel_dsi.h index 7afeb9580f41..ad7c1cb32983 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -129,21 +129,29 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) return container_of(encoder, struct intel_dsi, base.base); } -/* intel_dsi.c */ -void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port); +/* vlv_dsi.c */ +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); -/* intel_dsi_pll.c */ -bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); -int intel_compute_dsi_pll(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void intel_enable_dsi_pll(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void intel_disable_dsi_pll(struct intel_encoder *encoder); -u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, - struct intel_crtc_state *config); -void intel_dsi_reset_clocks(struct intel_encoder *encoder, - enum port port); +/* vlv_dsi_pll.c */ +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void vlv_dsi_pll_disable(struct intel_encoder *encoder); +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config); +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void bxt_dsi_pll_disable(struct intel_encoder *encoder); +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, + struct intel_crtc_state *config); +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 4d6ffa7b3e7b..ac83d6b89ae0 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -181,7 +181,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, break; } - wait_for_dsi_fifo_empty(intel_dsi, port); + vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: data += len; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 61d908e0df0e..4e142ff49708 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -137,19 +137,15 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; tmp = I915_READ(intel_dvo->dev.dvo_reg); - if (!(tmp & DVO_ENABLE)) - return false; - - *pipe = PORT_TO_PIPE(tmp); + *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT; - return true; + return tmp & DVO_ENABLE; } static void intel_dvo_get_config(struct intel_encoder *encoder, @@ -282,8 +278,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, dvo_val |= 
DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE | DVO_BLANK_ACTIVE_HIGH; - if (pipe == 1) - dvo_val |= DVO_PIPE_B_SELECT; + dvo_val |= DVO_PIPE_SEL(pipe); dvo_val |= DVO_PIPE_STALL; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) dvo_val |= DVO_HSYNC_ACTIVE_HIGH; @@ -443,7 +438,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) int gpio; bool dvoinit; enum pipe pipe; - uint32_t dpll[I915_MAX_PIPES]; + u32 dpll[I915_MAX_PIPES]; enum port port; /* diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 1590375f31cb..2d1952849d69 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -25,7 +25,6 @@ #include <drm/drm_print.h> #include "i915_drv.h" -#include "i915_vgpu.h" #include "intel_ringbuffer.h" #include "intel_lrc.h" @@ -230,6 +229,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) break; default: MISSING_CASE(class); + /* fall through */ case VIDEO_DECODE_CLASS: case VIDEO_ENHANCEMENT_CLASS: case COPY_ENGINE_CLASS: @@ -302,6 +302,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; + if (engine->context_size) + DRIVER_CAPS(dev_priv)->has_logical_contexts = true; /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; @@ -456,28 +458,16 @@ static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) i915_gem_batch_pool_init(&engine->batch_pool, engine); } -static bool csb_force_mmio(struct drm_i915_private *i915) -{ - /* Older GVT emulation depends upon intercepting CSB mmio */ - if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915)) - return true; - - return false; -} - static void intel_engine_init_execlist(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - execlists->csb_use_mmio = csb_force_mmio(engine->i915); - execlists->port_mask = 1; BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); execlists->queue_priority = INT_MIN; - execlists->queue = RB_ROOT; - execlists->first = NULL; + execlists->queue = RB_ROOT_CACHED; } /** @@ -492,6 +482,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) void intel_engine_setup_common(struct intel_engine_cs *engine) { i915_timeline_init(engine->i915, &engine->timeline, engine->name); + lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); intel_engine_init_execlist(engine); intel_engine_init_hangcheck(engine); @@ -499,7 +490,8 @@ void intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_cmd_parser(engine); } -int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) +int intel_engine_create_scratch(struct intel_engine_cs *engine, + unsigned int size) { struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -515,7 +507,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) return PTR_ERR(obj); } - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_unref; @@ -533,7 +525,7 @@ err_unref: return ret; } -static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) +void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) { i915_vma_unpin_and_release(&engine->scratch); } @@ -585,7 +577,7 @@ static int init_status_page(struct 
intel_engine_cs *engine) if (ret) goto err; - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -645,6 +637,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine) return 0; } +static void __intel_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + intel_context_unpin(to_intel_context(ctx, engine)); +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. @@ -658,7 +656,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine) */ int intel_engine_init_common(struct intel_engine_cs *engine) { - struct intel_ring *ring; + struct drm_i915_private *i915 = engine->i915; + struct intel_context *ce; int ret; engine->set_default_submission(engine); @@ -670,18 +669,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine) * be available. To avoid this we always pin the default * context. */ - ring = intel_context_pin(engine->i915->kernel_context, engine); - if (IS_ERR(ring)) - return PTR_ERR(ring); + ce = intel_context_pin(i915->kernel_context, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); /* * Similarly the preempt context must always be available so that * we can interrupt the engine at any time. */ - if (engine->i915->preempt_context) { - ring = intel_context_pin(engine->i915->preempt_context, engine); - if (IS_ERR(ring)) { - ret = PTR_ERR(ring); + if (i915->preempt_context) { + ce = intel_context_pin(i915->preempt_context, engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); goto err_unpin_kernel; } } @@ -690,7 +689,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) goto err_unpin_preempt; - if (HWS_NEEDS_PHYSICAL(engine->i915)) + if (HWS_NEEDS_PHYSICAL(i915)) ret = init_phys_status_page(engine); else ret = init_status_page(engine); @@ -702,10 +701,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine) err_breadcrumbs: intel_engine_fini_breadcrumbs(engine); err_unpin_preempt: - if (engine->i915->preempt_context) - intel_context_unpin(engine->i915->preempt_context, engine); + if (i915->preempt_context) + __intel_context_unpin(i915->preempt_context, engine); + err_unpin_kernel: - intel_context_unpin(engine->i915->kernel_context, engine); + __intel_context_unpin(i915->kernel_context, engine); return ret; } @@ -718,6 +718,8 @@ err_unpin_kernel: */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + intel_engine_cleanup_scratch(engine); if (HWS_NEEDS_PHYSICAL(engine->i915)) @@ -732,9 +734,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (engine->i915->preempt_context) - intel_context_unpin(engine->i915->preempt_context, engine); - intel_context_unpin(engine->i915->kernel_context, engine); + if (i915->preempt_context) + __intel_context_unpin(i915->preempt_context, engine); + __intel_context_unpin(i915->kernel_context, engine); i915_timeline_fini(&engine->timeline); } @@ -769,6 +771,35 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) return bbaddr; } +int intel_engine_stop_cs(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + const u32 base = engine->mmio_base; + const i915_reg_t mode = RING_MI_MODE(base); + int err; + + if (INTEL_GEN(dev_priv) < 3) + return -ENODEV; + + 
GEM_TRACE("%s\n", engine->name); + + I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); + + err = 0; + if (__intel_wait_for_register_fw(dev_priv, + mode, MODE_IDLE, MODE_IDLE, + 1000, 0, + NULL)) { + GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); + err = -ETIMEDOUT; + } + + /* A final mmio read to let GPU writes be hopefully flushed to memory */ + POSTING_READ_FW(mode); + + return err; +} + const char *i915_cache_level_str(struct drm_i915_private *i915, int type) { switch (type) { @@ -780,12 +811,32 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } } +u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) +{ + const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); + u32 mcr_s_ss_select; + u32 slice = fls(sseu->slice_mask); + u32 subslice = fls(sseu->subslice_mask[slice]); + + if (INTEL_GEN(dev_priv) == 10) + mcr_s_ss_select = GEN8_MCR_SLICE(slice) | + GEN8_MCR_SUBSLICE(subslice); + else if (INTEL_GEN(dev_priv) >= 11) + mcr_s_ss_select = GEN11_MCR_SLICE(slice) | + GEN11_MCR_SUBSLICE(subslice); + else + mcr_s_ss_select = 0; + + return mcr_s_ss_select; +} + static inline uint32_t read_subslice_reg(struct drm_i915_private *dev_priv, int slice, int subslice, i915_reg_t reg) { uint32_t mcr_slice_subslice_mask; uint32_t mcr_slice_subslice_select; + uint32_t default_mcr_s_ss_select; uint32_t mcr; uint32_t ret; enum forcewake_domains fw_domains; @@ -802,6 +853,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, GEN8_MCR_SUBSLICE(subslice); } + default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, @@ -812,11 +865,10 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, intel_uncore_forcewake_get__locked(dev_priv, fw_domains); mcr = I915_READ_FW(GEN8_MCR_SELECTOR); - /* - * The HW expects the slice and sublice selectors to be reset to 0 - * after reading out the registers. - */ - WARN_ON_ONCE(mcr & mcr_slice_subslice_mask); + + WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != + default_mcr_s_ss_select); + mcr &= ~mcr_slice_subslice_mask; mcr |= mcr_slice_subslice_select; I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); @@ -824,6 +876,8 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, ret = I915_READ_FW(reg); mcr &= ~mcr_slice_subslice_mask; + mcr |= default_mcr_s_ss_select; + I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); intel_uncore_forcewake_put__locked(dev_priv, fw_domains); @@ -934,11 +988,24 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return true; /* Waiting to drain ELSP? */ - if (READ_ONCE(engine->execlists.active)) - return false; + if (READ_ONCE(engine->execlists.active)) { + struct tasklet_struct *t = &engine->execlists.tasklet; + + local_bh_disable(); + if (tasklet_trylock(t)) { + /* Must wait for any GPU reset in progress. */ + if (__tasklet_is_enabled(t)) + t->func(t->data); + tasklet_unlock(t); + } + local_bh_enable(); - /* ELSP is empty, but there are ready requests? */ - if (READ_ONCE(engine->execlists.first)) + if (READ_ONCE(engine->execlists.active)) + return false; + } + + /* ELSP is empty, but there are ready requests? E.g. after reset */ + if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) return false; /* Ring stopped? 
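The STOP_RING write in intel_engine_stop_cs() above relies on i915's self-masked registers: the upper 16 bits of the written value select which of the lower 16 bits actually take effect, which lets a single write flip one field without a read-modify-write. A small model of that encoding; the bit position used for the stop bit here is illustrative only.

#include <stdint.h>
#include <stdio.h>

/*
 * Self-masked register model: bits 31:16 of a write act as a per-bit
 * write enable for bits 15:0.
 */
#define MASKED_FIELD(mask, value)  (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(bit)     MASKED_FIELD((bit), (bit))
#define MASKED_BIT_DISABLE(bit)    MASKED_FIELD((bit), 0)

#define STOP_RING_BIT  (1u << 8)   /* illustrative bit position */

static uint16_t mi_mode;           /* the register's live 16-bit contents */

static void masked_write(uint32_t v)
{
	uint16_t mask = v >> 16;
	uint16_t val  = v & 0xffff;

	mi_mode = (mi_mode & ~mask) | (val & mask);
}

int main(void)
{
	masked_write(MASKED_BIT_ENABLE(STOP_RING_BIT));
	printf("after enable:  0x%04x\n", mi_mode);   /* 0x0100 */

	masked_write(MASKED_BIT_DISABLE(STOP_RING_BIT));
	printf("after disable: 0x%04x\n", mi_mode);   /* 0x0000 */
	return 0;
}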
*/ @@ -978,8 +1045,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) */ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) { - const struct i915_gem_context * const kernel_context = - engine->i915->kernel_context; + const struct intel_context *kernel_context = + to_intel_context(engine->i915->kernel_context, engine); struct i915_request *rq; lockdep_assert_held(&engine->i915->drm.struct_mutex); @@ -991,7 +1058,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) */ rq = __i915_gem_active_peek(&engine->timeline.last_request); if (rq) - return rq->ctx == kernel_context; + return rq->hw_context == kernel_context; else return engine->last_retired_context == kernel_context; } @@ -1006,6 +1073,28 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915) } /** + * intel_engines_sanitize: called after the GPU has lost power + * @i915: the i915 device + * + * Anytime we reset the GPU, either with an explicit GPU reset or through a + * PCI power cycle, the GPU loses state and we must reset our state tracking + * to match. Note that calling intel_engines_sanitize() if the GPU has not + * been reset results in much confusion! + */ +void intel_engines_sanitize(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + GEM_TRACE("\n"); + + for_each_engine(engine, i915, id) { + if (engine->reset.reset) + engine->reset.reset(engine, NULL); + } +} + +/** * intel_engines_park: called when the GT is transitioning from busy->idle * @i915: the i915 device * @@ -1043,6 +1132,11 @@ void intel_engines_park(struct drm_i915_private *i915) if (engine->park) engine->park(engine); + if (engine->pinned_default_state) { + i915_gem_object_unpin_map(engine->default_state); + engine->pinned_default_state = NULL; + } + i915_gem_batch_pool_fini(&engine->batch_pool); engine->execlists.no_priolist = false; } @@ -1060,6 +1154,16 @@ void intel_engines_unpark(struct drm_i915_private *i915) enum intel_engine_id id; for_each_engine(engine, i915, id) { + void *map; + + /* Pin the default state for fast resets from atomic context. */ + map = NULL; + if (engine->default_state) + map = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (!IS_ERR_OR_NULL(map)) + engine->pinned_default_state = map; + if (engine->unpark) engine->unpark(engine); @@ -1067,6 +1171,26 @@ void intel_engines_unpark(struct drm_i915_private *i915) } } +/** + * intel_engine_lost_context: called when the GPU is reset into unknown state + * @engine: the engine + * + * We have either reset the GPU or otherwise about to lose state tracking of + * the current GPU logical state (e.g. suspend). On next use, it is therefore + * imperative that we make no presumptions about the current state and load + * from scratch. 
+ */ +void intel_engine_lost_context(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + + lockdep_assert_held(&engine->i915->drm.struct_mutex); + + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); +} + bool intel_engine_can_store_dword(struct intel_engine_cs *engine) { switch (INTEL_GEN(engine->i915)) { @@ -1151,7 +1275,7 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len) rowsize, sizeof(u32), line, sizeof(line), false) >= sizeof(line)); - drm_printf(m, "%08zx %s\n", pos, line); + drm_printf(m, "[%04zx] %s\n", pos, line); prev = buf + pos; skip = false; @@ -1166,6 +1290,8 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, &engine->execlists; u64 addr; + if (engine->id == RCS && IS_GEN(dev_priv, 4, 7)) + drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); drm_printf(m, "\tRING_START: 0x%08x\n", I915_READ(RING_START(engine->mmio_base))); drm_printf(m, "\tRING_HEAD: 0x%08x\n", @@ -1232,12 +1358,10 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); read = GEN8_CSB_READ_PTR(ptr); write = GEN8_CSB_WRITE_PTR(ptr); - drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n", + drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", read, execlists->csb_head, write, intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), - yesno(test_bit(ENGINE_IRQ_EXECLIST, - &engine->irq_posted)), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1287,6 +1411,39 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, } } +static void print_request_ring(struct drm_printer *m, struct i915_request *rq) +{ + void *ring; + int size; + + drm_printf(m, + "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", + rq->head, rq->postfix, rq->tail, + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, + rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + + size = rq->tail - rq->head; + if (rq->tail < rq->head) + size += rq->ring->size; + + ring = kmalloc(size, GFP_ATOMIC); + if (ring) { + const void *vaddr = rq->ring->vaddr; + unsigned int head = rq->head; + unsigned int len = 0; + + if (rq->tail < head) { + len = rq->ring->size - head; + memcpy(ring, vaddr + head, len); + head = 0; + } + memcpy(ring + len, vaddr + head, size - len); + + hexdump(m, ring, size); + kfree(ring); + } +} + void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...) @@ -1296,6 +1453,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, const struct intel_engine_execlists * const execlists = &engine->execlists; struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq, *last; + unsigned long flags; struct rb_node *rb; int count; @@ -1336,11 +1494,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq = i915_gem_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); - drm_printf(m, - "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", - rq->head, rq->postfix, rq->tail, - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, - rq->batch ? 
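print_request_ring() above snapshots the request payload out of a circular ring, splitting the copy into two chunks when the [head, tail) range wraps past the end of the buffer. The same arithmetic in a self-contained form; the buffer contents and offsets are made up for the example.

#include <stdio.h>
#include <string.h>

/* Copy [head, tail) out of a circular buffer of @size bytes into @dst. */
static size_t ring_snapshot(char *dst, const char *ring, size_t size,
			    size_t head, size_t tail)
{
	size_t len, total;

	if (tail >= head) {
		total = tail - head;
		memcpy(dst, ring + head, total);
		return total;
	}

	/* Range wraps past the end of the buffer: copy in two chunks. */
	len = size - head;
	total = len + tail;
	memcpy(dst, ring + head, len);
	memcpy(dst + len, ring, tail);
	return total;
}

int main(void)
{
	const char ring[] = "ABCDEFGH";
	char out[8];
	size_t n = ring_snapshot(out, ring, sizeof(ring) - 1, 6, 3);

	printf("%zu bytes: %.*s\n", n, (int)n, out);  /* 5 bytes: GHABC */
	return 0;
}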
lower_32_bits(rq->batch->node.start) : ~0u); + drm_printf(m, "\t\tring->start: 0x%08x\n", i915_ggtt_offset(rq->ring->vma)); drm_printf(m, "\t\tring->head: 0x%08x\n", @@ -1351,6 +1505,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->ring->emit); drm_printf(m, "\t\tring->space: 0x%08x\n", rq->ring->space); + + print_request_ring(m, rq); } rcu_read_unlock(); @@ -1362,7 +1518,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } - spin_lock_irq(&engine->timeline.lock); + local_irq_save(flags); + spin_lock(&engine->timeline.lock); last = NULL; count = 0; @@ -1384,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, last = NULL; count = 0; drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); - for (rb = execlists->first; rb; rb = rb_next(rb)) { + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); @@ -1404,22 +1561,21 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request(m, last, "\t\tQ "); } - spin_unlock_irq(&engine->timeline.lock); + spin_unlock(&engine->timeline.lock); - spin_lock_irq(&b->rb_lock); + spin_lock(&b->rb_lock); for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { struct intel_wait *w = rb_entry(rb, typeof(*w), node); drm_printf(m, "\t%s [%d] waiting for %x\n", w->tsk->comm, w->tsk->pid, w->seqno); } - spin_unlock_irq(&b->rb_lock); + spin_unlock(&b->rb_lock); + local_irq_restore(flags); - drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n", + drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n", engine->irq_posted, yesno(test_bit(ENGINE_IRQ_BREADCRUMB, - &engine->irq_posted)), - yesno(test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))); drm_printf(m, "HWSP:\n"); @@ -1468,8 +1624,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) if (!intel_engine_supports_stats(engine)) return -ENODEV; - tasklet_disable(&execlists->tasklet); - write_seqlock_irqsave(&engine->stats.lock, flags); + spin_lock_irqsave(&engine->timeline.lock, flags); + write_seqlock(&engine->stats.lock); if (unlikely(engine->stats.enabled == ~0)) { err = -EBUSY; @@ -1493,8 +1649,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) } unlock: - write_sequnlock_irqrestore(&engine->stats.lock, flags); - tasklet_enable(&execlists->tasklet); + write_sequnlock(&engine->stats.lock); + spin_unlock_irqrestore(&engine->timeline.lock, flags); return err; } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index b431b6733cc1..01d1d2088f04 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -399,89 +399,6 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) return dev_priv->fbc.active; } -static void intel_fbc_work_fn(struct work_struct *__work) -{ - struct drm_i915_private *dev_priv = - container_of(__work, struct drm_i915_private, fbc.work.work); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_work *work = &fbc->work; - struct intel_crtc *crtc = fbc->crtc; - struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe]; - - if (drm_crtc_vblank_get(&crtc->base)) { - /* CRTC is now off, leave FBC deactivated */ - mutex_lock(&fbc->lock); - work->scheduled = false; - mutex_unlock(&fbc->lock); - return; - } - -retry: - /* Delay the actual enabling to let pageflipping cease and the - * display to settle before starting the compression. 
Note that - * this delay also serves a second purpose: it allows for a - * vblank to pass after disabling the FBC before we attempt - * to modify the control registers. - * - * WaFbcWaitForVBlankBeforeEnable:ilk,snb - * - * It is also worth mentioning that since work->scheduled_vblank can be - * updated multiple times by the other threads, hitting the timeout is - * not an error condition. We'll just end up hitting the "goto retry" - * case below. - */ - wait_event_timeout(vblank->queue, - drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, - msecs_to_jiffies(50)); - - mutex_lock(&fbc->lock); - - /* Were we cancelled? */ - if (!work->scheduled) - goto out; - - /* Were we delayed again while this function was sleeping? */ - if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { - mutex_unlock(&fbc->lock); - goto retry; - } - - intel_fbc_hw_activate(dev_priv); - - work->scheduled = false; - -out: - mutex_unlock(&fbc->lock); - drm_crtc_vblank_put(&crtc->base); -} - -static void intel_fbc_schedule_activation(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_work *work = &fbc->work; - - WARN_ON(!mutex_is_locked(&fbc->lock)); - if (WARN_ON(!fbc->enabled)) - return; - - if (drm_crtc_vblank_get(&crtc->base)) { - DRM_ERROR("vblank not available for FBC on pipe %c\n", - pipe_name(crtc->pipe)); - return; - } - - /* It is useless to call intel_fbc_cancel_work() or cancel_work() in - * this function since we're not releasing fbc.lock, so it won't have an - * opportunity to grab it to discover that it was cancelled. So we just - * update the expected jiffy count. */ - work->scheduled = true; - work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); - drm_crtc_vblank_put(&crtc->base); - - schedule_work(&work->work); -} - static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, const char *reason) { @@ -489,11 +406,6 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, WARN_ON(!mutex_is_locked(&fbc->lock)); - /* Calling cancel_work() here won't help due to the fact that the work - * function grabs fbc->lock. Just set scheduled to false so the work - * function can know it was cancelled. */ - fbc->work.scheduled = false; - if (fbc->active) intel_fbc_hw_deactivate(dev_priv); @@ -924,13 +836,6 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, 32 * fbc->threshold) * 8; } -static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, - struct intel_fbc_reg_params *params2) -{ - /* We can use this since intel_fbc_get_reg_params() does a memset. 
*/ - return memcmp(params1, params2, sizeof(*params1)) == 0; -} - void intel_fbc_pre_update(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) @@ -953,6 +858,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc, goto unlock; intel_fbc_update_state_cache(crtc, crtc_state, plane_state); + fbc->flip_pending = true; deactivate: intel_fbc_deactivate(dev_priv, reason); @@ -988,13 +894,15 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_reg_params old_params; WARN_ON(!mutex_is_locked(&fbc->lock)); if (!fbc->enabled || fbc->crtc != crtc) return; + fbc->flip_pending = false; + WARN_ON(fbc->active); + if (!i915_modparams.enable_fbc) { intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); __intel_fbc_disable(dev_priv); @@ -1002,25 +910,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc) return; } - if (!intel_fbc_can_activate(crtc)) { - WARN_ON(fbc->active); - return; - } - - old_params = fbc->params; intel_fbc_get_reg_params(crtc, &fbc->params); - /* If the scanout has not changed, don't modify the FBC settings. - * Note that we make the fundamental assumption that the fb->obj - * cannot be unpinned (and have its GTT offset and fence revoked) - * without first being decoupled from the scanout and FBC disabled. - */ - if (fbc->active && - intel_fbc_reg_params_equal(&old_params, &fbc->params)) + if (!intel_fbc_can_activate(crtc)) return; - intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); - intel_fbc_schedule_activation(crtc); + if (!fbc->busy_bits) { + intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)"); + intel_fbc_hw_activate(dev_priv); + } else + intel_fbc_deactivate(dev_priv, "frontbuffer write"); } void intel_fbc_post_update(struct intel_crtc *crtc) @@ -1085,7 +984,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) intel_fbc_recompress(dev_priv); - else + else if (!fbc->flip_pending) __intel_fbc_post_update(fbc->crtc); } @@ -1225,8 +1124,6 @@ void intel_fbc_disable(struct intel_crtc *crtc) if (fbc->crtc == crtc) __intel_fbc_disable(dev_priv); mutex_unlock(&fbc->lock); - - cancel_work_sync(&fbc->work.work); } /** @@ -1248,8 +1145,6 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv) __intel_fbc_disable(dev_priv); } mutex_unlock(&fbc->lock); - - cancel_work_sync(&fbc->work.work); } static void intel_fbc_underrun_work_fn(struct work_struct *work) @@ -1400,12 +1295,10 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; - INIT_WORK(&fbc->work.work, intel_fbc_work_fn); INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); fbc->enabled = false; fbc->active = false; - fbc->work.scheduled = false; if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->has_fbc = false; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index e9e02b58b7be..fb2f9fce34cd 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -47,7 +47,7 @@ static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev) { - struct drm_i915_gem_object *obj = ifbdev->fb->obj; + struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base); unsigned int origin = ifbdev->vma_flags & PLANE_HAS_FENCE ? 
ORIGIN_GTT : ORIGIN_CPU; @@ -193,7 +193,7 @@ static int intelfb_create(struct drm_fb_helper *helper, drm_framebuffer_put(&intel_fb->base); intel_fb = ifbdev->fb = NULL; } - if (!intel_fb || WARN_ON(!intel_fb->obj)) { + if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); if (ret) @@ -265,7 +265,7 @@ static int intelfb_create(struct drm_fb_helper *helper, * If the object is stolen however, it will be full of whatever * garbage was left in there. */ - if (intel_fb->obj->stolen && !prealloc) + if (intel_fb_obj(fb)->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -792,7 +792,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. */ - if (state == FBINFO_STATE_RUNNING && ifbdev->fb->obj->stolen) + if (state == FBINFO_STATE_RUNNING && + intel_fb_obj(&ifbdev->fb->base)->stolen) memset_io(info->screen_base, 0, info->screen_size); drm_fb_helper_set_suspend(&ifbdev->helper, state); diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 7fff0a0eceb4..c3379bde266f 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -153,8 +153,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv, /* Remove stale busy bits due to the old buffer. */ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; spin_unlock(&dev_priv->fb_tracking.lock); - - intel_psr_single_frame_update(dev_priv, frontbuffer_bits); } /** diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 116f4ccf1bbd..560c7406ae40 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -27,6 +27,8 @@ #include "intel_guc_submission.h" #include "i915_drv.h" +static void guc_init_ggtt_pin_bias(struct intel_guc *guc); + static void gen8_guc_raise_irq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -73,7 +75,7 @@ void intel_guc_init_early(struct intel_guc *guc) guc->notify = gen8_guc_raise_irq; } -int intel_guc_init_wq(struct intel_guc *guc) +static int guc_init_wq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -124,7 +126,7 @@ int intel_guc_init_wq(struct intel_guc *guc) return 0; } -void intel_guc_fini_wq(struct intel_guc *guc) +static void guc_fini_wq(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -135,6 +137,28 @@ void intel_guc_fini_wq(struct intel_guc *guc) destroy_workqueue(guc->log.relay.flush_wq); } +int intel_guc_init_misc(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + int ret; + + guc_init_ggtt_pin_bias(guc); + + ret = guc_init_wq(guc); + if (ret) + return ret; + + intel_uc_fw_fetch(i915, &guc->fw); + + return 0; +} + +void intel_guc_fini_misc(struct intel_guc *guc) +{ + intel_uc_fw_fini(&guc->fw); + guc_fini_wq(guc); +} + static int guc_shared_data_create(struct intel_guc *guc) { struct i915_vma *vma; @@ -169,7 +193,7 @@ int intel_guc_init(struct intel_guc *guc) ret = guc_shared_data_create(guc); if (ret) - return ret; + goto err_fetch; GEM_BUG_ON(!guc->shared_data); ret = intel_guc_log_create(&guc->log); @@ -190,6 +214,8 @@ err_log: intel_guc_log_destroy(&guc->log); err_shared: 
guc_shared_data_destroy(guc); +err_fetch: + intel_uc_fw_fini(&guc->fw); return ret; } @@ -201,14 +227,17 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); guc_shared_data_destroy(guc); + intel_uc_fw_fini(&guc->fw); } -static u32 get_log_control_flags(void) +static u32 guc_ctl_debug_flags(struct intel_guc *guc) { - u32 level = i915_modparams.guc_log_level; - u32 flags = 0; + u32 level = intel_guc_log_get_level(&guc->log); + u32 flags; + u32 ads; - GEM_BUG_ON(level < 0); + ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; + flags = ads << GUC_ADS_ADDR_SHIFT | GUC_ADS_ENABLED; if (!GUC_LOG_LEVEL_IS_ENABLED(level)) flags |= GUC_LOG_DEFAULT_DISABLED; @@ -222,6 +251,78 @@ static u32 get_log_control_flags(void) return flags; } +static u32 guc_ctl_feature_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + flags |= GUC_CTL_VCS2_ENABLED; + + if (USES_GUC_SUBMISSION(guc_to_i915(guc))) + flags |= GUC_CTL_KERNEL_SUBMISSIONS; + else + flags |= GUC_CTL_DISABLE_SCHEDULER; + + return flags; +} + +static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { + u32 ctxnum, base; + + base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); + ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16; + + base >>= PAGE_SHIFT; + flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) | + (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT); + } + return flags; +} + +static u32 guc_ctl_log_params_flags(struct intel_guc *guc) +{ + u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; + u32 flags; + + #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0) + #define UNIT SZ_1M + #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE + #else + #define UNIT SZ_4K + #define FLAG 0 + #endif + + BUILD_BUG_ON(!CRASH_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!DPC_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!ISR_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT)); + + BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); + BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT)); + BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT)); + + flags = GUC_LOG_VALID | + GUC_LOG_NOTIFY_ON_HALF_FULL | + FLAG | + ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) | + ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) | + ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) | + (offset << GUC_LOG_BUF_ADDR_SHIFT); + + #undef UNIT + #undef FLAG + + return flags; +} + /* * Initialise the GuC parameter block before starting the firmware * transfer. 
These parameters are read by the firmware on startup @@ -245,32 +346,13 @@ void intel_guc_init_params(struct intel_guc *guc) params[GUC_CTL_WA] |= GUC_CTL_WA_UK_BY_DRIVER; - params[GUC_CTL_FEATURE] |= GUC_CTL_DISABLE_SCHEDULER | - GUC_CTL_VCS2_ENABLED; - - params[GUC_CTL_LOG_PARAMS] = guc->log.flags; - - params[GUC_CTL_DEBUG] = get_log_control_flags(); + params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); + params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); + params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); + params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); - /* If GuC submission is enabled, set up additional parameters here */ - if (USES_GUC_SUBMISSION(dev_priv)) { - u32 ads = intel_guc_ggtt_offset(guc, - guc->ads_vma) >> PAGE_SHIFT; - u32 pgs = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); - u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; - - params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT; - params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED; - - pgs >>= PAGE_SHIFT; - params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) | - (ctx_in_16 << GUC_CTL_CTXNUM_IN16_SHIFT); - - params[GUC_CTL_FEATURE] |= GUC_CTL_KERNEL_SUBMISSIONS; - - /* Unmask this bit to enable the GuC's internal scheduler */ - params[GUC_CTL_FEATURE] &= ~GUC_CTL_DISABLE_SCHEDULER; - } + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); /* * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and @@ -346,10 +428,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, ret = -EIO; if (ret) { - DRM_DEBUG_DRIVER("INTEL_GUC_SEND: Action 0x%X failed;" - " ret=%d status=0x%08X response=0x%08X\n", - action[0], ret, status, - I915_READ(SOFT_SCRATCH(15))); + DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n", + action[0], ret, status); goto out; } @@ -386,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) * could happen that GuC sets the bit for 2nd interrupt but Host * clears out the bit on handling the 1st interrupt. */ + disable_rpm_wakeref_asserts(dev_priv); spin_lock(&guc->irq_lock); val = I915_READ(SOFT_SCRATCH(15)); msg = val & guc->msg_enabled_mask; I915_WRITE(SOFT_SCRATCH(15), val & ~msg); spin_unlock(&guc->irq_lock); + enable_rpm_wakeref_asserts(dev_priv); intel_guc_to_host_process_recv_msg(guc, msg); } @@ -532,13 +614,13 @@ int intel_guc_resume(struct intel_guc *guc) */ /** - * intel_guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. + * guc_init_ggtt_pin_bias() - Initialize the GuC ggtt_pin_bias value. * @guc: intel_guc structure. * * This function will calculate and initialize the ggtt_pin_bias value based on * overall WOPCM size and GuC WOPCM size. 
*/ -void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc) +static void guc_init_ggtt_pin_bias(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); @@ -572,7 +654,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); + vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); if (IS_ERR(vma)) goto err; diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index f1265e122d30..4121928a495e 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -151,11 +151,10 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_init_params(struct intel_guc *guc); -void intel_guc_init_ggtt_pin_bias(struct intel_guc *guc); -int intel_guc_init_wq(struct intel_guc *guc); -void intel_guc_fini_wq(struct intel_guc *guc); +int intel_guc_init_misc(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); +void intel_guc_fini_misc(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 0867ba76d445..1a0f2a39cef9 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -84,12 +84,12 @@ #define GUC_LOG_VALID (1 << 0) #define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) #define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) -#define GUC_LOG_CRASH_PAGES 1 #define GUC_LOG_CRASH_SHIFT 4 -#define GUC_LOG_DPC_PAGES 7 +#define GUC_LOG_CRASH_MASK (0x1 << GUC_LOG_CRASH_SHIFT) #define GUC_LOG_DPC_SHIFT 6 -#define GUC_LOG_ISR_PAGES 7 +#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) #define GUC_LOG_ISR_SHIFT 9 +#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) #define GUC_LOG_BUF_ADDR_SHIFT 12 #define GUC_CTL_PAGE_FAULT_CONTROL 5 @@ -532,20 +532,6 @@ enum guc_log_buffer_type { }; /** - * DOC: GuC Log buffer Layout - * - * Page0 +-------------------------------+ - * | ISR state header (32 bytes) | - * | DPC state header | - * | Crash dump state header | - * Page1 +-------------------------------+ - * | ISR logs | - * Page9 +-------------------------------+ - * | DPC logs | - * Page17 +-------------------------------+ - * | Crash Dump logs | - * +-------------------------------+ - * * Below state structure is used for coordination of retrieval of GuC firmware * logs. Separate state is maintained for each log buffer type. 
* read_ptr points to the location where i915 read last in log buffer and diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 401e1704d61e..6da61a71d28f 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -215,11 +215,11 @@ static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) { switch (type) { case GUC_ISR_LOG_BUFFER: - return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE; + return ISR_BUFFER_SIZE; case GUC_DPC_LOG_BUFFER: - return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE; + return DPC_BUFFER_SIZE; case GUC_CRASH_DUMP_LOG_BUFFER: - return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE; + return CRASH_BUFFER_SIZE; default: MISSING_CASE(type); } @@ -397,7 +397,7 @@ static int guc_log_relay_create(struct intel_guc_log *log) lockdep_assert_held(&log->relay.lock); /* Keep the size of sub buffers same as shared log buffer */ - subbuf_size = GUC_LOG_SIZE; + subbuf_size = log->vma->size; /* * Store up to 8 snapshots, which is large enough to buffer sufficient @@ -452,13 +452,34 @@ int intel_guc_log_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct i915_vma *vma; - unsigned long offset; - u32 flags; + u32 guc_log_size; int ret; GEM_BUG_ON(log->vma); - vma = intel_guc_allocate_vma(guc, GUC_LOG_SIZE); + /* + * GuC Log buffer Layout + * + * +===============================+ 00B + * | Crash dump state header | + * +-------------------------------+ 32B + * | DPC state header | + * +-------------------------------+ 64B + * | ISR state header | + * +-------------------------------+ 96B + * | | + * +===============================+ PAGE_SIZE (4KB) + * | Crash Dump logs | + * +===============================+ + CRASH_SIZE + * | DPC logs | + * +===============================+ + DPC_SIZE + * | ISR logs | + * +===============================+ + ISR_SIZE + */ + guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + + ISR_BUFFER_SIZE; + + vma = intel_guc_allocate_vma(guc, guc_log_size); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -466,20 +487,12 @@ int intel_guc_log_create(struct intel_guc_log *log) log->vma = vma; - /* each allocated unit is a page */ - flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL | - (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) | - (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) | - (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT); - - offset = intel_guc_ggtt_offset(guc, vma) >> PAGE_SHIFT; - log->flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags; + log->level = i915_modparams.guc_log_level; return 0; err: - /* logging will be off */ - i915_modparams.guc_log_level = 0; + DRM_ERROR("Failed to allocate GuC log buffer. 
%d\n", ret); return ret; } @@ -488,15 +501,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log) i915_vma_unpin_and_release(&log->vma); } -int intel_guc_log_level_get(struct intel_guc_log *log) -{ - GEM_BUG_ON(!log->vma); - GEM_BUG_ON(i915_modparams.guc_log_level < 0); - - return i915_modparams.guc_log_level; -} - -int intel_guc_log_level_set(struct intel_guc_log *log, u64 val) +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -504,33 +509,32 @@ int intel_guc_log_level_set(struct intel_guc_log *log, u64 val) BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); GEM_BUG_ON(!log->vma); - GEM_BUG_ON(i915_modparams.guc_log_level < 0); /* * GuC is recognizing log levels starting from 0 to max, we're using 0 * as indication that logging should be disabled. */ - if (val < GUC_LOG_LEVEL_DISABLED || val > GUC_LOG_LEVEL_MAX) + if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) return -EINVAL; mutex_lock(&dev_priv->drm.struct_mutex); - if (i915_modparams.guc_log_level == val) { + if (log->level == level) { ret = 0; goto out_unlock; } intel_runtime_pm_get(dev_priv); - ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(val), - GUC_LOG_LEVEL_IS_ENABLED(val), - GUC_LOG_LEVEL_TO_VERBOSITY(val)); + ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); intel_runtime_pm_put(dev_priv); if (ret) { DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); goto out_unlock; } - i915_modparams.guc_log_level = val; + log->level = level; out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h index fa80535a6f9d..7bc763f10c03 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.h +++ b/drivers/gpu/drm/i915/intel_guc_log.h @@ -30,15 +30,19 @@ #include <linux/workqueue.h> #include "intel_guc_fwif.h" +#include "i915_gem.h" struct intel_guc; -/* - * The first page is to save log buffer state. Allocate one - * extra page for others in case for overlap - */ -#define GUC_LOG_SIZE ((1 + GUC_LOG_DPC_PAGES + 1 + GUC_LOG_ISR_PAGES + \ - 1 + GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT) +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CRASH_BUFFER_SIZE SZ_2M +#define DPC_BUFFER_SIZE SZ_8M +#define ISR_BUFFER_SIZE SZ_8M +#else +#define CRASH_BUFFER_SIZE SZ_8K +#define DPC_BUFFER_SIZE SZ_32K +#define ISR_BUFFER_SIZE SZ_32K +#endif /* * While we're using plain log level in i915, GuC controls are much more... 
@@ -58,7 +62,7 @@ struct intel_guc; #define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX) struct intel_guc_log { - u32 flags; + u32 level; struct i915_vma *vma; struct { void *buf_addr; @@ -80,8 +84,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log); int intel_guc_log_create(struct intel_guc_log *log); void intel_guc_log_destroy(struct intel_guc_log *log); -int intel_guc_log_level_get(struct intel_guc_log *log); -int intel_guc_log_level_set(struct intel_guc_log *log, u64 control_val); +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); int intel_guc_log_relay_open(struct intel_guc_log *log); void intel_guc_log_relay_flush(struct intel_guc_log *log); @@ -89,4 +92,9 @@ void intel_guc_log_relay_close(struct intel_guc_log *log); void intel_guc_log_handle_flush_event(struct intel_guc_log *log); +static inline u32 intel_guc_log_get_level(struct intel_guc_log *log) +{ + return log->level; +} + #endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 2feb65096966..4aa5e6463e7b 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -513,8 +513,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { struct intel_guc_client *client = guc->execbuf_client; struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx, - engine)); + u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); spin_lock(&client->wq_lock); @@ -537,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) */ static void flush_ggtt_writes(struct i915_vma *vma) { - struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev); + struct drm_i915_private *dev_priv = vma->vm->i915; if (i915_vma_is_map_and_fenceable(vma)) POSTING_READ_FW(GUC_STATUS); @@ -552,8 +551,8 @@ static void inject_preempt_context(struct work_struct *work) preempt_work[engine->id]); struct intel_guc_client *client = guc->preempt_client; struct guc_stage_desc *stage_desc = __get_stage_desc(client); - u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner, - engine)); + u32 ctx_desc = lower_32_bits(to_intel_context(client->owner, + engine)->lrc_desc); u32 data[7]; /* @@ -623,6 +622,22 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine) report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN; } +static void complete_preempt_context(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + + GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); + + if (inject_preempt_hang(execlists)) + return; + + execlists_cancel_port_requests(execlists); + execlists_unwind_incomplete_requests(execlists); + + wait_for_guc_preempt_report(engine); + intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0); +} + /** * guc_submit() - Submit commands through GuC * @engine: engine associated with the commands @@ -681,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) lockdep_assert_held(&engine->timeline.lock); - rb = execlists->first; - GEM_BUG_ON(rb_first(&execlists->queue) != rb); - if (port_isset(port)) { if (intel_engine_has_preemption(engine)) { struct guc_preempt_work *preempt_work = @@ -705,12 +717,12 @@ static bool __guc_dequeue(struct 
intel_engine_cs *engine) } GEM_BUG_ON(port_isset(port)); - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { - if (last && rq->ctx != last->ctx) { + if (last && rq->hw_context != last->hw_context) { if (port == last_port) { __list_del_many(&p->requests, &rq->sched.link); @@ -730,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) submit = true; } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN; - execlists->first = rb; if (submit) port_assign(port, last); if (last) @@ -747,7 +757,8 @@ done: /* We must always keep the beast fed if we have work piled up */ GEM_BUG_ON(port_isset(execlists->port) && !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); + GEM_BUG_ON(rb_first_cached(&execlists->queue) && + !port_isset(execlists->port)); return submit; } @@ -793,20 +804,44 @@ static void guc_submission_tasklet(unsigned long data) if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) == - GUC_PREEMPT_FINISHED) { - execlists_cancel_port_requests(&engine->execlists); - execlists_unwind_incomplete_requests(execlists); - - wait_for_guc_preempt_report(engine); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT); - intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0); - } + GUC_PREEMPT_FINISHED) + complete_preempt_context(engine); if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) guc_dequeue(engine); } +static struct i915_request * +guc_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + GEM_TRACE("%s\n", engine->name); + + /* + * Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its execlists->tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the execlists->tasklet until the reset is over + * prevents the race. + */ + __tasklet_disable_sync_once(&execlists->tasklet); + + /* + * We're using worker to queue preemption requests from the tasklet in + * GuC submission mode. + * Even though tasklet was disabled, we may still have a worker queued. + * Let's make sure that all workers scheduled before disabling the + * tasklet are completed before continuing with the reset. 
+ */ + if (engine->i915->guc.preempt_wq) + flush_workqueue(engine->i915->guc.preempt_wq); + + return i915_gem_find_active_request(engine); +} + /* * Everything below here is concerned with setup & teardown, and is * therefore not part of the somewhat time-critical batch-submission @@ -876,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc) __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID); } - __destroy_doorbell(guc->execbuf_client); - __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID); + + if (guc->execbuf_client) { + __destroy_doorbell(guc->execbuf_client); + __update_doorbell_desc(guc->execbuf_client, + GUC_DOORBELL_INVALID); + } } /** @@ -1090,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc) guc_client_free(client); client = fetch_and_zero(&guc->execbuf_client); - guc_client_free(client); + if (client) + guc_client_free(client); } /* @@ -1119,7 +1159,7 @@ int intel_guc_submission_init(struct intel_guc *guc) WARN_ON(!guc_verify_doorbells(guc)); ret = guc_clients_create(guc); if (ret) - return ret; + goto err_pool; for_each_engine(engine, dev_priv, id) { guc->preempt_work[id].engine = engine; @@ -1128,6 +1168,9 @@ int intel_guc_submission_init(struct intel_guc *guc) return 0; +err_pool: + guc_stage_desc_pool_destroy(guc); + return ret; } void intel_guc_submission_fini(struct intel_guc *guc) @@ -1142,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc) guc_clients_destroy(guc); WARN_ON(!guc_verify_doorbells(guc)); - guc_stage_desc_pool_destroy(guc); + if (guc->stage_desc_pool) + guc_stage_desc_pool_destroy(guc); } static void guc_interrupts_capture(struct drm_i915_private *dev_priv) @@ -1225,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine) intel_engine_pin_breadcrumbs_irq(engine); } +static void guc_set_default_submission(struct intel_engine_cs *engine) +{ + /* + * We inherit a bunch of functions from execlists that we'd like + * to keep using: + * + * engine->submit_request = execlists_submit_request; + * engine->cancel_requests = execlists_cancel_requests; + * engine->schedule = execlists_schedule; + * + * But we need to override the actual submission backend in order + * to talk to the GuC. 
+ */ + intel_execlists_set_default_submission(engine); + + engine->execlists.tasklet.func = guc_submission_tasklet; + + engine->park = guc_submission_park; + engine->unpark = guc_submission_unpark; + + engine->reset.prepare = guc_reset_prepare; + + engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; +} + int intel_guc_submission_enable(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -1263,14 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { - struct intel_engine_execlists * const execlists = - &engine->execlists; - - execlists->tasklet.func = guc_submission_tasklet; - engine->park = guc_submission_park; - engine->unpark = guc_submission_unpark; - - engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; + engine->set_default_submission = guc_set_default_submission; + engine->set_default_submission(engine); } return 0; @@ -1284,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc) guc_interrupts_release(dev_priv); guc_clients_doorbell_fini(guc); - - /* Revert back to manual ELSP submission */ - intel_engines_reset_default_submission(dev_priv); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index a2fe7c8d4477..c22b3e18a0f5 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -47,6 +47,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) return true; if (IS_KABYLAKE(dev_priv)) return true; + if (IS_BROXTON(dev_priv)) + return true; return false; } @@ -90,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; + if (i915_inject_load_failure()) + return -ENODEV; + if (!i915_modparams.enable_gvt) { DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); return 0; diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index d47e346bd49e..2fc7a0dd0df9 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -294,6 +294,7 @@ static void hangcheck_store_sample(struct intel_engine_cs *engine, engine->hangcheck.seqno = hc->seqno; engine->hangcheck.action = hc->action; engine->hangcheck.stalled = hc->stalled; + engine->hangcheck.wedged = hc->wedged; } static enum intel_engine_hangcheck_action @@ -368,6 +369,9 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, hc->stalled = time_after(jiffies, engine->hangcheck.action_timestamp + timeout); + hc->wedged = time_after(jiffies, + engine->hangcheck.action_timestamp + + I915_ENGINE_WEDGED_TIMEOUT); } static void hangcheck_declare_hang(struct drm_i915_private *i915, @@ -409,7 +413,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) gpu_error.hangcheck_work.work); struct intel_engine_cs *engine; enum intel_engine_id id; - unsigned int hung = 0, stuck = 0; + unsigned int hung = 0, stuck = 0, wedged = 0; if (!i915_modparams.enable_hangcheck) return; @@ -440,6 +444,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (hc.action != ENGINE_DEAD) stuck |= intel_engine_flag(engine); } + + if (engine->hangcheck.wedged) + wedged |= intel_engine_flag(engine); + } + + if (wedged) { + dev_err(dev_priv->drm.dev, + "GPU recovery timed out," + " cancelling all in-flight rendering.\n"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(dev_priv); } if (hung) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index d8cb53ef4351..8363fbd18ee8 100644 
--- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -51,7 +51,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) { struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t enabled_bits; + u32 enabled_bits; enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; @@ -59,6 +59,15 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) "HDMI port enabled, expecting disabled\n"); } +static void +assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) & + TRANS_DDI_FUNC_ENABLE, + "HDMI transcoder function enabled, expecting disabled\n"); +} + struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) { struct intel_digital_port *intel_dig_port = @@ -144,7 +153,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - const uint32_t *data = frame; + const u32 *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 val = I915_READ(VIDEO_DIP_CTL); @@ -199,7 +208,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - const uint32_t *data = frame; + const u32 *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); @@ -259,7 +268,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - const uint32_t *data = frame; + const u32 *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); @@ -317,7 +326,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - const uint32_t *data = frame; + const u32 *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); @@ -376,19 +385,16 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - const uint32_t *data = frame; + const u32 *data = frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); - i915_reg_t data_reg; int data_size = type == DP_SDP_VSC ? 
VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE; int i; u32 val = I915_READ(ctl_reg); - data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); - val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); @@ -442,7 +448,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder, union hdmi_infoframe *frame) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - uint8_t buffer[VIDEO_DIP_DATA_SIZE]; + u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; /* see comment above for the reason for this offset */ @@ -461,7 +467,8 @@ static void intel_write_infoframe(struct drm_encoder *encoder, } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, - const struct intel_crtc_state *crtc_state) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = @@ -491,6 +498,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, intel_hdmi->rgb_quant_range_selectable, is_hdmi2_sink); + drm_hdmi_avi_infoframe_content_type(&frame.avi, + conn_state); + /* TODO: handle pixel repetition for YCBCR420 outputs */ intel_write_infoframe(encoder, crtc_state, &frame); } @@ -586,7 +596,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); intel_hdmi_set_spd_infoframe(encoder, crtc_state); intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } @@ -727,7 +737,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); intel_hdmi_set_spd_infoframe(encoder, crtc_state); intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } @@ -770,7 +780,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); intel_hdmi_set_spd_infoframe(encoder, crtc_state); intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } @@ -823,7 +833,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); intel_hdmi_set_spd_infoframe(encoder, crtc_state); intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } @@ -834,11 +844,11 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); u32 val = I915_READ(reg); - assert_hdmi_port_disabled(intel_hdmi); + assert_hdmi_transcoder_func_disabled(dev_priv, + crtc_state->cpu_transcoder); val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW | @@ -856,7 +866,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state); + intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); intel_hdmi_set_spd_infoframe(encoder, 
crtc_state); intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); } @@ -1161,33 +1171,16 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder, static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - u32 tmp; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, encoder->power_domain)) return false; - ret = false; + ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe); - tmp = I915_READ(intel_hdmi->hdmi_reg); - - if (!(tmp & SDVO_ENABLE)) - goto out; - - if (HAS_PCH_CPT(dev_priv)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else if (IS_CHERRYVIEW(dev_priv)) - *pipe = SDVO_PORT_TO_PIPE_CHV(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - ret = true; - -out: intel_display_power_put(dev_priv, encoder->power_domain); return ret; @@ -1421,8 +1414,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); - temp &= ~SDVO_PIPE_B_SELECT; - temp |= SDVO_ENABLE; + temp &= ~SDVO_PIPE_SEL_MASK; + temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A); /* * HW workaround, need to write this twice for issue * that may result in first write getting masked. @@ -1577,14 +1570,23 @@ intel_hdmi_mode_valid(struct drm_connector *connector, /* check if we can do 8bpc */ status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi); - /* if we can't do 8bpc we may still be able to do 12bpc */ - if (!HAS_GMCH_DISPLAY(dev_priv) && status != MODE_OK && hdmi->has_hdmi_sink && !force_dvi) - status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, true, force_dvi); + if (hdmi->has_hdmi_sink && !force_dvi) { + /* if we can't do 8bpc we may still be able to do 12bpc */ + if (status != MODE_OK && !HAS_GMCH_DISPLAY(dev_priv)) + status = hdmi_port_clock_valid(hdmi, clock * 3 / 2, + true, force_dvi); + + /* if we can't do 8,12bpc we may still be able to do 10bpc */ + if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11) + status = hdmi_port_clock_valid(hdmi, clock * 5 / 4, + true, force_dvi); + } return status; } -static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) +static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, + int bpc) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); @@ -1596,6 +1598,9 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) if (HAS_GMCH_DISPLAY(dev_priv)) return false; + if (bpc == 10 && INTEL_GEN(dev_priv) < 11) + return false; + if (crtc_state->pipe_bpp <= 8*3) return false; @@ -1603,7 +1608,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) return false; /* - * HDMI 12bpc affects the clocks, so it's only possible + * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. 
*/ if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) @@ -1618,16 +1623,24 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) if (crtc_state->ycbcr420) { const struct drm_hdmi_info *hdmi = &info->hdmi; - if (!(hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) + if (bpc == 12 && !(hdmi->y420_dc_modes & + DRM_EDID_YCBCR420_DC_36)) + return false; + else if (bpc == 10 && !(hdmi->y420_dc_modes & + DRM_EDID_YCBCR420_DC_30)) return false; } else { - if (!(info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36)) + if (bpc == 12 && !(info->edid_hdmi_dc_modes & + DRM_EDID_HDMI_DC_36)) + return false; + else if (bpc == 10 && !(info->edid_hdmi_dc_modes & + DRM_EDID_HDMI_DC_30)) return false; } } /* Display WA #1139: glk */ - if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && + if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && crtc_state->base.adjusted_mode.htotal > 5460) return false; @@ -1637,7 +1650,8 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) static bool intel_hdmi_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *config, - int *clock_12bpc, int *clock_8bpc) + int *clock_12bpc, int *clock_10bpc, + int *clock_8bpc) { struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc); @@ -1649,6 +1663,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, /* YCBCR420 TMDS rate requirement is half the pixel clock */ config->port_clock /= 2; *clock_12bpc /= 2; + *clock_10bpc /= 2; *clock_8bpc /= 2; config->ycbcr420 = true; @@ -1676,6 +1691,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; + int clock_10bpc = clock_8bpc * 5 / 4; int clock_12bpc = clock_8bpc * 3 / 2; int desired_bpp; bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; @@ -1702,12 +1718,14 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { pipe_config->pixel_multiplier = 2; clock_8bpc *= 2; + clock_10bpc *= 2; clock_12bpc *= 2; } if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { if (!intel_hdmi_ycbcr420_config(connector, pipe_config, - &clock_12bpc, &clock_8bpc)) { + &clock_12bpc, &clock_10bpc, + &clock_8bpc)) { DRM_ERROR("Can't support YCBCR420 output\n"); return false; } @@ -1725,18 +1743,25 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, } /* - * HDMI is either 12 or 8, so if the display lets 10bpc sneak - * through, clamp it down. Note that g4x/vlv don't support 12bpc hdmi - * outputs. We also need to check that the higher clock still fits - * within limits. + * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need + * to check that the higher clock still fits within limits. */ - if (hdmi_12bpc_possible(pipe_config) && - hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true, force_dvi) == MODE_OK) { + if (hdmi_deep_color_possible(pipe_config, 12) && + hdmi_port_clock_valid(intel_hdmi, clock_12bpc, + true, force_dvi) == MODE_OK) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; /* Need to adjust the port link by 1.5x for 12bpc. 
*/ pipe_config->port_clock = clock_12bpc; + } else if (hdmi_deep_color_possible(pipe_config, 10) && + hdmi_port_clock_valid(intel_hdmi, clock_10bpc, + true, force_dvi) == MODE_OK) { + DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n"); + desired_bpp = 10 * 3; + + /* Need to adjust the port link by 1.25x for 10bpc. */ + pipe_config->port_clock = clock_10bpc; } else { DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); desired_bpp = 8*3; @@ -2071,6 +2096,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); + drm_connector_attach_content_type_property(connector); connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } @@ -2257,7 +2283,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); - else if (IS_ICELAKE(dev_priv)) + else if (HAS_PCH_ICP(dev_priv)) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); @@ -2352,7 +2378,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ - if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) { + if (IS_G45(dev_priv)) { u32 temp = I915_READ(PEG_BAND_GAP_DATA); I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 43aa92beff2a..648a13c6043c 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -77,37 +77,6 @@ */ /** - * intel_hpd_port - return port hard associated with certain pin. - * @dev_priv: private driver data pointer - * @pin: the hpd pin to get associated port - * - * Return port that is associatade with @pin and PORT_NONE if no port is - * hard associated with that @pin. - */ -enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv, - enum hpd_pin pin) -{ - switch (pin) { - case HPD_PORT_A: - return PORT_A; - case HPD_PORT_B: - return PORT_B; - case HPD_PORT_C: - return PORT_C; - case HPD_PORT_D: - return PORT_D; - case HPD_PORT_E: - if (IS_CNL_WITH_PORT_F(dev_priv)) - return PORT_F; - return PORT_E; - case HPD_PORT_F: - return PORT_F; - default: - return PORT_NONE; /* no port for this pin */ - } -} - -/** * intel_hpd_pin_default - return default pin associated with certain port. 
* @dev_priv: private driver data pointer * @port: the hpd port to get associated pin @@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) container_of(work, typeof(*dev_priv), hotplug.reenable_work.work); struct drm_device *dev = &dev_priv->drm; - int i; + enum hpd_pin pin; intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->irq_lock); - for_each_hpd_pin(i) { + for_each_hpd_pin(pin) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) + if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED) continue; - dev_priv->hotplug.stats[i].state = HPD_ENABLED; + dev_priv->hotplug.stats[pin].state = HPD_ENABLED; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { struct intel_connector *intel_connector = to_intel_connector(connector); - if (intel_connector->encoder->hpd_pin == i) { + if (intel_connector->encoder->hpd_pin == pin) { if (connector->polled != intel_connector->polled) DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", connector->name); @@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, return true; } +static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) +{ + return intel_encoder_is_dig_port(encoder) && + enc_to_dig_port(&encoder->base)->hpd_pulse != NULL; +} + static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, hotplug.dig_port_work); u32 long_port_mask, short_port_mask; - struct intel_digital_port *intel_dig_port; - int i; + struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); @@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work) dev_priv->hotplug.short_port_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); - for (i = 0; i < I915_MAX_PORTS; i++) { - bool valid = false; - bool long_hpd = false; - intel_dig_port = dev_priv->hotplug.irq_port[i]; - if (!intel_dig_port || !intel_dig_port->hpd_pulse) + for_each_intel_encoder(&dev_priv->drm, encoder) { + struct intel_digital_port *dig_port; + enum port port = encoder->port; + bool long_hpd, short_hpd; + enum irqreturn ret; + + if (!intel_encoder_has_hpd_pulse(encoder)) continue; - if (long_port_mask & (1 << i)) { - valid = true; - long_hpd = true; - } else if (short_port_mask & (1 << i)) - valid = true; + long_hpd = long_port_mask & BIT(port); + short_hpd = short_port_mask & BIT(port); - if (valid) { - enum irqreturn ret; + if (!long_hpd && !short_hpd) + continue; - ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); - if (ret == IRQ_NONE) { - /* fall back to old school hpd */ - old_bits |= (1 << intel_dig_port->base.hpd_pin); - } + dig_port = enc_to_dig_port(&encoder->base); + + ret = dig_port->hpd_pulse(dig_port, long_hpd); + if (ret == IRQ_NONE) { + /* fall back to old school hpd */ + old_bits |= BIT(encoder->hpd_pin); } } @@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work) void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { - int i; - enum port port; + struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; - bool is_dig_port; if (!pin_mask) return; spin_lock(&dev_priv->irq_lock); - for_each_hpd_pin(i) { - if (!(BIT(i) & pin_mask)) - continue; + for_each_intel_encoder(&dev_priv->drm, encoder) { + enum hpd_pin pin = encoder->hpd_pin; + bool 
has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); - port = intel_hpd_pin_to_port(dev_priv, i); - is_dig_port = port != PORT_NONE && - dev_priv->hotplug.irq_port[port]; + if (!(BIT(pin) & pin_mask)) + continue; - if (is_dig_port) { - bool long_hpd = long_mask & BIT(i); + if (has_hpd_pulse) { + bool long_hpd = long_mask & BIT(pin); + enum port port = encoder->port; DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), long_hpd ? "long" : "short"); @@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, } } - if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { + if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the @@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * interrupts on saner platforms. */ WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), - "Received HPD interrupt on pin %d although disabled\n", i); + "Received HPD interrupt on pin %d although disabled\n", pin); continue; } - if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) + if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) continue; - if (!is_dig_port) { - dev_priv->hotplug.event_bits |= BIT(i); + if (!has_hpd_pulse) { + dev_priv->hotplug.event_bits |= BIT(pin); queue_hp = true; } - if (intel_hpd_irq_storm_detect(dev_priv, i)) { - dev_priv->hotplug.event_bits &= ~BIT(i); + if (intel_hpd_irq_storm_detect(dev_priv, pin)) { + dev_priv->hotplug.event_bits &= ~BIT(pin); storm_detected = true; } } diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 291285277403..ffcad5fad6a7 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -32,6 +32,14 @@ void intel_huc_init_early(struct intel_huc *huc) intel_huc_fw_init_early(huc); } +int intel_huc_init_misc(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_i915(huc); + + intel_uc_fw_fetch(i915, &huc->fw); + return 0; +} + /** * intel_huc_auth() - Authenticate HuC uCode * @huc: intel_huc structure diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index aa854907abac..7e41d870b509 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h @@ -36,9 +36,15 @@ struct intel_huc { }; void intel_huc_init_early(struct intel_huc *huc); +int intel_huc_init_misc(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); int intel_huc_check_status(struct intel_huc *huc); +static inline void intel_huc_fini_misc(struct intel_huc *huc) +{ + intel_uc_fw_fini(&huc->fw); +} + static inline int intel_huc_sanitize(struct intel_huc *huc) { intel_uc_fw_sanitize(&huc->fw); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index e6875509bcd9..bef32b7c248e 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -77,12 +77,12 @@ static const struct gmbus_pin gmbus_pins_cnp[] = { }; static const struct gmbus_pin gmbus_pins_icp[] = { - [GMBUS_PIN_1_BXT] = { "dpa", GPIOA }, - [GMBUS_PIN_2_BXT] = { "dpb", GPIOB }, - [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOC }, - [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOD }, - [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOE }, - [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOF }, + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, + [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, + [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, + [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM 
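The reworked i915_digport_work_func() above walks the encoders and tests per-port bits in the long/short masks instead of indexing an irq_port[] array. A small standalone model of that dispatch, with made-up port data (not driver code):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

/* Per-port long/short events carried as bitmasks; each encoder checks its own bit. */
static void dispatch(unsigned int long_mask, unsigned int short_mask,
		     const enum port *ports, int nports)
{
	for (int i = 0; i < nports; i++) {
		enum port port = ports[i];
		bool long_hpd = long_mask & BIT(port);
		bool short_hpd = short_mask & BIT(port);

		if (!long_hpd && !short_hpd)
			continue;

		printf("port %c: %s pulse\n", 'A' + port, long_hpd ? "long" : "short");
	}
}

int main(void)
{
	enum port ports[] = { PORT_B, PORT_C, PORT_D };

	/* reports a long pulse on port C and a short pulse on port D */
	dispatch(BIT(PORT_C), BIT(PORT_D), ports, 3);
	return 0;
}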
}, }; /* pin is expected to be valid */ @@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) return ret; } +static inline +unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv) +{ + return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX : + GMBUS_BYTE_COUNT_MAX; +} + static int gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, unsigned short addr, u8 *buf, unsigned int len, - u32 gmbus1_index) + u32 gmbus0_reg, u32 gmbus1_index) { + unsigned int size = len; + bool burst_read = len > gmbus_max_xfer_size(dev_priv); + bool extra_byte_added = false; + + if (burst_read) { + /* + * As per HW Spec, for 512Bytes need to read extra Byte and + * Ignore the extra byte read. + */ + if (len == 512) { + extra_byte_added = true; + len++; + } + size = len % 256 + 256; + I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE); + } + I915_WRITE_FW(GMBUS1, gmbus1_index | GMBUS_CYCLE_WAIT | - (len << GMBUS_BYTE_COUNT_SHIFT) | + (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); while (len) { @@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, val = I915_READ_FW(GMBUS3); do { + if (extra_byte_added && len == 1) + break; + *buf++ = val & 0xff; val >>= 8; } while (--len && ++loop < 4); + + if (burst_read && len == size - 4) + /* Reset the override bit */ + I915_WRITE_FW(GMBUS0, gmbus0_reg); } return 0; } +/* + * HW spec says that 512Bytes in Burst read need special treatment. + * But it doesn't talk about other multiple of 256Bytes. And couldn't locate + * an I2C slave, which supports such a lengthy burst read too for experiments. + * + * So until things get clarified on HW support, to avoid the burst read length + * in fold of 256Bytes except 512, max burst read length is fixed at 767Bytes. 
+ */ +#define INTEL_GMBUS_BURST_READ_MAX_LEN 767U + static int gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, - u32 gmbus1_index) + u32 gmbus0_reg, u32 gmbus1_index) { u8 *buf = msg->buf; unsigned int rx_size = msg->len; @@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(rx_size, GMBUS_BYTE_COUNT_MAX); + if (HAS_GMBUS_BURST_READ(dev_priv)) + len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN); + else + len = min(rx_size, gmbus_max_xfer_size(dev_priv)); - ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, - buf, len, gmbus1_index); + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len, + gmbus0_reg, gmbus1_index); if (ret) return ret; @@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg, int ret; do { - len = min(tx_size, GMBUS_BYTE_COUNT_MAX); + len = min(tx_size, gmbus_max_xfer_size(dev_priv)); ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len, gmbus1_index); @@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num) } static int -gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) +gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs, + u32 gmbus0_reg) { u32 gmbus1_index = 0; u32 gmbus5 = 0; @@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) I915_WRITE_FW(GMBUS5, gmbus5); if (msgs[1].flags & I2C_M_RD) - ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); + ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg, + gmbus1_index); else ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index); @@ -544,10 +590,12 @@ retry: for (; i < num; i += inc) { inc = 1; if (gmbus_is_index_xfer(msgs, i, num)) { - ret = gmbus_index_xfer(dev_priv, &msgs[i]); + ret = gmbus_index_xfer(dev_priv, &msgs[i], + gmbus0_source | bus->reg0); inc = 2; /* an index transmission is two msgs */ } else if (msgs[i].flags & I2C_M_RD) { - ret = gmbus_xfer_read(dev_priv, &msgs[i], 0); + ret = gmbus_xfer_read(dev_priv, &msgs[i], + gmbus0_source | bus->reg0, 0); } else { ret = gmbus_xfer_write(dev_priv, &msgs[i], 0); } @@ -771,7 +819,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv) unsigned int pin; int ret; - if (HAS_PCH_NOP(dev_priv)) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 6269750e2b54..cdf19553ffac 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -62,6 +62,7 @@ #include <linux/acpi.h> #include <linux/device.h> +#include <linux/irq.h> #include <linux/pci.h> #include <linux/pm_runtime.h> @@ -126,9 +127,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return platdev; } - pm_runtime_forbid(&platdev->dev); - pm_runtime_set_active(&platdev->dev); - pm_runtime_enable(&platdev->dev); + pm_runtime_no_callbacks(&platdev->dev); return platdev; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 7c4c8fb1dae4..174479232e94 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -137,6 +137,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_gem_render_state.h" +#include "i915_vgpu.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_workarounds.h" @@ -164,7 +165,8 @@ #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) static int 
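gmbus_xfer_read() above now splits a large read into chunks of at most INTEL_GMBUS_BURST_READ_MAX_LEN (767) bytes when burst reads are available, or the per-generation GMBUS byte-count limit otherwise. A simplified standalone model of that chunking (the limit value used here is an assumption for illustration, not the real register limit):

#include <stdbool.h>
#include <stdio.h>

#define GMBUS_BYTE_COUNT_MAX	256U	/* classic per-transaction limit (assumed) */
#define BURST_READ_MAX_LEN	767U	/* mirrors INTEL_GMBUS_BURST_READ_MAX_LEN */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Plan how a large I2C read would be split into GMBUS transactions. */
static void plan_read(unsigned int rx_size, bool has_burst_read)
{
	while (rx_size) {
		unsigned int len;

		if (has_burst_read)
			len = min_u(rx_size, BURST_READ_MAX_LEN);
		else
			len = min_u(rx_size, GMBUS_BYTE_COUNT_MAX);

		printf("chunk of %u bytes%s\n", len,
		       len > GMBUS_BYTE_COUNT_MAX ? " (burst read)" : "");
		rx_size -= len;
	}
}

int main(void)
{
	plan_read(1024, true);	/* 767 + 257 */
	plan_read(1024, false);	/* 256 x 4 */
	return 0;
}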
execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); + struct intel_engine_cs *engine, + struct intel_context *ce); static void execlists_init_reg_state(u32 *reg_state, struct i915_gem_context *ctx, struct intel_engine_cs *engine, @@ -189,12 +191,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, !i915_request_completed(last)); } -/** - * intel_lr_context_descriptor_update() - calculate & cache the descriptor - * descriptor for a pinned context - * @ctx: Context to work on - * @engine: Engine the descriptor will be used with - * +/* * The context descriptor encodes various attributes of a context, * including its GTT address and some flags. Because it's fairly * expensive to calculate, we'll just do it once and cache the result, @@ -204,7 +201,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, * * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-52: ctx ID, a globally unique tag + * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) * bits 53-54: mbz, reserved for use by hardware * bits 55-63: group ID, currently unused and set to 0 * @@ -222,9 +219,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, */ static void intel_lr_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) + struct intel_engine_cs *engine, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); u64 desc; BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); @@ -237,6 +234,11 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, /* bits 12-31 */ GEM_BUG_ON(desc & GENMASK_ULL(63, 32)); + /* + * The following 32bits are copied into the OA reports (dword 2). + * Consider updating oa_get_render_ctx_id in i915_perf.c when changing + * anything below. + */ if (INTEL_GEN(ctx->i915) >= 11) { GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; @@ -271,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio) find_priolist: /* most positive priority is scheduled first, equal priorities fifo */ rb = NULL; - parent = &execlists->queue.rb_node; + parent = &execlists->queue.rb_root.rb_node; while (*parent) { rb = *parent; p = to_priolist(rb); @@ -309,10 +311,7 @@ find_priolist: p->priority = prio; INIT_LIST_HEAD(&p->requests); rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &execlists->queue); - - if (first) - execlists->first = &p->node; + rb_insert_color_cached(&p->node, &execlists->queue, first); return p; } @@ -418,9 +417,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) static u64 execlists_update_context(struct i915_request *rq) { - struct intel_context *ce = to_intel_context(rq->ctx, rq->engine); + struct intel_context *ce = rq->hw_context; struct i915_hw_ppgtt *ppgtt = - rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; + rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); @@ -430,7 +429,7 @@ static u64 execlists_update_context(struct i915_request *rq) * PML4 is allocated during ppgtt init, so this is not needed * in 48-bit mode. 
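The context descriptor layout documented above packs the flags, the LRCA and the context ID into a single 64-bit value. A rough standalone illustration of that packing (field masks taken from the bit ranges in the comment; not the driver's actual helper):

#include <stdint.h>
#include <stdio.h>

/* Pack a descriptor following the bit ranges in the comment above. */
static uint64_t make_lrc_desc(uint64_t flags, uint64_t lrca, uint64_t ctx_id)
{
	uint64_t desc = 0;

	desc |= flags & 0xfff;			/* bits 0-11: GEN8_CTX_* flags */
	desc |= lrca & 0xfffff000;		/* bits 12-31: GGTT address, 4K aligned */
	desc |= (ctx_id & 0x1fffff) << 32;	/* bits 32-52: globally unique ctx ID */
	return desc;				/* bits 53-63 stay zero here */
}

int main(void)
{
	printf("desc = 0x%016llx\n",
	       (unsigned long long)make_lrc_desc(0x19, 0x12345000, 42));
	return 0;
}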
*/ - if (ppgtt && !i915_vm_is_48bit(&ppgtt->base)) + if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) execlists_update_context_pdps(ppgtt, reg_state); return ce->lrc_desc; @@ -454,6 +453,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) unsigned int n; /* + * We can skip acquiring intel_runtime_pm_get() here as it was taken + * on our behalf by the request (see i915_gem_mark_busy()) and it will + * not be relinquished until the device is idle (see + * i915_gem_idle_work_handler()). As a precaution, we make sure + * that all ELSP are drained i.e. we have processed the CSB, + * before allowing ourselves to idle and calling intel_runtime_pm_put(). + */ + GEM_BUG_ON(!engine->i915->gt.awake); + + /* * ELSQ note: the submit queue is not cleared after being submitted * to the HW so we need to make sure we always clean it up. This is * currently ensured by the fact that we always write the same number @@ -495,14 +504,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); } -static bool ctx_single_port_submission(const struct i915_gem_context *ctx) +static bool ctx_single_port_submission(const struct intel_context *ce) { return (IS_ENABLED(CONFIG_DRM_I915_GVT) && - i915_gem_context_force_single_submission(ctx)); + i915_gem_context_force_single_submission(ce->gem_context)); } -static bool can_merge_ctx(const struct i915_gem_context *prev, - const struct i915_gem_context *next) +static bool can_merge_ctx(const struct intel_context *prev, + const struct intel_context *next) { if (prev != next) return false; @@ -552,11 +561,24 @@ static void inject_preempt_context(struct intel_engine_cs *engine) if (execlists->ctrl_reg) writel(EL_CTRL_LOAD, execlists->ctrl_reg); - execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK); - execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT); + execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); + execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); +} + +static void complete_preempt_context(struct intel_engine_execlists *execlists) +{ + GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); + + if (inject_preempt_hang(execlists)) + return; + + execlists_cancel_port_requests(execlists); + __unwind_incomplete_requests(container_of(execlists, + struct intel_engine_cs, + execlists)); } -static bool __execlists_dequeue(struct intel_engine_cs *engine) +static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; struct execlist_port *port = execlists->port; @@ -566,9 +588,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) struct rb_node *rb; bool submit = false; - lockdep_assert_held(&engine->timeline.lock); - - /* Hardware submission is through 2 ports. Conceptually each port + /* + * Hardware submission is through 2 ports. Conceptually each port * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is * static for a context, and unique to each, so we only execute * requests belonging to a single context from each ring. RING_HEAD @@ -589,9 +610,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * and context switches) submission. 
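The dequeue comment above describes how the two ELSP ports are filled: consecutive requests for the same context coalesce into one port, a context switch moves on to the next port, and dequeuing stops once both ports are claimed. A toy standalone model of that policy (not the driver's data structures):

#include <stdio.h>

#define NPORTS 2

static int fill_ports(const int *ctx_of_rq, int nrq)
{
	int port = 0;
	int last_ctx = -1;
	int submitted = 0;

	for (int i = 0; i < nrq; i++) {
		if (last_ctx != -1 && ctx_of_rq[i] != last_ctx) {
			if (++port == NPORTS)
				break;		/* both ports busy, leave the rest queued */
		}
		printf("rq %d (ctx %d) -> port %d\n", i, ctx_of_rq[i], port);
		last_ctx = ctx_of_rq[i];
		submitted++;
	}
	return submitted;
}

int main(void)
{
	int ctxs[] = { 1, 1, 2, 2, 3 };

	fill_ports(ctxs, 5);	/* rq0,rq1 -> port 0; rq2,rq3 -> port 1; rq4 stays queued */
	return 0;
}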
*/ - rb = execlists->first; - GEM_BUG_ON(rb_first(&execlists->queue) != rb); - if (last) { /* * Don't resubmit or switch until all outstanding @@ -602,8 +620,6 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); - if (port_count(&port[0]) > 1) - return false; /* * If we write to ELSP a second time before the HW has had @@ -613,11 +629,11 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * the HW to indicate that it has had a chance to respond. */ if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) - return false; + return; if (need_preempt(engine, last, execlists->queue_priority)) { inject_preempt_context(engine); - return false; + return; } /* @@ -642,7 +658,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * priorities of the ports haven't been switch. */ if (port_count(&port[1])) - return false; + return; /* * WaIdleLiteRestore:bdw,skl @@ -655,7 +671,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) last->tail = last->wa_tail; } - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; @@ -671,7 +687,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * second request, and so we never need to tell the * hardware about the first. */ - if (last && !can_merge_ctx(rq->ctx, last->ctx)) { + if (last && + !can_merge_ctx(rq->hw_context, last->hw_context)) { /* * If we are on the second port and cannot * combine this request with the last, then we @@ -690,14 +707,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) * the same context (even though a different * request) to the second port. */ - if (ctx_single_port_submission(last->ctx) || - ctx_single_port_submission(rq->ctx)) { + if (ctx_single_port_submission(last->hw_context) || + ctx_single_port_submission(rq->hw_context)) { __list_del_many(&p->requests, &rq->sched.link); goto done; } - GEM_BUG_ON(last->ctx == rq->ctx); + GEM_BUG_ON(last->hw_context == rq->hw_context); if (submit) port_assign(port, last); @@ -713,8 +730,7 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine) submit = true; } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); @@ -740,35 +756,23 @@ done: execlists->queue_priority = port != execlists->port ? 
rq_prio(last) : INT_MIN; - execlists->first = rb; - if (submit) + if (submit) { port_assign(port, last); + execlists_submit_ports(engine); + } /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); + GEM_BUG_ON(rb_first_cached(&execlists->queue) && + !port_isset(execlists->port)); /* Re-evaluate the executing context setup after each preemptive kick */ if (last) execlists_user_begin(execlists, execlists->port); - return submit; -} - -static void execlists_dequeue(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - unsigned long flags; - bool submit; - - spin_lock_irqsave(&engine->timeline.lock, flags); - submit = __execlists_dequeue(engine); - spin_unlock_irqrestore(&engine->timeline.lock, flags); - - if (submit) - execlists_submit_ports(engine); - - GEM_BUG_ON(port_isset(execlists->port) && - !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); + /* If the engine is now idle, so should be the flag; and vice versa. */ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } void @@ -799,82 +803,27 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) port++; } - execlists_user_end(execlists); + execlists_clear_all_active(execlists); } -static void clear_gtiir(struct intel_engine_cs *engine) +static void reset_csb_pointers(struct intel_engine_execlists *execlists) { - struct drm_i915_private *dev_priv = engine->i915; - int i; - /* - * Clear any pending interrupt state. - * - * We do it twice out of paranoia that some of the IIR are - * double buffered, and so if we only reset it once there may - * still be an interrupt pending. + * After a reset, the HW starts writing into CSB entry [0]. We + * therefore have to set our HEAD pointer back one entry so that + * the *first* entry we check is entry 0. To complicate this further, + * as we don't wait for the first interrupt after reset, we have to + * fake the HW write to point back to the last entry so that our + * inline comparison of our cached head position against the last HW + * write works even before the first interrupt. 
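reset_csb_pointers() above parks both the software head and the shadow write pointer on the last CSB slot, so the first post-reset comparison sees an empty buffer and the first entry consumed is CSB[0]. A toy standalone model of that ring-buffer behaviour (entry count assumed to match GEN8_CSB_ENTRIES):

#include <stdio.h>

#define CSB_ENTRIES 6

struct csb {
	unsigned int head;
	unsigned int write;	/* stands in for *execlists->csb_write */
};

/* After reset, HW writes entry 0 next, so park head and write on the last slot. */
static void reset_csb(struct csb *csb)
{
	csb->head = CSB_ENTRIES - 1;
	csb->write = CSB_ENTRIES - 1;	/* head == write => nothing to process */
}

static void process(struct csb *csb)
{
	while (csb->head != csb->write) {
		if (++csb->head == CSB_ENTRIES)
			csb->head = 0;
		printf("consume CSB[%u]\n", csb->head);
	}
}

int main(void)
{
	struct csb csb;

	reset_csb(&csb);
	csb.write = 1;		/* HW has written entries 0 and 1 since reset */
	process(&csb);		/* consumes CSB[0] then CSB[1] */
	return 0;
}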
*/ - if (INTEL_GEN(dev_priv) >= 11) { - static const struct { - u8 bank; - u8 bit; - } gen11_gtiir[] = { - [RCS] = {0, GEN11_RCS0}, - [BCS] = {0, GEN11_BCS}, - [_VCS(0)] = {1, GEN11_VCS(0)}, - [_VCS(1)] = {1, GEN11_VCS(1)}, - [_VCS(2)] = {1, GEN11_VCS(2)}, - [_VCS(3)] = {1, GEN11_VCS(3)}, - [_VECS(0)] = {1, GEN11_VECS(0)}, - [_VECS(1)] = {1, GEN11_VECS(1)}, - }; - unsigned long irqflags; - - GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir)); - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - for (i = 0; i < 2; i++) { - gen11_reset_one_iir(dev_priv, - gen11_gtiir[engine->id].bank, - gen11_gtiir[engine->id].bit); - } - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - } else { - static const u8 gtiir[] = { - [RCS] = 0, - [BCS] = 0, - [VCS] = 1, - [VCS2] = 1, - [VECS] = 3, - }; - - GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir)); - - for (i = 0; i < 2; i++) { - I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), - engine->irq_keep_mask); - POSTING_READ(GEN8_GT_IIR(gtiir[engine->id])); - } - GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) & - engine->irq_keep_mask); - } + execlists->csb_head = execlists->csb_write_reset; + WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset); } -static void reset_irq(struct intel_engine_cs *engine) +static void nop_submission_tasklet(unsigned long data) { - /* Mark all CS interrupts as complete */ - smp_store_mb(engine->execlists.active, 0); - synchronize_hardirq(engine->i915->drm.irq); - - clear_gtiir(engine); - - /* - * The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. - */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + /* The driver is wedged; don't process any more events. */ } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -901,13 +850,11 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) * submission's irq state, we also wish to remind ourselves that * it is irq state.) */ - local_irq_save(flags); + spin_lock_irqsave(&engine->timeline.lock, flags); /* Cancel the requests on the HW and clear the ELSP tracker. */ execlists_cancel_port_requests(execlists); - reset_irq(engine); - - spin_lock(&engine->timeline.lock); + execlists_user_end(execlists); /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->timeline.requests, link) { @@ -917,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } /* Flush the queued requests to the timeline list (for retiring). 
*/ - rb = execlists->first; - while (rb) { + while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { @@ -928,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) __i915_request_submit(rq); } - rb = rb_next(rb); - rb_erase(&p->node, &execlists->queue); + rb_erase_cached(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); @@ -938,221 +883,198 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Remaining _unready_ requests will be nop'ed when submitted */ execlists->queue_priority = INT_MIN; - execlists->queue = RB_ROOT; - execlists->first = NULL; + execlists->queue = RB_ROOT_CACHED; GEM_BUG_ON(port_isset(execlists->port)); - spin_unlock(&engine->timeline.lock); + GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); + execlists->tasklet.func = nop_submission_tasklet; - local_irq_restore(flags); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } -/* - * Check the unread Context Status Buffers and manage the submission of new - * contexts to the ELSP accordingly. - */ -static void execlists_submission_tasklet(unsigned long data) +static inline bool +reset_in_progress(const struct intel_engine_execlists *execlists) +{ + return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); +} + +static void process_csb(struct intel_engine_cs *engine) { - struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_execlists * const execlists = &engine->execlists; struct execlist_port *port = execlists->port; - struct drm_i915_private *dev_priv = engine->i915; - bool fw = false; + const u32 * const buf = execlists->csb_status; + u8 head, tail; /* - * We can skip acquiring intel_runtime_pm_get() here as it was taken - * on our behalf by the request (see i915_gem_mark_busy()) and it will - * not be relinquished until the device is idle (see - * i915_gem_idle_work_handler()). As a precaution, we make sure - * that all ELSP are drained i.e. we have processed the CSB, - * before allowing ourselves to idle and calling intel_runtime_pm_put(). + * Note that csb_write, csb_status may be either in HWSP or mmio. + * When reading from the csb_write mmio register, we have to be + * careful to only use the GEN8_CSB_WRITE_PTR portion, which is + * the low 4bits. As it happens we know the next 4bits are always + * zero and so we can simply masked off the low u8 of the register + * and treat it identically to reading from the HWSP (without having + * to use explicit shifting and masking, and probably bifurcating + * the code to handle the legacy mmio read). */ - GEM_BUG_ON(!dev_priv->gt.awake); + head = execlists->csb_head; + tail = READ_ONCE(*execlists->csb_write); + GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); + if (unlikely(head == tail)) + return; /* - * Prefer doing test_and_clear_bit() as a two stage operation to avoid - * imposing the cost of a locked atomic transaction when submitting a - * new request (outside of the context-switch interrupt). + * Hopefully paired with a wmb() in HW! + * + * We must complete the read of the write pointer before any reads + * from the CSB, so that we do not see stale values. Without an rmb + * (lfence) the HW may speculatively perform the CSB[] reads *before* + * we perform the READ_ONCE(*csb_write). 
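The comment above notes that when the write pointer comes from the mmio register only its low bits are meaningful, and that the adjacent bits read as zero, so taking the whole low byte gives the same result as masking the narrow field. A tiny standalone illustration (the register layout here is assumed purely for the example):

#include <stdint.h>
#include <stdio.h>

#define CSB_WRITE_PTR_MASK	0x00ffu	/* low byte: write pointer (spare bits read 0) */
#define CSB_READ_PTR_SHIFT	8	/* read pointer lives higher up (assumed) */

static uint8_t csb_write_ptr(uint32_t reg)
{
	/* Masking the whole low byte is equivalent to masking the narrow
	 * write-pointer field, because the spare bits are always zero. */
	return reg & CSB_WRITE_PTR_MASK;
}

int main(void)
{
	uint32_t reg = (2u << CSB_READ_PTR_SHIFT) | 5u;	/* read ptr 2, write ptr 5 */

	printf("write ptr = %u\n", csb_write_ptr(reg));	/* 5 */
	return 0;
}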
*/ - while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { - /* The HWSP contains a (cacheable) mirror of the CSB */ - const u32 *buf = - &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; - unsigned int head, tail; + rmb(); - if (unlikely(execlists->csb_use_mmio)) { - buf = (u32 * __force) - (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); - execlists->csb_head = -1; /* force mmio read of CSB ptrs */ - } + do { + struct i915_request *rq; + unsigned int status; + unsigned int count; - /* Clear before reading to catch new interrupts */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - smp_mb__after_atomic(); + if (++head == GEN8_CSB_ENTRIES) + head = 0; - if (unlikely(execlists->csb_head == -1)) { /* following a reset */ - if (!fw) { - intel_uncore_forcewake_get(dev_priv, - execlists->fw_domains); - fw = true; - } + /* + * We are flying near dragons again. + * + * We hold a reference to the request in execlist_port[] + * but no more than that. We are operating in softirq + * context and so cannot hold any mutex or sleep. That + * prevents us stopping the requests we are processing + * in port[] from being retired simultaneously (the + * breadcrumb will be complete before we see the + * context-switch). As we only hold the reference to the + * request, any pointer chasing underneath the request + * is subject to a potential use-after-free. Thus we + * store all of the bookkeeping within port[] as + * required, and avoid using unguarded pointers beneath + * request itself. The same applies to the atomic + * status notifier. + */ - head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); - tail = GEN8_CSB_WRITE_PTR(head); - head = GEN8_CSB_READ_PTR(head); - execlists->csb_head = head; - } else { - const int write_idx = - intel_hws_csb_write_index(dev_priv) - - I915_HWS_CSB_BUF0_INDEX; + GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", + engine->name, head, + buf[2 * head + 0], buf[2 * head + 1], + execlists->active); + + status = buf[2 * head]; + if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | + GEN8_CTX_STATUS_PREEMPTED)) + execlists_set_active(execlists, + EXECLISTS_ACTIVE_HWACK); + if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) + execlists_clear_active(execlists, + EXECLISTS_ACTIVE_HWACK); + + if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) + continue; + + /* We should never get a COMPLETED | IDLE_ACTIVE! */ + GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); - head = execlists->csb_head; - tail = READ_ONCE(buf[write_idx]); - rmb(); /* Hopefully paired with a wmb() in HW */ + if (status & GEN8_CTX_STATUS_COMPLETE && + buf[2*head + 1] == execlists->preempt_complete_status) { + GEM_TRACE("%s preempt-idle\n", engine->name); + complete_preempt_context(execlists); + continue; } - GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n", - engine->name, - head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?", - tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?"); - while (head != tail) { - struct i915_request *rq; - unsigned int status; - unsigned int count; + if (status & GEN8_CTX_STATUS_PREEMPTED && + execlists_is_active(execlists, + EXECLISTS_ACTIVE_PREEMPT)) + continue; - if (++head == GEN8_CSB_ENTRIES) - head = 0; + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); - /* We are flying near dragons again. 
- * - * We hold a reference to the request in execlist_port[] - * but no more than that. We are operating in softirq - * context and so cannot hold any mutex or sleep. That - * prevents us stopping the requests we are processing - * in port[] from being retired simultaneously (the - * breadcrumb will be complete before we see the - * context-switch). As we only hold the reference to the - * request, any pointer chasing underneath the request - * is subject to a potential use-after-free. Thus we - * store all of the bookkeeping within port[] as - * required, and avoid using unguarded pointers beneath - * request itself. The same applies to the atomic - * status notifier. + rq = port_unpack(port, &count); + GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", + engine->name, + port->context_id, count, + rq ? rq->global_seqno : 0, + rq ? rq->fence.context : 0, + rq ? rq->fence.seqno : 0, + intel_engine_get_seqno(engine), + rq ? rq_prio(rq) : 0); + + /* Check the context/desc id for this event matches */ + GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); + + GEM_BUG_ON(count == 0); + if (--count == 0) { + /* + * On the final event corresponding to the + * submission of this context, we expect either + * an element-switch event or a completion + * event (and on completion, the active-idle + * marker). No more preemptions, lite-restore + * or otherwise. */ + GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); + GEM_BUG_ON(port_isset(&port[1]) && + !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); + GEM_BUG_ON(!port_isset(&port[1]) && + !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); - status = READ_ONCE(buf[2 * head]); /* maybe mmio! */ - GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", - engine->name, head, - status, buf[2*head + 1], - execlists->active); - - if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | - GEN8_CTX_STATUS_PREEMPTED)) - execlists_set_active(execlists, - EXECLISTS_ACTIVE_HWACK); - if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) - execlists_clear_active(execlists, - EXECLISTS_ACTIVE_HWACK); - - if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) - continue; - - /* We should never get a COMPLETED | IDLE_ACTIVE! */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); - - if (status & GEN8_CTX_STATUS_COMPLETE && - buf[2*head + 1] == execlists->preempt_complete_status) { - GEM_TRACE("%s preempt-idle\n", engine->name); - - execlists_cancel_port_requests(execlists); - execlists_unwind_incomplete_requests(execlists); - - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_PREEMPT)); - execlists_clear_active(execlists, - EXECLISTS_ACTIVE_PREEMPT); - continue; - } - - if (status & GEN8_CTX_STATUS_PREEMPTED && - execlists_is_active(execlists, - EXECLISTS_ACTIVE_PREEMPT)) - continue; + /* + * We rely on the hardware being strongly + * ordered, that the breadcrumb write is + * coherent (visible from the CPU) before the + * user interrupt and CSB is processed. + */ + GEM_BUG_ON(!i915_request_completed(rq)); - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); + execlists_context_schedule_out(rq, + INTEL_CONTEXT_SCHEDULE_OUT); + i915_request_put(rq); - rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n", - engine->name, - port->context_id, count, - rq ? rq->global_seqno : 0, - rq ? rq->fence.context : 0, - rq ? rq->fence.seqno : 0, - intel_engine_get_seqno(engine), - rq ? 
rq_prio(rq) : 0); + GEM_TRACE("%s completed ctx=%d\n", + engine->name, port->context_id); - /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); - - GEM_BUG_ON(count == 0); - if (--count == 0) { - /* - * On the final event corresponding to the - * submission of this context, we expect either - * an element-switch event or a completion - * event (and on completion, the active-idle - * marker). No more preemptions, lite-restore - * or otherwise. - */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); - GEM_BUG_ON(port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); - GEM_BUG_ON(!port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); - - /* - * We rely on the hardware being strongly - * ordered, that the breadcrumb write is - * coherent (visible from the CPU) before the - * user interrupt and CSB is processed. - */ - GEM_BUG_ON(!i915_request_completed(rq)); - - execlists_context_schedule_out(rq, - INTEL_CONTEXT_SCHEDULE_OUT); - i915_request_put(rq); - - GEM_TRACE("%s completed ctx=%d\n", - engine->name, port->context_id); - - port = execlists_port_complete(execlists, port); - if (port_isset(port)) - execlists_user_begin(execlists, port); - else - execlists_user_end(execlists); - } else { - port_set(port, port_pack(rq, count)); - } + port = execlists_port_complete(execlists, port); + if (port_isset(port)) + execlists_user_begin(execlists, port); + else + execlists_user_end(execlists); + } else { + port_set(port, port_pack(rq, count)); } + } while (head != tail); - if (head != execlists->csb_head) { - execlists->csb_head = head; - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); - } - } + execlists->csb_head = head; +} - if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) +static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) +{ + lockdep_assert_held(&engine->timeline.lock); + + process_csb(engine); + if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) execlists_dequeue(engine); +} + +/* + * Check the unread Context Status Buffers and manage the submission of new + * contexts to the ELSP accordingly. + */ +static void execlists_submission_tasklet(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + unsigned long flags; - if (fw) - intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + GEM_TRACE("%s awake?=%d, active=%x\n", + engine->name, + engine->i915->gt.awake, + engine->execlists.active); - /* If the engine is now idle, so should be the flag; and vice versa. 
*/ - GEM_BUG_ON(execlists_is_active(&engine->execlists, - EXECLISTS_ACTIVE_USER) == - !port_isset(engine->execlists.port)); + spin_lock_irqsave(&engine->timeline.lock, flags); + __execlists_submission_tasklet(engine); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } static void queue_request(struct intel_engine_cs *engine, @@ -1163,16 +1085,30 @@ static void queue_request(struct intel_engine_cs *engine, &lookup_priolist(engine, prio)->requests); } -static void __submit_queue(struct intel_engine_cs *engine, int prio) +static void __update_queue(struct intel_engine_cs *engine, int prio) { engine->execlists.queue_priority = prio; - tasklet_hi_schedule(&engine->execlists.tasklet); +} + +static void __submit_queue_imm(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + if (reset_in_progress(execlists)) + return; /* defer until we restart the engine following reset */ + + if (execlists->tasklet.func == execlists_submission_tasklet) + __execlists_submission_tasklet(engine); + else + tasklet_hi_schedule(&execlists->tasklet); } static void submit_queue(struct intel_engine_cs *engine, int prio) { - if (prio > engine->execlists.queue_priority) - __submit_queue(engine, prio); + if (prio > engine->execlists.queue_priority) { + __update_queue(engine, prio); + __submit_queue_imm(engine); + } } static void execlists_submit_request(struct i915_request *request) @@ -1184,11 +1120,12 @@ static void execlists_submit_request(struct i915_request *request) spin_lock_irqsave(&engine->timeline.lock, flags); queue_request(engine, &request->sched, rq_prio(request)); - submit_queue(engine, rq_prio(request)); - GEM_BUG_ON(!engine->execlists.first); + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(list_empty(&request->sched.link)); + submit_queue(engine, rq_prio(request)); + spin_unlock_irqrestore(&engine->timeline.lock, flags); } @@ -1315,13 +1252,40 @@ static void execlists_schedule(struct i915_request *request, } if (prio > engine->execlists.queue_priority && - i915_sw_fence_done(&sched_to_request(node)->submit)) - __submit_queue(engine, prio); + i915_sw_fence_done(&sched_to_request(node)->submit)) { + /* defer submission until after all of our updates */ + __update_queue(engine, prio); + tasklet_hi_schedule(&engine->execlists.tasklet); + } } spin_unlock_irq(&engine->timeline.lock); } +static void execlists_context_destroy(struct intel_context *ce) +{ + GEM_BUG_ON(ce->pin_count); + + if (!ce->state) + return; + + intel_ring_free(ce->ring); + + GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); + i915_gem_object_put(ce->state->obj); +} + +static void execlists_context_unpin(struct intel_context *ce) +{ + intel_ring_unpin(ce->ring); + + ce->state->obj->pin_global--; + i915_gem_object_unpin_map(ce->state->obj); + i915_vma_unpin(ce->state); + + i915_gem_context_put(ce->gem_context); +} + static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) { unsigned int flags; @@ -1345,21 +1309,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags); } -static struct intel_ring * -execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static struct intel_context * +__execlists_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); void *vaddr; int ret; - 
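submit_queue() above only acts when the new request's priority beats queue_priority, the cached priority of the best pending work, and __submit_queue_imm() defers the kick while a reset is in progress. A toy standalone model of that decision (not the driver's scheduler):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct sched {
	int queue_priority;
	bool reset_in_progress;
};

static void submit(struct sched *s, int prio)
{
	if (prio <= s->queue_priority)
		return;				/* existing queued work already wins */

	s->queue_priority = prio;		/* mirrors __update_queue() */
	if (s->reset_in_progress)
		return;				/* defer until the engine restarts */

	printf("kick submission for prio %d\n", prio);
}

int main(void)
{
	struct sched s = { .queue_priority = INT_MIN, .reset_in_progress = false };

	submit(&s, 0);	/* kicks */
	submit(&s, -1);	/* ignored: lower than the cached queue priority */
	submit(&s, 1);	/* kicks again */
	return 0;
}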
lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - goto out; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ - - ret = execlists_context_deferred_alloc(ctx, engine); + ret = execlists_context_deferred_alloc(ctx, engine, ce); if (ret) goto err; GEM_BUG_ON(!ce->state); @@ -1378,17 +1336,17 @@ execlists_context_pin(struct intel_engine_cs *engine, if (ret) goto unpin_map; - intel_lr_context_descriptor_update(ctx, engine); + intel_lr_context_descriptor_update(ctx, engine, ce); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; ce->lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ce->ring->vma); + GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head)); ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head; ce->state->obj->pin_global++; i915_gem_context_get(ctx); -out: - return ce->ring; + return ce; unpin_map: i915_gem_object_unpin_map(ce->state->obj); @@ -1399,33 +1357,33 @@ err: return ERR_PTR(ret); } -static void execlists_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops execlists_context_ops = { + .unpin = execlists_context_unpin, + .destroy = execlists_context_destroy, +}; + +static struct intel_context * +execlists_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(ce->pin_count == 0); - if (--ce->pin_count) - return; - - intel_ring_unpin(ce->ring); + if (likely(ce->pin_count++)) + return ce; + GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ - ce->state->obj->pin_global--; - i915_gem_object_unpin_map(ce->state->obj); - i915_vma_unpin(ce->state); + ce->ops = &execlists_context_ops; - i915_gem_context_put(ctx); + return __execlists_context_pin(engine, ctx, ce); } static int execlists_request_alloc(struct i915_request *request) { - struct intel_context *ce = - to_intel_context(request->ctx, request->engine); int ret; - GEM_BUG_ON(!ce->pin_count); + GEM_BUG_ON(!request->hw_context->pin_count); /* Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just @@ -1538,29 +1496,56 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) return batch; } -static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) +struct lri { + i915_reg_t reg; + u32 value; +}; + +static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) { - *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + GEM_BUG_ON(!count || count > 63); - /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ - batch = gen8_emit_flush_coherentl3_wa(engine, batch); + *batch++ = MI_LOAD_REGISTER_IMM(count); + do { + *batch++ = i915_mmio_reg_offset(lri->reg); + *batch++ = lri->value; + } while (lri++, --count); + *batch++ = MI_NOOP; - *batch++ = MI_LOAD_REGISTER_IMM(3); + return batch; +} - /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ - *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); - *batch++ = _MASKED_BIT_DISABLE( - GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); +static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) +{ + static const struct lri lri[] = { + /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ + { + COMMON_SLICE_CHICKEN2, + __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, + 0), + }, + + /* BSpec: 11391 */ + { + FF_SLICE_CHICKEN, + 
__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, + FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), + }, + + /* BSpec: 11299 */ + { + _3D_CHICKEN3, + __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, + _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), + } + }; - /* BSpec: 11391 */ - *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN); - *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX); + *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - /* BSpec: 11299 */ - *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3); - *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX); + /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ + batch = gen8_emit_flush_coherentl3_wa(engine, batch); - *batch++ = MI_NOOP; + batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); /* WaClearSlmSpaceAtContextSwitch:kbl */ /* Actual scratch location is at 128 bytes offset */ @@ -1652,7 +1637,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -1767,17 +1752,29 @@ static void enable_execlists(struct intel_engine_cs *engine) I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); + I915_WRITE(RING_MI_MODE(engine->mmio_base), + _MASKED_BIT_DISABLE(STOP_RING)); + I915_WRITE(RING_HWS_PGA(engine->mmio_base), engine->status_page.ggtt_offset); POSTING_READ(RING_HWS_PGA(engine->mmio_base)); +} - /* Following the reset, we need to reload the CSB read/write pointers */ - engine->execlists.csb_head = -1; +static bool unexpected_starting_state(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + bool unexpected = false; + + if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) { + DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); + unexpected = true; + } + + return unexpected; } static int gen8_init_common_ring(struct intel_engine_cs *engine) { - struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_mocs_init_engine(engine); @@ -1785,13 +1782,14 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) return ret; intel_engine_reset_breadcrumbs(engine); - intel_engine_init_hangcheck(engine); - enable_execlists(engine); + if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { + struct drm_printer p = drm_debug_printer(__func__); - /* After a GPU reset, we may have requests to replay */ - if (execlists->first) - tasklet_schedule(&execlists->tasklet); + intel_engine_dump(engine, &p, NULL); + } + + enable_execlists(engine); return 0; } @@ -1833,8 +1831,69 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) return 0; } -static void reset_common_ring(struct intel_engine_cs *engine, - struct i915_request *request) +static struct i915_request * +execlists_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *request, *active; + unsigned long flags; + + GEM_TRACE("%s\n", engine->name); + + /* + * Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its execlists->tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the execlists->tasklet until the reset is over + * prevents the race. 
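The emit_lri() helper added above replaces the open-coded workaround writes with a table: one MI_LOAD_REGISTER_IMM header giving the pair count, then the (register, value) pairs, then a NOOP. A standalone sketch of the same pattern (the opcode encoding and the register offsets are placeholders, not taken from the hardware spec):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MI_LOAD_REGISTER_IMM(n)	((0x22u << 23) | (2 * (n) - 1))	/* assumed encoding */
#define MI_NOOP			0x00000000u

struct lri {
	uint32_t reg;
	uint32_t value;
};

/* Emit one LRI header, the pairs, and a trailing NOOP into the batch. */
static uint32_t *emit_lri(uint32_t *batch, const struct lri *lri, unsigned int count)
{
	assert(count && count <= 63);

	*batch++ = MI_LOAD_REGISTER_IMM(count);
	do {
		*batch++ = lri->reg;
		*batch++ = lri->value;
	} while (lri++, --count);
	*batch++ = MI_NOOP;

	return batch;
}

int main(void)
{
	static const struct lri table[] = {
		{ 0x7010, 0x1 },	/* made-up register/value pairs */
		{ 0x20e0, 0x2 },
	};
	uint32_t batch[16];
	uint32_t *end = emit_lri(batch, table, 2);

	printf("emitted %ld dwords\n", (long)(end - batch));	/* 6 */
	return 0;
}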
+ */ + __tasklet_disable_sync_once(&execlists->tasklet); + + spin_lock_irqsave(&engine->timeline.lock, flags); + + /* + * We want to flush the pending context switches, having disabled + * the tasklet above, we can assume exclusive access to the execlists. + * For this allows us to catch up with an inflight preemption event, + * and avoid blaming an innocent request if the stall was due to the + * preemption itself. + */ + process_csb(engine); + + /* + * The last active request can then be no later than the last request + * now in ELSP[0]. So search backwards from there, so that if the GPU + * has advanced beyond the last CSB update, it will be pardoned. + */ + active = NULL; + request = port_request(execlists->port); + if (request) { + /* + * Prevent the breadcrumb from advancing before we decide + * which request is currently active. + */ + intel_engine_stop_cs(engine); + + list_for_each_entry_from_reverse(request, + &engine->timeline.requests, + link) { + if (__i915_request_completed(request, + request->global_seqno)) + break; + + active = request; + } + } + + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + return active; +} + +static void execlists_reset(struct intel_engine_cs *engine, + struct i915_request *request) { struct intel_engine_execlists * const execlists = &engine->execlists; unsigned long flags; @@ -1844,8 +1903,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, engine->name, request ? request->global_seqno : 0, intel_engine_get_seqno(engine)); - /* See execlists_cancel_requests() for the irq/spinlock split. */ - local_irq_save(flags); + spin_lock_irqsave(&engine->timeline.lock, flags); /* * Catch up with any missed context-switch interrupts. @@ -1857,14 +1915,14 @@ static void reset_common_ring(struct intel_engine_cs *engine, * requests were completed. */ execlists_cancel_port_requests(execlists); - reset_irq(engine); /* Push back any incomplete requests for replay after the reset. */ - spin_lock(&engine->timeline.lock); __unwind_incomplete_requests(engine); - spin_unlock(&engine->timeline.lock); - local_irq_restore(flags); + /* Following the reset, we need to reload the CSB read/write pointers */ + reset_csb_pointers(&engine->execlists); + + spin_unlock_irqrestore(&engine->timeline.lock, flags); /* * If the request was innocent, we leave the request in the ELSP @@ -1888,35 +1946,52 @@ static void reset_common_ring(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. 
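execlists_reset_prepare() above searches backwards from the request that was in ELSP[0]: every request not yet completed becomes the new candidate, and the walk stops at the first completed one, leaving the oldest still-incomplete request as the one presumably stuck on the hardware. A toy standalone version of that walk over an array instead of the timeline list:

#include <stdbool.h>
#include <stdio.h>

struct req {
	int seqno;
	bool completed;
};

/* Walk backwards from the ELSP[0] slot towards older requests. */
static const struct req *find_active(const struct req *timeline, int elsp0)
{
	const struct req *active = NULL;

	for (int i = elsp0; i >= 0; i--) {
		if (timeline[i].completed)
			break;
		active = &timeline[i];
	}
	return active;
}

int main(void)
{
	struct req timeline[] = {
		{ 1, true }, { 2, true }, { 3, false }, { 4, false },
	};
	const struct req *active = find_active(timeline, 3);

	printf("active seqno = %d\n", active ? active->seqno : -1);	/* 3 */
	return 0;
}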
*/ - regs = to_intel_context(request->ctx, engine)->lrc_reg_state; - if (engine->default_state) { - void *defaults; - - defaults = i915_gem_object_pin_map(engine->default_state, - I915_MAP_WB); - if (!IS_ERR(defaults)) { - memcpy(regs, /* skip restoring the vanilla PPHWSP */ - defaults + LRC_STATE_PN * PAGE_SIZE, - engine->context_size - PAGE_SIZE); - i915_gem_object_unpin_map(engine->default_state); - } + regs = request->hw_context->lrc_reg_state; + if (engine->pinned_default_state) { + memcpy(regs, /* skip restoring the vanilla PPHWSP */ + engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, + engine->context_size - PAGE_SIZE); } - execlists_init_reg_state(regs, request->ctx, engine, request->ring); + execlists_init_reg_state(regs, + request->gem_context, engine, request->ring); /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma); - regs[CTX_RING_HEAD + 1] = request->postfix; - request->ring->head = request->postfix; + request->ring->head = intel_ring_wrap(request->ring, request->postfix); + regs[CTX_RING_HEAD + 1] = request->ring->head; + intel_ring_update_space(request->ring); /* Reset WaIdleLiteRestore:bdw,skl as well */ unwind_wa_tail(request); } +static void execlists_reset_finish(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + /* After a GPU reset, we may have requests to replay */ + if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) + tasklet_schedule(&execlists->tasklet); + + /* + * Flush the tasklet while we still have the forcewake to be sure + * that it is not allowed to sleep before we restart and reload a + * context. + * + * As before (with execlists_reset_prepare) we rely on the caller + * serialising multiple attempts to reset so that we know that we + * are the only one manipulating tasklet state. + */ + __tasklet_enable_sync_once(&execlists->tasklet); + + GEM_TRACE("%s\n", engine->name); +} + static int intel_logical_ring_emit_pdps(struct i915_request *rq) { - struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; struct intel_engine_cs *engine = rq->engine; const int num_lri_cmds = GEN8_3LVL_PDPES * 2; u32 *cs; @@ -1955,15 +2030,15 @@ static int gen8_emit_bb_start(struct i915_request *rq, * it is unsafe in case of lite-restore (because the ctx is * not idle). 
PML4 is allocated during ppgtt init so this is * not needed in 48-bit.*/ - if (rq->ctx->ppgtt && - (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) && - !i915_vm_is_48bit(&rq->ctx->ppgtt->base) && + if (rq->gem_context->ppgtt && + (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && + !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && !intel_vgpu_active(rq->i915)) { ret = intel_logical_ring_emit_pdps(rq); if (ret) return ret; - rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); + rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); } cs = intel_ring_begin(rq, 6); @@ -2217,13 +2292,15 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) kfree(engine); } -static void execlists_set_default_submission(struct intel_engine_cs *engine) +void intel_execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; engine->cancel_requests = execlists_cancel_requests; engine->schedule = execlists_schedule; engine->execlists.tasklet.func = execlists_submission_tasklet; + engine->reset.prepare = execlists_reset_prepare; + engine->park = NULL; engine->unpark = NULL; @@ -2243,18 +2320,19 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. */ engine->init_hw = gen8_init_common_ring; - engine->reset_hw = reset_common_ring; - engine->context_pin = execlists_context_pin; - engine->context_unpin = execlists_context_unpin; + engine->reset.prepare = execlists_reset_prepare; + engine->reset.reset = execlists_reset; + engine->reset.finish = execlists_reset_finish; + engine->context_pin = execlists_context_pin; engine->request_alloc = execlists_request_alloc; engine->emit_flush = gen8_emit_flush; engine->emit_breadcrumb = gen8_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz; - engine->set_default_submission = execlists_set_default_submission; + engine->set_default_submission = intel_execlists_set_default_submission; if (INTEL_GEN(engine->i915) < 11) { engine->irq_enable = gen8_logical_ring_enable_irq; @@ -2294,28 +2372,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) static void logical_ring_setup(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - enum forcewake_domains fw_domains; - intel_engine_setup_common(engine); /* Intentionally left blank. 
*/ engine->buffer = NULL; - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, - RING_ELSP(engine), - FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_PTR(engine), - FW_REG_READ | FW_REG_WRITE); - - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, - RING_CONTEXT_STATUS_BUF_BASE(engine), - FW_REG_READ); - - engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); @@ -2323,33 +2384,61 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_irqs(engine); } +static bool csb_force_mmio(struct drm_i915_private *i915) +{ + /* Older GVT emulation depends upon intercepting CSB mmio */ + return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915); +} + static int logical_ring_init(struct intel_engine_cs *engine) { + struct drm_i915_private *i915 = engine->i915; + struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_engine_init_common(engine); if (ret) goto error; - if (HAS_LOGICAL_RING_ELSQ(engine->i915)) { - engine->execlists.submit_reg = engine->i915->regs + + if (HAS_LOGICAL_RING_ELSQ(i915)) { + execlists->submit_reg = i915->regs + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); - engine->execlists.ctrl_reg = engine->i915->regs + + execlists->ctrl_reg = i915->regs + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); } else { - engine->execlists.submit_reg = engine->i915->regs + + execlists->submit_reg = i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); } - engine->execlists.preempt_complete_status = ~0u; - if (engine->i915->preempt_context) { + execlists->preempt_complete_status = ~0u; + if (i915->preempt_context) { struct intel_context *ce = - to_intel_context(engine->i915->preempt_context, engine); + to_intel_context(i915->preempt_context, engine); - engine->execlists.preempt_complete_status = + execlists->preempt_complete_status = upper_32_bits(ce->lrc_desc); } + execlists->csb_read = + i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); + if (csb_force_mmio(i915)) { + execlists->csb_status = (u32 __force *) + (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + + execlists->csb_write = (u32 __force *)execlists->csb_read; + execlists->csb_write_reset = + _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK, + GEN8_CSB_ENTRIES - 1); + } else { + execlists->csb_status = + &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; + + execlists->csb_write = + &engine->status_page.page_addr[intel_hws_csb_write_index(i915)]; + execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1; + } + reset_csb_pointers(execlists); + return 0; error: @@ -2482,7 +2571,7 @@ static void execlists_init_reg_state(u32 *regs, struct drm_i915_private *dev_priv = engine->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; u32 base = engine->mmio_base; - bool rcs = engine->id == RCS; + bool rcs = engine->class == RENDER_CLASS; /* A context is actually a big batch buffer with several * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The @@ -2550,7 +2639,7 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); - if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) { + if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and * other PDP Descriptors are ignored. 
@@ -2629,10 +2718,10 @@ err_unpin_ctx: } static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) + struct intel_engine_cs *engine, + struct intel_context *ce) { struct drm_i915_gem_object *ctx_obj; - struct intel_context *ce = to_intel_context(ctx, engine); struct i915_vma *vma; uint32_t context_size; struct intel_ring *ring; @@ -2654,7 +2743,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); - vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); + vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto error_deref_obj; diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 4ec7d8dd13c8..4dfb78e3ec7e 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -104,11 +104,6 @@ struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); -static inline uint64_t -intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - return to_intel_context(ctx, engine)->lrc_desc; -} +void intel_execlists_set_default_submission(struct intel_engine_cs *engine); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 8ae8f42f430a..5dae16ccd9f1 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -116,7 +116,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) { - uint8_t rev; + u8 rev; if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, &rev) != 1) { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 48f618dc9abb..f9f3b0885ba5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -44,8 +44,6 @@ /* Private structure for the integrated LVDS support */ struct intel_lvds_connector { struct intel_connector base; - - struct notifier_block lid_notifier; }; struct intel_lvds_pps { @@ -85,34 +83,35 @@ static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *conn return container_of(connector, struct intel_lvds_connector, base.base); } +bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t lvds_reg, enum pipe *pipe) +{ + u32 val; + + val = I915_READ(lvds_reg); + + /* asserts want to know the pipe even if the port is disabled */ + if (HAS_PCH_CPT(dev_priv)) + *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT; + else + *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT; + + return val & LVDS_PORT_EN; +} + static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); - u32 tmp; bool ret; if (!intel_display_power_get_if_enabled(dev_priv, encoder->power_domain)) return false; - ret = false; - - tmp = I915_READ(lvds_encoder->reg); - - if (!(tmp & LVDS_PORT_EN)) - goto out; - - if (HAS_PCH_CPT(dev_priv)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - ret = true; + ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); -out: intel_display_power_put(dev_priv, encoder->power_domain); return 
ret; @@ -255,14 +254,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; if (HAS_PCH_CPT(dev_priv)) { - temp &= ~PORT_TRANS_SEL_MASK; - temp |= PORT_TRANS_SEL_CPT(pipe); + temp &= ~LVDS_PIPE_SEL_MASK_CPT; + temp |= LVDS_PIPE_SEL_CPT(pipe); } else { - if (pipe == 1) { - temp |= LVDS_PIPEB_SELECT; - } else { - temp &= ~LVDS_PIPEB_SELECT; - } + temp &= ~LVDS_PIPE_SEL_MASK; + temp |= LVDS_PIPE_SEL(pipe); } /* set the corresponsding LVDS_BORDER bit */ @@ -454,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return true; } -/* - * Detect the LVDS connection. - * - * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means - * connected and closed means disconnected. We also send hotplug events as - * needed, using lid status notification from the input layer. - */ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector, bool force) { - struct drm_i915_private *dev_priv = to_i915(connector->dev); - enum drm_connector_status status; - - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - - status = intel_panel_detect(dev_priv); - if (status != connector_status_unknown) - return status; - return connector_status_connected; } @@ -498,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 1; } -static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) -{ - DRM_INFO("Skipping forced modeset for %s\n", id->ident); - return 1; -} - -/* The GPU hangs up on these systems if modeset is performed on LID open */ -static const struct dmi_system_id intel_no_modeset_on_lid[] = { - { - .callback = intel_no_modeset_on_lid_dmi_callback, - .ident = "Toshiba Tecra A11", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"), - }, - }, - - { } /* terminating entry */ -}; - -/* - * Lid events. Note the use of 'modeset': - * - we set it to MODESET_ON_LID_OPEN on lid close, - * and set it to MODESET_DONE on open - * - we use it as a "only once" bit (ie we ignore - * duplicate events where it was already properly set) - * - the suspend/resume paths will set it to - * MODESET_SUSPENDED and ignore the lid open event, - * because they restore the mode ("lid open"). - */ -static int intel_lid_notify(struct notifier_block *nb, unsigned long val, - void *unused) -{ - struct intel_lvds_connector *lvds_connector = - container_of(nb, struct intel_lvds_connector, lid_notifier); - struct drm_connector *connector = &lvds_connector->base.base; - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - if (dev->switch_power_state != DRM_SWITCH_POWER_ON) - return NOTIFY_OK; - - mutex_lock(&dev_priv->modeset_restore_lock); - if (dev_priv->modeset_restore == MODESET_SUSPENDED) - goto exit; - /* - * check and update the status of LVDS connector after receiving - * the LID nofication event. - */ - connector->status = connector->funcs->detect(connector, false); - - /* Don't force modeset on machines where it causes a GPU lockup */ - if (dmi_check_system(intel_no_modeset_on_lid)) - goto exit; - if (!acpi_lid_open()) { - /* do modeset on next lid open event */ - dev_priv->modeset_restore = MODESET_ON_LID_OPEN; - goto exit; - } - - if (dev_priv->modeset_restore == MODESET_DONE) - goto exit; - - /* - * Some old platform's BIOS love to wreak havoc while the lid is closed. - * We try to detect this here and undo any damage. 
The split for PCH - * platforms is rather conservative and a bit arbitrary expect that on - * those platforms VGA disabling requires actual legacy VGA I/O access, - * and as part of the cleanup in the hw state restore we also redisable - * the vga plane. - */ - if (!HAS_PCH_SPLIT(dev_priv)) - intel_display_resume(dev); - - dev_priv->modeset_restore = MODESET_DONE; - -exit: - mutex_unlock(&dev_priv->modeset_restore_lock); - return NOTIFY_OK; -} - -static int -intel_lvds_connector_register(struct drm_connector *connector) -{ - struct intel_lvds_connector *lvds = to_lvds_connector(connector); - int ret; - - ret = intel_connector_register(connector); - if (ret) - return ret; - - lvds->lid_notifier.notifier_call = intel_lid_notify; - if (acpi_lid_notifier_register(&lvds->lid_notifier)) { - DRM_DEBUG_KMS("lid notifier registration failed\n"); - lvds->lid_notifier.notifier_call = NULL; - } - - return 0; -} - -static void -intel_lvds_connector_unregister(struct drm_connector *connector) -{ - struct intel_lvds_connector *lvds = to_lvds_connector(connector); - - if (lvds->lid_notifier.notifier_call) - acpi_lid_notifier_unregister(&lvds->lid_notifier); - - intel_connector_unregister(connector); -} - /** * intel_lvds_destroy - unregister and free LVDS structures * @connector: connector to free @@ -641,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, - .late_register = intel_lvds_connector_register, - .early_unregister = intel_lvds_connector_unregister, + .late_register = intel_connector_register, + .early_unregister = intel_connector_unregister, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, @@ -948,7 +816,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) * register is uninitialized. */ val = I915_READ(lvds_encoder->reg); - if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) + if (HAS_PCH_CPT(dev_priv)) + val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); + else + val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); + if (val == 0) val = dev_priv->vbt.bios_lvds_val; return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; @@ -1003,8 +875,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) return; /* Skip init on machines we know falsely report LVDS */ - if (dmi_check_system(intel_no_lvds)) + if (dmi_check_system(intel_no_lvds)) { + WARN(!dev_priv->vbt.int_lvds_support, + "Useless DMI match. 
Internal LVDS support disabled by VBT\n"); + return; + } + + if (!dev_priv->vbt.int_lvds_support) { + DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n"); return; + } if (HAS_PCH_SPLIT(dev_priv)) lvds_reg = PCH_LVDS; @@ -1016,10 +896,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) if (HAS_PCH_SPLIT(dev_priv)) { if ((lvds & LVDS_DETECTED) == 0) return; - if (dev_priv->vbt.edp.support) { - DRM_DEBUG_KMS("disable LVDS for eDP support\n"); - return; - } } pin = GMBUS_PIN_PANEL; @@ -1108,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) * 2) check for VBT data * 3) check to see if LVDS is already on * if none of the above, no panel - * 4) make sure lid is open - * if closed, act like it's not there for now */ /* @@ -1125,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_gmbus_get_adapter(dev_priv, pin)); if (edid) { if (drm_add_edid_modes(connector, edid)) { - drm_mode_connector_update_edid_property(connector, + drm_connector_update_edid_property(connector, edid); } else { kfree(edid); diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index b39846613e3c..ca44bf368e24 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -40,7 +40,7 @@ int intel_connector_update_modes(struct drm_connector *connector, { int ret; - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); return ret; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index c58e5f53bab0..e034b4166d32 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -608,16 +608,16 @@ void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) -static struct intel_opregion *system_opregion; - +/* + * The only video events relevant to opregion are 0x80. These indicate either a + * docking event, lid switch or display switch request. In Linux, these are + * handled by the dock, button and video drivers. + */ static int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) { - /* The only video events relevant to opregion are 0x80. These indicate - either a docking event, lid switch or display switch request. In - Linux, these are handled by the dock, button and video drivers. - */ - + struct intel_opregion *opregion = container_of(nb, struct intel_opregion, + acpi_notifier); struct acpi_bus_event *event = data; struct opregion_acpi *acpi; int ret = NOTIFY_OK; @@ -625,10 +625,7 @@ static int intel_opregion_video_event(struct notifier_block *nb, if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; - if (!system_opregion) - return NOTIFY_DONE; - - acpi = system_opregion->acpi; + acpi = opregion->acpi; if (event->type == 0x80 && ((acpi->cevt & 1) == 0)) ret = NOTIFY_BAD; @@ -638,10 +635,6 @@ static int intel_opregion_video_event(struct notifier_block *nb, return ret; } -static struct notifier_block intel_opregion_notifier = { - .notifier_call = intel_opregion_video_event, -}; - /* * Initialise the DIDL field in opregion. This passes a list of devices to * the firmware. 
Values are defined by section B.4.2 of the ACPI specification @@ -797,8 +790,8 @@ void intel_opregion_register(struct drm_i915_private *dev_priv) opregion->acpi->csts = 0; opregion->acpi->drdy = 1; - system_opregion = opregion; - register_acpi_notifier(&intel_opregion_notifier); + opregion->acpi_notifier.notifier_call = intel_opregion_video_event; + register_acpi_notifier(&opregion->acpi_notifier); } if (opregion->asle) { @@ -822,8 +815,8 @@ void intel_opregion_unregister(struct drm_i915_private *dev_priv) if (opregion->acpi) { opregion->acpi->drdy = 0; - system_opregion = NULL; - unregister_acpi_notifier(&intel_opregion_notifier); + unregister_acpi_notifier(&opregion->acpi_notifier); + opregion->acpi_notifier.notifier_call = NULL; } /* just clear all opregion memory pointers now */ diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h index e0e437ba9e51..e8498a8cda3d 100644 --- a/drivers/gpu/drm/i915/intel_opregion.h +++ b/drivers/gpu/drm/i915/intel_opregion.h @@ -49,6 +49,7 @@ struct intel_opregion { u32 vbt_size; u32 *lid_state; struct work_struct asle_work; + struct notifier_block acpi_notifier; }; #define OPREGION_SIZE (8 * 1024) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index b443278e569c..4a9f139e7b73 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -375,26 +375,6 @@ out: pipe_config->gmch_pfit.lvds_border_bits = border; } -enum drm_connector_status -intel_panel_detect(struct drm_i915_private *dev_priv) -{ - /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) { - return *dev_priv->opregion.lid_state & 0x1 ? - connector_status_connected : - connector_status_disconnected; - } - - switch (i915_modparams.panel_ignore_lid) { - case -2: - return connector_status_connected; - case -1: - return connector_status_disconnected; - default: - return connector_status_unknown; - } -} - /** * scale - scale values from one range to another * @source_val: value in range [@source_min..@source_max] @@ -406,11 +386,11 @@ intel_panel_detect(struct drm_i915_private *dev_priv) * Return @source_val in range [@source_min..@source_max] scaled to range * [@target_min..@target_max]. 
*/ -static uint32_t scale(uint32_t source_val, - uint32_t source_min, uint32_t source_max, - uint32_t target_min, uint32_t target_max) +static u32 scale(u32 source_val, + u32 source_min, u32 source_max, + u32 target_min, u32 target_max) { - uint64_t target_val; + u64 target_val; WARN_ON(source_min > source_max); WARN_ON(target_min > target_max); diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 39a4e4edda07..849e1b69ba73 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -30,160 +30,6 @@ #include <linux/debugfs.h> #include "intel_drv.h" -struct pipe_crc_info { - const char *name; - struct drm_i915_private *dev_priv; - enum pipe pipe; -}; - -static int i915_pipe_crc_open(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes) - return -ENODEV; - - spin_lock_irq(&pipe_crc->lock); - - if (pipe_crc->opened) { - spin_unlock_irq(&pipe_crc->lock); - return -EBUSY; /* already open */ - } - - pipe_crc->opened = true; - filep->private_data = inode->i_private; - - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -static int i915_pipe_crc_release(struct inode *inode, struct file *filep) -{ - struct pipe_crc_info *info = inode->i_private; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - - spin_lock_irq(&pipe_crc->lock); - pipe_crc->opened = false; - spin_unlock_irq(&pipe_crc->lock); - - return 0; -} - -/* (6 fields, 8 chars each, space separated (5) + '\n') */ -#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) -/* account for \'0' */ -#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) - -static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) -{ - lockdep_assert_held(&pipe_crc->lock); - return CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR); -} - -static ssize_t -i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, - loff_t *pos) -{ - struct pipe_crc_info *info = filep->private_data; - struct drm_i915_private *dev_priv = info->dev_priv; - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; - char buf[PIPE_CRC_BUFFER_LEN]; - int n_entries; - ssize_t bytes_read; - - /* - * Don't allow user space to provide buffers not big enough to hold - * a line of data. 
- */ - if (count < PIPE_CRC_LINE_LEN) - return -EINVAL; - - if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) - return 0; - - /* nothing to read */ - spin_lock_irq(&pipe_crc->lock); - while (pipe_crc_data_count(pipe_crc) == 0) { - int ret; - - if (filep->f_flags & O_NONBLOCK) { - spin_unlock_irq(&pipe_crc->lock); - return -EAGAIN; - } - - ret = wait_event_interruptible_lock_irq(pipe_crc->wq, - pipe_crc_data_count(pipe_crc), pipe_crc->lock); - if (ret) { - spin_unlock_irq(&pipe_crc->lock); - return ret; - } - } - - /* We now have one or more entries to read */ - n_entries = count / PIPE_CRC_LINE_LEN; - - bytes_read = 0; - while (n_entries > 0) { - struct intel_pipe_crc_entry *entry = - &pipe_crc->entries[pipe_crc->tail]; - - if (CIRC_CNT(pipe_crc->head, pipe_crc->tail, - INTEL_PIPE_CRC_ENTRIES_NR) < 1) - break; - - BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); - pipe_crc->tail = (pipe_crc->tail + 1) & - (INTEL_PIPE_CRC_ENTRIES_NR - 1); - - bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, - "%8u %8x %8x %8x %8x %8x\n", - entry->frame, entry->crc[0], - entry->crc[1], entry->crc[2], - entry->crc[3], entry->crc[4]); - - spin_unlock_irq(&pipe_crc->lock); - - if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN)) - return -EFAULT; - - user_buf += PIPE_CRC_LINE_LEN; - n_entries--; - - spin_lock_irq(&pipe_crc->lock); - } - - spin_unlock_irq(&pipe_crc->lock); - - return bytes_read; -} - -static const struct file_operations i915_pipe_crc_fops = { - .owner = THIS_MODULE, - .open = i915_pipe_crc_open, - .read = i915_pipe_crc_read, - .release = i915_pipe_crc_release, -}; - -static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { - { - .name = "i915_pipe_A_crc", - .pipe = PIPE_A, - }, - { - .name = "i915_pipe_B_crc", - .pipe = PIPE_B, - }, - { - .name = "i915_pipe_C_crc", - .pipe = PIPE_C, - }, -}; - static const char * const pipe_crc_sources[] = { "none", "plane1", @@ -197,29 +43,6 @@ static const char * const pipe_crc_sources[] = { "auto", }; -static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) -{ - BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); - return pipe_crc_sources[source]; -} - -static int display_crc_ctl_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - seq_printf(m, "%c %s\n", pipe_name(pipe), - pipe_crc_source_name(dev_priv->pipe_crc[pipe].source)); - - return 0; -} - -static int display_crc_ctl_open(struct inode *inode, struct file *file) -{ - return single_open(file, display_crc_ctl_show, inode->i_private); -} - static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, uint32_t *val) { @@ -616,177 +439,6 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); } -static int pipe_crc_set_source(struct drm_i915_private *dev_priv, - enum pipe pipe, - enum intel_pipe_crc_source source) -{ - struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - enum intel_display_power_domain power_domain; - u32 val = 0; /* shut up gcc */ - int ret; - - if (pipe_crc->source == source) - return 0; - - /* forbid changing the source without going back to 'none' */ - if (pipe_crc->source && source) - return -EINVAL; - - power_domain = POWER_DOMAIN_PIPE(pipe); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { - DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); - return -EIO; - } - - ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, 
&val, true); - if (ret != 0) - goto out; - - /* none -> real source transition */ - if (source) { - struct intel_pipe_crc_entry *entries; - - DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", - pipe_name(pipe), pipe_crc_source_name(source)); - - entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, - sizeof(pipe_crc->entries[0]), - GFP_KERNEL); - if (!entries) { - ret = -ENOMEM; - goto out; - } - - spin_lock_irq(&pipe_crc->lock); - kfree(pipe_crc->entries); - pipe_crc->entries = entries; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - } - - pipe_crc->source = source; - - I915_WRITE(PIPE_CRC_CTL(pipe), val); - POSTING_READ(PIPE_CRC_CTL(pipe)); - - /* real source -> none transition */ - if (!source) { - struct intel_pipe_crc_entry *entries; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); - - DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", - pipe_name(pipe)); - - drm_modeset_lock(&crtc->base.mutex, NULL); - if (crtc->base.state->active) - intel_wait_for_vblank(dev_priv, pipe); - drm_modeset_unlock(&crtc->base.mutex); - - spin_lock_irq(&pipe_crc->lock); - entries = pipe_crc->entries; - pipe_crc->entries = NULL; - pipe_crc->head = 0; - pipe_crc->tail = 0; - spin_unlock_irq(&pipe_crc->lock); - - kfree(entries); - - if (IS_G4X(dev_priv)) - g4x_undo_pipe_scramble_reset(dev_priv, pipe); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_undo_pipe_scramble_reset(dev_priv, pipe); - else if ((IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && pipe == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, false); - } - - ret = 0; - -out: - intel_display_power_put(dev_priv, power_domain); - - return ret; -} - -/* - * Parse pipe CRC command strings: - * command: wsp* object wsp+ name wsp+ source wsp* - * object: 'pipe' - * name: (A | B | C) - * source: (none | plane1 | plane2 | pf) - * wsp: (#0x20 | #0x9 | #0xA)+ - * - * eg.: - * "pipe A plane1" -> Start CRC computations on plane1 of pipe A - * "pipe A none" -> Stop CRC - */ -static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) -{ - int n_words = 0; - - while (*buf) { - char *end; - - /* skip leading white space */ - buf = skip_spaces(buf); - if (!*buf) - break; /* end of buffer */ - - /* find end of word */ - for (end = buf; *end && !isspace(*end); end++) - ; - - if (n_words == max_words) { - DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", - max_words); - return -EINVAL; /* ran out of words[] before bytes */ - } - - if (*end) - *end++ = '\0'; - words[n_words++] = buf; - buf = end; - } - - return n_words; -} - -enum intel_pipe_crc_object { - PIPE_CRC_OBJECT_PIPE, -}; - -static const char * const pipe_crc_objects[] = { - "pipe", -}; - -static int -display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) -{ - int i; - - i = match_string(pipe_crc_objects, ARRAY_SIZE(pipe_crc_objects), buf); - if (i < 0) - return i; - - *o = i; - return 0; -} - -static int display_crc_ctl_parse_pipe(struct drm_i915_private *dev_priv, - const char *buf, enum pipe *pipe) -{ - const char name = buf[0]; - - if (name < 'A' || name >= pipe_name(INTEL_INFO(dev_priv)->num_pipes)) - return -EINVAL; - - *pipe = name - 'A'; - - return 0; -} - static int display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) { @@ -805,81 +457,6 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) return 0; } -static int display_crc_ctl_parse(struct drm_i915_private *dev_priv, - char *buf, size_t len) -{ -#define N_WORDS 3 - int n_words; - char 
*words[N_WORDS]; - enum pipe pipe; - enum intel_pipe_crc_object object; - enum intel_pipe_crc_source source; - - n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); - if (n_words != N_WORDS) { - DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", - N_WORDS); - return -EINVAL; - } - - if (display_crc_ctl_parse_object(words[0], &object) < 0) { - DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); - return -EINVAL; - } - - if (display_crc_ctl_parse_pipe(dev_priv, words[1], &pipe) < 0) { - DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); - return -EINVAL; - } - - if (display_crc_ctl_parse_source(words[2], &source) < 0) { - DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); - return -EINVAL; - } - - return pipe_crc_set_source(dev_priv, pipe, source); -} - -static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - char *tmpbuf; - int ret; - - if (len == 0) - return 0; - - if (len > PAGE_SIZE - 1) { - DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", - PAGE_SIZE); - return -E2BIG; - } - - tmpbuf = memdup_user_nul(ubuf, len); - if (IS_ERR(tmpbuf)) - return PTR_ERR(tmpbuf); - - ret = display_crc_ctl_parse(dev_priv, tmpbuf, len); - - kfree(tmpbuf); - if (ret < 0) - return ret; - - *offp += len; - return len; -} - -const struct file_operations i915_display_crc_ctl_fops = { - .owner = THIS_MODULE, - .open = display_crc_ctl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = display_crc_ctl_write -}; - void intel_display_crc_init(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -887,30 +464,8 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - pipe_crc->opened = false; spin_lock_init(&pipe_crc->lock); - init_waitqueue_head(&pipe_crc->wq); - } -} - -int intel_pipe_crc_create(struct drm_minor *minor) -{ - struct drm_i915_private *dev_priv = to_i915(minor->dev); - struct dentry *ent; - int i; - - for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { - struct pipe_crc_info *info = &i915_pipe_crc_data[i]; - - info->dev_priv = dev_priv; - ent = debugfs_create_file(info->name, S_IRUGO, - minor->debugfs_root, info, - &i915_pipe_crc_fops); - if (!ent) - return -ENOMEM; } - - return 0; } int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 53aaaa3e6886..43ae9de12ba3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6264,42 +6264,15 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) return limits; } -static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) +static void rps_set_power(struct drm_i915_private *dev_priv, int new_power) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - int new_power; u32 threshold_up = 0, threshold_down = 0; /* in % */ u32 ei_up = 0, ei_down = 0; - new_power = rps->power; - switch (rps->power) { - case LOW_POWER: - if (val > rps->efficient_freq + 1 && - val > rps->cur_freq) - new_power = BETWEEN; - break; - - case BETWEEN: - if (val <= rps->efficient_freq && - val < rps->cur_freq) - new_power = LOW_POWER; - else if (val >= rps->rp0_freq && - val > rps->cur_freq) - new_power = HIGH_POWER; - break; + lockdep_assert_held(&rps->power.mutex); - case HIGH_POWER: - if (val < 
(rps->rp1_freq + rps->rp0_freq) >> 1 && - val < rps->cur_freq) - new_power = BETWEEN; - break; - } - /* Max/min bins are special */ - if (val <= rps->min_freq_softlimit) - new_power = LOW_POWER; - if (val >= rps->max_freq_softlimit) - new_power = HIGH_POWER; - if (new_power == rps->power) + if (new_power == rps->power.mode) return; /* Note the units here are not exactly 1us, but 1280ns. */ @@ -6362,12 +6335,71 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) GEN6_RP_DOWN_IDLE_AVG); skip_hw_write: - rps->power = new_power; - rps->up_threshold = threshold_up; - rps->down_threshold = threshold_down; + rps->power.mode = new_power; + rps->power.up_threshold = threshold_up; + rps->power.down_threshold = threshold_down; +} + +static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) +{ + struct intel_rps *rps = &dev_priv->gt_pm.rps; + int new_power; + + new_power = rps->power.mode; + switch (rps->power.mode) { + case LOW_POWER: + if (val > rps->efficient_freq + 1 && + val > rps->cur_freq) + new_power = BETWEEN; + break; + + case BETWEEN: + if (val <= rps->efficient_freq && + val < rps->cur_freq) + new_power = LOW_POWER; + else if (val >= rps->rp0_freq && + val > rps->cur_freq) + new_power = HIGH_POWER; + break; + + case HIGH_POWER: + if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && + val < rps->cur_freq) + new_power = BETWEEN; + break; + } + /* Max/min bins are special */ + if (val <= rps->min_freq_softlimit) + new_power = LOW_POWER; + if (val >= rps->max_freq_softlimit) + new_power = HIGH_POWER; + + mutex_lock(&rps->power.mutex); + if (rps->power.interactive) + new_power = HIGH_POWER; + rps_set_power(dev_priv, new_power); + mutex_unlock(&rps->power.mutex); rps->last_adj = 0; } +void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) +{ + struct intel_rps *rps = &i915->gt_pm.rps; + + if (INTEL_GEN(i915) < 6) + return; + + mutex_lock(&rps->power.mutex); + if (interactive) { + if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake)) + rps_set_power(i915, HIGH_POWER); + } else { + GEM_BUG_ON(!rps->power.interactive); + rps->power.interactive--; + } + mutex_unlock(&rps->power.mutex); +} + static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -6780,7 +6812,7 @@ static void reset_rps(struct drm_i915_private *dev_priv, u8 freq = rps->cur_freq; /* force a reset */ - rps->power = -1; + rps->power.mode = -1; rps->cur_freq = -1; if (set(dev_priv, freq)) @@ -7347,11 +7379,11 @@ out: static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - if (WARN_ON(!dev_priv->vlv_pctx)) - return; + struct drm_i915_gem_object *pctx; - i915_gem_object_put(dev_priv->vlv_pctx); - dev_priv->vlv_pctx = NULL; + pctx = fetch_and_zero(&dev_priv->vlv_pctx); + if (pctx) + i915_gem_object_put(pctx); } static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) @@ -9604,6 +9636,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) void intel_pm_setup(struct drm_i915_private *dev_priv) { mutex_init(&dev_priv->pcu_lock); + mutex_init(&dev_priv->gt_pm.rps.power.mutex); atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index db27f2faa1de..4bd5768731ee 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -56,51 +56,10 @@ #include "intel_drv.h" #include "i915_drv.h" -static inline enum intel_display_power_domain -psr_aux_domain(struct 
intel_dp *intel_dp) -{ - /* CNL HW requires corresponding AUX IOs to be powered up for PSR. - * However, for non-A AUX ports the corresponding non-EDP transcoders - * would have already enabled power well 2 and DC_OFF. This means we can - * acquire a wider POWER_DOMAIN_AUX_{B,C,D,F} reference instead of a - * specific AUX_IO reference without powering up any extra wells. - * Note that PSR is enabled only on Port A even though this function - * returns the correct domain for other ports too. - */ - return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : - intel_dp->aux_power_domain; -} - -static void psr_aux_io_power_get(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - - if (INTEL_GEN(dev_priv) < 10) - return; - - intel_display_power_get(dev_priv, psr_aux_domain(intel_dp)); -} - -static void psr_aux_io_power_put(struct intel_dp *intel_dp) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - - if (INTEL_GEN(dev_priv) < 10) - return; - - intel_display_power_put(dev_priv, psr_aux_domain(intel_dp)); -} - void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug) { u32 debug_mask, mask; - /* No PSR interrupts on VLV/CHV */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return; - mask = EDP_PSR_ERROR(TRANSCODER_EDP); debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) | EDP_PSR_PRE_ENTRY(TRANSCODER_EDP); @@ -201,15 +160,6 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) } } -static bool intel_dp_get_y_coord_required(struct intel_dp *intel_dp) -{ - uint8_t psr_caps = 0; - - if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps) != 1) - return false; - return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED; -} - static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) { uint8_t dprx = 0; @@ -232,13 +182,13 @@ static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp) static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp) { - u8 val = 0; + u8 val = 8; /* assume the worst if we can't read the value */ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1) val &= DP_MAX_RESYNC_FRAME_COUNT_MASK; else - DRM_ERROR("Unable to get sink synchronization latency\n"); + DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n"); return val; } @@ -250,13 +200,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, sizeof(intel_dp->psr_dpcd)); - if (intel_dp->psr_dpcd[0]) { - dev_priv->psr.sink_support = true; - DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); + if (!intel_dp->psr_dpcd[0]) + return; + DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", + intel_dp->psr_dpcd[0]); + + if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { + DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); + return; } + dev_priv->psr.sink_support = true; + dev_priv->psr.sink_sync_latency = + intel_dp_get_sink_sync_latency(intel_dp); if (INTEL_GEN(dev_priv) >= 9 && (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { + bool y_req = intel_dp->psr_dpcd[1] & + DP_PSR2_SU_Y_COORDINATE_REQUIRED; + bool alpm = intel_dp_get_alpm_status(intel_dp); + /* * All panels that supports PSR version 03h (PSR2 + * Y-coordinate) can handle Y-coordinates in VSC but we are @@ 
-268,49 +230,19 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) * Y-coordinate requirement panels we would need to enable * GTC first. */ - dev_priv->psr.sink_psr2_support = - intel_dp_get_y_coord_required(intel_dp); - DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.sink_psr2_support - ? "supported" : "not supported"); + dev_priv->psr.sink_psr2_support = y_req && alpm; + DRM_DEBUG_KMS("PSR2 %ssupported\n", + dev_priv->psr.sink_psr2_support ? "" : "not "); if (dev_priv->psr.sink_psr2_support) { dev_priv->psr.colorimetry_support = intel_dp_get_colorimetry_status(intel_dp); - dev_priv->psr.alpm = - intel_dp_get_alpm_status(intel_dp); - dev_priv->psr.sink_sync_latency = - intel_dp_get_sink_sync_latency(intel_dp); } } } -static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t val; - - val = I915_READ(VLV_PSRSTAT(pipe)) & - VLV_EDP_PSR_CURR_STATE_MASK; - return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) || - (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE); -} - -static void vlv_psr_setup_vsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - uint32_t val; - - /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */ - val = I915_READ(VLV_VSCSDP(crtc->pipe)); - val &= ~VLV_EDP_PSR_SDP_FREQ_MASK; - val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME; - I915_WRITE(VLV_VSCSDP(crtc->pipe), val); -} - -static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +static void intel_psr_setup_vsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); @@ -341,12 +273,6 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); } -static void vlv_psr_enable_sink(struct intel_dp *intel_dp) -{ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); -} - static void hsw_psr_setup_aux(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); @@ -373,7 +299,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); /* Start with bits set for DDI_AUX_CTL register */ - aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg), aux_clock_divider); /* Select only valid bits for SRD_AUX_CTL */ @@ -381,7 +307,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); } -static void hsw_psr_enable_sink(struct intel_dp *intel_dp) +static void intel_psr_enable_sink(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -389,95 +315,64 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) u8 dpcd_val = DP_PSR_ENABLE; /* Enable ALPM at sink for psr2 */ - if (dev_priv->psr.psr2_enabled && dev_priv->psr.alpm) - drm_dp_dpcd_writeb(&intel_dp->aux, - DP_RECEIVER_ALPM_CONFIG, - DP_ALPM_ENABLE); - - if (dev_priv->psr.psr2_enabled) + if (dev_priv->psr.psr2_enabled) { + drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, + DP_ALPM_ENABLE); dpcd_val |= DP_PSR_ENABLE_PSR2; + } + if (dev_priv->psr.link_standby) 
dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE; + if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8) + dpcd_val |= DP_PSR_CRC_VERIFICATION; drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } -static void vlv_psr_enable_source(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - - /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */ - I915_WRITE(VLV_PSRCTL(crtc->pipe), - VLV_EDP_PSR_MODE_SW_TIMER | - VLV_EDP_PSR_SRC_TRANSMITTER_STATE | - VLV_EDP_PSR_ENABLE); -} - -static void vlv_psr_activate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *crtc = dig_port->base.base.crtc; - enum pipe pipe = to_intel_crtc(crtc)->pipe; - - /* - * Let's do the transition from PSR_state 1 (inactive) to - * PSR_state 2 (transition to active - static frame transmission). - * Then Hardware is responsible for the transition to - * PSR_state 3 (active - no Remote Frame Buffer (RFB) update). - */ - I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) | - VLV_EDP_PSR_ACTIVE_ENTRY); -} - static void hsw_activate_psr1(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + u32 max_sleep_time = 0x1f; + u32 val = EDP_PSR_ENABLE; - uint32_t max_sleep_time = 0x1f; - /* - * Let's respect VBT in case VBT asks a higher idle_frame value. - * Let's use 6 as the minimum to cover all known cases including - * the off-by-one issue that HW has in some cases. Also there are - * cases where sink should be able to train - * with the 5 or 6 idle patterns. + /* Let's use 6 as the minimum to cover all known cases including the + * off-by-one issue that HW has in some cases. 
*/ - uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - uint32_t val = EDP_PSR_ENABLE; + int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; + /* sink_sync_latency of 8 means source has to wait for more than 8 + * frames, we'll go with 9 frames for now + */ + idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; if (IS_HASWELL(dev_priv)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; if (dev_priv->psr.link_standby) val |= EDP_PSR_LINK_STANDBY; - if (dev_priv->vbt.psr.tp1_wakeup_time > 5) - val |= EDP_PSR_TP1_TIME_2500us; - else if (dev_priv->vbt.psr.tp1_wakeup_time > 1) - val |= EDP_PSR_TP1_TIME_500us; - else if (dev_priv->vbt.psr.tp1_wakeup_time > 0) + if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) + val |= EDP_PSR_TP1_TIME_0us; + else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) val |= EDP_PSR_TP1_TIME_100us; + else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) + val |= EDP_PSR_TP1_TIME_500us; else - val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_TP1_TIME_2500us; - if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) - val |= EDP_PSR_TP2_TP3_TIME_2500us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) - val |= EDP_PSR_TP2_TP3_TIME_500us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) + val |= EDP_PSR_TP2_TP3_TIME_0us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR_TP2_TP3_TIME_100us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) + val |= EDP_PSR_TP2_TP3_TIME_500us; else - val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP2_TP3_TIME_2500us; if (intel_dp_source_supports_hbr2(intel_dp) && drm_dp_tps3_supported(intel_dp->dpcd)) @@ -485,6 +380,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) else val |= EDP_PSR_TP1_TP2_SEL; + if (INTEL_GEN(dev_priv) >= 8) + val |= EDP_PSR_CRC_ENABLE; + val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; I915_WRITE(EDP_PSR_CTL, val); } @@ -494,15 +392,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - /* - * Let's respect VBT in case VBT asks a higher idle_frame value. - * Let's use 6 as the minimum to cover all known cases including - * the off-by-one issue that HW has in some cases. Also there are - * cases where sink should be able to train - * with the 5 or 6 idle patterns. + u32 val; + + /* Let's use 6 as the minimum to cover all known cases including the + * off-by-one issue that HW has in some cases. */ - uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - u32 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; + int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + + idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); + val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT; /* FIXME: selective update is probably totally broken because it doesn't * mesh at all with our frontbuffer tracking. 
And the hw alone isn't @@ -513,36 +411,19 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); - if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) - val |= EDP_PSR2_TP2_TIME_2500; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) - val |= EDP_PSR2_TP2_TIME_500; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) - val |= EDP_PSR2_TP2_TIME_100; + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 && + dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50) + val |= EDP_PSR2_TP2_TIME_50us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) + val |= EDP_PSR2_TP2_TIME_100us; + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) + val |= EDP_PSR2_TP2_TIME_500us; else - val |= EDP_PSR2_TP2_TIME_50; + val |= EDP_PSR2_TP2_TIME_2500us; I915_WRITE(EDP_PSR2_CTL, val); } -static void hsw_psr_activate(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - - /* On HSW+ after we enable PSR on source it will activate it - * as soon as it match configure idle_frame count. So - * we just actually enable it here on activation time. - */ - - /* psr1 and psr2 are mutually exclusive.*/ - if (dev_priv->psr.psr2_enabled) - hsw_activate_psr2(intel_dp); - else - hsw_activate_psr1(intel_dp); -} - static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -602,17 +483,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, * ones. Since by Display design transcoder EDP is tied to port A * we can safely escape based on the port A. */ - if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) { + if (dig_port->base.port != PORT_A) { DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); return; } - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - !dev_priv->psr.link_standby) { - DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n"); - return; - } - if (IS_HASWELL(dev_priv) && I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) & S3D_ENABLE) { @@ -640,11 +515,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, return; } - if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { - DRM_DEBUG_KMS("PSR condition failed: panel lacks power state control\n"); - return; - } - crtc_state->has_psr = true; crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state); DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? 
"2" : ""); @@ -656,27 +526,29 @@ static void intel_psr_activate(struct intel_dp *intel_dp) struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - if (dev_priv->psr.psr2_enabled) + if (INTEL_GEN(dev_priv) >= 9) WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - else - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); - dev_priv->psr.activate(intel_dp); + /* psr1 and psr2 are mutually exclusive.*/ + if (dev_priv->psr.psr2_enabled) + hsw_activate_psr2(intel_dp); + else + hsw_activate_psr1(intel_dp); + dev_priv->psr.active = true; } -static void hsw_psr_enable_source(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +static void intel_psr_enable_source(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - psr_aux_io_power_get(intel_dp); - /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ * use hardcoded values PSR AUX transactions */ @@ -712,7 +584,8 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp, EDP_PSR_DEBUG_MASK_MEMUP | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP | - EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); + EDP_PSR_DEBUG_MASK_DISP_REG_WRITE | + EDP_PSR_DEBUG_MASK_MAX_SLEEP); } } @@ -746,64 +619,19 @@ void intel_psr_enable(struct intel_dp *intel_dp, dev_priv->psr.psr2_enabled = crtc_state->has_psr2; dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.setup_vsc(intel_dp, crtc_state); - dev_priv->psr.enable_sink(intel_dp); - dev_priv->psr.enable_source(intel_dp, crtc_state); + intel_psr_setup_vsc(intel_dp, crtc_state); + intel_psr_enable_sink(intel_dp); + intel_psr_enable_source(intel_dp, crtc_state); dev_priv->psr.enabled = intel_dp; - if (INTEL_GEN(dev_priv) >= 9) { - intel_psr_activate(intel_dp); - } else { - /* - * FIXME: Activation should happen immediately since this - * function is just called after pipe is fully trained and - * enabled. - * However on some platforms we face issues when first - * activation follows a modeset so quickly. - * - On VLV/CHV we get bank screen on first activation - * - On HSW/BDW we get a recoverable frozen screen until - * next exit-activate sequence. - */ - schedule_delayed_work(&dev_priv->psr.work, - msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); - } + intel_psr_activate(intel_dp); unlock: mutex_unlock(&dev_priv->psr.lock); } -static void vlv_psr_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) -{ - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); - uint32_t val; - - if (dev_priv->psr.active) { - /* Put VLV PSR back to PSR_state 0 (disabled). 
*/ - if (intel_wait_for_register(dev_priv, - VLV_PSRSTAT(crtc->pipe), - VLV_EDP_PSR_IN_TRANS, - 0, - 1)) - WARN(1, "PSR transition took longer than expected\n"); - - val = I915_READ(VLV_PSRCTL(crtc->pipe)); - val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; - val &= ~VLV_EDP_PSR_ENABLE; - val &= ~VLV_EDP_PSR_MODE_MASK; - I915_WRITE(VLV_PSRCTL(crtc->pipe), val); - - dev_priv->psr.active = false; - } else { - WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe)); - } -} - -static void hsw_psr_disable(struct intel_dp *intel_dp, - const struct intel_crtc_state *old_crtc_state) +static void +intel_psr_disable_source(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -842,8 +670,25 @@ static void hsw_psr_disable(struct intel_dp *intel_dp, else WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); } +} + +static void intel_psr_disable_locked(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + + lockdep_assert_held(&dev_priv->psr.lock); + + if (!dev_priv->psr.enabled) + return; + + intel_psr_disable_source(intel_dp); - psr_aux_io_power_put(intel_dp); + /* Disable PSR on Sink */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + + dev_priv->psr.enabled = NULL; } /** @@ -867,23 +712,49 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } + intel_psr_disable_locked(intel_dp); + mutex_unlock(&dev_priv->psr.lock); + cancel_work_sync(&dev_priv->psr.work); +} - dev_priv->psr.disable_source(intel_dp, old_crtc_state); +int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg; + u32 mask; - /* Disable PSR on Sink */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); + if (!new_crtc_state->has_psr) + return 0; - dev_priv->psr.enabled = NULL; - mutex_unlock(&dev_priv->psr.lock); + /* + * The sole user right now is intel_pipe_update_start(), + * which won't race with psr_enable/disable, which is + * where psr2_enabled is written to. So, we don't need + * to acquire the psr.lock. More importantly, we want the + * latency inside intel_pipe_update_start() to be as low + * as possible, so no need to acquire psr.lock when it is + * not needed and will induce latencies in the atomic + * update path. + */ + if (dev_priv->psr.psr2_enabled) { + reg = EDP_PSR2_STATUS; + mask = EDP_PSR2_STATUS_STATE_MASK; + } else { + reg = EDP_PSR_STATUS; + mask = EDP_PSR_STATUS_STATE_MASK; + } - cancel_delayed_work_sync(&dev_priv->psr.work); + /* + * Max time for PSR to idle = Inverse of the refresh rate + + * 6 ms of exit training time + 1.5 ms of aux channel + * handshake. 50 msec is defesive enough to cover everything. 
+ */ + return intel_wait_for_register(dev_priv, reg, mask, + EDP_PSR_STATUS_STATE_IDLE, 50); } -static bool psr_wait_for_idle(struct drm_i915_private *dev_priv) +static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) { struct intel_dp *intel_dp; i915_reg_t reg; @@ -894,21 +765,12 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv) if (!intel_dp) return false; - if (HAS_DDI(dev_priv)) { - if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS; - mask = EDP_PSR2_STATUS_STATE_MASK; - } else { - reg = EDP_PSR_STATUS; - mask = EDP_PSR_STATUS_STATE_MASK; - } + if (dev_priv->psr.psr2_enabled) { + reg = EDP_PSR2_STATUS; + mask = EDP_PSR2_STATUS_STATE_MASK; } else { - struct drm_crtc *crtc = - dp_to_dig_port(intel_dp)->base.base.crtc; - enum pipe pipe = to_intel_crtc(crtc)->pipe; - - reg = VLV_PSRSTAT(pipe); - mask = VLV_EDP_PSR_IN_TRANS; + reg = EDP_PSR_STATUS; + mask = EDP_PSR_STATUS_STATE_MASK; } mutex_unlock(&dev_priv->psr.lock); @@ -925,17 +787,20 @@ static bool psr_wait_for_idle(struct drm_i915_private *dev_priv) static void intel_psr_work(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), psr.work.work); + container_of(work, typeof(*dev_priv), psr.work); mutex_lock(&dev_priv->psr.lock); + if (!dev_priv->psr.enabled) + goto unlock; + /* * We have to make sure PSR is ready for re-enable * otherwise it keeps disabled until next full enable/disable cycle. * PSR might take some time to get fully disabled * and be ready for re-enable. */ - if (!psr_wait_for_idle(dev_priv)) + if (!__psr_wait_for_idle_locked(dev_priv)) goto unlock; /* @@ -943,7 +808,7 @@ static void intel_psr_work(struct work_struct *work) * recheck. Since psr_flush first clears this and then reschedules we * won't ever miss a flush when bailing out here. */ - if (dev_priv->psr.busy_frontbuffer_bits) + if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active) goto unlock; intel_psr_activate(dev_priv->psr.enabled); @@ -953,103 +818,24 @@ unlock: static void intel_psr_exit(struct drm_i915_private *dev_priv) { - struct intel_dp *intel_dp = dev_priv->psr.enabled; - struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; - enum pipe pipe = to_intel_crtc(crtc)->pipe; u32 val; if (!dev_priv->psr.active) return; - if (HAS_DDI(dev_priv)) { - if (dev_priv->psr.psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); - WARN_ON(!(val & EDP_PSR2_ENABLE)); - I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); - } else { - val = I915_READ(EDP_PSR_CTL); - WARN_ON(!(val & EDP_PSR_ENABLE)); - I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); - } + if (dev_priv->psr.psr2_enabled) { + val = I915_READ(EDP_PSR2_CTL); + WARN_ON(!(val & EDP_PSR2_ENABLE)); + I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); } else { - val = I915_READ(VLV_PSRCTL(pipe)); - - /* - * Here we do the transition drirectly from - * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to - * PSR_state 5 (exit). - * PSR State 4 (active with single frame update) can be skipped. - * On PSR_state 5 (exit) Hardware is responsible to transition - * back to PSR_state 1 (inactive). - * Now we are at Same state after vlv_psr_enable_source. - */ - val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; - I915_WRITE(VLV_PSRCTL(pipe), val); - - /* - * Send AUX wake up - Spec says after transitioning to PSR - * active we have to send AUX wake up by writing 01h in DPCD - * 600h of sink device. 
- * XXX: This might slow down the transition, but without this - * HW doesn't complete the transition to PSR_state 1 and we - * never get the screen updated. - */ - drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, - DP_SET_POWER_D0); + val = I915_READ(EDP_PSR_CTL); + WARN_ON(!(val & EDP_PSR_ENABLE)); + I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); } - dev_priv->psr.active = false; } /** - * intel_psr_single_frame_update - Single Frame Update - * @dev_priv: i915 device - * @frontbuffer_bits: frontbuffer plane tracking bits - * - * Some platforms support a single frame update feature that is used to - * send and update only one frame on Remote Frame Buffer. - * So far it is only implemented for Valleyview and Cherryview because - * hardware requires this to be done before a page flip. - */ -void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, - unsigned frontbuffer_bits) -{ - struct drm_crtc *crtc; - enum pipe pipe; - u32 val; - - if (!CAN_PSR(dev_priv)) - return; - - /* - * Single frame update is already supported on BDW+ but it requires - * many W/A and it isn't really needed. - */ - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) - return; - - mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.enabled) { - mutex_unlock(&dev_priv->psr.lock); - return; - } - - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; - pipe = to_intel_crtc(crtc)->pipe; - - if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) { - val = I915_READ(VLV_PSRCTL(pipe)); - - /* - * We need to set this bit before writing registers for a flip. - * This bit will be self-clear when it gets to the PSR active state. - */ - I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE); - } - mutex_unlock(&dev_priv->psr.lock); -} - -/** * intel_psr_invalidate - Invalidade PSR * @dev_priv: i915 device * @frontbuffer_bits: frontbuffer plane tracking bits @@ -1071,7 +857,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, if (!CAN_PSR(dev_priv)) return; - if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP) return; mutex_lock(&dev_priv->psr.lock); @@ -1114,7 +900,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, if (!CAN_PSR(dev_priv)) return; - if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP) return; mutex_lock(&dev_priv->psr.lock); @@ -1131,8 +917,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, /* By definition flush = invalidate + flush */ if (frontbuffer_bits) { - if (dev_priv->psr.psr2_enabled || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (dev_priv->psr.psr2_enabled) { intel_psr_exit(dev_priv); } else { /* @@ -1149,9 +934,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, } if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) - if (!work_busy(&dev_priv->psr.work.work)) - schedule_delayed_work(&dev_priv->psr.work, - msecs_to_jiffies(100)); + schedule_work(&dev_priv->psr.work); mutex_unlock(&dev_priv->psr.lock); } @@ -1184,38 +967,64 @@ void intel_psr_init(struct drm_i915_private *dev_priv) if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) /* HSW and BDW require workarounds that we don't implement. */ dev_priv->psr.link_standby = false; - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - /* On VLV and CHV only standby mode is supported. 
*/ - dev_priv->psr.link_standby = true; else /* For new platforms let's respect VBT back again */ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; - /* Override link_standby x link_off defaults */ - if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) { - DRM_DEBUG_KMS("PSR: Forcing link standby\n"); - dev_priv->psr.link_standby = true; - } - if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) { - DRM_DEBUG_KMS("PSR: Forcing main link off\n"); - dev_priv->psr.link_standby = false; + INIT_WORK(&dev_priv->psr.work, intel_psr_work); + mutex_init(&dev_priv->psr.lock); +} + +void intel_psr_short_pulse(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_psr *psr = &dev_priv->psr; + u8 val; + const u8 errors = DP_PSR_RFB_STORAGE_ERROR | + DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | + DP_PSR_LINK_CRC_ERROR; + + if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp)) + return; + + mutex_lock(&psr->lock); + + if (psr->enabled != intel_dp) + goto exit; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) { + DRM_ERROR("PSR_STATUS dpcd read failed\n"); + goto exit; } - INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); - mutex_init(&dev_priv->psr.lock); + if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) { + DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n"); + intel_psr_disable_locked(intel_dp); + } - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->psr.enable_source = vlv_psr_enable_source; - dev_priv->psr.disable_source = vlv_psr_disable; - dev_priv->psr.enable_sink = vlv_psr_enable_sink; - dev_priv->psr.activate = vlv_psr_activate; - dev_priv->psr.setup_vsc = vlv_psr_setup_vsc; - } else { - dev_priv->psr.has_hw_tracking = true; - dev_priv->psr.enable_source = hsw_psr_enable_source; - dev_priv->psr.disable_source = hsw_psr_disable; - dev_priv->psr.enable_sink = hsw_psr_enable_sink; - dev_priv->psr.activate = hsw_psr_activate; - dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) { + DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n"); + goto exit; } + + if (val & DP_PSR_RFB_STORAGE_ERROR) + DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n"); + if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) + DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); + if (val & DP_PSR_LINK_CRC_ERROR) + DRM_ERROR("PSR Link CRC error, disabling PSR\n"); + + if (val & ~errors) + DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", + val & ~errors); + if (val & errors) + intel_psr_disable_locked(intel_dp); + /* clear status register */ + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); + + /* TODO: handle PSR2 errors */ +exit: + mutex_unlock(&psr->lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8f19349a6055..33faad3197fe 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -496,6 +496,10 @@ static int init_ring_common(struct intel_engine_cs *engine) DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n", engine->name, I915_READ_HEAD(engine)); + /* Check that the ring offsets point within the ring! 
*/ + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); + intel_ring_update_space(ring); I915_WRITE_HEAD(engine, ring->head); I915_WRITE_TAIL(engine, ring->tail); @@ -520,8 +524,6 @@ static int init_ring_common(struct intel_engine_cs *engine) goto out; } - intel_engine_init_hangcheck(engine); - if (INTEL_GEN(dev_priv) > 2) I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); @@ -531,16 +533,33 @@ out: return ret; } -static void reset_ring_common(struct intel_engine_cs *engine, - struct i915_request *request) +static struct i915_request *reset_prepare(struct intel_engine_cs *engine) { - /* - * RC6 must be prevented until the reset is complete and the engine - * reinitialised. If it occurs in the middle of this sequence, the - * state written to/loaded from the power context is ill-defined (e.g. - * the PP_BASE_DIR may be lost). - */ - assert_forcewakes_active(engine->i915, FORCEWAKE_ALL); + intel_engine_stop_cs(engine); + + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + return i915_gem_find_active_request(engine); +} + +static void skip_request(struct i915_request *rq) +{ + void *vaddr = rq->ring->vaddr; + u32 head; + + head = rq->infix; + if (rq->postfix < head) { + memset32(vaddr + head, MI_NOOP, + (rq->ring->size - head) / sizeof(u32)); + head = 0; + } + memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32)); +} + +static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) +{ + GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0); /* * Try to restore the logical GPU state to match the continuation @@ -556,47 +575,18 @@ static void reset_ring_common(struct intel_engine_cs *engine, * If the request was innocent, we try to replay the request with * the restored context. */ - if (request) { - struct drm_i915_private *dev_priv = request->i915; - struct intel_context *ce = to_intel_context(request->ctx, - engine); - struct i915_hw_ppgtt *ppgtt; - - if (ce->state) { - I915_WRITE(CCID, - i915_ggtt_offset(ce->state) | - BIT(8) /* must be set! 
*/ | - CCID_EXTENDED_STATE_SAVE | - CCID_EXTENDED_STATE_RESTORE | - CCID_EN); - } - - ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; - if (ppgtt) { - u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; - - I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); - - /* Wait for the PD reload to complete */ - if (intel_wait_for_register(dev_priv, - RING_PP_DIR_BASE(engine), - BIT(0), 0, - 10)) - DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); - - ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); - } - + if (rq) { /* If the rq hung, jump to its breadcrumb and skip the batch */ - if (request->fence.error == -EIO) - request->ring->head = request->postfix; - } else { - engine->legacy_active_context = NULL; - engine->legacy_active_ppgtt = NULL; + rq->ring->head = intel_ring_wrap(rq->ring, rq->head); + if (rq->fence.error == -EIO) + skip_request(rq); } } +static void reset_finish(struct intel_engine_cs *engine) +{ +} + static int intel_rcs_ctx_init(struct i915_request *rq) { int ret; @@ -1033,6 +1023,8 @@ int intel_ring_pin(struct intel_ring *ring, flags |= PIN_OFFSET_BIAS | offset_bias; if (vma->obj->stolen) flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { if (flags & PIN_MAPPABLE || map == I915_MAP_WC) @@ -1066,6 +1058,8 @@ err: void intel_ring_reset(struct intel_ring *ring, u32 tail) { + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); + ring->tail = tail; ring->head = tail; ring->emit = tail; @@ -1093,6 +1087,7 @@ void intel_ring_unpin(struct intel_ring *ring) static struct i915_vma * intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) { + struct i915_address_space *vm = &dev_priv->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -1102,10 +1097,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) if (IS_ERR(obj)) return ERR_CAST(obj); - /* mark ring buffers as read-only from GPU side by default */ - obj->gt_ro = 1; + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. + */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) goto err; @@ -1169,10 +1168,46 @@ intel_ring_free(struct intel_ring *ring) kfree(ring); } -static int context_pin(struct intel_context *ce) +static void intel_ring_context_destroy(struct intel_context *ce) { - struct i915_vma *vma = ce->state; - int ret; + GEM_BUG_ON(ce->pin_count); + + if (!ce->state) + return; + + GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); + i915_gem_object_put(ce->state->obj); +} + +static int __context_pin_ppgtt(struct i915_gem_context *ctx) +{ + struct i915_hw_ppgtt *ppgtt; + int err = 0; + + ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; + if (ppgtt) + err = gen6_ppgtt_pin(ppgtt); + + return err; +} + +static void __context_unpin_ppgtt(struct i915_gem_context *ctx) +{ + struct i915_hw_ppgtt *ppgtt; + + ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; + if (ppgtt) + gen6_ppgtt_unpin(ppgtt); +} + +static int __context_pin(struct intel_context *ce) +{ + struct i915_vma *vma; + int err; + + vma = ce->state; + if (!vma) + return 0; /* * Clear this page out of any CPU caches for coherent swap-in/out. @@ -1180,13 +1215,43 @@ static int context_pin(struct intel_context *ce) * on an active context (which by nature is already on the GPU). 
*/ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (ret) - return ret; + err = i915_gem_object_set_to_gtt_domain(vma->obj, true); + if (err) + return err; } - return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT, - PIN_GLOBAL | PIN_HIGH); + err = i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT, + PIN_GLOBAL | PIN_HIGH); + if (err) + return err; + + /* + * And mark it as a globally pinned object to let the shrinker know + * it cannot reclaim the object until we release it. + */ + vma->obj->pin_global++; + + return 0; +} + +static void __context_unpin(struct intel_context *ce) +{ + struct i915_vma *vma; + + vma = ce->state; + if (!vma) + return; + + vma->obj->pin_global--; + i915_vma_unpin(vma); +} + +static void intel_ring_context_unpin(struct intel_context *ce) +{ + __context_unpin_ppgtt(ce->gem_context); + __context_unpin(ce); + + i915_gem_context_put(ce->gem_context); } static struct i915_vma * @@ -1243,7 +1308,7 @@ alloc_context_vma(struct intel_engine_cs *engine) i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); } - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -1258,81 +1323,79 @@ err_obj: return ERR_PTR(err); } -static struct intel_ring * -intel_ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static struct intel_context * +__ring_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx, + struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - int ret; - - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - goto out; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! */ + int err; if (!ce->state && engine->context_size) { struct i915_vma *vma; vma = alloc_context_vma(engine); if (IS_ERR(vma)) { - ret = PTR_ERR(vma); + err = PTR_ERR(vma); goto err; } ce->state = vma; } - if (ce->state) { - ret = context_pin(ce); - if (ret) - goto err; + err = __context_pin(ce); + if (err) + goto err; - ce->state->obj->pin_global++; - } + err = __context_pin_ppgtt(ce->gem_context); + if (err) + goto err_unpin; i915_gem_context_get(ctx); -out: /* One ringbuffer to rule them all */ - return engine->buffer; + GEM_BUG_ON(!engine->buffer); + ce->ring = engine->buffer; + + return ce; +err_unpin: + __context_unpin(ce); err: ce->pin_count = 0; - return ERR_PTR(ret); + return ERR_PTR(err); } -static void intel_ring_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops ring_context_ops = { + .unpin = intel_ring_context_unpin, + .destroy = intel_ring_context_destroy, +}; + +static struct intel_context * +intel_ring_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { struct intel_context *ce = to_intel_context(ctx, engine); lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(ce->pin_count == 0); - if (--ce->pin_count) - return; + if (likely(ce->pin_count++)) + return ce; + GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - if (ce->state) { - ce->state->obj->pin_global--; - i915_vma_unpin(ce->state); - } + ce->ops = &ring_context_ops; - i915_gem_context_put(ctx); + return __ring_context_pin(engine, ctx, ce); } static int intel_init_ring_buffer(struct intel_engine_cs *engine) { - struct intel_ring *ring; struct i915_timeline *timeline; + struct intel_ring *ring; + unsigned int size; int err; intel_engine_setup_common(engine); - err = intel_engine_init_common(engine); - if (err) - goto err; - timeline = i915_timeline_create(engine->i915, engine->name); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); @@ -1354,8 +1417,23 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) GEM_BUG_ON(engine->buffer); engine->buffer = ring; + size = PAGE_SIZE; + if (HAS_BROKEN_CS_TLB(engine->i915)) + size = I830_WA_SIZE; + err = intel_engine_create_scratch(engine, size); + if (err) + goto err_unpin; + + err = intel_engine_init_common(engine); + if (err) + goto err_scratch; + return 0; +err_scratch: + intel_engine_cleanup_scratch(engine); +err_unpin: + intel_ring_unpin(ring); err_ring: intel_ring_free(ring); err: @@ -1392,6 +1470,48 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) intel_ring_reset(engine->buffer, 0); } +static int load_pd_dir(struct i915_request *rq, + const struct i915_hw_ppgtt *ppgtt) +{ + const struct intel_engine_cs * const engine = rq->engine; + u32 *cs; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); + *cs++ = PP_DIR_DCLV_2G; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = ppgtt->pd.base.ggtt_offset << 10; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int flush_pd_dir(struct i915_request *rq) +{ + const struct intel_engine_cs * const engine = rq->engine; + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* Stall until the page table load is complete */ + *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = i915_ggtt_offset(engine->scratch); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + return 0; +} + static inline int mi_set_context(struct i915_request *rq, u32 flags) { struct drm_i915_private *i915 = rq->i915; @@ -1402,6 +1522,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ? INTEL_INFO(i915)->num_rings - 1 : 0; + bool force_restore = false; int len; u32 *cs; @@ -1415,6 +1536,12 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) len = 4; if (IS_GEN7(i915)) len += 2 + (num_rings ? 4*num_rings + 6 : 0); + if (flags & MI_FORCE_RESTORE) { + GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); + flags &= ~MI_FORCE_RESTORE; + force_restore = true; + len += 2; + } cs = intel_ring_begin(rq, len); if (IS_ERR(cs)) @@ -1439,9 +1566,29 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) } } + if (force_restore) { + /* + * The HW doesn't handle being told to restore the current + * context very well. Quite often it likes to go off and + * sulk, especially when it is meant to be reloading PP_DIR. + * A very simple fix to force the reload is to simply switch + * away from the current context and back again. + * + * Note that the kernel_context will contain random state + * following the INHIBIT_RESTORE. 
We accept this since we + * never use the kernel_context state; it is merely a + * placeholder we use to flush other contexts. + */ + *cs++ = MI_SET_CONTEXT; + *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context, + engine)->state) | + MI_MM_SPACE_GTT | + MI_RESTORE_INHIBIT; + } + *cs++ = MI_NOOP; *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags; + *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv @@ -1509,31 +1656,28 @@ static int remap_l3(struct i915_request *rq, int slice) static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *to_ctx = rq->ctx; - struct i915_hw_ppgtt *to_mm = - to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; - struct i915_gem_context *from_ctx = engine->legacy_active_context; - struct i915_hw_ppgtt *from_mm = engine->legacy_active_ppgtt; + struct i915_gem_context *ctx = rq->gem_context; + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; + unsigned int unwind_mm = 0; u32 hw_flags = 0; int ret, i; lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); - if (to_mm != from_mm || - (to_mm && intel_engine_flag(engine) & to_mm->pd_dirty_rings)) { - trace_switch_mm(engine, to_ctx); - ret = to_mm->switch_mm(to_mm, rq); + if (ppgtt) { + ret = load_pd_dir(rq, ppgtt); if (ret) goto err; - to_mm->pd_dirty_rings &= ~intel_engine_flag(engine); - engine->legacy_active_ppgtt = to_mm; - hw_flags = MI_FORCE_RESTORE; + if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { + unwind_mm = intel_engine_flag(engine); + ppgtt->pd_dirty_rings &= ~unwind_mm; + hw_flags = MI_FORCE_RESTORE; + } } - if (to_intel_context(to_ctx, engine)->state && - (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) { + if (rq->hw_context->state) { GEM_BUG_ON(engine->id != RCS); /* @@ -1543,35 +1687,38 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. 
*/ - if (i915_gem_context_is_kernel(to_ctx)) + if (i915_gem_context_is_kernel(ctx)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); if (ret) goto err_mm; + } - engine->legacy_active_context = to_ctx; + if (ppgtt) { + ret = flush_pd_dir(rq); + if (ret) + goto err_mm; } - if (to_ctx->remap_slice) { + if (ctx->remap_slice) { for (i = 0; i < MAX_L3_SLICES; i++) { - if (!(to_ctx->remap_slice & BIT(i))) + if (!(ctx->remap_slice & BIT(i))) continue; ret = remap_l3(rq, i); if (ret) - goto err_ctx; + goto err_mm; } - to_ctx->remap_slice = 0; + ctx->remap_slice = 0; } return 0; -err_ctx: - engine->legacy_active_context = from_ctx; err_mm: - engine->legacy_active_ppgtt = from_mm; + if (unwind_mm) + ppgtt->pd_dirty_rings |= unwind_mm; err: return ret; } @@ -1580,7 +1727,7 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count); + GEM_BUG_ON(!request->hw_context->pin_count); /* Flush enough space to reduce the likelihood of waiting after * we start building the request - in which case we will just @@ -2006,11 +2153,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, intel_ring_init_semaphores(dev_priv, engine); engine->init_hw = init_ring_common; - engine->reset_hw = reset_ring_common; + engine->reset.prepare = reset_prepare; + engine->reset.reset = reset_ring; + engine->reset.finish = reset_finish; engine->context_pin = intel_ring_context_pin; - engine->context_unpin = intel_ring_context_unpin; - engine->request_alloc = ring_request_alloc; engine->emit_breadcrumb = i9xx_emit_breadcrumb; @@ -2074,16 +2221,6 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine) if (ret) return ret; - if (INTEL_GEN(dev_priv) >= 6) { - ret = intel_engine_create_scratch(engine, PAGE_SIZE); - if (ret) - return ret; - } else if (HAS_BROKEN_CS_TLB(dev_priv)) { - ret = intel_engine_create_scratch(engine, I830_WA_SIZE); - if (ret) - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 010750e8ee44..f5ffa6d31e82 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -122,7 +122,8 @@ struct intel_engine_hangcheck { int deadlock; struct intel_instdone instdone; struct i915_request *active_request; - bool stalled; + bool stalled:1; + bool wedged:1; }; struct intel_ring { @@ -192,6 +193,11 @@ struct i915_priolist { int priority; }; +struct st_preempt_hang { + struct completion completion; + bool inject_hang; +}; + /** * struct intel_engine_execlists - execlist submission queue and port state * @@ -291,32 +297,49 @@ struct intel_engine_execlists { /** * @queue: queue of requests, in priority lists */ - struct rb_root queue; + struct rb_root_cached queue; /** - * @first: leftmost level in priority @queue + * @csb_read: control register for Context Switch buffer + * + * Note this register is always in mmio. */ - struct rb_node *first; + u32 __iomem *csb_read; /** - * @fw_domains: forcewake domains for irq tasklet + * @csb_write: control register for Context Switch buffer + * + * Note this register may be either mmio or HWSP shadow. */ - unsigned int fw_domains; + u32 *csb_write; /** - * @csb_head: context status buffer head + * @csb_status: status array for Context Switch buffer + * + * Note these registers may be either mmio or HWSP shadow. 
*/ - unsigned int csb_head; + u32 *csb_status; /** - * @csb_use_mmio: access csb through mmio, instead of hwsp + * @preempt_complete_status: expected CSB upon completing preemption */ - bool csb_use_mmio; + u32 preempt_complete_status; /** - * @preempt_complete_status: expected CSB upon completing preemption + * @csb_write_reset: reset value for CSB write pointer + * + * As the CSB write pointer may be either in HWSP or as a field + * inside an mmio register, we want to reprogram it slightly + * differently to avoid later confusion. */ - u32 preempt_complete_status; + u32 csb_write_reset; + + /** + * @csb_head: context status buffer head + */ + u8 csb_head; + + I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) }; #define INTEL_ENGINE_CS_MAX_NAME 8 @@ -342,11 +365,10 @@ struct intel_engine_cs { struct i915_timeline timeline; struct drm_i915_gem_object *default_state; + void *pinned_default_state; - atomic_t irq_count; unsigned long irq_posted; #define ENGINE_IRQ_BREADCRUMB 0 -#define ENGINE_IRQ_EXECLIST 1 /* Rather than have every client wait upon all user interrupts, * with the herd waking after every interrupt and each doing the @@ -378,6 +400,7 @@ struct intel_engine_cs { unsigned int hangcheck_interrupts; unsigned int irq_enabled; + unsigned int irq_count; bool irq_armed : 1; I915_SELFTEST_DECLARE(bool mock : 1); @@ -423,18 +446,22 @@ struct intel_engine_cs { void (*irq_disable)(struct intel_engine_cs *engine); int (*init_hw)(struct intel_engine_cs *engine); - void (*reset_hw)(struct intel_engine_cs *engine, - struct i915_request *rq); + + struct { + struct i915_request *(*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, + struct i915_request *rq); + void (*finish)(struct intel_engine_cs *engine); + } reset; void (*park)(struct intel_engine_cs *engine); void (*unpark)(struct intel_engine_cs *engine); void (*set_default_submission)(struct intel_engine_cs *engine); - struct intel_ring *(*context_pin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); - void (*context_unpin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); + struct intel_context *(*context_pin)(struct intel_engine_cs *engine, + struct i915_gem_context *ctx); + int (*request_alloc)(struct i915_request *rq); int (*init_context)(struct i915_request *rq); @@ -550,16 +577,7 @@ struct intel_engine_cs { * to the kernel context and trash it as the save may not happen * before the hardware is powered down. */ - struct i915_gem_context *last_retired_context; - - /* We track the current MI_SET_CONTEXT in order to eliminate - * redudant context switches. This presumes that requests are not - * reordered! Or when they are the tracking is updated along with - * the emission of individual requests into the legacy command - * stream (ring). 
- */ - struct i915_gem_context *legacy_active_context; - struct i915_hw_ppgtt *legacy_active_ppgtt; + struct intel_context *last_retired_context; /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; @@ -672,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists, __clear_bit(bit, (unsigned long *)&execlists->active); } +static inline void +execlists_clear_all_active(struct intel_engine_execlists *execlists) +{ + execlists->active = 0; +} + static inline bool execlists_is_active(const struct intel_engine_execlists *execlists, unsigned int bit) @@ -809,6 +833,19 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) return pos & (ring->size - 1); } +static inline bool +intel_ring_offset_valid(const struct intel_ring *ring, + unsigned int pos) +{ + if (pos & -ring->size) /* must be strictly within the ring */ + return false; + + if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ + return false; + + return true; +} + static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) { /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ @@ -820,12 +857,7 @@ static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) static inline void assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) { - /* We could combine these into a single tail operation, but keeping - * them as seperate tests will help identify the cause should one - * ever fire. - */ - GEM_BUG_ON(!IS_ALIGNED(tail, 8)); - GEM_BUG_ON(tail >= ring->size); + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); /* * "Ring Buffer Use" @@ -865,14 +897,19 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); void intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); -int intel_engine_create_scratch(struct intel_engine_cs *engine, int size); void intel_engine_cleanup_common(struct intel_engine_cs *engine); +int intel_engine_create_scratch(struct intel_engine_cs *engine, + unsigned int size); +void intel_engine_cleanup_scratch(struct intel_engine_cs *engine); + int intel_init_render_ring_buffer(struct intel_engine_cs *engine); int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine); int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); +int intel_engine_stop_cs(struct intel_engine_cs *engine); + u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); @@ -918,11 +955,10 @@ static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) /* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */ int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); -static inline void intel_wait_init(struct intel_wait *wait, - struct i915_request *rq) +static inline void intel_wait_init(struct intel_wait *wait) { wait->tsk = current; - wait->request = rq; + wait->request = NULL; } static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno) @@ -1042,10 +1078,13 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset) return cs; } +void intel_engines_sanitize(struct drm_i915_private *i915); + bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct drm_i915_private *dev_priv); bool intel_engine_has_kernel_context(const struct 
intel_engine_cs *engine); +void intel_engine_lost_context(struct intel_engine_cs *engine); void intel_engines_park(struct drm_i915_private *i915); void intel_engines_unpark(struct drm_i915_private *i915); @@ -1123,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine); ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + if (!execlists->preempt_hang.inject_hang) + return false; + + complete(&execlists->preempt_hang.completion); + return true; +} + +#else + +static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) +{ + return false; +} + +#endif + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53a6eaa9671a..6b5aa3b074ec 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -128,10 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_C"; case POWER_DOMAIN_AUX_D: return "AUX_D"; + case POWER_DOMAIN_AUX_E: + return "AUX_E"; case POWER_DOMAIN_AUX_F: return "AUX_F"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; + case POWER_DOMAIN_AUX_TBT1: + return "AUX_TBT1"; + case POWER_DOMAIN_AUX_TBT2: + return "AUX_TBT2"; + case POWER_DOMAIN_AUX_TBT3: + return "AUX_TBT3"; + case POWER_DOMAIN_AUX_TBT4: + return "AUX_TBT4"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -382,7 +392,8 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, u32 val; if (wait_fuses) { - pg = SKL_PW_TO_PG(id); + pg = INTEL_GEN(dev_priv) >= 11 ? ICL_PW_TO_PG(id) : + SKL_PW_TO_PG(id); /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -428,6 +439,43 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } +#define ICL_AUX_PW_TO_PORT(pw) ((pw) - ICL_DISP_PW_AUX_A) + +static void +icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = power_well->id; + enum port port = ICL_AUX_PW_TO_PORT(id); + u32 val; + + val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), val | HSW_PWR_WELL_CTL_REQ(id)); + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); + + hsw_wait_for_power_well_enable(dev_priv, power_well); +} + +static void +icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum i915_power_well_id id = power_well->id; + enum port port = ICL_AUX_PW_TO_PORT(id); + u32 val; + + val = I915_READ(ICL_PORT_CL_DW12(port)); + I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); + + val = I915_READ(HSW_PWR_WELL_CTL_DRIVER(id)); + I915_WRITE(HSW_PWR_WELL_CTL_DRIVER(id), + val & ~HSW_PWR_WELL_CTL_REQ(id)); + + hsw_wait_for_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -1822,6 +1870,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 
BIT_ULL(POWER_DOMAIN_INIT)) #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ @@ -1894,6 +1943,105 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) +/* + * ICL PW_0/PG_0 domains (HW/DMC control): + * - PCI + * - clocks except port PLL + * - central power except FBC + * - shared functions except pipe interrupts, pipe MBUS, DBUF registers + * ICL PW_1/PG_1 domains (HW/DMC control): + * - DBUF function + * - PIPE_A and its planes, except VGA + * - transcoder EDP + PSR + * - transcoder DSI + * - DDI_A + * - FBC + */ +#define ICL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* VDSC/joining */ +#define ICL_PW_3_POWER_DOMAINS ( \ + ICL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - transcoder WD + * - KVMR (HW control) + */ +#define ICL_PW_2_POWER_DOMAINS ( \ + ICL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_INIT)) + /* + * - eDP/DSI VDSC + * - KVMR (HW control) + */ +#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + ICL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define ICL_DDI_IO_A_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) +#define ICL_DDI_IO_B_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) +#define ICL_DDI_IO_C_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) +#define ICL_DDI_IO_D_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) +#define ICL_DDI_IO_E_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) +#define ICL_DDI_IO_F_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) + +#define ICL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_A)) +#define ICL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +#define ICL_AUX_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +#define ICL_AUX_D_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +#define ICL_AUX_E_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +#define ICL_AUX_F_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1)) +#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2)) +#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3)) +#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4)) + static const struct i915_power_well_ops 
i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -2451,6 +2599,157 @@ static struct i915_power_well cnl_power_wells[] = { }, }; +static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { + .sync_hw = hsw_power_well_sync_hw, + .enable = icl_combo_phy_aux_power_well_enable, + .disable = icl_combo_phy_aux_power_well_disable, + .is_enabled = hsw_power_well_enabled, +}; + +static struct i915_power_well icl_power_wells[] = { + { + .name = "always-on", + .always_on = 1, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = I915_DISP_PW_ALWAYS_ON, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .domains = 0, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_1, + .hsw.has_fuses = true, + }, + { + .name = "power well 2", + .domains = ICL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_2, + .hsw.has_fuses = true, + }, + { + .name = "DC off", + .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = SKL_DISP_PW_DC_OFF, + }, + { + .name = "power well 3", + .domains = ICL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_A, + }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_B, + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_C, + }, + { + .name = "DDI D IO", + .domains = ICL_DDI_IO_D_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_D, + }, + { + .name = "DDI E IO", + .domains = ICL_DDI_IO_E_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_E, + }, + { + .name = "DDI F IO", + .domains = ICL_DDI_IO_F_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_DDI_F, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = ICL_DISP_PW_AUX_A, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = ICL_DISP_PW_AUX_B, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_C, + }, + { + .name = "AUX D", + .domains = ICL_AUX_D_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_D, + }, + { + .name = "AUX E", + .domains = ICL_AUX_E_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_E, + }, + { + .name = "AUX F", + .domains = ICL_AUX_F_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_F, + }, + { + .name = "AUX TBT1", + .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT1, + }, + { + .name = "AUX TBT2", + .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT2, + }, + { + .name = "AUX TBT3", + .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT3, + }, + { + .name = "AUX TBT4", + .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_AUX_TBT4, + }, + { + .name = "power well 4", + .domains = ICL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = ICL_DISP_PW_4, + 
.hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + }, +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -2468,7 +2767,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { + if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { max_dc = 2; mask = 0; } else if (IS_GEN9_LP(dev_priv)) { @@ -2556,7 +2855,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_HASWELL(dev_priv)) { + if (IS_ICELAKE(dev_priv)) { + set_power_wells(power_domains, icl_power_wells); + } else if (IS_HASWELL(dev_priv)) { set_power_wells(power_domains, hsw_power_wells); } else if (IS_BROADWELL(dev_priv)) { set_power_wells(power_domains, bdw_power_wells); @@ -2911,6 +3212,7 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); + /* fall through */ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; break; @@ -3023,6 +3325,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) static void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; enum port port; u32 val; @@ -3051,8 +3355,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, I915_WRITE(ICL_PORT_CL_DW5(port), val); } - /* 4. Enable power well 1 (PG1) and aux IO power. */ - /* FIXME: ICL power wells code not here yet. */ + /* + * 4. Enable Power Well 1 (PG1). + * The AUX IO power wells will be enabled on demand. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + intel_power_well_enable(dev_priv, well); + mutex_unlock(&power_domains->lock); /* 5. Enable CDCLK. */ icl_init_cdclk(dev_priv); @@ -3070,6 +3380,8 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, static void icl_display_core_uninit(struct drm_i915_private *dev_priv) { + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *well; enum port port; u32 val; @@ -3083,8 +3395,15 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) /* 3. Disable CD clock */ icl_uninit_cdclk(dev_priv); - /* 4. Disable Power Well 1 (PG1) and Aux IO Power */ - /* FIXME: ICL power wells code not here yet. */ + /* + * 4. Disable Power Well 1 (PG1). + * The AUX IO power wells are toggled on demand, so they are already + * disabled at this point. + */ + mutex_lock(&power_domains->lock); + well = lookup_power_well(dev_priv, ICL_DISP_PW_1); + intel_power_well_disable(dev_priv, well); + mutex_unlock(&power_domains->lock); /* 5. 
Disable Comp */ for (port = PORT_A; port <= PORT_B; port++) { diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26975df4e593..812fe7b06f87 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1340,6 +1340,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, switch (crtc_state->pixel_multiplier) { default: WARN(1, "unknown pixel multiplier specified\n"); + /* fall through */ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -1400,33 +1401,40 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); - if (active_outputs & intel_sdvo_connector->output_flag) - return true; + return active_outputs & intel_sdvo_connector->output_flag; +} + +bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, + i915_reg_t sdvo_reg, enum pipe *pipe) +{ + u32 val; + + val = I915_READ(sdvo_reg); + + /* asserts want to know the pipe even if the port is disabled */ + if (HAS_PCH_CPT(dev_priv)) + *pipe = (val & SDVO_PIPE_SEL_MASK_CPT) >> SDVO_PIPE_SEL_SHIFT_CPT; + else if (IS_CHERRYVIEW(dev_priv)) + *pipe = (val & SDVO_PIPE_SEL_MASK_CHV) >> SDVO_PIPE_SEL_SHIFT_CHV; else - return false; + *pipe = (val & SDVO_PIPE_SEL_MASK) >> SDVO_PIPE_SEL_SHIFT; + + return val & SDVO_ENABLE; } static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; - u32 tmp; + bool ret; - tmp = I915_READ(intel_sdvo->sdvo_reg); intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); - if (!(tmp & SDVO_ENABLE) && (active_outputs == 0)) - return false; + ret = intel_sdvo_port_enabled(dev_priv, intel_sdvo->sdvo_reg, pipe); - if (HAS_PCH_CPT(dev_priv)) - *pipe = PORT_TO_PIPE_CPT(tmp); - else - *pipe = PORT_TO_PIPE(tmp); - - return true; + return ret || active_outputs; } static void intel_sdvo_get_config(struct intel_encoder *encoder, @@ -1553,8 +1561,8 @@ static void intel_disable_sdvo(struct intel_encoder *encoder, intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false); intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false); - temp &= ~SDVO_PIPE_B_SELECT; - temp |= SDVO_ENABLE; + temp &= ~SDVO_PIPE_SEL_MASK; + temp |= SDVO_ENABLE | SDVO_PIPE_SEL(PIPE_A); intel_sdvo_write_sdvox(intel_sdvo, temp); temp &= ~SDVO_ENABLE; @@ -1903,7 +1911,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) if (edid != NULL) { if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), edid)) { - drm_mode_connector_update_edid_property(connector, edid); + drm_connector_update_edid_property(connector, edid); drm_add_edid_modes(connector, edid); } @@ -2306,14 +2314,19 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo) switch (sdvo->controlled_output) { case SDVO_OUTPUT_LVDS1: mask |= SDVO_OUTPUT_LVDS1; + /* fall through */ case SDVO_OUTPUT_LVDS0: mask |= SDVO_OUTPUT_LVDS0; + /* fall through */ case SDVO_OUTPUT_TMDS1: mask |= SDVO_OUTPUT_TMDS1; + /* fall through */ case SDVO_OUTPUT_TMDS0: mask |= SDVO_OUTPUT_TMDS0; + /* fall through */ case SDVO_OUTPUT_RGB1: mask |= SDVO_OUTPUT_RGB1; + /* fall through */ case SDVO_OUTPUT_RGB0: mask |= SDVO_OUTPUT_RGB0; break; diff --git 
a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index ee23613f9fd4..f7026e887fa9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,20 +41,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -bool intel_format_is_yuv(u32 format) -{ - switch (format) { - case DRM_FORMAT_YUYV: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_NV12: - return true; - default: - return false; - } -} - int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -107,13 +93,21 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) VBLANK_EVASION_TIME_US); max = vblank_start - 1; - local_irq_disable(); - if (min <= 0 || max <= 0) - return; + goto irq_disable; if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) - return; + goto irq_disable; + + /* + * Wait for psr to idle out after enabling the VBL interrupts + * VBL interrupts will start the PSR exit and prevent a PSR + * re-entry as well. + */ + if (intel_psr_wait_for_idle(new_crtc_state)) + DRM_ERROR("PSR idle timed out, atomic update may fail\n"); + + local_irq_disable(); crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; @@ -171,6 +165,10 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_i915_pipe_update_vblank_evaded(crtc); + return; + +irq_disable: + local_irq_disable(); } /** @@ -284,13 +282,35 @@ skl_update_plane(struct intel_plane *plane, /* program plane scaler */ if (plane_state->scaler_id >= 0) { int scaler_id = plane_state->scaler_id; - const struct intel_scaler *scaler; + const struct intel_scaler *scaler = + &crtc_state->scaler_state.scalers[scaler_id]; + u16 y_hphase, uv_rgb_hphase; + u16 y_vphase, uv_rgb_vphase; + + /* TODO: handle sub-pixel coordinates */ + if (fb->format->format == DRM_FORMAT_NV12) { + y_hphase = skl_scaler_calc_phase(1, false); + y_vphase = skl_scaler_calc_phase(1, false); + + /* MPEG2 chroma siting convention */ + uv_rgb_hphase = skl_scaler_calc_phase(2, true); + uv_rgb_vphase = skl_scaler_calc_phase(2, false); + } else { + /* not used */ + y_hphase = 0; + y_vphase = 0; - scaler = &crtc_state->scaler_state.scalers[scaler_id]; + uv_rgb_hphase = skl_scaler_calc_phase(1, false); + uv_rgb_vphase = skl_scaler_calc_phase(1, false); + } I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode); I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); + I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), + PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); + I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), + PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), ((crtc_w + 1) << 16)|(crtc_h + 1)); @@ -327,19 +347,21 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) } bool -skl_plane_get_hw_state(struct intel_plane *plane) +skl_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; bool ret; - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(PLANE_CTL(pipe, plane_id)) & 
PLANE_CTL_ENABLE; + ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE; + + *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain); @@ -380,7 +402,7 @@ chv_update_csc(const struct intel_plane_state *plane_state) const s16 *csc = csc_matrix[plane_state->base.color_encoding]; /* Seems RGB data bypasses the CSC always */ - if (!intel_format_is_yuv(fb->format->format)) + if (!fb->format->is_yuv) return; I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0)); @@ -415,7 +437,7 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) enum plane_id plane_id = plane->id; int contrast, brightness, sh_scale, sh_sin, sh_cos; - if (intel_format_is_yuv(fb->format->format) && + if (fb->format->is_yuv && plane_state->base.color_range == DRM_COLOR_YCBCR_LIMITED_RANGE) { /* * Expand limited range to full range: @@ -588,19 +610,21 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) } static bool -vlv_plane_get_hw_state(struct intel_plane *plane) +vlv_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; enum plane_id plane_id = plane->id; - enum pipe pipe = plane->pipe; bool ret; - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(SPCNTR(pipe, plane_id)) & SP_ENABLE; + ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE; + + *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain); @@ -754,18 +778,20 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) } static bool -ivb_plane_get_hw_state(struct intel_plane *plane) +ivb_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; - enum pipe pipe = plane->pipe; bool ret; - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(SPRCTL(pipe)) & SPRITE_ENABLE; + ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE; + + *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain); @@ -910,18 +936,20 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) } static bool -g4x_plane_get_hw_state(struct intel_plane *plane) +g4x_plane_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; - enum pipe pipe = plane->pipe; bool ret; - power_domain = POWER_DOMAIN_PIPE(pipe); + power_domain = POWER_DOMAIN_PIPE(plane->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = I915_READ(DVSCNTR(pipe)) & DVS_ENABLE; + ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE; + + *pipe = plane->pipe; intel_display_power_put(dev_priv, power_domain); @@ -1010,7 +1038,7 @@ intel_check_sprite_plane(struct intel_plane *plane, src->y1 = src_y << 16; src->y2 = (src_y + src_h) << 16; - if (intel_format_is_yuv(fb->format->format) && + if (fb->format->is_yuv && fb->format->format != DRM_FORMAT_NV12 && (src_x % 2 || src_w % 2)) { DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", @@ -1071,6 +1099,37 @@ intel_check_sprite_plane(struct intel_plane *plane, return 0; } +static bool 
has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) +{ + return INTEL_GEN(dev_priv) >= 9; +} + +static void intel_plane_set_ckey(struct intel_plane_state *plane_state, + const struct drm_intel_sprite_colorkey *set) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + + *key = *set; + + /* + * We want src key enabled on the + * sprite and not on the primary. + */ + if (plane->id == PLANE_PRIMARY && + set->flags & I915_SET_COLORKEY_SOURCE) + key->flags = 0; + + /* + * On SKL+ we want dst key enabled on + * the primary and not on the sprite. + */ + if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_PRIMARY && + set->flags & I915_SET_COLORKEY_DESTINATION) + key->flags = 0; +} + int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -1100,6 +1159,16 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) return -ENOENT; + /* + * SKL+ only plane 2 can do destination keying against plane 1. + * Also multiple planes can't do destination keying on the same + * pipe simultaneously. + */ + if (INTEL_GEN(dev_priv) >= 9 && + to_intel_plane(plane)->id >= PLANE_SPRITE1 && + set->flags & I915_SET_COLORKEY_DESTINATION) + return -EINVAL; + drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(plane->dev); @@ -1112,11 +1181,28 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, while (1) { plane_state = drm_atomic_get_plane_state(state, plane); ret = PTR_ERR_OR_ZERO(plane_state); - if (!ret) { - to_intel_plane_state(plane_state)->ckey = *set; - ret = drm_atomic_commit(state); + if (!ret) + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); + + /* + * On some platforms we have to configure + * the dst colorkey on the primary plane. 
+ */ + if (!ret && has_dst_key_in_primary_plane(dev_priv)) { + struct intel_crtc *crtc = + intel_get_crtc_for_pipe(dev_priv, + to_intel_plane(plane)->pipe); + + plane_state = drm_atomic_get_plane_state(state, + crtc->base.primary); + ret = PTR_ERR_OR_ZERO(plane_state); + if (!ret) + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); } + if (!ret) + ret = drm_atomic_commit(state); + if (ret != -EDEADLK) break; @@ -1211,8 +1297,17 @@ static const uint64_t skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; -static bool g4x_mod_supported(uint32_t format, uint64_t modifier) +static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_YUYV: @@ -1228,8 +1323,17 @@ static bool g4x_mod_supported(uint32_t format, uint64_t modifier) } } -static bool snb_mod_supported(uint32_t format, uint64_t modifier) +static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: @@ -1246,8 +1350,17 @@ static bool snb_mod_supported(uint32_t format, uint64_t modifier) } } -static bool vlv_mod_supported(uint32_t format, uint64_t modifier) +static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_ABGR8888: @@ -1269,8 +1382,26 @@ static bool vlv_mod_supported(uint32_t format, uint64_t modifier) } } -static bool skl_mod_supported(uint32_t format, uint64_t modifier) +static bool skl_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) { + struct intel_plane *plane = to_intel_plane(_plane); + + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + break; + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Yf_TILED_CCS: + if (!plane->has_ccs) + return false; + break; + default: + return false; + } + switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: @@ -1302,38 +1433,48 @@ static bool skl_mod_supported(uint32_t format, uint64_t modifier) } } -static bool intel_sprite_plane_format_mod_supported(struct drm_plane *plane, - uint32_t format, - uint64_t modifier) -{ - struct drm_i915_private *dev_priv = to_i915(plane->dev); - - if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) - return false; +static const struct drm_plane_funcs g4x_sprite_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = g4x_sprite_format_mod_supported, +}; - if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL && - modifier != DRM_FORMAT_MOD_LINEAR) - return false; +static const struct drm_plane_funcs snb_sprite_funcs = { + .update_plane = drm_atomic_helper_update_plane, + 
.disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = snb_sprite_format_mod_supported, +}; - if (INTEL_GEN(dev_priv) >= 9) - return skl_mod_supported(format, modifier); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return vlv_mod_supported(format, modifier); - else if (INTEL_GEN(dev_priv) >= 6) - return snb_mod_supported(format, modifier); - else - return g4x_mod_supported(format, modifier); -} +static const struct drm_plane_funcs vlv_sprite_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = vlv_sprite_format_mod_supported, +}; -static const struct drm_plane_funcs intel_sprite_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_get_property = intel_plane_atomic_get_property, - .atomic_set_property = intel_plane_atomic_set_property, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = intel_sprite_plane_format_mod_supported, +static const struct drm_plane_funcs skl_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = skl_plane_format_mod_supported, }; bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, @@ -1359,6 +1500,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, { struct intel_plane *intel_plane = NULL; struct intel_plane_state *state = NULL; + const struct drm_plane_funcs *plane_funcs; unsigned long possible_crtcs; const uint32_t *plane_formats; const uint64_t *modifiers; @@ -1383,6 +1525,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, intel_plane->can_scale = true; state->scaler_id = -1; + intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, + PLANE_SPRITE0 + plane); + intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; intel_plane->get_hw_state = skl_plane_get_hw_state; @@ -1396,10 +1541,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, num_plane_formats = ARRAY_SIZE(skl_plane_formats); } - if (skl_plane_has_ccs(dev_priv, pipe, PLANE_SPRITE0 + plane)) + if (intel_plane->has_ccs) modifiers = skl_plane_format_modifiers_ccs; else modifiers = skl_plane_format_modifiers_noccs; + + plane_funcs = &skl_plane_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_plane->can_scale = false; intel_plane->max_downscale = 1; @@ -1411,6 +1558,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane_formats = vlv_plane_formats; num_plane_formats = ARRAY_SIZE(vlv_plane_formats); modifiers = 
i9xx_plane_format_modifiers; + + plane_funcs = &vlv_sprite_funcs; } else if (INTEL_GEN(dev_priv) >= 7) { if (IS_IVYBRIDGE(dev_priv)) { intel_plane->can_scale = true; @@ -1427,6 +1576,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); modifiers = i9xx_plane_format_modifiers; + + plane_funcs = &snb_sprite_funcs; } else { intel_plane->can_scale = true; intel_plane->max_downscale = 16; @@ -1439,9 +1590,13 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, if (IS_GEN6(dev_priv)) { plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); + + plane_funcs = &snb_sprite_funcs; } else { plane_formats = g4x_plane_formats; num_plane_formats = ARRAY_SIZE(g4x_plane_formats); + + plane_funcs = &g4x_sprite_funcs; } } @@ -1468,14 +1623,14 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) >= 9) ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, - possible_crtcs, &intel_sprite_plane_funcs, + possible_crtcs, plane_funcs, plane_formats, num_plane_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "plane %d%c", plane + 2, pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, - possible_crtcs, &intel_sprite_plane_funcs, + possible_crtcs, plane_funcs, plane_formats, num_plane_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b55b5c157e38..b5b04cb892e9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -798,16 +798,12 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector) static bool intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp = I915_READ(TV_CTL); - if (!(tmp & TV_ENC_ENABLE)) - return false; + *pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT; - *pipe = PORT_TO_PIPE(tmp); - - return true; + return tmp & TV_ENC_ENABLE; } static void @@ -1032,8 +1028,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder, break; } - if (intel_crtc->pipe == 1) - tv_ctl |= TV_ENC_PIPEB_SELECT; + tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe); tv_ctl |= tv_mode->oversample; if (tv_mode->progressive) @@ -1157,12 +1152,9 @@ intel_tv_detect_type(struct intel_tv *intel_tv, save_tv_ctl = tv_ctl = I915_READ(TV_CTL); /* Poll for TV detection */ - tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); + tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK); tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; - if (intel_crtc->pipe == 1) - tv_ctl |= TV_ENC_PIPEB_SELECT; - else - tv_ctl &= ~TV_ENC_PIPEB_SELECT; + tv_ctl |= TV_ENC_PIPE_SEL(intel_crtc->pipe); tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); tv_dac |= (TVDAC_STATE_CHG_EN | @@ -1355,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr = drm_mode_create(connector->dev); if (!mode_ptr) continue; - strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); - mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0'; + strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); mode_ptr->hdisplay = hactive_s; mode_ptr->hsync_start = hactive_s + 1; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 1cffaf7b5dbe..7c95697e1a35 100644 --- 
a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -50,10 +50,10 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) return ret; } -static int __get_platform_enable_guc(struct drm_i915_private *dev_priv) +static int __get_platform_enable_guc(struct drm_i915_private *i915) { - struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; - struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; + struct intel_uc_fw *guc_fw = &i915->guc.fw; + struct intel_uc_fw *huc_fw = &i915->huc.fw; int enable_guc = 0; /* Default is to enable GuC/HuC if we know their firmwares */ @@ -67,11 +67,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *dev_priv) return enable_guc; } -static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) +static int __get_default_guc_log_level(struct drm_i915_private *i915) { int guc_log_level; - if (!HAS_GUC(dev_priv) || !intel_uc_is_using_guc()) + if (!HAS_GUC(i915) || !intel_uc_is_using_guc()) guc_log_level = GUC_LOG_LEVEL_DISABLED; else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) @@ -86,7 +86,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) /** * sanitize_options_early - sanitize uC related modparam options - * @dev_priv: device private + * @i915: device private * * In case of "enable_guc" option this function will attempt to modify * it only if it was initially set to "auto(-1)". Default value for this @@ -101,14 +101,14 @@ static int __get_default_guc_log_level(struct drm_i915_private *dev_priv) * unless GuC is enabled on given platform and the driver is compiled with * debug config when this modparam will default to "enable(1..4)". */ -static void sanitize_options_early(struct drm_i915_private *dev_priv) +static void sanitize_options_early(struct drm_i915_private *i915) { - struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; - struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; + struct intel_uc_fw *guc_fw = &i915->guc.fw; + struct intel_uc_fw *huc_fw = &i915->huc.fw; /* A negative value means "use platform default" */ if (i915_modparams.enable_guc < 0) - i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv); + i915_modparams.enable_guc = __get_platform_enable_guc(i915); DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", i915_modparams.enable_guc, @@ -119,28 +119,28 @@ static void sanitize_options_early(struct drm_i915_private *dev_priv) if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, - !HAS_GUC(dev_priv) ? "no GuC hardware" : - "no GuC firmware"); + !HAS_GUC(i915) ? "no GuC hardware" : + "no GuC firmware"); } /* Verify HuC firmware availability */ if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, - !HAS_HUC(dev_priv) ? "no HuC hardware" : - "no HuC firmware"); + !HAS_HUC(i915) ? "no HuC hardware" : + "no HuC firmware"); } /* A negative value means "use platform/config default" */ if (i915_modparams.guc_log_level < 0) i915_modparams.guc_log_level = - __get_default_guc_log_level(dev_priv); + __get_default_guc_log_level(i915); if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "guc_log_level", i915_modparams.guc_log_level, - !HAS_GUC(dev_priv) ? "no GuC hardware" : - "GuC not enabled"); + !HAS_GUC(i915) ? 
"no GuC hardware" : + "GuC not enabled"); i915_modparams.guc_log_level = 0; } @@ -171,44 +171,30 @@ void intel_uc_init_early(struct drm_i915_private *i915) intel_huc_init_early(huc); sanitize_options_early(i915); - - if (USES_GUC(i915)) - intel_uc_fw_fetch(i915, &guc->fw); - - if (USES_HUC(i915)) - intel_uc_fw_fetch(i915, &huc->fw); } void intel_uc_cleanup_early(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - if (USES_HUC(i915)) - intel_uc_fw_fini(&huc->fw); - - if (USES_GUC(i915)) - intel_uc_fw_fini(&guc->fw); guc_free_load_err_log(guc); } /** * intel_uc_init_mmio - setup uC MMIO access - * - * @dev_priv: device private + * @i915: device private * * Setup minimal state necessary for MMIO accesses later in the * initialization sequence. */ -void intel_uc_init_mmio(struct drm_i915_private *dev_priv) +void intel_uc_init_mmio(struct drm_i915_private *i915) { - intel_guc_init_send_regs(&dev_priv->guc); + intel_guc_init_send_regs(&i915->guc); } static void guc_capture_load_err_log(struct intel_guc *guc) { - if (!guc->log.vma || !i915_modparams.guc_log_level) + if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) return; if (!guc->load_err_log) @@ -225,11 +211,11 @@ static void guc_free_load_err_log(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_i915(guc); - gen9_enable_guc_interrupts(dev_priv); + gen9_enable_guc_interrupts(i915); - if (HAS_GUC_CT(dev_priv)) + if (HAS_GUC_CT(i915)) return intel_guc_ct_enable(&guc->ct); guc->send = intel_guc_send_mmio; @@ -239,60 +225,73 @@ static int guc_enable_communication(struct intel_guc *guc) static void guc_disable_communication(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_i915(guc); - if (HAS_GUC_CT(dev_priv)) + if (HAS_GUC_CT(i915)) intel_guc_ct_disable(&guc->ct); - gen9_disable_guc_interrupts(dev_priv); + gen9_disable_guc_interrupts(i915); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; } -int intel_uc_init_misc(struct drm_i915_private *dev_priv) +int intel_uc_init_misc(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; int ret; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; - intel_guc_init_ggtt_pin_bias(guc); - - ret = intel_guc_init_wq(guc); + ret = intel_guc_init_misc(guc); if (ret) return ret; + if (USES_HUC(i915)) { + ret = intel_huc_init_misc(huc); + if (ret) + goto err_guc; + } + return 0; + +err_guc: + intel_guc_fini_misc(guc); + return ret; } -void intel_uc_fini_misc(struct drm_i915_private *dev_priv) +void intel_uc_fini_misc(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return; - intel_guc_fini_wq(guc); + if (USES_HUC(i915)) + intel_huc_fini_misc(huc); + + intel_guc_fini_misc(guc); } -int intel_uc_init(struct drm_i915_private *dev_priv) +int intel_uc_init(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = &i915->guc; int ret; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; - if (!HAS_GUC(dev_priv)) + if (!HAS_GUC(i915)) return -ENODEV; ret = intel_guc_init(guc); if (ret) return ret; - if 
(USES_GUC_SUBMISSION(dev_priv)) { + if (USES_GUC_SUBMISSION(i915)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -307,16 +306,16 @@ int intel_uc_init(struct drm_i915_private *dev_priv) return 0; } -void intel_uc_fini(struct drm_i915_private *dev_priv) +void intel_uc_fini(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = &i915->guc; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GUC(i915)); - if (USES_GUC_SUBMISSION(dev_priv)) + if (USES_GUC_SUBMISSION(i915)) intel_guc_submission_fini(guc); intel_guc_fini(guc); @@ -340,22 +339,22 @@ void intel_uc_sanitize(struct drm_i915_private *i915) __intel_uc_reset_hw(i915); } -int intel_uc_init_hw(struct drm_i915_private *dev_priv) +int intel_uc_init_hw(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; - struct intel_huc *huc = &dev_priv->huc; + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; int ret, attempts; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GUC(i915)); - gen9_reset_guc_interrupts(dev_priv); + gen9_reset_guc_interrupts(i915); /* WaEnableuKernelHeaderValidFix:skl */ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN9(dev_priv)) + if (IS_GEN9(i915)) attempts = 3; else attempts = 1; @@ -365,11 +364,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) * Always reset the GuC just before (re)loading, so * that the state and timing are fairly predictable */ - ret = __intel_uc_reset_hw(dev_priv); + ret = __intel_uc_reset_hw(i915); if (ret) goto err_out; - if (USES_HUC(dev_priv)) { + if (USES_HUC(i915)) { ret = intel_huc_fw_upload(huc); if (ret) goto err_out; @@ -392,24 +391,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) if (ret) goto err_log_capture; - if (USES_HUC(dev_priv)) { + if (USES_HUC(i915)) { ret = intel_huc_auth(huc); if (ret) goto err_communication; } - if (USES_GUC_SUBMISSION(dev_priv)) { + if (USES_GUC_SUBMISSION(i915)) { ret = intel_guc_submission_enable(guc); if (ret) goto err_communication; } - dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n", + dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", guc->fw.major_ver_found, guc->fw.minor_ver_found); - dev_info(dev_priv->drm.dev, "GuC submission %s\n", - enableddisabled(USES_GUC_SUBMISSION(dev_priv))); - dev_info(dev_priv->drm.dev, "HuC %s\n", - enableddisabled(USES_HUC(dev_priv))); + dev_info(i915->drm.dev, "GuC submission %s\n", + enableddisabled(USES_GUC_SUBMISSION(i915))); + dev_info(i915->drm.dev, "HuC %s\n", + enableddisabled(USES_HUC(i915))); return 0; @@ -428,20 +427,20 @@ err_out: if (GEM_WARN_ON(ret == -EIO)) ret = -EINVAL; - dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret); + dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); return ret; } -void intel_uc_fini_hw(struct drm_i915_private *dev_priv) +void intel_uc_fini_hw(struct drm_i915_private *i915) { - struct intel_guc *guc = &dev_priv->guc; + struct intel_guc *guc = &i915->guc; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GUC(i915)); - if (USES_GUC_SUBMISSION(dev_priv)) + if (USES_GUC_SUBMISSION(i915)) intel_guc_submission_disable(guc); guc_disable_communication(guc); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 448293eb638d..b892ca8396e8 
100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1702,15 +1702,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; const u32 base = engine->mmio_base; - const i915_reg_t mode = RING_MI_MODE(base); - - I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); - if (__intel_wait_for_register_fw(dev_priv, - mode, MODE_IDLE, MODE_IDLE, - 500, 0, - NULL)) - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", - engine->name); + + if (intel_engine_stop_cs(engine)) + DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ @@ -2099,21 +2093,25 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv, { struct intel_engine_cs *engine; unsigned int tmp; + int ret; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) - if (gen8_reset_engine_start(engine)) + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + if (gen8_reset_engine_start(engine)) { + ret = -EIO; goto not_ready; + } + } if (INTEL_GEN(dev_priv) >= 11) - return gen11_reset_engines(dev_priv, engine_mask); + ret = gen11_reset_engines(dev_priv, engine_mask); else - return gen6_reset_engines(dev_priv, engine_mask); + ret = gen6_reset_engines(dev_priv, engine_mask); not_ready: for_each_engine_masked(engine, dev_priv, engine_mask, tmp) gen8_reset_engine_cancel(engine); - return -EIO; + return ret; } typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); @@ -2176,6 +2174,8 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) * Thus assume it is best to stop engines on all gens * where we have a gpu reset. * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * * WaMediaResetMainRingCleanup:ctg,elk (presumably) * * FIXME: Wa for more modern gens needs to be validated diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 47478d609630..2fbe93178fb2 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -67,21 +67,21 @@ struct intel_uncore_funcs { void (*force_wake_put)(struct drm_i915_private *dev_priv, enum forcewake_domains domains); - uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, - i915_reg_t r, bool trace); - uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, - i915_reg_t r, bool trace); - uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, - i915_reg_t r, bool trace); - uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, - i915_reg_t r, bool trace); + u8 (*mmio_readb)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + u16 (*mmio_readw)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + u32 (*mmio_readl)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); + u64 (*mmio_readq)(struct drm_i915_private *dev_priv, + i915_reg_t r, bool trace); void (*mmio_writeb)(struct drm_i915_private *dev_priv, - i915_reg_t r, uint8_t val, bool trace); + i915_reg_t r, u8 val, bool trace); void (*mmio_writew)(struct drm_i915_private *dev_priv, - i915_reg_t r, uint16_t val, bool trace); + i915_reg_t r, u16 val, bool trace); void (*mmio_writel)(struct drm_i915_private *dev_priv, - i915_reg_t r, uint32_t val, bool trace); + i915_reg_t r, u32 val, bool trace); }; struct intel_forcewake_range { diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 458468237b5f..bba98cf83cbd 100644 --- 
a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -318,6 +318,12 @@ enum vbt_gmbus_ddi { DDC_BUS_DDI_C, DDC_BUS_DDI_D, DDC_BUS_DDI_F, + ICL_DDC_BUS_DDI_A = 0x1, + ICL_DDC_BUS_DDI_B, + ICL_DDC_BUS_PORT_1 = 0x4, + ICL_DDC_BUS_PORT_2, + ICL_DDC_BUS_PORT_3, + ICL_DDC_BUS_PORT_4, }; #define VBT_DP_MAX_LINK_RATE_HBR3 0 @@ -414,7 +420,9 @@ struct child_device_config { u16 extended_type; u8 dvo_function; u8 dp_usb_type_c:1; /* 195 */ - u8 flags2_reserved:7; /* 195 */ + u8 tbt:1; /* 209 */ + u8 flags2_reserved:2; /* 195 */ + u8 dp_port_trace_length:4; /* 209 */ u8 dp_gpio_index; /* 195 */ u16 dp_gpio_pin_num; /* 195 */ u8 dp_iboost_level:4; /* 196 */ @@ -448,7 +456,7 @@ struct bdb_general_definitions { * number = (block_size - sizeof(bdb_general_definitions))/ * defs->child_dev_size; */ - uint8_t devices[0]; + u8 devices[0]; } __packed; /* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ @@ -635,7 +643,7 @@ struct bdb_sdvo_lvds_options { #define BDB_DRIVER_FEATURE_NO_LVDS 0 #define BDB_DRIVER_FEATURE_INT_LVDS 1 #define BDB_DRIVER_FEATURE_SDVO_LVDS 2 -#define BDB_DRIVER_FEATURE_EDP 3 +#define BDB_DRIVER_FEATURE_INT_SDVO_LVDS 3 struct bdb_driver_features { u8 boot_dev_algorithm:1; diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 2df3538ceba5..4bcdeaf8d98f 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -48,29 +48,58 @@ * - Public functions to init or apply the given workaround type. */ -static int wa_add(struct drm_i915_private *dev_priv, - i915_reg_t addr, - const u32 mask, const u32 val) +static void wa_add(struct drm_i915_private *i915, + i915_reg_t reg, const u32 mask, const u32 val) { - const unsigned int idx = dev_priv->workarounds.count; + struct i915_workarounds *wa = &i915->workarounds; + unsigned int start = 0, end = wa->count; + unsigned int addr = i915_mmio_reg_offset(reg); + struct i915_wa_reg *r; + + while (start < end) { + unsigned int mid = start + (end - start) / 2; + + if (wa->reg[mid].addr < addr) { + start = mid + 1; + } else if (wa->reg[mid].addr > addr) { + end = mid; + } else { + r = &wa->reg[mid]; + + if ((mask & ~r->mask) == 0) { + DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n", + addr, r->mask, r->value); + + r->value &= ~mask; + } + + r->value |= val; + r->mask |= mask; + return; + } + } - if (WARN_ON(idx >= I915_MAX_WA_REGS)) - return -ENOSPC; + if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) { + DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n", + addr, mask, val); + return; + } - dev_priv->workarounds.reg[idx].addr = addr; - dev_priv->workarounds.reg[idx].value = val; - dev_priv->workarounds.reg[idx].mask = mask; + r = &wa->reg[wa->count++]; + r->addr = addr; + r->value = val; + r->mask = mask; - dev_priv->workarounds.count++; + while (r-- > wa->reg) { + GEM_BUG_ON(r[0].addr == r[1].addr); + if (r[1].addr > r[0].addr) + break; - return 0; + swap(r[1], r[0]); + } } -#define WA_REG(addr, mask, val) do { \ - const int r = wa_add(dev_priv, (addr), (mask), (val)); \ - if (r) \ - return r; \ - } while (0) +#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val)) #define WA_SET_BIT_MASKED(addr, mask) \ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask)) @@ -463,6 +492,22 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv) */ WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT); + /* Wa_2006611047:icl (pre-prod) + * Formerly known 
as WaDisableImprovedTdlClkGating + */ + if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, + GEN11_TDL_CLOCK_GATING_FIX_DISABLE); + + /* WaEnableStateCacheRedirectToCS:icl */ + WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1, + GEN11_STATE_CACHE_REDIRECT_TO_CS); + + /* Wa_2006665173:icl (pre-prod) */ + if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); + return 0; } @@ -521,7 +566,7 @@ int intel_ctx_workarounds_emit(struct i915_request *rq) *cs++ = MI_LOAD_REGISTER_IMM(w->count); for (i = 0; i < w->count; i++) { - *cs++ = i915_mmio_reg_offset(w->reg[i].addr); + *cs++ = w->reg[i].addr; *cs++ = w->reg[i].value; } *cs++ = MI_NOOP; @@ -647,6 +692,19 @@ static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + + /* WaKBLVECSSemaphoreWaitPoll:kbl */ + if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { + struct intel_engine_cs *engine; + unsigned int tmp; + + for_each_engine(engine, dev_priv, tmp) { + if (engine->id == RCS) + continue; + + I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); + } + } } static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) @@ -672,8 +730,74 @@ static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); } +static void wa_init_mcr(struct drm_i915_private *dev_priv) +{ + const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); + u32 mcr; + u32 mcr_slice_subslice_mask; + + /* + * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl + * L3Banks could be fused off in single slice scenario. If that is + * the case, we might need to program MCR select to a valid L3Bank + * by default, to make sure we correctly read certain registers + * later on (in the range 0xB100 - 0xB3FF). + * This might be incompatible with + * WaProgramMgsrForCorrectSliceSpecificMmioReads. + * Fortunately, this should not happen in production hardware, so + * we only assert that this is the case (instead of implementing + * something more complex that requires checking the range of every + * MMIO read). + */ + if (INTEL_GEN(dev_priv) >= 10 && + is_power_of_2(sseu->slice_mask)) { + /* + * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches + * enabled subslice, no need to redirect MCR packet + */ + u32 slice = fls(sseu->slice_mask); + u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3); + u8 ss_mask = sseu->subslice_mask[slice]; + + u8 enabled_mask = (ss_mask | ss_mask >> + GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK; + u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK; + + /* + * Production silicon should have matched L3Bank and + * subslice enabled + */ + WARN_ON((enabled_mask & disabled_mask) != enabled_mask); + } + + mcr = I915_READ(GEN8_MCR_SELECTOR); + + if (INTEL_GEN(dev_priv) >= 11) + mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | + GEN11_MCR_SUBSLICE_MASK; + else + mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | + GEN8_MCR_SUBSLICE_MASK; + /* + * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl + * Before any MMIO read into slice/subslice specific registers, MCR + * packet control register needs to be programmed to point to any + * enabled s/ss pair. Otherwise, incorrect values will be returned. 
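As a rough illustration of the selection this comment describes (the comment continues below), composing an MCR value that points at an enabled slice/subslice pair could look like the sketch here. The helper name is hypothetical; intel_calculate_mcr_s_ss_select(), used a few lines further down, is the real in-tree function, and the GEN8_MCR_* macros are the existing register fields.

/* Hypothetical sketch only: pick the highest enabled slice/subslice pair
 * and encode it for GEN8_MCR_SELECTOR (gen11 would use the GEN11_MCR_*
 * fields instead, as the surrounding code shows). */
static u32 example_mcr_s_ss_select(const struct sseu_dev_info *sseu)
{
	u32 slice = fls(sseu->slice_mask) - 1;
	u32 subslice = fls(sseu->subslice_mask[slice]) - 1;

	return GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
}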
+ * This means each subsequent MMIO read will be forwarded to a + * specific s/ss combination, but this is OK since these registers + * are consistent across s/ss in almost all cases. On the rare + * occasions, such as INSTDONE, where this value is dependent + * on s/ss combo, the read should be done with read_subslice_reg. + */ + mcr &= ~mcr_slice_subslice_mask; + mcr |= intel_calculate_mcr_s_ss_select(dev_priv); + I915_WRITE(GEN8_MCR_SELECTOR, mcr); +} + static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) { + wa_init_mcr(dev_priv); + /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) I915_WRITE(GAMT_CHKN_BIT_REG, @@ -692,6 +816,8 @@ static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) { + wa_init_mcr(dev_priv); + /* This is not a Wa. Enable for better image quality */ I915_WRITE(_3D_CHICKEN3, _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); @@ -772,6 +898,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) PMFLUSHDONE_LNICRSDROP | PMFLUSH_GAPL3UNBLOCK | PMFLUSHDONE_LNEBLK); + + /* Wa_1406463099:icl + * Formerly known as WaGamTlbPendError + */ + I915_WRITE(GAMT_CHKN_BIT_REG, + I915_READ(GAMT_CHKN_BIT_REG) | + GAMT_CHKN_DISABLE_L3_COH_PIPE); } void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 91c72911be3c..7efb326badcd 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) static int igt_check_page_sizes(struct i915_vma *vma) { - struct drm_i915_private *i915 = to_i915(vma->obj->base.dev); + struct drm_i915_private *i915 = vma->vm->i915; unsigned int supported = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj = vma->obj; int err = 0; @@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma) static int igt_mock_exhaust_device_supported_pages(void *arg) { struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->base.i915; + struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) goto out_put; } - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; @@ -458,7 +458,7 @@ out_device: static int igt_mock_ppgtt_misaligned_dma(void *arg) { struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->base.i915; + struct drm_i915_private *i915 = ppgtt->vm.i915; unsigned long supported = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj; int bit; @@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) /* Force the page size for this object */ obj->mm.page_sizes.sg = page_size; - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_unpin; @@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -591,12 +592,13 @@ static 
void close_object_list(struct list_head *objects, list_for_each_entry_safe(obj, on, objects, st_link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (!IS_ERR(vma)) i915_vma_close(vma); list_del(&obj->st_link); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -604,8 +606,8 @@ static void close_object_list(struct list_head *objects, static int igt_mock_ppgtt_huge_fill(void *arg) { struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->base.i915; - unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT; + struct drm_i915_private *i915 = ppgtt->vm.i915; + unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT; unsigned long page_num; bool single = false; LIST_HEAD(objects); @@ -641,7 +643,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); break; @@ -725,7 +727,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) static int igt_mock_ppgtt_64K(void *arg) { struct i915_hw_ppgtt *ppgtt = arg; - struct drm_i915_private *i915 = ppgtt->base.i915; + struct drm_i915_private *i915 = ppgtt->vm.i915; struct drm_i915_gem_object *obj; const struct object_info { unsigned int size; @@ -819,7 +821,7 @@ static int igt_mock_ppgtt_64K(void *arg) */ obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M; - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_object_unpin; @@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg) i915_vma_close(vma); i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -887,8 +890,8 @@ out_object_put: static struct i915_vma * gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) { - struct drm_i915_private *i915 = to_i915(vma->obj->base.dev); - const int gen = INTEL_GEN(vma->vm->i915); + struct drm_i915_private *i915 = vma->vm->i915; + const int gen = INTEL_GEN(i915); unsigned int count = vma->size >> PAGE_SHIFT; struct drm_i915_gem_object *obj; struct i915_vma *batch; @@ -919,12 +922,12 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) *cmd++ = val; } else if (gen >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? 1 << 22 : 0); + (gen < 6 ? 
MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = val; } else { - *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cmd++ = offset; *cmd++ = val; } @@ -985,7 +988,10 @@ static int gpu_write(struct i915_vma *vma, goto err_request; } - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto err_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); @@ -996,14 +1002,12 @@ static int gpu_write(struct i915_vma *vma, if (err) goto err_request; - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + i915_request_skip(rq, err); err_request: - __i915_request_add(rq, err == 0); + i915_request_add(rq); return err; } @@ -1047,7 +1051,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx, u32 dword, u32 val) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; @@ -1100,7 +1105,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; static struct intel_engine_cs *engines[I915_NUM_ENGINES]; struct intel_engine_cs *engine; I915_RND_STATE(prng); @@ -1262,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } } @@ -1323,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -1391,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg) } i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); i915_gem_object_put(obj); } @@ -1439,7 +1448,7 @@ static int igt_ppgtt_pin_update(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; @@ -1493,7 +1502,7 @@ static int igt_ppgtt_pin_update(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &ppgtt->base, NULL); + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out_put; @@ -1531,7 +1540,8 @@ static int igt_tmpfs_fallback(void *arg) struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; u32 *vaddr; @@ -1587,7 +1597,8 @@ static int igt_shrink_thp(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = ctx->ppgtt ? 
&ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm; struct drm_i915_gem_object *obj; struct i915_vma *vma; unsigned int flags = PIN_USER; @@ -1690,20 +1701,20 @@ int i915_gem_huge_page_mock_selftests(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV), "mock"); + ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; } - if (!i915_vm_is_48bit(&ppgtt->base)) { + if (!i915_vm_is_48bit(&ppgtt->vm)) { pr_err("failed to create 48b PPGTT\n"); err = -EINVAL; goto out_close; } /* If we were ever hit this then it's time to mock the 64K scratch */ - if (!i915_vm_has_scratch_64K(&ppgtt->base)) { + if (!i915_vm_has_scratch_64K(&ppgtt->vm)) { pr_err("PPGTT missing 64K scratch page\n"); err = -EINVAL; goto out_close; @@ -1712,7 +1723,7 @@ int i915_gem_huge_page_mock_selftests(void) err = i915_subtests(tests, ppgtt); out_close: - i915_ppgtt_close(&ppgtt->base); + i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: @@ -1720,7 +1731,7 @@ out_unlock: i915_modparams.enable_ppgtt = saved_ppgtt; - drm_dev_unref(&dev_priv->drm); + drm_dev_put(&dev_priv->drm); return err; } @@ -1744,6 +1755,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return 0; } + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return 0; + file = mock_file(dev_priv); if (IS_ERR(file)) return PTR_ERR(file); @@ -1758,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) } if (ctx->ppgtt) - ctx->ppgtt->base.scrub_64K = true; + ctx->ppgtt->vm.scrub_64K = true; err = i915_subtests(tests, ctx); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index 340a98c0c804..3a095c37c120 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -42,11 +42,21 @@ static int cpu_set(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); - if (needs_clflush & CLFLUSH_BEFORE) + + if (needs_clflush & CLFLUSH_BEFORE) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + map[offset_in_page(offset) / sizeof(*map)] = v; - if (needs_clflush & CLFLUSH_AFTER) + + if (needs_clflush & CLFLUSH_AFTER) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + kunmap_atomic(map); i915_gem_obj_finish_shmem_access(obj); @@ -68,8 +78,13 @@ static int cpu_get(struct drm_i915_gem_object *obj, page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); - if (needs_clflush & CLFLUSH_BEFORE) + + if (needs_clflush & CLFLUSH_BEFORE) { + mb(); clflush(map+offset_in_page(offset) / sizeof(*map)); + mb(); + } + *v = map[offset_in_page(offset) / sizeof(*map)]; kunmap_atomic(map); @@ -199,7 +214,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { - __i915_request_add(rq, false); + i915_request_add(rq); i915_vma_unpin(vma); return PTR_ERR(cs); } @@ -210,28 +225,24 @@ static int gpu_set(struct drm_i915_gem_object *obj, *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; } else if (INTEL_GEN(i915) >= 4) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; 
} else { - *cs++ = MI_STORE_DWORD_IMM | 1 << 22; + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cs++ = i915_ggtt_offset(vma) + offset; *cs++ = v; *cs++ = MI_NOOP; } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); i915_vma_unpin(vma); - reservation_object_lock(obj->resv, NULL); - reservation_object_add_excl_fence(obj->resv, &rq->fence); - reservation_object_unlock(obj->resv); - - __i915_request_add(rq, true); + i915_request_add(rq); - return 0; + return err; } static bool always_valid(struct drm_i915_private *i915) @@ -239,8 +250,16 @@ static bool always_valid(struct drm_i915_private *i915) return true; } +static bool needs_fence_registers(struct drm_i915_private *i915) +{ + return !i915_terminally_wedged(&i915->gpu_error); +} + static bool needs_mi_store_dword(struct drm_i915_private *i915) { + if (i915_terminally_wedged(&i915->gpu_error)) + return false; + return intel_engine_can_store_dword(i915->engine[RCS]); } @@ -251,7 +270,7 @@ static const struct igt_coherency_mode { bool (*valid)(struct drm_i915_private *i915); } igt_coherency_mode[] = { { "cpu", cpu_set, cpu_get, always_valid }, - { "gtt", gtt_set, gtt_get, always_valid }, + { "gtt", gtt_set, gtt_get, needs_fence_registers }, { "wc", wc_set, wc_get, always_valid }, { "gpu", gpu_set, NULL, needs_mi_store_dword }, { }, diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index ddb03f009232..1c92560d35da 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -23,9 +23,11 @@ */ #include "../i915_selftest.h" +#include "i915_random.h" #include "igt_flush_test.h" #include "mock_drm.h" +#include "mock_gem_device.h" #include "huge_gem_object.h" #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32)) @@ -62,12 +64,12 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) *cmd++ = value; } else if (gen >= 4) { *cmd++ = MI_STORE_DWORD_IMM_GEN4 | - (gen < 6 ? 1 << 22 : 0); + (gen < 6 ? MI_USE_GGTT : 0); *cmd++ = 0; *cmd++ = offset; *cmd++ = value; } else { - *cmd++ = MI_STORE_DWORD_IMM | 1 << 22; + *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *cmd++ = offset; *cmd++ = value; } @@ -114,7 +116,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; struct i915_request *rq; struct i915_vma *vma; struct i915_vma *batch; @@ -169,24 +171,28 @@ static int gpu_fill(struct drm_i915_gem_object *obj, if (err) goto err_request; - i915_vma_move_to_active(batch, rq, 0); + err = i915_vma_move_to_active(batch, rq, 0); + if (err) + goto skip_request; + + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto skip_request; + i915_gem_object_set_active_reference(batch->obj); i915_vma_unpin(batch); i915_vma_close(batch); - i915_vma_move_to_active(vma, rq, 0); i915_vma_unpin(vma); - reservation_object_lock(obj->resv, NULL); - reservation_object_add_excl_fence(obj->resv, &rq->fence); - reservation_object_unlock(obj->resv); - - __i915_request_add(rq, true); + i915_request_add(rq); return 0; +skip_request: + i915_request_skip(rq, err); err_request: - __i915_request_add(rq, false); + i915_request_add(rq); err_batch: i915_vma_unpin(batch); err_vma: @@ -247,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) } for (; m < DW_PER_PAGE; m++) { - if (map[m] != 0xdeadbeef) { + if (map[m] != STACK_MAGIC) { pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], 0xdeadbeef); + n, m, map[m], STACK_MAGIC); err = -EINVAL; goto out_unmap; } @@ -289,7 +295,7 @@ create_test_object(struct i915_gem_context *ctx, { struct drm_i915_gem_object *obj; struct i915_address_space *vm = - ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; + ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm; u64 size; int err; @@ -305,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx, if (err) return ERR_PTR(err); - err = cpu_fill(obj, 0xdeadbeef); + err = cpu_fill(obj, STACK_MAGIC); if (err) { pr_err("Failed to fill object with cpu, err=%d\n", err); @@ -335,11 +341,15 @@ static int igt_ctx_exec(void *arg) bool first_shared_gtt = true; int err = -ENODEV; - /* Create a few different contexts (with different mm) and write + /* + * Create a few different contexts (with different mm) and write * through each ctx/mm using the GPU making sure those writes end * up in the expected pages of our obj. */ + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); @@ -366,6 +376,9 @@ static int igt_ctx_exec(void *arg) } for_each_engine(engine, i915, id) { + if (!engine->context_size) + continue; /* No logical context support in HW */ + if (!intel_engine_can_store_dword(engine)) continue; @@ -420,6 +433,237 @@ out_unlock: return err; } +static int igt_ctx_readonly(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj = NULL; + struct drm_file *file; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + struct i915_gem_context *ctx; + struct i915_hw_ppgtt *ppgtt; + unsigned long ndwords, dw; + int err = -ENODEV; + + /* + * Create a few read-only objects (with the occasional writable object) + * and try to write into these object checking that the GPU discards + * any write to a read-only object. 
+ */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = i915_gem_create_context(i915, file->driver_priv); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt; + if (!ppgtt || !ppgtt->vm.has_read_only) { + err = 0; + goto out_unlock; + } + + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct intel_engine_cs *engine; + unsigned int id; + + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + + if (prandom_u32_state(&prng) & 1) + i915_gem_object_set_readonly(obj); + } + + intel_runtime_pm_get(i915); + err = gpu_fill(obj, ctx, engine, dw); + intel_runtime_pm_put(i915); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + ndwords++; + } + } + pr_info("Submitted %lu dwords (across %u engines)\n", + ndwords, INTEL_INFO(i915)->num_rings); + + dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + unsigned int num_writes; + + num_writes = rem; + if (i915_gem_object_is_readonly(obj)) + num_writes = 0; + + err = cpu_check(obj, num_writes); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + return err; +} + +static __maybe_unused const char * +__engine_name(struct drm_i915_private *i915, unsigned int engines) +{ + struct intel_engine_cs *engine; + unsigned int tmp; + + if (engines == ALL_ENGINES) + return "all"; + + for_each_engine_masked(engine, i915, engines, tmp) + return engine->name; + + return "none"; +} + +static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, + struct i915_gem_context *ctx, + unsigned int engines) +{ + struct intel_engine_cs *engine; + unsigned int tmp; + int err; + + GEM_TRACE("Testing %s\n", __engine_name(i915, engines)); + for_each_engine_masked(engine, i915, engines, tmp) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_add(rq); + } + + err = i915_gem_switch_to_kernel_context(i915); + if (err) + return err; + + for_each_engine_masked(engine, i915, engines, tmp) { + if (!engine_has_kernel_context_barrier(engine)) { + pr_err("kernel context not last on engine %s!\n", + engine->name); + return -EINVAL; + } + } + + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + + GEM_BUG_ON(i915->gt.active_requests); + for_each_engine_masked(engine, i915, engines, tmp) { + if (engine->last_retired_context->gem_context != i915->kernel_context) { + pr_err("engine %s not idling in kernel context!\n", + engine->name); + return -EINVAL; + } + } + + err = i915_gem_switch_to_kernel_context(i915); + if (err) + return err; + + if (i915->gt.active_requests) { + pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n", + i915->gt.active_requests); + return -EINVAL; + } + + for_each_engine_masked(engine, i915, engines, 
tmp) { + if (!intel_engine_has_kernel_context(engine)) { + pr_err("kernel context not last on engine %s!\n", + engine->name); + return -EINVAL; + } + } + + return 0; +} + +static int igt_switch_to_kernel_context(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + int err; + + /* + * A core premise of switching to the kernel context is that + * if an engine is already idling in the kernel context, we + * do not emit another request and wake it up. The other being + * that we do indeed end up idling in the kernel context. + */ + + mutex_lock(&i915->drm.struct_mutex); + ctx = kernel_context(i915); + if (IS_ERR(ctx)) { + mutex_unlock(&i915->drm.struct_mutex); + return PTR_ERR(ctx); + } + + /* First check idling each individual engine */ + for_each_engine(engine, i915, id) { + err = __igt_switch_to_kernel_context(i915, ctx, BIT(id)); + if (err) + goto out_unlock; + } + + /* Now en masse */ + err = __igt_switch_to_kernel_context(i915, ctx, ALL_ENGINES); + if (err) + goto out_unlock; + +out_unlock: + GEM_TRACE_DUMP_ON(err); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + kernel_context_close(ctx); + return err; +} + static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; @@ -432,7 +676,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) continue; @@ -447,14 +691,37 @@ static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915) i915_gem_fini_aliasing_ppgtt(i915); } +int i915_gem_context_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_switch_to_kernel_context), + }; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + err = i915_subtests(tests, i915); + + drm_dev_put(&i915->drm); + return err; +} + int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) { static const struct i915_subtest tests[] = { + SUBTEST(igt_switch_to_kernel_context), SUBTEST(igt_ctx_exec), + SUBTEST(igt_ctx_readonly), }; bool fake_alias = false; int err; + if (i915_terminally_wedged(&dev_priv->gpu_error)) + return 0; + /* Install a fake aliasing gtt for exercise */ if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index 89dc25a5a53b..a7055b12e53c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -389,7 +389,7 @@ int i915_gem_dmabuf_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index ab9d7bee0aae..128ad1cf0647 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915) u64 size; for (size = 0; - size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; + size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; size += I915_GTT_PAGE_SIZE) { struct i915_vma 
*vma; @@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915) return -EINVAL; } - if (list_empty(&i915->ggtt.base.inactive_list)) { + if (list_empty(&i915->ggtt.vm.inactive_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; } @@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915) { struct i915_vma *vma; - list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link) + list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link) i915_vma_unpin(vma); } @@ -103,7 +103,7 @@ static int igt_evict_something(void *arg) goto cleanup; /* Everything is pinned, nothing should happen */ - err = i915_gem_evict_something(&ggtt->base, + err = i915_gem_evict_something(&ggtt->vm, I915_GTT_PAGE_SIZE, 0, 0, 0, U64_MAX, 0); @@ -116,7 +116,7 @@ static int igt_evict_something(void *arg) unpin_ggtt(i915); /* Everything is unpinned, we should be able to evict something */ - err = i915_gem_evict_something(&ggtt->base, + err = i915_gem_evict_something(&ggtt->vm, I915_GTT_PAGE_SIZE, 0, 0, 0, U64_MAX, 0); @@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg) goto cleanup; /* Everything is pinned, nothing should happen */ - err = i915_gem_evict_for_node(&ggtt->base, &target, 0); + err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); if (err != -ENOSPC) { pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n", err); @@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg) unpin_ggtt(i915); /* Everything is unpinned, we should be able to evict the node */ - err = i915_gem_evict_for_node(&ggtt->base, &target, 0); + err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); if (err) { pr_err("i915_gem_evict_for_node returned err=%d\n", err); @@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg) * i915_gtt_color_adjust throughout our driver, so using a mock color * adjust will work just fine for our purposes. 
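For context, a drm_mm colour-adjust hook has the shape sketched below and typically shrinks a candidate hole when a neighbouring node carries a different colour (i915 uses the colour to carry the cache level). This is only an illustrative sketch of the idea, not the mock_color_adjust() the test installs.

/* Illustrative only: roughly the behaviour of a colour-aware hole
 * adjustment in the spirit of i915_gtt_color_adjust(). */
static void example_color_adjust(const struct drm_mm_node *node,
				 unsigned long color,
				 u64 *start, u64 *end)
{
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;	/* leave a guard page */
}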
*/ - ggtt->base.mm.color_adjust = mock_color_adjust; + ggtt->vm.mm.color_adjust = mock_color_adjust; obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { @@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg) i915_vma_unpin(vma); /* Remove just the second vma */ - err = i915_gem_evict_for_node(&ggtt->base, &target, 0); + err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); if (err) { pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err); goto cleanup; @@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg) */ target.color = I915_CACHE_L3_LLC; - err = i915_gem_evict_for_node(&ggtt->base, &target, 0); + err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); if (!err) { pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err); err = -EINVAL; @@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg) cleanup: unpin_ggtt(i915); cleanup_objects(i915); - ggtt->base.mm.color_adjust = NULL; + ggtt->vm.mm.color_adjust = NULL; return err; } @@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg) goto cleanup; /* Everything is pinned, nothing should happen */ - err = i915_gem_evict_vm(&ggtt->base); + err = i915_gem_evict_vm(&ggtt->vm); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", err); @@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg) unpin_ggtt(i915); - err = i915_gem_evict_vm(&ggtt->base); + err = i915_gem_evict_vm(&ggtt->vm); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", err); @@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg) /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); - err = i915_gem_gtt_insert(&i915->ggtt.base, &hole, + err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole, PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.base.total, + 0, i915->ggtt.vm.total, PIN_NOEVICT); if (err) goto out_locked; @@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg) goto out_locked; } - if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node, + if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node, 1ul << 20, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.base.total, + 0, i915->ggtt.vm.total, PIN_NOEVICT)) { kfree(r); break; @@ -490,7 +490,7 @@ int i915_gem_evict_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -500,5 +500,8 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index f7dc926f4ef1..8e2e269db97e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -32,6 +32,20 @@ #include "mock_drm.h" #include "mock_gem_device.h" +static void cleanup_freed_objects(struct drm_i915_private *i915) +{ + /* + * As we may hold onto the struct_mutex for inordinate lengths of + * time, the NMI khungtaskd detector may fire for the free objects + * worker. 
+ */ + mutex_unlock(&i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(i915); + + mutex_lock(&i915->drm.struct_mutex); +} + static void fake_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -134,31 +148,34 @@ static int igt_ppgtt_alloc(void *arg) { struct drm_i915_private *dev_priv = arg; struct i915_hw_ppgtt *ppgtt; - u64 size, last; - int err; + u64 size, last, limit; + int err = 0; /* Allocate a ppggt and try to fill the entire range */ if (!USES_PPGTT(dev_priv)) return 0; - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); - if (!ppgtt) - return -ENOMEM; + ppgtt = __hw_ppgtt_create(dev_priv); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); - mutex_lock(&dev_priv->drm.struct_mutex); - err = __hw_ppgtt_init(ppgtt, dev_priv); - if (err) - goto err_ppgtt; - - if (!ppgtt->base.allocate_va_range) + if (!ppgtt->vm.allocate_va_range) goto err_ppgtt_cleanup; + /* + * While we only allocate the page tables here and so we could + * address a much larger GTT than we could actually fit into + * RAM, a practical limit is the amount of physical pages in the system. + * This should ensure that we do not run into the oomkiller during + * the test and take down the machine wilfully. + */ + limit = totalram_pages << PAGE_SHIFT; + limit = min(ppgtt->vm.total, limit); + /* Check we can allocate the entire range */ - for (size = 4096; - size <= ppgtt->base.total; - size <<= 2) { - err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size); + for (size = 4096; size <= limit; size <<= 2) { + err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size); if (err) { if (err == -ENOMEM) { pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n", @@ -168,15 +185,15 @@ static int igt_ppgtt_alloc(void *arg) goto err_ppgtt_cleanup; } - ppgtt->base.clear_range(&ppgtt->base, 0, size); + cond_resched(); + + ppgtt->vm.clear_range(&ppgtt->vm, 0, size); } /* Check we can incrementally allocate the entire range */ - for (last = 0, size = 4096; - size <= ppgtt->base.total; - last = size, size <<= 2) { - err = ppgtt->base.allocate_va_range(&ppgtt->base, - last, size - last); + for (last = 0, size = 4096; size <= limit; last = size, size <<= 2) { + err = ppgtt->vm.allocate_va_range(&ppgtt->vm, + last, size - last); if (err) { if (err == -ENOMEM) { pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n", @@ -185,13 +202,14 @@ static int igt_ppgtt_alloc(void *arg) } goto err_ppgtt_cleanup; } + + cond_resched(); } err_ppgtt_cleanup: - ppgtt->base.cleanup(&ppgtt->base); -err_ppgtt: + mutex_lock(&dev_priv->drm.struct_mutex); + i915_ppgtt_put(ppgtt); mutex_unlock(&dev_priv->drm.struct_mutex); - kfree(ppgtt); return err; } @@ -293,6 +311,8 @@ static int lowlevel_hole(struct drm_i915_private *i915, i915_gem_object_put(obj); kfree(order); + + cleanup_freed_objects(i915); } return 0; @@ -521,6 +541,7 @@ static int fill_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); + cleanup_freed_objects(i915); } return 0; @@ -607,6 +628,8 @@ err_put: i915_gem_object_put(obj); if (err) return err; + + cleanup_freed_objects(i915); } return 0; @@ -791,6 +814,8 @@ err_obj: kfree(order); if (err) return err; + + cleanup_freed_objects(i915); } return 0; @@ -859,6 +884,7 @@ static int __shrink_hole(struct drm_i915_private *i915, } close_object_list(&objects, vm); + cleanup_freed_objects(i915); return err; } @@ -951,6 +977,7 @@ static int shrink_boom(struct drm_i915_private *i915, i915_gem_object_put(explode); memset(&vm->fault_attr, 0, sizeof(vm->fault_attr)); + cleanup_freed_objects(i915); } 
return 0; @@ -982,17 +1009,17 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock"); + ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; } - GEM_BUG_ON(offset_in_page(ppgtt->base.total)); - GEM_BUG_ON(ppgtt->base.closed); + GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); + GEM_BUG_ON(ppgtt->vm.closed); - err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time); + err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); - i915_ppgtt_close(&ppgtt->base); + i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1061,18 +1088,18 @@ static int exercise_ggtt(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); restart: - list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes); - drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) { + list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes); + drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) { if (hole_start < last) continue; - if (ggtt->base.mm.color_adjust) - ggtt->base.mm.color_adjust(node, 0, - &hole_start, &hole_end); + if (ggtt->vm.mm.color_adjust) + ggtt->vm.mm.color_adjust(node, 0, + &hole_start, &hole_end); if (hole_start >= hole_end) continue; - err = func(i915, &ggtt->base, hole_start, hole_end, end_time); + err = func(i915, &ggtt->vm, hole_start, hole_end, end_time); if (err) break; @@ -1134,7 +1161,7 @@ static int igt_ggtt_page(void *arg) goto out_free; memset(&tmp, 0, sizeof(tmp)); - err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp, + err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp, count * PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, @@ -1147,9 +1174,9 @@ static int igt_ggtt_page(void *arg) for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; - ggtt->base.insert_page(&ggtt->base, - i915_gem_object_get_dma_address(obj, 0), - offset, I915_CACHE_NONE, 0); + ggtt->vm.insert_page(&ggtt->vm, + i915_gem_object_get_dma_address(obj, 0), + offset, I915_CACHE_NONE, 0); } order = i915_random_order(count, &prng); @@ -1188,7 +1215,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: - ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size); + ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); intel_runtime_pm_put(i915); drm_mm_remove_node(&tmp); out_unpin: @@ -1217,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915, u64 hole_start, u64 hole_end, unsigned long end_time)) { + const u64 limit = totalram_pages << PAGE_SHIFT; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; IGT_TIMEOUT(end_time); @@ -1229,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915, ppgtt = ctx->ppgtt; GEM_BUG_ON(!ppgtt); - err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time); + err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time); mock_context_close(ctx); return err; @@ -1270,7 +1298,7 @@ static int igt_gtt_reserve(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; + total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; total += 2*I915_GTT_PAGE_SIZE) { struct i915_vma *vma; @@ -1288,20 +1316,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, 
NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1319,7 +1347,7 @@ static int igt_gtt_reserve(void *arg) /* Now we start forcing evictions */ for (total = I915_GTT_PAGE_SIZE; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; + total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; total += 2*I915_GTT_PAGE_SIZE) { struct i915_vma *vma; @@ -1337,20 +1365,20 @@ static int igt_gtt_reserve(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, obj->base.size, total, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1371,7 +1399,7 @@ static int igt_gtt_reserve(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1383,18 +1411,18 @@ static int igt_gtt_reserve(void *arg) goto out; } - offset = random_offset(0, i915->ggtt.base.total, + offset = random_offset(0, i915->ggtt.vm.total, 2*I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT); - err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node, obj->base.size, offset, obj->cache_level, 0); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1429,8 +1457,8 @@ static int igt_gtt_insert(void *arg) u64 start, end; } invalid_insert[] = { { - i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0, - 0, i915->ggtt.base.total, + i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0, + 0, i915->ggtt.vm.total, }, { 2*I915_GTT_PAGE_SIZE, 0, @@ -1460,7 +1488,7 @@ static int igt_gtt_insert(void *arg) /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { - err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp, + err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, @@ -1475,7 +1503,7 @@ static int igt_gtt_insert(void *arg) /* Start by filling the GGTT */ for (total = 0; - total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; + total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; total += I915_GTT_PAGE_SIZE) { struct i915_vma *vma; @@ -1493,15 +1521,15 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.base.total, + 0, i915->ggtt.vm.total, 0); if (err == -ENOSPC) { /* maxed out the GGTT space */ @@ -1510,7 
+1538,7 @@ static int igt_gtt_insert(void *arg) } if (err) { pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1522,7 +1550,7 @@ static int igt_gtt_insert(void *arg) list_for_each_entry(obj, &objects, st_link) { struct i915_vma *vma; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1542,7 +1570,7 @@ static int igt_gtt_insert(void *arg) struct i915_vma *vma; u64 offset; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; @@ -1557,13 +1585,13 @@ static int igt_gtt_insert(void *arg) goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.base.total, + 0, i915->ggtt.vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1579,7 +1607,7 @@ static int igt_gtt_insert(void *arg) /* And then force evictions */ for (total = 0; - total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; + total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; total += 2*I915_GTT_PAGE_SIZE) { struct i915_vma *vma; @@ -1597,19 +1625,19 @@ static int igt_gtt_insert(void *arg) list_add(&obj->st_link, &objects); - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto out; } - err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, + err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node, obj->base.size, 0, obj->cache_level, - 0, i915->ggtt.base.total, + 0, i915->ggtt.vm.total, 0); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", - total, i915->ggtt.base.total, err); + total, i915->ggtt.vm.total, err); goto out; } track_vma_bind(vma); @@ -1646,7 +1674,7 @@ int i915_gem_gtt_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -1669,7 +1697,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ggtt_page), }; - GEM_BUG_ON(offset_in_page(i915->ggtt.base.total)); + GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total)); return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index fbdb2419d418..c69cbd5aed52 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg) obj = huge_gem_object(i915, nreal * PAGE_SIZE, - i915->ggtt.base.total + PAGE_SIZE); + i915->ggtt.vm.total + PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -169,9 +169,16 @@ static u64 tiled_offset(const struct tile *tile, u64 v) v += y * tile->width; v += div64_u64_rem(x, tile->width, &x) << tile->size; v += x; - } else { + } else if (tile->width == 128) { const unsigned int ytile_span = 16; - const unsigned int ytile_height = 32 * ytile_span; + const unsigned int ytile_height = 512; + + v += y * ytile_span; + v += div64_u64_rem(x, ytile_span, &x) * ytile_height; + v += x; + } else { 
+ const unsigned int ytile_span = 32; + const unsigned int ytile_height = 256; v += y * ytile_span; v += div64_u64_rem(x, ytile_span, &x) * ytile_height; @@ -288,6 +295,8 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, kunmap(p); if (err) return err; + + i915_vma_destroy(vma); } return 0; @@ -311,7 +320,7 @@ static int igt_partial_tiling(void *arg) obj = huge_gem_object(i915, nreal << PAGE_SHIFT, - (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT); + (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -347,6 +356,14 @@ static int igt_partial_tiling(void *arg) unsigned int pitch; struct tile tile; + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + /* + * The swizzling pattern is actually unknown as it + * varies based on physical address of each page. + * See i915_gem_detect_bit_6_swizzle(). + */ + break; + tile.tiling = tiling; switch (tiling) { case I915_TILING_X: @@ -357,7 +374,8 @@ static int igt_partial_tiling(void *arg) break; } - if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN || + GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN); + if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; @@ -440,7 +458,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) struct i915_vma *vma; int err; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -454,12 +472,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) return PTR_ERR(rq); } - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_request_add(rq); - i915_gem_object_set_active_reference(obj); + __i915_gem_object_release_unless_active(obj); i915_vma_unpin(vma); - return 0; + + return err; } static bool assert_mmap_offset(struct drm_i915_private *i915, @@ -488,6 +508,15 @@ static int igt_mmap_offset_exhaustion(void *arg) u64 hole_start, hole_end; int loop, err; + /* Disable background reaper */ + mutex_lock(&i915->drm.struct_mutex); + if (!i915->gt.active_requests++) + i915_gem_unpark(i915); + mutex_unlock(&i915->drm.struct_mutex); + cancel_delayed_work_sync(&i915->gt.retire_work); + cancel_delayed_work_sync(&i915->gt.idle_work); + GEM_BUG_ON(!i915->gt.awake); + /* Trim the device mmap space to only a page */ memset(&resv, 0, sizeof(resv)); drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { @@ -496,7 +525,7 @@ static int igt_mmap_offset_exhaustion(void *arg) err = drm_mm_reserve_node(mm, &resv); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); - return err; + goto out_park; } break; } @@ -538,6 +567,9 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { + if (i915_terminally_wedged(&i915->gpu_error)) + break; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -554,6 +586,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto err_obj; } + /* NB we rely on the _active_ reference to access obj now */ GEM_BUG_ON(!i915_gem_object_is_active(obj)); err = i915_gem_object_create_mmap_offset(obj); if (err) { @@ -565,6 +598,13 @@ static int igt_mmap_offset_exhaustion(void *arg) out: drm_mm_remove_node(&resv); +out_park: + mutex_lock(&i915->drm.struct_mutex); + if (--i915->gt.active_requests) + queue_delayed_work(i915->wq, &i915->gt.retire_work, 0); + else + 
queue_delayed_work(i915->wq, &i915->gt.idle_work, 0); + mutex_unlock(&i915->drm.struct_mutex); return err; err_obj: i915_gem_object_put(obj); @@ -586,7 +626,7 @@ int i915_gem_object_mock_selftests(void) err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index d16d74178e9d..1b70208eeea7 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -24,3 +24,4 @@ selftest(vma, i915_vma_mock_selftests) selftest(evict, i915_gem_evict_mock_selftests) selftest(gtt, i915_gem_gtt_mock_selftests) selftest(hugepages, i915_gem_huge_page_mock_selftests) +selftest(contexts, i915_gem_context_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 94bc2e1898a4..c4aac6141e04 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -262,7 +262,7 @@ int i915_request_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -286,7 +286,9 @@ static int begin_live_test(struct live_test *t, t->func = func; t->name = name; - err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); if (err) { pr_err("%s(%s): failed to idle before, with err=%d!", func, name, err); @@ -342,9 +344,9 @@ static int live_nop_request(void *arg) mutex_lock(&i915->drm.struct_mutex); for_each_engine(engine, i915, id) { - IGT_TIMEOUT(end_time); - struct i915_request *request; + struct i915_request *request = NULL; unsigned long n, prime; + IGT_TIMEOUT(end_time); ktime_t times[2] = {}; err = begin_live_test(&t, i915, __func__, engine->name); @@ -430,7 +432,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) if (err) goto err; - vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -466,7 +468,7 @@ empty_request(struct intel_engine_cs *engine, goto out_request; out_request: - __i915_request_add(request, err == 0); + i915_request_add(request); return err ? ERR_PTR(err) : request; } @@ -555,7 +557,8 @@ out_unlock: static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { struct i915_gem_context *ctx = i915->kernel_context; - struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + ctx->ppgtt ? 
&ctx->ppgtt->vm : &i915->ggtt.vm; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); struct i915_vma *vma; @@ -593,11 +596,8 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) } else if (gen >= 6) { *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; *cmd++ = lower_32_bits(vma->node.start); - } else if (gen >= 4) { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; - *cmd++ = lower_32_bits(vma->node.start); } else { - *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1; + *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; *cmd++ = lower_32_bits(vma->node.start); } *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ @@ -677,7 +677,9 @@ static int live_all_engines(void *arg) i915_gem_object_set_active_reference(batch->obj); } - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_request_get(request[id]); i915_request_add(request[id]); } @@ -787,7 +789,9 @@ static int live_sequential_engines(void *arg) GEM_BUG_ON(err); request[id]->batch = batch; - i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[id], 0); + GEM_BUG_ON(err); + i915_gem_object_set_active_reference(batch->obj); i915_vma_get(batch); @@ -861,5 +865,9 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sequential_engines), SUBTEST(live_empty_request), }; + + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index addc5a599c4a..86c54ea37f48 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -210,6 +210,8 @@ int __i915_subtests(const char *caller, return -EINTR; pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name); + GEM_TRACE("Running %s/%s\n", caller, st->name); + err = st->func(data); if (err && err != -EINTR) { pr_err(DRIVER_NAME "/%s: %s failed with error %d\n", diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index e90f97236e50..ffa74290e054 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma, { bool ok = true; - if (vma->vm != &ctx->ppgtt->base) { + if (vma->vm != &ctx->ppgtt->vm) { pr_err("VMA created with wrong VM\n"); ok = false; } @@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915, list_for_each_entry(obj, objects, st_link) { for (pinned = 0; pinned <= 1; pinned++) { list_for_each_entry(ctx, contexts, link) { - struct i915_address_space *vm = - &ctx->ppgtt->base; + struct i915_address_space *vm = &ctx->ppgtt->vm; struct i915_vma *vma; int err; @@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg) VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)), + VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end), - VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)), - INVALID(0, PIN_GLOBAL | 
PIN_OFFSET_FIXED | i915->ggtt.base.total), + VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), + INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)), VALID(4096, PIN_GLOBAL), @@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg) VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), - VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL), - VALID(i915->ggtt.base.total, PIN_GLOBAL), - NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL), + VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL), + VALID(i915->ggtt.vm.total, PIN_GLOBAL), + NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL), NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), - INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)), + INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), @@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg) * variable start, end and size. */ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end), - NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total), + NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total), NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), - NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)), + NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)), #endif { }, #undef NOSPACE @@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg) * focusing on error handling of boundary conditions. */ - GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm)); + GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm)); obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); - vma = checked_vma_instance(obj, &i915->ggtt.base, NULL); + vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) goto out; @@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a, static int igt_vma_rotate(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.base; + struct i915_address_space *vm = &i915->ggtt.vm; struct drm_i915_gem_object *obj; const struct intel_rotation_plane_info planes[] = { { .width = 1, .height = 1, .stride = 1 }, @@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma, static int igt_vma_partial(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_address_space *vm = &i915->ggtt.base; + struct i915_address_space *vm = &i915->ggtt.vm; const unsigned int npages = 1021; /* prime! 
*/ struct drm_i915_gem_object *obj; const struct phase { @@ -734,7 +733,7 @@ int i915_vma_mock_selftests(void) err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 0d06f559243f..af66e3d4e23a 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -9,52 +9,8 @@ #include "../i915_selftest.h" #include "igt_flush_test.h" -struct wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const void *symbol; -}; - -static void wedge_me(struct work_struct *work) -{ - struct wedge_me *w = container_of(work, typeof(*w), work.work); - - pr_err("%pS timed out, cancelling all further testing.\n", w->symbol); - - GEM_TRACE("%pS timed out.\n", w->symbol); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(w->i915); -} - -static void __init_wedge(struct wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const void *symbol) -{ - w->i915 = i915; - w->symbol = symbol; - - INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __fini_wedge(struct wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__init_wedge((W), (DEV), (TIMEOUT), __builtin_return_address(0)); \ - (W)->i915; \ - __fini_wedge((W))) - int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) { - struct wedge_me w; - cond_resched(); if (flags & I915_WAIT_LOCKED && @@ -63,8 +19,15 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) i915_gem_set_wedged(i915); } - wedge_on_timeout(&w, i915, HZ) - i915_gem_wait_for_idle(i915, flags); + if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { + pr_err("%pS timed out, cancelling all further testing.\n", + __builtin_return_address(0)); + + GEM_TRACE("%pS timed out.\n", __builtin_return_address(0)); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(i915); + } return i915_terminally_wedged(&i915->gpu_error) ? 
-EIO : 0; } diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h new file mode 100644 index 000000000000..08e5ff11bbd9 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef IGT_WEDGE_ME_H +#define IGT_WEDGE_ME_H + +#include <linux/workqueue.h> + +#include "../i915_gem.h" + +struct drm_i915_private; + +struct igt_wedge_me { + struct delayed_work work; + struct drm_i915_private *i915; + const char *name; +}; + +static void __igt_wedge_me(struct work_struct *work) +{ + struct igt_wedge_me *w = container_of(work, typeof(*w), work.work); + + pr_err("%s timed out, cancelling test.\n", w->name); + + GEM_TRACE("%s timed out.\n", w->name); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(w->i915); +} + +static void __igt_init_wedge(struct igt_wedge_me *w, + struct drm_i915_private *i915, + long timeout, + const char *name) +{ + w->i915 = i915; + w->name = name; + + INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me); + schedule_delayed_work(&w->work, timeout); +} + +static void __igt_fini_wedge(struct igt_wedge_me *w) +{ + cancel_delayed_work_sync(&w->work); + destroy_delayed_work_on_stack(&w->work); + w->i915 = NULL; +} + +#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \ + for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \ + (W)->i915; \ + __igt_fini_wedge((W))) + +#endif /* IGT_WEDGE_ME_H */ diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c index d6926e7820e5..f03b407fdbe2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c @@ -464,7 +464,7 @@ int intel_breadcrumbs_mock_selftests(void) return -ENOMEM; err = i915_subtests(tests, i915->engine[RCS]); - drm_dev_unref(&i915->drm); + drm_dev_put(&i915->drm); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index fb74e2cf8a0a..407c98fb9170 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -196,19 +196,23 @@ static int igt_guc_clients(void *args) } unreserve_doorbell(guc->execbuf_client); - err = guc_clients_doorbell_init(guc); + + __create_doorbell(guc->execbuf_client); + err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id); if (err != -EIO) { pr_err("unexpected (err = %d)", err); - goto out; + goto out_db; } if (!available_dbs(guc, guc->execbuf_client->priority)) { pr_err("doorbell not available when it should\n"); err = -EIO; - goto out; + goto out_db; } +out_db: /* clean after test */ + __destroy_doorbell(guc->execbuf_client); err = reserve_doorbell(guc->execbuf_client); if (err) { pr_err("failed to reserve back the doorbell back\n"); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 438e0b045a2c..65d66cdedd26 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -27,6 +27,7 @@ #include "../i915_selftest.h" #include "i915_random.h" #include "igt_flush_test.h" +#include "igt_wedge_me.h" #include "mock_context.h" #include "mock_drm.h" @@ -105,7 +106,10 @@ static int emit_recurse_batch(struct hang *h, struct i915_request *rq) { struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = rq->ctx->ppgtt ? 
&rq->ctx->ppgtt->base : &i915->ggtt.base; + struct i915_address_space *vm = + rq->gem_context->ppgtt ? + &rq->gem_context->ppgtt->vm : + &i915->ggtt.vm; struct i915_vma *hws, *vma; unsigned int flags; u32 *batch; @@ -127,13 +131,19 @@ static int emit_recurse_batch(struct hang *h, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -168,7 +178,7 @@ static int emit_recurse_batch(struct hang *h, *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); } else if (INTEL_GEN(i915) >= 4) { - *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; + *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; @@ -181,7 +191,7 @@ static int emit_recurse_batch(struct hang *h, *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } else { - *batch++ = MI_STORE_DWORD_IMM; + *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = rq->fence.seqno; *batch++ = MI_ARB_CHECK; @@ -190,7 +200,7 @@ static int emit_recurse_batch(struct hang *h, batch += 1024 / sizeof(*batch); *batch++ = MI_ARB_CHECK; - *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1; + *batch++ = MI_BATCH_BUFFER_START | 2 << 6; *batch++ = lower_32_bits(vma->node.start); } *batch++ = MI_BATCH_BUFFER_END; /* not reached */ @@ -202,6 +212,7 @@ static int emit_recurse_batch(struct hang *h, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); @@ -242,7 +253,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) err = emit_recurse_batch(h, rq); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } @@ -315,7 +326,7 @@ static int igt_hang_sanitycheck(void *arg) *h.batch = MI_BATCH_BUFFER_END; i915_gem_chipset_flush(i915); - __i915_request_add(rq, true); + i915_request_add(rq); timeout = i915_request_wait(rq, I915_WAIT_LOCKED, @@ -461,7 +472,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -560,6 +571,30 @@ struct active_engine { #define TEST_SELF BIT(2) #define TEST_PRIORITY BIT(3) +static int active_request_put(struct i915_request *rq) +{ + int err = 0; + + if (!rq) + return 0; + + if (i915_request_wait(rq, 0, 5 * HZ) < 0) { + GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n", + rq->engine->name, + rq->fence.context, + rq->fence.seqno, + i915_request_global_seqno(rq)); + GEM_TRACE_DUMP(); + + i915_gem_set_wedged(rq->i915); + err = -EIO; + } + + i915_request_put(rq); + + return err; +} + static int active_engine(void *data) { I915_RND_STATE(prng); @@ -608,24 +643,20 @@ static int active_engine(void *data) i915_request_add(new); mutex_unlock(&engine->i915->drm.struct_mutex); - if (old) { - if (i915_request_wait(old, 0, HZ) < 0) { - GEM_TRACE("%s timed out.\n", 
engine->name); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(engine->i915); - i915_request_put(old); - err = -EIO; - break; - } - i915_request_put(old); - } + err = active_request_put(old); + if (err) + break; cond_resched(); } - for (count = 0; count < ARRAY_SIZE(rq); count++) - i915_request_put(rq[count]); + for (count = 0; count < ARRAY_SIZE(rq); count++) { + int err__ = active_request_put(rq[count]); + + /* Keep the first error */ + if (!err) + err = err__; + } err_file: mock_file_free(engine->i915, file); @@ -719,7 +750,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); mutex_unlock(&i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { @@ -891,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask) return reset_count; } -static int igt_wait_reset(void *arg) +static int igt_reset_wait(void *arg) { struct drm_i915_private *i915 = arg; struct i915_request *rq; @@ -919,7 +950,7 @@ static int igt_wait_reset(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -965,6 +996,170 @@ unlock: return err; } +struct evict_vma { + struct completion completion; + struct i915_vma *vma; +}; + +static int evict_vma(void *data) +{ + struct evict_vma *arg = data; + struct i915_address_space *vm = arg->vma->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_mm_node evict = arg->vma->node; + int err; + + complete(&arg->completion); + + mutex_lock(&i915->drm.struct_mutex); + err = i915_gem_evict_for_node(vm, &evict, 0); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +static int __igt_reset_evict_vma(struct drm_i915_private *i915, + struct i915_address_space *vm) +{ + struct drm_i915_gem_object *obj; + struct task_struct *tsk = NULL; + struct i915_request *rq; + struct evict_vma arg; + struct hang h; + int err; + + if (!intel_engine_can_store_dword(i915->engine[RCS])) + return 0; + + /* Check that we can recover an unbind stuck on a hanging request */ + + global_reset_lock(i915); + + mutex_lock(&i915->drm.struct_mutex); + err = hang_init(&h, i915); + if (err) + goto unlock; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto fini; + } + + arg.vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(arg.vma)) { + err = PTR_ERR(arg.vma); + goto out_obj; + } + + rq = hang_create_request(&h, i915->engine[RCS]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_obj; + } + + err = i915_vma_pin(arg.vma, 0, 0, + i915_vma_is_ggtt(arg.vma) ? 
PIN_GLOBAL : PIN_USER); + if (err) + goto out_obj; + + err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unpin(arg.vma); + + i915_request_get(rq); + i915_request_add(rq); + if (err) + goto out_rq; + + mutex_unlock(&i915->drm.struct_mutex); + + if (!wait_until_running(&h, rq)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("%s: Failed to start request %x, at %x\n", + __func__, rq->fence.seqno, hws_seqno(&h, rq)); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + i915_gem_set_wedged(i915); + goto out_reset; + } + + init_completion(&arg.completion); + + tsk = kthread_run(evict_vma, &arg, "igt/evict_vma"); + if (IS_ERR(tsk)) { + err = PTR_ERR(tsk); + tsk = NULL; + goto out_reset; + } + + wait_for_completion(&arg.completion); + + if (wait_for(waitqueue_active(&rq->execute), 10)) { + struct drm_printer p = drm_info_printer(i915->drm.dev); + + pr_err("igt/evict_vma kthread did not wait\n"); + intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); + + i915_gem_set_wedged(i915); + goto out_reset; + } + +out_reset: + fake_hangcheck(rq, intel_engine_flag(rq->engine)); + + if (tsk) { + struct igt_wedge_me w; + + /* The reset, even indirectly, should take less than 10ms. */ + igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + err = kthread_stop(tsk); + } + + mutex_lock(&i915->drm.struct_mutex); +out_rq: + i915_request_put(rq); +out_obj: + i915_gem_object_put(obj); +fini: + hang_fini(&h); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + global_reset_unlock(i915); + + if (i915_terminally_wedged(&i915->gpu_error)) + return -EIO; + + return err; +} + +static int igt_reset_evict_ggtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + + return __igt_reset_evict_vma(i915, &i915->ggtt.vm); +} + +static int igt_reset_evict_ppgtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + int err; + + mutex_lock(&i915->drm.struct_mutex); + ctx = kernel_context(i915); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + err = 0; + if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */ + err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm); + + kernel_context_close(ctx); + return err; +} + static int wait_for_others(struct drm_i915_private *i915, struct intel_engine_cs *exclude) { @@ -1014,7 +1209,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(prev); - __i915_request_add(prev, true); + i915_request_add(prev); count = 0; do { @@ -1028,7 +1223,7 @@ static int igt_reset_queue(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); /* * XXX We don't handle resetting the kernel context @@ -1161,7 +1356,7 @@ static int igt_handle_error(void *arg) } i915_request_get(rq); - __i915_request_add(rq, true); + i915_request_add(rq); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(i915->drm.dev); @@ -1210,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), SUBTEST(igt_reset_engines), - SUBTEST(igt_wait_reset), SUBTEST(igt_reset_queue), + SUBTEST(igt_reset_wait), + SUBTEST(igt_reset_evict_ggtt), + SUBTEST(igt_reset_evict_ppgtt), SUBTEST(igt_handle_error), }; bool saved_hangcheck; @@ -1220,6 +1417,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (!intel_has_gpu_reset(i915)) return 0; + if (i915_terminally_wedged(&i915->gpu_error)) + return -EIO; /* we're long past hope of a 
successful reset */ + intel_runtime_pm_get(i915); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 1b8a07125150..582566faef09 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin, struct i915_request *rq, u32 arbitration_command) { - struct i915_address_space *vm = &rq->ctx->ppgtt->base; + struct i915_address_space *vm = &rq->gem_context->ppgtt->vm; struct i915_vma *hws, *vma; u32 *batch; int err; @@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin, if (err) goto unpin_vma; - i915_vma_move_to_active(vma, rq, 0); + err = i915_vma_move_to_active(vma, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(vma->obj)) { i915_gem_object_get(vma->obj); i915_gem_object_set_active_reference(vma->obj); } - i915_vma_move_to_active(hws, rq, 0); + err = i915_vma_move_to_active(hws, rq, 0); + if (err) + goto unpin_hws; + if (!i915_gem_object_has_active_reference(hws->obj)) { i915_gem_object_get(hws->obj); i915_gem_object_set_active_reference(hws->obj); @@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin, err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); +unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); @@ -155,7 +162,7 @@ spinner_create_request(struct spinner *spin, err = emit_recurse_batch(spin, rq, arbitration_command); if (err) { - __i915_request_add(rq, false); + i915_request_add(rq); return ERR_PTR(err); } @@ -444,16 +451,134 @@ err_wedged: goto err_ctx_lo; } +static int live_preempt_hang(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + if (!intel_has_reset_engine(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + + if (spinner_init(&spin_hi, i915)) + goto err_unlock; + + if (spinner_init(&spin_lo, i915)) + goto err_spin_hi; + + ctx_hi = kernel_context(i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; + + ctx_lo = kernel_context(i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; + + for_each_engine(engine, i915, id) { + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!wait_for_spinner(&spin_lo, rq)) { + GEM_TRACE("lo spinner failed to start\n"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + init_completion(&engine->execlists.preempt_hang.completion); + engine->execlists.preempt_hang.inject_hang = true; + + i915_request_add(rq); + + if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion, + HZ / 10)) { + pr_err("Preemption did not occur within timeout!"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + 
i915_reset_engine(engine, NULL); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + + engine->execlists.preempt_hang.inject_hang = false; + + if (!wait_for_spinner(&spin_hi, rq)) { + GEM_TRACE("hi spinner failed to start\n"); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_ctx_lo; + } + + spinner_end(&spin_hi); + spinner_end(&spin_lo); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + spinner_fini(&spin_lo); +err_spin_hi: + spinner_fini(&spin_hi); +err_unlock: + igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + return err; +} + int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_preempt_hang), }; if (!HAS_EXECLISTS(i915)) return 0; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index 17444a3abbb9..0d39b3bf0c0d 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -6,6 +6,7 @@ #include "../i915_selftest.h" +#include "igt_wedge_me.h" #include "mock_context.h" static struct drm_i915_gem_object * @@ -33,7 +34,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) memset(cs, 0xc5, PAGE_SIZE); i915_gem_object_unpin_map(result); - vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL); + vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -49,6 +50,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_pin; } + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + if (err) + goto err_req; + srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(ctx->i915) >= 8) srm++; @@ -67,15 +72,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) } intel_ring_advance(rq, cs); - i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - reservation_object_lock(vma->resv, NULL); - reservation_object_add_excl_fence(vma->resv, &rq->fence); - reservation_object_unlock(vma->resv); - i915_gem_object_get(result); i915_gem_object_set_active_reference(result); - __i915_request_add(rq, true); + i915_request_add(rq); i915_vma_unpin(vma); return result; @@ -112,6 +112,7 @@ static int check_whitelist(const struct whitelist *w, struct intel_engine_cs *engine) { struct drm_i915_gem_object *results; + struct igt_wedge_me wedge; u32 *vaddr; int err; int i; @@ -120,7 +121,11 @@ static int check_whitelist(const struct whitelist *w, if (IS_ERR(results)) return PTR_ERR(results); - err = i915_gem_object_set_to_cpu_domain(results, false); + err = 0; + igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! 
*/ + err = i915_gem_object_set_to_cpu_domain(results, false); + if (i915_terminally_wedged(&ctx->i915->gpu_error)) + err = -EIO; if (err) goto out_put; @@ -283,6 +288,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) }; int err; + if (i915_terminally_wedged(&i915->gpu_error)) + return 0; + mutex_lock(&i915->drm.struct_mutex); err = i915_subtests(tests, i915); mutex_unlock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index 501becc47c0c..8904f1ce64e3 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; + unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915, INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); + for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { + struct intel_context *ce = &ctx->__engine[n]; + + ce->gem_context = ctx; + } + ret = ida_simple_get(&i915->contexts.hw_ida, 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); if (ret < 0) diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c index 302f7d103635..ca682caf1062 100644 --- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c @@ -94,18 +94,6 @@ static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) vm_unmap_ram(vaddr, mock->npages); } -static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num) -{ - struct mock_dmabuf *mock = to_mock(dma_buf); - - return kmap_atomic(mock->pages[page_num]); -} - -static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr) -{ - kunmap_atomic(addr); -} - static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num) { struct mock_dmabuf *mock = to_mock(dma_buf); @@ -130,9 +118,7 @@ static const struct dma_buf_ops mock_dmabuf_ops = { .unmap_dma_buf = mock_unmap_dma_buf, .release = mock_dmabuf_release, .map = mock_dmabuf_kmap, - .map_atomic = mock_dmabuf_kmap_atomic, .unmap = mock_dmabuf_kunmap, - .unmap_atomic = mock_dmabuf_kunmap_atomic, .mmap = mock_dmabuf_mmap, .vmap = mock_dmabuf_vmap, .vunmap = mock_dmabuf_vunmap, diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 26bf29d97007..22a73da45ad5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -72,25 +72,34 @@ static void hw_delay_complete(struct timer_list *t) spin_unlock(&engine->hw_lock); } -static struct intel_ring * -mock_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static void mock_context_unpin(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - - if (!ce->pin_count++) - i915_gem_context_get(ctx); + i915_gem_context_put(ce->gem_context); +} - return engine->buffer; +static void mock_context_destroy(struct intel_context *ce) +{ + GEM_BUG_ON(ce->pin_count); } -static void mock_context_unpin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static const struct intel_context_ops mock_context_ops = { + .unpin = mock_context_unpin, + .destroy = mock_context_destroy, +}; + +static struct intel_context * +mock_context_pin(struct intel_engine_cs *engine, + struct i915_gem_context *ctx) { 
struct intel_context *ce = to_intel_context(ctx, engine); - if (!--ce->pin_count) - i915_gem_context_put(ctx); + if (!ce->pin_count++) { + i915_gem_context_get(ctx); + ce->ring = engine->buffer; + ce->ops = &mock_context_ops; + } + + return ce; } static int mock_request_alloc(struct i915_request *request) @@ -185,13 +194,14 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.status_page.page_addr = (void *)(engine + 1); engine->base.context_pin = mock_context_pin; - engine->base.context_unpin = mock_context_unpin; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; engine->base.emit_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; i915_timeline_init(i915, &engine->base.timeline, engine->base.name); + lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); + intel_engine_init_breadcrumbs(&engine->base); engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ @@ -204,8 +214,13 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, if (!engine->base.buffer) goto err_breadcrumbs; + if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base))) + goto err_ring; + return &engine->base; +err_ring: + mock_ring_free(engine->base.buffer); err_breadcrumbs: intel_engine_fini_breadcrumbs(&engine->base); i915_timeline_fini(&engine->base.timeline); @@ -238,11 +253,15 @@ void mock_engine_free(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); + struct intel_context *ce; GEM_BUG_ON(timer_pending(&mock->hw_delay)); - if (engine->last_retired_context) - intel_context_unpin(engine->last_retired_context, engine); + ce = fetch_and_zero(&engine->last_retired_context); + if (ce) + intel_context_unpin(ce); + + __intel_context_unpin(engine->i915->kernel_context, engine); mock_ring_free(engine->buffer); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 94baedfa0f74..43ed8b28aeaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -136,8 +136,6 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { struct drm_i915_private *i915; - struct intel_engine_cs *engine; - enum intel_engine_id id; struct pci_dev *pdev; int err; @@ -159,7 +157,8 @@ struct drm_i915_private *mock_gem_device(void) dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); - WARN_ON(pm_runtime_get_sync(&pdev->dev)); + if (pm_runtime_enabled(&pdev->dev)) + WARN_ON(pm_runtime_get_sync(&pdev->dev)); i915 = (struct drm_i915_private *)(pdev + 1); pci_set_drvdata(pdev, i915); @@ -233,13 +232,13 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(i915); mkwrite_device_info(i915)->ring_mask = BIT(0); - i915->engine[RCS] = mock_engine(i915, "mock", RCS); - if (!i915->engine[RCS]) - goto err_unlock; - i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) - goto err_engine; + goto err_unlock; + + i915->engine[RCS] = mock_engine(i915, "mock", RCS); + if (!i915->engine[RCS]) + goto err_context; mutex_unlock(&i915->drm.struct_mutex); @@ -247,9 +246,8 @@ struct drm_i915_private *mock_gem_device(void) return i915; -err_engine: - for_each_engine(engine, i915, id) - mock_engine_free(engine); +err_context: + i915_gem_contexts_fini(i915); err_unlock: mutex_unlock(&i915->drm.struct_mutex); 
 	kmem_cache_destroy(i915->priorities);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 36c112088940..a140ea5c3a7c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -66,25 +66,21 @@ mock_ppgtt(struct drm_i915_private *i915,
 		return NULL;
 
 	kref_init(&ppgtt->ref);
-	ppgtt->base.i915 = i915;
-	ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
-	ppgtt->base.file = ERR_PTR(-ENODEV);
-
-	INIT_LIST_HEAD(&ppgtt->base.active_list);
-	INIT_LIST_HEAD(&ppgtt->base.inactive_list);
-	INIT_LIST_HEAD(&ppgtt->base.unbound_list);
-
-	INIT_LIST_HEAD(&ppgtt->base.global_link);
-	drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
-
-	ppgtt->base.clear_range = nop_clear_range;
-	ppgtt->base.insert_page = mock_insert_page;
-	ppgtt->base.insert_entries = mock_insert_entries;
-	ppgtt->base.bind_vma = mock_bind_ppgtt;
-	ppgtt->base.unbind_vma = mock_unbind_ppgtt;
-	ppgtt->base.set_pages = ppgtt_set_pages;
-	ppgtt->base.clear_pages = clear_pages;
-	ppgtt->base.cleanup = mock_cleanup;
+	ppgtt->vm.i915 = i915;
+	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
+	ppgtt->vm.file = ERR_PTR(-ENODEV);
+
+	i915_address_space_init(&ppgtt->vm, i915);
+
+	ppgtt->vm.clear_range = nop_clear_range;
+	ppgtt->vm.insert_page = mock_insert_page;
+	ppgtt->vm.insert_entries = mock_insert_entries;
+	ppgtt->vm.cleanup = mock_cleanup;
+
+	ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
+	ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
+	ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+	ppgtt->vm.vma_ops.clear_pages = clear_pages;
 
 	return ppgtt;
 }
@@ -105,29 +101,28 @@ void mock_init_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 
-	INIT_LIST_HEAD(&i915->vm_list);
-
-	ggtt->base.i915 = i915;
+	ggtt->vm.i915 = i915;
 
 	ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
 	ggtt->mappable_end = resource_size(&ggtt->gmadr);
-	ggtt->base.total = 4096 * PAGE_SIZE;
-
-	ggtt->base.clear_range = nop_clear_range;
-	ggtt->base.insert_page = mock_insert_page;
-	ggtt->base.insert_entries = mock_insert_entries;
-	ggtt->base.bind_vma = mock_bind_ggtt;
-	ggtt->base.unbind_vma = mock_unbind_ggtt;
-	ggtt->base.set_pages = ggtt_set_pages;
-	ggtt->base.clear_pages = clear_pages;
-	ggtt->base.cleanup = mock_cleanup;
-
-	i915_address_space_init(&ggtt->base, i915, "global");
+	ggtt->vm.total = 4096 * PAGE_SIZE;
+
+	ggtt->vm.clear_range = nop_clear_range;
+	ggtt->vm.insert_page = mock_insert_page;
+	ggtt->vm.insert_entries = mock_insert_entries;
+	ggtt->vm.cleanup = mock_cleanup;
+
+	ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
+	ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
+	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
+	ggtt->vm.vma_ops.clear_pages = clear_pages;
+
+	i915_address_space_init(&ggtt->vm, i915);
 }
 
 void mock_fini_ggtt(struct drm_i915_private *i915)
 {
 	struct i915_ggtt *ggtt = &i915->ggtt;
 
-	i915_address_space_fini(&ggtt->base);
+	i915_address_space_fini(&ggtt->vm);
 }
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index f349b3920199..435a2c35ee8c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -69,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
 	}
 }
 
-void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
 	struct drm_encoder *encoder = &intel_dsi->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -342,11 +342,15 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 			pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
 		else
 			pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
-	}
 
-	ret = intel_compute_dsi_pll(encoder, pipe_config);
-	if (ret)
-		return false;
+		ret = bxt_dsi_pll_compute(encoder, pipe_config);
+		if (ret)
+			return false;
+	} else {
+		ret = vlv_dsi_pll_compute(encoder, pipe_config);
+		if (ret)
+			return false;
+	}
 
 	pipe_config->clock_set = true;
 
@@ -546,12 +550,12 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		vlv_dsi_device_ready(encoder);
-	else if (IS_BROXTON(dev_priv))
-		bxt_dsi_device_ready(encoder);
-	else if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(dev_priv))
 		glk_dsi_device_ready(encoder);
+	else if (IS_GEN9_LP(dev_priv))
+		bxt_dsi_device_ready(encoder);
+	else
+		vlv_dsi_device_ready(encoder);
 }
 
 static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
@@ -810,8 +814,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
 	 * The BIOS may leave the PLL in a wonky state where it doesn't
 	 * lock. It needs to be fully powered down to fix it.
 	 */
-	intel_disable_dsi_pll(encoder);
-	intel_enable_dsi_pll(encoder, pipe_config);
+	if (IS_GEN9_LP(dev_priv)) {
+		bxt_dsi_pll_disable(encoder);
+		bxt_dsi_pll_enable(encoder, pipe_config);
+	} else {
+		vlv_dsi_pll_disable(encoder);
+		vlv_dsi_pll_enable(encoder, pipe_config);
+	}
 
 	if (IS_BROXTON(dev_priv)) {
 		/* Add MIPI IO reset programming for modeset */
@@ -929,11 +938,10 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
-	    IS_BROXTON(dev_priv))
-		vlv_dsi_clear_device_ready(encoder);
-	else if (IS_GEMINILAKE(dev_priv))
+	if (IS_GEMINILAKE(dev_priv))
 		glk_dsi_clear_device_ready(encoder);
+	else
+		vlv_dsi_clear_device_ready(encoder);
 }
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -949,7 +957,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 
 	if (is_vid_mode(intel_dsi)) {
 		for_each_dsi_port(port, intel_dsi->ports)
-			wait_for_dsi_fifo_empty(intel_dsi, port);
+			vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
 
 		intel_dsi_port_disable(encoder);
 		usleep_range(2000, 5000);
@@ -979,11 +987,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
 			   val & ~MIPIO_RST_CTRL);
 	}
 
-	intel_disable_dsi_pll(encoder);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+	if (IS_GEN9_LP(dev_priv)) {
+		bxt_dsi_pll_disable(encoder);
+	} else {
 		u32 val;
 
+		vlv_dsi_pll_disable(encoder);
+
 		val = I915_READ(DSPCLK_GATE_D);
 		val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
 		I915_WRITE(DSPCLK_GATE_D, val);
@@ -1024,7 +1034,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 	 * configuration, otherwise accessing DSI registers will hang the
 	 * machine. See BSpec North Display Engine registers/MIPI[BXT].
 	 */
-	if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+	if (IS_GEN9_LP(dev_priv) && !bxt_dsi_pll_is_enabled(dev_priv))
 		goto out_put_power;
 
 	/* XXX: this only works for one DSI output */
@@ -1247,16 +1257,19 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
 
 	pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
 
-	if (IS_GEN9_LP(dev_priv))
+	if (IS_GEN9_LP(dev_priv)) {
 		bxt_dsi_get_pipe_config(encoder, pipe_config);
+		pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+					pipe_config);
+	} else {
+		pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+					pipe_config);
+	}
 
-	pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
-				  pipe_config);
-	if (!pclk)
-		return;
-
-	pipe_config->base.adjusted_mode.crtc_clock = pclk;
-	pipe_config->port_clock = pclk;
+	if (pclk) {
+		pipe_config->base.adjusted_mode.crtc_clock = pclk;
+		pipe_config->port_clock = pclk;
+	}
 }
 
 static enum drm_mode_status
@@ -1585,20 +1598,24 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
 	enum port port;
 	u32 val;
 
-	if (!IS_GEMINILAKE(dev_priv)) {
-		for_each_dsi_port(port, intel_dsi->ports) {
-			/* Panel commands can be sent when clock is in LP11 */
-			I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+	if (IS_GEMINILAKE(dev_priv))
+		return;
 
-			intel_dsi_reset_clocks(encoder, port);
-			I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+	for_each_dsi_port(port, intel_dsi->ports) {
+		/* Panel commands can be sent when clock is in LP11 */
+		I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
 
-			val = I915_READ(MIPI_DSI_FUNC_PRG(port));
-			val &= ~VID_MODE_FORMAT_MASK;
-			I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+		if (IS_GEN9_LP(dev_priv))
+			bxt_dsi_reset_clocks(encoder, port);
+		else
+			vlv_dsi_reset_clocks(encoder, port);
+		I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 
-			I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
-		}
+		val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+		val &= ~VID_MODE_FORMAT_MASK;
+		I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+		I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
 	}
 }
 
@@ -1671,16 +1688,16 @@ static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-	enum i9xx_plane_id plane;
+	enum i9xx_plane_id i9xx_plane;
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		if (connector->encoder->crtc_mask == BIT(PIPE_B))
-			plane = PLANE_B;
+			i9xx_plane = PLANE_B;
 		else
-			plane = PLANE_A;
+			i9xx_plane = PLANE_A;
 
-		val = I915_READ(DSPCNTR(plane));
+		val = I915_READ(DSPCNTR(i9xx_plane));
 		if (val & DISPPLANE_ROTATE_180)
 			orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
 	}
@@ -1713,7 +1730,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
 	}
 }
 
-void intel_dsi_init(struct drm_i915_private *dev_priv)
+void vlv_dsi_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct intel_dsi *intel_dsi;
@@ -1730,14 +1747,10 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
 	if (!intel_bios_is_dsi_present(dev_priv, &port))
 		return;
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
-	} else if (IS_GEN9_LP(dev_priv)) {
+	if (IS_GEN9_LP(dev_priv))
 		dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
-	} else {
-		DRM_ERROR("Unsupported Mipi device to reg base");
-		return;
-	}
+	else
+		dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
 
 	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
 	if (!intel_dsi)
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 2ff2ee7f3b78..a132a8037ecc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -111,8 +111,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
  * XXX: The muxing and gating is hard coded for now. Need to add support for
  * sharing PLLs with two DSI outputs.
  */
-static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
-			       struct intel_crtc_state *config)
+int vlv_dsi_pll_compute(struct intel_encoder *encoder,
+			struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -142,8 +142,8 @@ static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
 	return 0;
 }
 
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
-			       const struct intel_crtc_state *config)
+void vlv_dsi_pll_enable(struct intel_encoder *encoder,
+			const struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
@@ -175,7 +175,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
 	DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+void vlv_dsi_pll_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 tmp;
@@ -192,7 +192,7 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
-static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 {
 	bool enabled;
 	u32 val;
@@ -229,7 +229,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
 	return enabled;
 }
 
-static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
+void bxt_dsi_pll_disable(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	u32 val;
@@ -261,8 +261,8 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
 	     bpp, pipe_bpp);
 }
 
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-			    struct intel_crtc_state *config)
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+		     struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -327,8 +327,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 	return pclk;
 }
 
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-			    struct intel_crtc_state *config)
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+		     struct intel_crtc_state *config)
 {
 	u32 pclk;
 	u32 dsi_clk;
@@ -357,16 +357,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 	return pclk;
 }
 
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
-		       struct intel_crtc_state *config)
-{
-	if (IS_GEN9_LP(to_i915(encoder->base.dev)))
-		return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
-	else
-		return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
-}
-
-static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
 	u32 temp;
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -480,8 +471,8 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
 	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 }
 
-static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
-				  struct intel_crtc_state *config)
+int bxt_dsi_pll_compute(struct intel_encoder *encoder,
+			struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -528,8 +519,8 @@ static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
 	return 0;
 }
 
-static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
-				  const struct intel_crtc_state *config)
+void bxt_dsi_pll_enable(struct intel_encoder *encoder,
+			const struct intel_crtc_state *config)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -568,52 +559,7 @@ static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
 	DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
-bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
-{
-	if (IS_GEN9_LP(dev_priv))
-		return bxt_dsi_pll_is_enabled(dev_priv);
-
-	MISSING_CASE(INTEL_DEVID(dev_priv));
-
-	return false;
-}
-
-int intel_compute_dsi_pll(struct intel_encoder *encoder,
-			  struct intel_crtc_state *config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		return vlv_compute_dsi_pll(encoder, config);
-	else if (IS_GEN9_LP(dev_priv))
-		return gen9lp_compute_dsi_pll(encoder, config);
-
-	return -ENODEV;
-}
-
-void intel_enable_dsi_pll(struct intel_encoder *encoder,
-			  const struct intel_crtc_state *config)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		vlv_enable_dsi_pll(encoder, config);
-	else if (IS_GEN9_LP(dev_priv))
-		gen9lp_enable_dsi_pll(encoder, config);
-}
-
-void intel_disable_dsi_pll(struct intel_encoder *encoder)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		vlv_disable_dsi_pll(encoder);
-	else if (IS_GEN9_LP(dev_priv))
-		bxt_disable_dsi_pll(encoder);
-}
-
-static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
-				    enum port port)
+void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
 	u32 tmp;
 	struct drm_device *dev = encoder->base.dev;
@@ -638,13 +584,3 @@ static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
 	}
 	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
-
-void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
-{
-	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
-	if (IS_GEN9_LP(dev_priv))
-		gen9lp_dsi_reset_clocks(encoder, port);
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		vlv_dsi_reset_clocks(encoder, port);
-}