Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 5
-rw-r--r-- drivers/gpu/drm/i915/dvo_ivch.c | 21
-rw-r--r-- drivers/gpu/drm/i915/dvo_ns2501.c | 670
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 209
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 733
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 283
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 120
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 296
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 989
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 84
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_batch_pool.h | 42
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_debug.c | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 90
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 1118
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 121
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_userptr.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 30
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 329
-rw-r--r-- drivers/gpu/drm/i915/i915_params.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 610
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 44
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic.c | 184
-rw-r--r-- drivers/gpu/drm/i915/intel_atomic_plane.c | 48
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 183
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 17
-rw-r--r-- drivers/gpu/drm/i915/intel_csr.c | 467
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 912
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 3411
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 593
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 156
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_pll.c | 80
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_fbc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 28
-rw-r--r-- drivers/gpu/drm/i915/intel_frontbuffer.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 192
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 118
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 523
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 5
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 68
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 87
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 354
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 118
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 196
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 476
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 85
-rw-r--r-- drivers/gpu/drm/i915/intel_sideband.c | 18
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 374
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 98
60 files changed, 9980 insertions, 4888 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a69002e2257d..b7ddf48e1d75 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -2,8 +2,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm
-
# Please keep these build lists sorted!
# core driver code
@@ -12,7 +10,8 @@ i915-y := i915_drv.o \
i915_suspend.o \
i915_sysfs.o \
intel_pm.o \
- intel_runtime_pm.o
+ intel_runtime_pm.o \
+ intel_csr.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 0f2587ff347c..89b08a896d20 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -23,6 +23,9 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*
+ * Minor modifications (Dithering enable):
+ * Thomas Richter <thor@math.tu-berlin.de>
+ *
*/
#include "dvo.h"
@@ -59,6 +62,8 @@
# define VR01_DVO_BYPASS_ENABLE (1 << 1)
/** Enables the DVO clock */
# define VR01_DVO_ENABLE (1 << 0)
+/** Enable dithering for 18bpp panels. Not documented. */
+# define VR01_DITHER_ENABLE (1 << 4)
/*
* LCD Interface Format
@@ -74,6 +79,8 @@
# define VR10_INTERFACE_2X18 (2 << 2)
/** Enables 2x24-bit LVDS output */
# define VR10_INTERFACE_2X24 (3 << 2)
+/** Mask that defines the depth of the pipeline */
+# define VR10_INTERFACE_DEPTH_MASK (3 << 2)
/*
* VR20 LCD Horizontal Display Size
@@ -342,9 +349,15 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *adjusted_mode)
{
uint16_t vr40 = 0;
- uint16_t vr01;
+ uint16_t vr01 = 0;
+ uint16_t vr10;
+
+ ivch_read(dvo, VR10, &vr10);
+ /* Enable dithering for 18 bpp pipelines */
+ vr10 &= VR10_INTERFACE_DEPTH_MASK;
+ if (vr10 == VR10_INTERFACE_2X18 || vr10 == VR10_INTERFACE_1X18)
+ vr01 = VR01_DITHER_ENABLE;
- vr01 = 0;
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);
@@ -353,7 +366,7 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
uint16_t x_ratio, y_ratio;
vr01 |= VR01_PANEL_FIT_ENABLE;
- vr40 |= VR40_CLOCK_GATING_ENABLE;
+ vr40 |= VR40_CLOCK_GATING_ENABLE | VR40_ENHANCED_PANEL_FITTING;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
@@ -380,6 +393,8 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
DRM_DEBUG_KMS("VR00: 0x%04x\n", val);
ivch_read(dvo, VR01, &val);
DRM_DEBUG_KMS("VR01: 0x%04x\n", val);
+ ivch_read(dvo, VR10, &val);
+ DRM_DEBUG_KMS("VR10: 0x%04x\n", val);
ivch_read(dvo, VR30, &val);
DRM_DEBUG_KMS("VR30: 0x%04x\n", val);
ivch_read(dvo, VR40, &val);
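
For illustration, the dithering decision added to ivch_mode_set() above reduces to checking the panel interface depth field in VR10. Below is a minimal, self-contained C sketch of that check, not driver code: the helper name ivch_wants_dithering is made up, and the VR10_INTERFACE_1X18 value is assumed from the rest of dvo_ivch.c (only 2X18, 2X24 and the depth mask appear in this hunk).

#include <stdbool.h>
#include <stdint.h>

#define VR01_DITHER_ENABLE        (1 << 4)  /* bit set by ivch_mode_set() when dithering */
#define VR10_INTERFACE_1X18       (0 << 2)  /* assumed; defined elsewhere in dvo_ivch.c */
#define VR10_INTERFACE_2X18       (2 << 2)
#define VR10_INTERFACE_DEPTH_MASK (3 << 2)

/* Hypothetical helper: should VR01 get the dither bit for this VR10 value? */
static bool ivch_wants_dithering(uint16_t vr10)
{
	uint16_t depth = vr10 & VR10_INTERFACE_DEPTH_MASK;

	/* Only the 18bpp LVDS interfaces (1x18 / 2x18) are dithered. */
	return depth == VR10_INTERFACE_1X18 || depth == VR10_INTERFACE_2X18;
}

When this returns true, ivch_mode_set() in the hunk above seeds vr01 with VR01_DITHER_ENABLE instead of zero.
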
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 441630434d34..97ae8aa157e9 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -60,6 +60,130 @@
#define NS2501_REGC 0x0c
+/*
+ * The following registers are not part of the official datasheet
+ * and are the result of reverse engineering.
+ */
+
+/*
+ * Register c0 controls how the DVO synchronizes with
+ * its input.
+ */
+#define NS2501_REGC0 0xc0
+#define NS2501_C0_ENABLE (1<<0) /* enable the DVO sync in general */
+#define NS2501_C0_HSYNC (1<<1) /* synchronize horizontal with input */
+#define NS2501_C0_VSYNC (1<<2) /* synchronize vertical with input */
+#define NS2501_C0_RESET (1<<7) /* reset the synchronization flip/flops */
+
+/*
+ * Register 41 is somehow related to the sync register and sync
+ * configuration. It should be 0x32 whenever regC0 is 0x05 (hsync off)
+ * and 0x00 otherwise.
+ */
+#define NS2501_REG41 0x41
+
+/*
+ * this register controls the dithering of the DVO
+ * One bit enables it, the other define the dithering depth.
+ * The higher the value, the lower the dithering depth.
+ */
+#define NS2501_F9_REG 0xf9
+#define NS2501_F9_ENABLE (1<<0) /* if set, dithering is enabled */
+#define NS2501_F9_DITHER_MASK (0x7f<<1) /* controls the dither depth */
+#define NS2501_F9_DITHER_SHIFT 1 /* shifts the dither mask */
+
+/*
+ * PLL configuration register. This is a pair of registers,
+ * one single byte register at 1B, and a pair at 1C,1D.
+ * These registers are counters/dividers.
+ */
+#define NS2501_REG1B 0x1b /* one byte PLL control register */
+#define NS2501_REG1C 0x1c /* low-part of the second register */
+#define NS2501_REG1D 0x1d /* high-part of the second register */
+
+/*
+ * Scaler control registers. Horizontal at b8,b9,
+ * vertical at 10,11. The scale factor is computed as
+ * 2^16/control-value. The low-byte comes first.
+ */
+#define NS2501_REG10 0x10 /* low-byte vertical scaler */
+#define NS2501_REG11 0x11 /* high-byte vertical scaler */
+#define NS2501_REGB8 0xb8 /* low-byte horizontal scaler */
+#define NS2501_REGB9 0xb9 /* high-byte horizontal scaler */
+
+/*
+ * Display window definition. This consists of four registers
+ * per dimension. One register pair defines the start of the
+ * display, one the end.
+ * As far as I understand, this defines the window within which
+ * the scaler samples the input.
+ */
+#define NS2501_REGC1 0xc1 /* low-byte horizontal display start */
+#define NS2501_REGC2 0xc2 /* high-byte horizontal display start */
+#define NS2501_REGC3 0xc3 /* low-byte horizontal display stop */
+#define NS2501_REGC4 0xc4 /* high-byte horizontal display stop */
+#define NS2501_REGC5 0xc5 /* low-byte vertical display start */
+#define NS2501_REGC6 0xc6 /* high-byte vertical display start */
+#define NS2501_REGC7 0xc7 /* low-byte vertical display stop */
+#define NS2501_REGC8 0xc8 /* high-byte vertical display stop */
+
+/*
+ * The following register pair seems to define the start of
+ * the vertical sync. If automatic syncing is enabled, and the
+ * register value defines a sync pulse that is later than the
+ * incoming sync, then the register value is ignored and the
+ * external hsync triggers the synchronization.
+ */
+#define NS2501_REG80 0x80 /* low-byte vsync-start */
+#define NS2501_REG81 0x81 /* high-byte vsync-start */
+
+/*
+ * The following register pair seems to define the total number
+ * of lines created at the output side of the scaler.
+ * This is again a low-high register pair.
+ */
+#define NS2501_REG82 0x82 /* output display height, low byte */
+#define NS2501_REG83 0x83 /* output display height, high byte */
+
+/*
+ * The following registers define the end of the front-porch
+ * in horizontal and vertical position and hence allow shifting
+ * the image left/right or up/down.
+ */
+#define NS2501_REG98 0x98 /* horizontal start of display + 256, low */
+#define NS2501_REG99 0x99 /* horizontal start of display + 256, high */
+#define NS2501_REG8E 0x8e /* vertical start of the display, low byte */
+#define NS2501_REG8F 0x8f /* vertical start of the display, high byte */
+
+/*
+ * The following register pair control the function of the
+ * backlight and the DVO output. To enable the corresponding
+ * function, the corresponding bit must be set in both registers.
+ */
+#define NS2501_REG34 0x34 /* DVO enable functions, first register */
+#define NS2501_REG35 0x35 /* DVO enable functions, second register */
+#define NS2501_34_ENABLE_OUTPUT (1<<0) /* enable DVO output */
+#define NS2501_34_ENABLE_BACKLIGHT (1<<1) /* enable backlight */
+
+/*
+ * Registers 9C and 9D define the vertical output offset
+ * of the visible region.
+ */
+#define NS2501_REG9C 0x9c
+#define NS2501_REG9D 0x9d
+
+/*
+ * Register F9 defines the dithering. This requires the
+ * scaler to be ON. Bit 0 enables dithering, the remaining
+ * bits control the depth of the dither. The higher the value,
+ * the LOWER the dithering amplitude. A good value seems to be
+ * 15 (total register value).
+ */
+#define NS2501_REGF9 0xf9
+#define NS2501_F9_ENABLE_DITHER (1<<0) /* enable dithering */
+#define NS2501_F9_DITHER_MASK (0x7f<<1) /* dither masking */
+#define NS2501_F9_DITHER_SHIFT 1 /* upshift of the dither mask */
+
enum {
MODE_640x480,
MODE_800x600,
@@ -72,274 +196,178 @@ struct ns2501_reg {
};
/*
- * Magic values based on what the BIOS on
- * Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
+ * The following structure holds the complete configuration of
+ * the DVO for a specific output mode.
+ * This is pretty much guess-work from reverse-engineering, so
+ * read all this with a grain of salt.
+ */
+struct ns2501_configuration {
+ uint8_t sync; /* configuration of the C0 register */
+ uint8_t conf; /* configuration register 8 */
+ uint8_t syncb; /* configuration register 41 */
+ uint8_t dither; /* configuration of the dithering */
+ uint8_t pll_a; /* PLL configuration, register A, 1B */
+ uint16_t pll_b; /* PLL configuration, register B, 1C/1D */
+ uint16_t hstart; /* horizontal start, registers C1/C2 */
+ uint16_t hstop; /* horizontal total, registers C3/C4 */
+ uint16_t vstart; /* vertical start, registers C5/C6 */
+ uint16_t vstop; /* vertical total, registers C7/C8 */
+ uint16_t vsync; /* manual vertical sync start, 80/81 */
+ uint16_t vtotal; /* number of lines generated, 82/83 */
+ uint16_t hpos; /* horizontal position + 256, 98/99 */
+ uint16_t vpos; /* vertical position, 8e/8f */
+ uint16_t voffs; /* vertical output offset, 9c/9d */
+ uint16_t hscale; /* horizontal scaling factor, b8/b9 */
+ uint16_t vscale; /* vertical scaling factor, 10/11 */
+};
+
+/*
+ * DVO configuration values, partially based on what the BIOS
+ * of the Fujitsu Lifebook S6010 writes into registers,
+ * partially found by manual tweaking. These configurations assume
+ * a 1024x768 panel.
*/
-static const struct ns2501_reg regs_1024x768[][86] = {
+static const struct ns2501_configuration ns2501_modes[] = {
[MODE_640x480] = {
- [0] = { .offset = 0x0a, .value = 0x81, },
- [1] = { .offset = 0x18, .value = 0x07, },
- [2] = { .offset = 0x19, .value = 0x00, },
- [3] = { .offset = 0x1a, .value = 0x00, },
- [4] = { .offset = 0x1b, .value = 0x11, },
- [5] = { .offset = 0x1c, .value = 0x54, },
- [6] = { .offset = 0x1d, .value = 0x03, },
- [7] = { .offset = 0x1e, .value = 0x02, },
- [8] = { .offset = 0xf3, .value = 0x90, },
- [9] = { .offset = 0xf9, .value = 0x00, },
- [10] = { .offset = 0xc1, .value = 0x90, },
- [11] = { .offset = 0xc2, .value = 0x00, },
- [12] = { .offset = 0xc3, .value = 0x0f, },
- [13] = { .offset = 0xc4, .value = 0x03, },
- [14] = { .offset = 0xc5, .value = 0x16, },
- [15] = { .offset = 0xc6, .value = 0x00, },
- [16] = { .offset = 0xc7, .value = 0x02, },
- [17] = { .offset = 0xc8, .value = 0x02, },
- [18] = { .offset = 0xf4, .value = 0x00, },
- [19] = { .offset = 0x80, .value = 0xff, },
- [20] = { .offset = 0x81, .value = 0x07, },
- [21] = { .offset = 0x82, .value = 0x3d, },
- [22] = { .offset = 0x83, .value = 0x05, },
- [23] = { .offset = 0x94, .value = 0x00, },
- [24] = { .offset = 0x95, .value = 0x00, },
- [25] = { .offset = 0x96, .value = 0x05, },
- [26] = { .offset = 0x97, .value = 0x00, },
- [27] = { .offset = 0x9a, .value = 0x88, },
- [28] = { .offset = 0x9b, .value = 0x00, },
- [29] = { .offset = 0x98, .value = 0x00, },
- [30] = { .offset = 0x99, .value = 0x00, },
- [31] = { .offset = 0xf7, .value = 0x88, },
- [32] = { .offset = 0xf8, .value = 0x0a, },
- [33] = { .offset = 0x9c, .value = 0x24, },
- [34] = { .offset = 0x9d, .value = 0x00, },
- [35] = { .offset = 0x9e, .value = 0x25, },
- [36] = { .offset = 0x9f, .value = 0x03, },
- [37] = { .offset = 0xa0, .value = 0x28, },
- [38] = { .offset = 0xa1, .value = 0x01, },
- [39] = { .offset = 0xa2, .value = 0x28, },
- [40] = { .offset = 0xa3, .value = 0x05, },
- [41] = { .offset = 0xb6, .value = 0x09, },
- [42] = { .offset = 0xb8, .value = 0x00, },
- [43] = { .offset = 0xb9, .value = 0xa0, },
- [44] = { .offset = 0xba, .value = 0x00, },
- [45] = { .offset = 0xbb, .value = 0x20, },
- [46] = { .offset = 0x10, .value = 0x00, },
- [47] = { .offset = 0x11, .value = 0xa0, },
- [48] = { .offset = 0x12, .value = 0x02, },
- [49] = { .offset = 0x20, .value = 0x00, },
- [50] = { .offset = 0x22, .value = 0x00, },
- [51] = { .offset = 0x23, .value = 0x00, },
- [52] = { .offset = 0x24, .value = 0x00, },
- [53] = { .offset = 0x25, .value = 0x00, },
- [54] = { .offset = 0x8c, .value = 0x10, },
- [55] = { .offset = 0x8d, .value = 0x02, },
- [56] = { .offset = 0x8e, .value = 0x10, },
- [57] = { .offset = 0x8f, .value = 0x00, },
- [58] = { .offset = 0x90, .value = 0xff, },
- [59] = { .offset = 0x91, .value = 0x07, },
- [60] = { .offset = 0x92, .value = 0xa0, },
- [61] = { .offset = 0x93, .value = 0x02, },
- [62] = { .offset = 0xa5, .value = 0x00, },
- [63] = { .offset = 0xa6, .value = 0x00, },
- [64] = { .offset = 0xa7, .value = 0x00, },
- [65] = { .offset = 0xa8, .value = 0x00, },
- [66] = { .offset = 0xa9, .value = 0x04, },
- [67] = { .offset = 0xaa, .value = 0x70, },
- [68] = { .offset = 0xab, .value = 0x4f, },
- [69] = { .offset = 0xac, .value = 0x00, },
- [70] = { .offset = 0xa4, .value = 0x84, },
- [71] = { .offset = 0x7e, .value = 0x18, },
- [72] = { .offset = 0x84, .value = 0x00, },
- [73] = { .offset = 0x85, .value = 0x00, },
- [74] = { .offset = 0x86, .value = 0x00, },
- [75] = { .offset = 0x87, .value = 0x00, },
- [76] = { .offset = 0x88, .value = 0x00, },
- [77] = { .offset = 0x89, .value = 0x00, },
- [78] = { .offset = 0x8a, .value = 0x00, },
- [79] = { .offset = 0x8b, .value = 0x00, },
- [80] = { .offset = 0x26, .value = 0x00, },
- [81] = { .offset = 0x27, .value = 0x00, },
- [82] = { .offset = 0xad, .value = 0x00, },
- [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
- [84] = { .offset = 0x41, .value = 0x00, },
- [85] = { .offset = 0xc0, .value = 0x05, },
+ .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x32,
+ .dither = 0x0f,
+ .pll_a = 17,
+ .pll_b = 852,
+ .hstart = 144,
+ .hstop = 783,
+ .vstart = 22,
+ .vstop = 514,
+ .vsync = 2047, /* actually, ignored with this config */
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 16,
+ .voffs = 36,
+ .hscale = 40960,
+ .vscale = 40960
},
[MODE_800x600] = {
- [0] = { .offset = 0x0a, .value = 0x81, },
- [1] = { .offset = 0x18, .value = 0x07, },
- [2] = { .offset = 0x19, .value = 0x00, },
- [3] = { .offset = 0x1a, .value = 0x00, },
- [4] = { .offset = 0x1b, .value = 0x19, },
- [5] = { .offset = 0x1c, .value = 0x64, },
- [6] = { .offset = 0x1d, .value = 0x02, },
- [7] = { .offset = 0x1e, .value = 0x02, },
- [8] = { .offset = 0xf3, .value = 0x90, },
- [9] = { .offset = 0xf9, .value = 0x00, },
- [10] = { .offset = 0xc1, .value = 0xd7, },
- [11] = { .offset = 0xc2, .value = 0x00, },
- [12] = { .offset = 0xc3, .value = 0xf8, },
- [13] = { .offset = 0xc4, .value = 0x03, },
- [14] = { .offset = 0xc5, .value = 0x1a, },
- [15] = { .offset = 0xc6, .value = 0x00, },
- [16] = { .offset = 0xc7, .value = 0x73, },
- [17] = { .offset = 0xc8, .value = 0x02, },
- [18] = { .offset = 0xf4, .value = 0x00, },
- [19] = { .offset = 0x80, .value = 0x27, },
- [20] = { .offset = 0x81, .value = 0x03, },
- [21] = { .offset = 0x82, .value = 0x41, },
- [22] = { .offset = 0x83, .value = 0x05, },
- [23] = { .offset = 0x94, .value = 0x00, },
- [24] = { .offset = 0x95, .value = 0x00, },
- [25] = { .offset = 0x96, .value = 0x05, },
- [26] = { .offset = 0x97, .value = 0x00, },
- [27] = { .offset = 0x9a, .value = 0x88, },
- [28] = { .offset = 0x9b, .value = 0x00, },
- [29] = { .offset = 0x98, .value = 0x00, },
- [30] = { .offset = 0x99, .value = 0x00, },
- [31] = { .offset = 0xf7, .value = 0x88, },
- [32] = { .offset = 0xf8, .value = 0x06, },
- [33] = { .offset = 0x9c, .value = 0x23, },
- [34] = { .offset = 0x9d, .value = 0x00, },
- [35] = { .offset = 0x9e, .value = 0x25, },
- [36] = { .offset = 0x9f, .value = 0x03, },
- [37] = { .offset = 0xa0, .value = 0x28, },
- [38] = { .offset = 0xa1, .value = 0x01, },
- [39] = { .offset = 0xa2, .value = 0x28, },
- [40] = { .offset = 0xa3, .value = 0x05, },
- [41] = { .offset = 0xb6, .value = 0x09, },
- [42] = { .offset = 0xb8, .value = 0x30, },
- [43] = { .offset = 0xb9, .value = 0xc8, },
- [44] = { .offset = 0xba, .value = 0x00, },
- [45] = { .offset = 0xbb, .value = 0x20, },
- [46] = { .offset = 0x10, .value = 0x20, },
- [47] = { .offset = 0x11, .value = 0xc8, },
- [48] = { .offset = 0x12, .value = 0x02, },
- [49] = { .offset = 0x20, .value = 0x00, },
- [50] = { .offset = 0x22, .value = 0x00, },
- [51] = { .offset = 0x23, .value = 0x00, },
- [52] = { .offset = 0x24, .value = 0x00, },
- [53] = { .offset = 0x25, .value = 0x00, },
- [54] = { .offset = 0x8c, .value = 0x10, },
- [55] = { .offset = 0x8d, .value = 0x02, },
- [56] = { .offset = 0x8e, .value = 0x04, },
- [57] = { .offset = 0x8f, .value = 0x00, },
- [58] = { .offset = 0x90, .value = 0xff, },
- [59] = { .offset = 0x91, .value = 0x07, },
- [60] = { .offset = 0x92, .value = 0xa0, },
- [61] = { .offset = 0x93, .value = 0x02, },
- [62] = { .offset = 0xa5, .value = 0x00, },
- [63] = { .offset = 0xa6, .value = 0x00, },
- [64] = { .offset = 0xa7, .value = 0x00, },
- [65] = { .offset = 0xa8, .value = 0x00, },
- [66] = { .offset = 0xa9, .value = 0x83, },
- [67] = { .offset = 0xaa, .value = 0x40, },
- [68] = { .offset = 0xab, .value = 0x32, },
- [69] = { .offset = 0xac, .value = 0x00, },
- [70] = { .offset = 0xa4, .value = 0x80, },
- [71] = { .offset = 0x7e, .value = 0x18, },
- [72] = { .offset = 0x84, .value = 0x00, },
- [73] = { .offset = 0x85, .value = 0x00, },
- [74] = { .offset = 0x86, .value = 0x00, },
- [75] = { .offset = 0x87, .value = 0x00, },
- [76] = { .offset = 0x88, .value = 0x00, },
- [77] = { .offset = 0x89, .value = 0x00, },
- [78] = { .offset = 0x8a, .value = 0x00, },
- [79] = { .offset = 0x8b, .value = 0x00, },
- [80] = { .offset = 0x26, .value = 0x00, },
- [81] = { .offset = 0x27, .value = 0x00, },
- [82] = { .offset = 0xad, .value = 0x00, },
- [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
- [84] = { .offset = 0x41, .value = 0x00, },
- [85] = { .offset = 0xc0, .value = 0x07, },
+ .sync = NS2501_C0_ENABLE |
+ NS2501_C0_HSYNC | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x00,
+ .dither = 0x0f,
+ .pll_a = 25,
+ .pll_b = 612,
+ .hstart = 215,
+ .hstop = 1016,
+ .vstart = 26,
+ .vstop = 627,
+ .vsync = 807,
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 4,
+ .voffs = 35,
+ .hscale = 51248,
+ .vscale = 51232
},
[MODE_1024x768] = {
- [0] = { .offset = 0x0a, .value = 0x81, },
- [1] = { .offset = 0x18, .value = 0x07, },
- [2] = { .offset = 0x19, .value = 0x00, },
- [3] = { .offset = 0x1a, .value = 0x00, },
- [4] = { .offset = 0x1b, .value = 0x11, },
- [5] = { .offset = 0x1c, .value = 0x54, },
- [6] = { .offset = 0x1d, .value = 0x03, },
- [7] = { .offset = 0x1e, .value = 0x02, },
- [8] = { .offset = 0xf3, .value = 0x90, },
- [9] = { .offset = 0xf9, .value = 0x00, },
- [10] = { .offset = 0xc1, .value = 0x90, },
- [11] = { .offset = 0xc2, .value = 0x00, },
- [12] = { .offset = 0xc3, .value = 0x0f, },
- [13] = { .offset = 0xc4, .value = 0x03, },
- [14] = { .offset = 0xc5, .value = 0x16, },
- [15] = { .offset = 0xc6, .value = 0x00, },
- [16] = { .offset = 0xc7, .value = 0x02, },
- [17] = { .offset = 0xc8, .value = 0x02, },
- [18] = { .offset = 0xf4, .value = 0x00, },
- [19] = { .offset = 0x80, .value = 0xff, },
- [20] = { .offset = 0x81, .value = 0x07, },
- [21] = { .offset = 0x82, .value = 0x3d, },
- [22] = { .offset = 0x83, .value = 0x05, },
- [23] = { .offset = 0x94, .value = 0x00, },
- [24] = { .offset = 0x95, .value = 0x00, },
- [25] = { .offset = 0x96, .value = 0x05, },
- [26] = { .offset = 0x97, .value = 0x00, },
- [27] = { .offset = 0x9a, .value = 0x88, },
- [28] = { .offset = 0x9b, .value = 0x00, },
- [29] = { .offset = 0x98, .value = 0x00, },
- [30] = { .offset = 0x99, .value = 0x00, },
- [31] = { .offset = 0xf7, .value = 0x88, },
- [32] = { .offset = 0xf8, .value = 0x0a, },
- [33] = { .offset = 0x9c, .value = 0x24, },
- [34] = { .offset = 0x9d, .value = 0x00, },
- [35] = { .offset = 0x9e, .value = 0x25, },
- [36] = { .offset = 0x9f, .value = 0x03, },
- [37] = { .offset = 0xa0, .value = 0x28, },
- [38] = { .offset = 0xa1, .value = 0x01, },
- [39] = { .offset = 0xa2, .value = 0x28, },
- [40] = { .offset = 0xa3, .value = 0x05, },
- [41] = { .offset = 0xb6, .value = 0x09, },
- [42] = { .offset = 0xb8, .value = 0x00, },
- [43] = { .offset = 0xb9, .value = 0xa0, },
- [44] = { .offset = 0xba, .value = 0x00, },
- [45] = { .offset = 0xbb, .value = 0x20, },
- [46] = { .offset = 0x10, .value = 0x00, },
- [47] = { .offset = 0x11, .value = 0xa0, },
- [48] = { .offset = 0x12, .value = 0x02, },
- [49] = { .offset = 0x20, .value = 0x00, },
- [50] = { .offset = 0x22, .value = 0x00, },
- [51] = { .offset = 0x23, .value = 0x00, },
- [52] = { .offset = 0x24, .value = 0x00, },
- [53] = { .offset = 0x25, .value = 0x00, },
- [54] = { .offset = 0x8c, .value = 0x10, },
- [55] = { .offset = 0x8d, .value = 0x02, },
- [56] = { .offset = 0x8e, .value = 0x10, },
- [57] = { .offset = 0x8f, .value = 0x00, },
- [58] = { .offset = 0x90, .value = 0xff, },
- [59] = { .offset = 0x91, .value = 0x07, },
- [60] = { .offset = 0x92, .value = 0xa0, },
- [61] = { .offset = 0x93, .value = 0x02, },
- [62] = { .offset = 0xa5, .value = 0x00, },
- [63] = { .offset = 0xa6, .value = 0x00, },
- [64] = { .offset = 0xa7, .value = 0x00, },
- [65] = { .offset = 0xa8, .value = 0x00, },
- [66] = { .offset = 0xa9, .value = 0x04, },
- [67] = { .offset = 0xaa, .value = 0x70, },
- [68] = { .offset = 0xab, .value = 0x4f, },
- [69] = { .offset = 0xac, .value = 0x00, },
- [70] = { .offset = 0xa4, .value = 0x84, },
- [71] = { .offset = 0x7e, .value = 0x18, },
- [72] = { .offset = 0x84, .value = 0x00, },
- [73] = { .offset = 0x85, .value = 0x00, },
- [74] = { .offset = 0x86, .value = 0x00, },
- [75] = { .offset = 0x87, .value = 0x00, },
- [76] = { .offset = 0x88, .value = 0x00, },
- [77] = { .offset = 0x89, .value = 0x00, },
- [78] = { .offset = 0x8a, .value = 0x00, },
- [79] = { .offset = 0x8b, .value = 0x00, },
- [80] = { .offset = 0x26, .value = 0x00, },
- [81] = { .offset = 0x27, .value = 0x00, },
- [82] = { .offset = 0xad, .value = 0x00, },
- [83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
- [84] = { .offset = 0x41, .value = 0x00, },
- [85] = { .offset = 0xc0, .value = 0x01, },
- },
+ .sync = NS2501_C0_ENABLE | NS2501_C0_VSYNC,
+ .conf = NS2501_8_VEN | NS2501_8_HEN | NS2501_8_PD,
+ .syncb = 0x32,
+ .dither = 0x0f,
+ .pll_a = 11,
+ .pll_b = 1350,
+ .hstart = 276,
+ .hstop = 1299,
+ .vstart = 15,
+ .vstop = 1056,
+ .vsync = 2047,
+ .vtotal = 1341,
+ .hpos = 0,
+ .vpos = 7,
+ .voffs = 27,
+ .hscale = 65535,
+ .vscale = 65535
+ }
+};
+
+/*
+ * Other configuration values left by the BIOS of the
+ * Fujitsu S6010 in the DVO control registers. Their
+ * value does not depend on the mode and their meaning
+ * is unknown.
+ */
+
+static const struct ns2501_reg mode_agnostic_values[] = {
+ /* 08 is mode specific */
+ [0] = { .offset = 0x0a, .value = 0x81, },
+ /* 10,11 are part of the mode specific configuration */
+ [1] = { .offset = 0x12, .value = 0x02, },
+ [2] = { .offset = 0x18, .value = 0x07, },
+ [3] = { .offset = 0x19, .value = 0x00, },
+ [4] = { .offset = 0x1a, .value = 0x00, }, /* PLL?, ignored */
+ /* 1b,1c,1d are part of the mode specific configuration */
+ [5] = { .offset = 0x1e, .value = 0x02, },
+ [6] = { .offset = 0x1f, .value = 0x40, },
+ [7] = { .offset = 0x20, .value = 0x00, },
+ [8] = { .offset = 0x21, .value = 0x00, },
+ [9] = { .offset = 0x22, .value = 0x00, },
+ [10] = { .offset = 0x23, .value = 0x00, },
+ [11] = { .offset = 0x24, .value = 0x00, },
+ [12] = { .offset = 0x25, .value = 0x00, },
+ [13] = { .offset = 0x26, .value = 0x00, },
+ [14] = { .offset = 0x27, .value = 0x00, },
+ [15] = { .offset = 0x7e, .value = 0x18, },
+ /* 80-84 are part of the mode-specific configuration */
+ [16] = { .offset = 0x84, .value = 0x00, },
+ [17] = { .offset = 0x85, .value = 0x00, },
+ [18] = { .offset = 0x86, .value = 0x00, },
+ [19] = { .offset = 0x87, .value = 0x00, },
+ [20] = { .offset = 0x88, .value = 0x00, },
+ [21] = { .offset = 0x89, .value = 0x00, },
+ [22] = { .offset = 0x8a, .value = 0x00, },
+ [23] = { .offset = 0x8b, .value = 0x00, },
+ [24] = { .offset = 0x8c, .value = 0x10, },
+ [25] = { .offset = 0x8d, .value = 0x02, },
+ /* 8e,8f are part of the mode-specific configuration */
+ [26] = { .offset = 0x90, .value = 0xff, },
+ [27] = { .offset = 0x91, .value = 0x07, },
+ [28] = { .offset = 0x92, .value = 0xa0, },
+ [29] = { .offset = 0x93, .value = 0x02, },
+ [30] = { .offset = 0x94, .value = 0x00, },
+ [31] = { .offset = 0x95, .value = 0x00, },
+ [32] = { .offset = 0x96, .value = 0x05, },
+ [33] = { .offset = 0x97, .value = 0x00, },
+ /* 98,99 are part of the mode-specific configuration */
+ [34] = { .offset = 0x9a, .value = 0x88, },
+ [35] = { .offset = 0x9b, .value = 0x00, },
+ /* 9c,9d are part of the mode-specific configuration */
+ [36] = { .offset = 0x9e, .value = 0x25, },
+ [37] = { .offset = 0x9f, .value = 0x03, },
+ [38] = { .offset = 0xa0, .value = 0x28, },
+ [39] = { .offset = 0xa1, .value = 0x01, },
+ [40] = { .offset = 0xa2, .value = 0x28, },
+ [41] = { .offset = 0xa3, .value = 0x05, },
+ /* register 0xa4 is mode specific, but 0x80..0x84 works always */
+ [42] = { .offset = 0xa4, .value = 0x84, },
+ [43] = { .offset = 0xa5, .value = 0x00, },
+ [44] = { .offset = 0xa6, .value = 0x00, },
+ [45] = { .offset = 0xa7, .value = 0x00, },
+ [46] = { .offset = 0xa8, .value = 0x00, },
+ /* 0xa9 to 0xab are mode specific, but have no visible effect */
+ [47] = { .offset = 0xa9, .value = 0x04, },
+ [48] = { .offset = 0xaa, .value = 0x70, },
+ [49] = { .offset = 0xab, .value = 0x4f, },
+ [50] = { .offset = 0xac, .value = 0x00, },
+ [51] = { .offset = 0xad, .value = 0x00, },
+ [52] = { .offset = 0xb6, .value = 0x09, },
+ [53] = { .offset = 0xb7, .value = 0x03, },
+ /* b8,b9 are part of the mode-specific configuration */
+ [54] = { .offset = 0xba, .value = 0x00, },
+ [55] = { .offset = 0xbb, .value = 0x20, },
+ [56] = { .offset = 0xf3, .value = 0x90, },
+ [57] = { .offset = 0xf4, .value = 0x00, },
+ [58] = { .offset = 0xf7, .value = 0x88, },
+ /* f8 is mode specific, but the value does not matter */
+ [59] = { .offset = 0xf8, .value = 0x0a, },
+ [60] = { .offset = 0xf9, .value = 0x00, }
};
static const struct ns2501_reg regs_init[] = {
@@ -350,25 +378,12 @@ static const struct ns2501_reg regs_init[] = {
struct ns2501_priv {
bool quiet;
- const struct ns2501_reg *regs;
+ const struct ns2501_configuration *conf;
};
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
/*
- * For reasons unclear to me, the ns2501 at least on the Fujitsu/Siemens
- * laptops does not react on the i2c bus unless
- * both the PLL is running and the display is configured in its native
- * resolution.
- * This function forces the DVO on, and stores the registers it touches.
- * Afterwards, registers are restored to regular values.
- *
- * This is pretty much a hack, though it works.
- * Without that, ns2501_readb and ns2501_writeb fail
- * when switching the resolution.
- */
-
-/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
@@ -534,6 +549,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ const struct ns2501_configuration *conf;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
int mode_idx, i;
@@ -541,6 +557,36 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
+ DRM_DEBUG_KMS("Detailed requested mode settings are:\n"
+ "clock : %d kHz\n"
+ "hdisplay : %d\n"
+ "hblank start : %d\n"
+ "hblank end : %d\n"
+ "hsync start : %d\n"
+ "hsync end : %d\n"
+ "htotal : %d\n"
+ "hskew : %d\n"
+ "vdisplay : %d\n"
+ "vblank start : %d\n"
+ "hblank end : %d\n"
+ "vsync start : %d\n"
+ "vsync end : %d\n"
+ "vtotal : %d\n",
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_hblank_start,
+ adjusted_mode->crtc_hblank_end,
+ adjusted_mode->crtc_hsync_start,
+ adjusted_mode->crtc_hsync_end,
+ adjusted_mode->crtc_htotal,
+ adjusted_mode->crtc_hskew,
+ adjusted_mode->crtc_vdisplay,
+ adjusted_mode->crtc_vblank_start,
+ adjusted_mode->crtc_vblank_end,
+ adjusted_mode->crtc_vsync_start,
+ adjusted_mode->crtc_vsync_end,
+ adjusted_mode->crtc_vtotal);
+
if (mode->hdisplay == 640 && mode->vdisplay == 480)
mode_idx = MODE_640x480;
else if (mode->hdisplay == 800 && mode->vdisplay == 600)
@@ -554,10 +600,44 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
for (i = 0; i < ARRAY_SIZE(regs_init); i++)
ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
- ns->regs = regs_1024x768[mode_idx];
-
- for (i = 0; i < 84; i++)
- ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
+ /* Write the mode-agnostic values */
+ for (i = 0; i < ARRAY_SIZE(mode_agnostic_values); i++)
+ ns2501_writeb(dvo, mode_agnostic_values[i].offset,
+ mode_agnostic_values[i].value);
+
+ /* Write now the mode-specific configuration */
+ conf = ns2501_modes + mode_idx;
+ ns->conf = conf;
+
+ ns2501_writeb(dvo, NS2501_REG8, conf->conf);
+ ns2501_writeb(dvo, NS2501_REG1B, conf->pll_a);
+ ns2501_writeb(dvo, NS2501_REG1C, conf->pll_b & 0xff);
+ ns2501_writeb(dvo, NS2501_REG1D, conf->pll_b >> 8);
+ ns2501_writeb(dvo, NS2501_REGC1, conf->hstart & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC2, conf->hstart >> 8);
+ ns2501_writeb(dvo, NS2501_REGC3, conf->hstop & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC4, conf->hstop >> 8);
+ ns2501_writeb(dvo, NS2501_REGC5, conf->vstart & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC6, conf->vstart >> 8);
+ ns2501_writeb(dvo, NS2501_REGC7, conf->vstop & 0xff);
+ ns2501_writeb(dvo, NS2501_REGC8, conf->vstop >> 8);
+ ns2501_writeb(dvo, NS2501_REG80, conf->vsync & 0xff);
+ ns2501_writeb(dvo, NS2501_REG81, conf->vsync >> 8);
+ ns2501_writeb(dvo, NS2501_REG82, conf->vtotal & 0xff);
+ ns2501_writeb(dvo, NS2501_REG83, conf->vtotal >> 8);
+ ns2501_writeb(dvo, NS2501_REG98, conf->hpos & 0xff);
+ ns2501_writeb(dvo, NS2501_REG99, conf->hpos >> 8);
+ ns2501_writeb(dvo, NS2501_REG8E, conf->vpos & 0xff);
+ ns2501_writeb(dvo, NS2501_REG8F, conf->vpos >> 8);
+ ns2501_writeb(dvo, NS2501_REG9C, conf->voffs & 0xff);
+ ns2501_writeb(dvo, NS2501_REG9D, conf->voffs >> 8);
+ ns2501_writeb(dvo, NS2501_REGB8, conf->hscale & 0xff);
+ ns2501_writeb(dvo, NS2501_REGB9, conf->hscale >> 8);
+ ns2501_writeb(dvo, NS2501_REG10, conf->vscale & 0xff);
+ ns2501_writeb(dvo, NS2501_REG11, conf->vscale >> 8);
+ ns2501_writeb(dvo, NS2501_REGF9, conf->dither);
+ ns2501_writeb(dvo, NS2501_REG41, conf->syncb);
+ ns2501_writeb(dvo, NS2501_REGC0, conf->sync);
}
/* set the NS2501 power state */
@@ -579,34 +659,32 @@ static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
if (enable) {
- if (WARN_ON(ns->regs[83].offset != 0x08 ||
- ns->regs[84].offset != 0x41 ||
- ns->regs[85].offset != 0xc0))
- return;
-
- ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
+ ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync | 0x08);
- ns2501_writeb(dvo, 0x41, ns->regs[84].value);
+ ns2501_writeb(dvo, NS2501_REG41, ns->conf->syncb);
- ns2501_writeb(dvo, 0x34, 0x01);
+ ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
msleep(15);
- ns2501_writeb(dvo, 0x08, 0x35);
- if (!(ns->regs[83].value & NS2501_8_BPAS))
- ns2501_writeb(dvo, 0x08, 0x31);
+ ns2501_writeb(dvo, NS2501_REG8,
+ ns->conf->conf | NS2501_8_BPAS);
+ if (!(ns->conf->conf & NS2501_8_BPAS))
+ ns2501_writeb(dvo, NS2501_REG8, ns->conf->conf);
msleep(200);
- ns2501_writeb(dvo, 0x34, 0x03);
+ ns2501_writeb(dvo, NS2501_REG34,
+ NS2501_34_ENABLE_OUTPUT | NS2501_34_ENABLE_BACKLIGHT);
- ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
+ ns2501_writeb(dvo, NS2501_REGC0, ns->conf->sync);
} else {
- ns2501_writeb(dvo, 0x34, 0x01);
+ ns2501_writeb(dvo, NS2501_REG34, NS2501_34_ENABLE_OUTPUT);
msleep(200);
- ns2501_writeb(dvo, 0x08, 0x34);
+ ns2501_writeb(dvo, NS2501_REG8, NS2501_8_VEN | NS2501_8_HEN |
+ NS2501_8_BPAS);
msleep(15);
- ns2501_writeb(dvo, 0x34, 0x00);
+ ns2501_writeb(dvo, NS2501_REG34, 0x00);
}
}
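
A note on the scaler arithmetic documented above: "the scale factor is computed as 2^16/control-value" matches the mode table, e.g. the 640x480 entry uses hscale = 40960 = 65536*640/1024, and the 1024x768 entry is capped at 65535 for 1:1 scan-out; the 800x600 values (51248/51232) sit slightly off the arithmetic 51200, consistent with the "manual tweaking" remark. Below is a minimal sketch of that computation and of the low/high byte split used by ns2501_mode_set(), with hypothetical helper names; it is not driver code.

#include <stdint.h>

/* Control value that scales 'in' source pixels onto 'out' panel pixels. */
static uint16_t ns2501_scale_control(uint32_t in, uint32_t out)
{
	/* scale factor = 2^16 / control  =>  control = 2^16 * in / out */
	uint32_t control = (uint32_t)(((uint64_t)in << 16) / out);

	/* The mode table caps the 1:1 case at 0xffff (see hscale/vscale above). */
	return control > 0xffff ? 0xffff : (uint16_t)control;
}

/* Split a 16-bit value the way ns2501_mode_set() programs register pairs. */
static void ns2501_split(uint16_t v, uint8_t *lo, uint8_t *hi)
{
	*lo = v & 0xff;	/* low register first, e.g. REGB8 or REG10 */
	*hi = v >> 8;	/* then the high register, e.g. REGB9 or REG11 */
}
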
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 61ae8ff4eaed..306d9e4e5cf3 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -123,7 +123,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
- .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+ .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_STORE_REGISTER_MEM(1), SMI, !F, 0xFF, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
@@ -395,16 +395,38 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
/*
* Register whitelists, sorted by increasing register offset.
+ */
+
+/*
+ * An individual whitelist entry granting access to register addr. If
+ * mask is non-zero the argument of immediate register writes will be
+ * AND-ed with mask, and the command will be rejected if the result
+ * doesn't match value.
+ *
+ * Registers with non-zero mask are only allowed to be written using
+ * LRI.
+ */
+struct drm_i915_reg_descriptor {
+ u32 addr;
+ u32 mask;
+ u32 value;
+};
+
+/* Convenience macro for adding 32-bit registers. */
+#define REG32(address, ...) \
+ { .addr = address, __VA_ARGS__ }
+
+/*
+ * Convenience macro for adding 64-bit registers.
*
* Some registers that userspace accesses are 64 bits. The register
* access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers.
*/
+#define REG64(addr) \
+ REG32(addr), REG32(addr + sizeof(u32))
-/* Convenience macro for adding 64-bit registers */
-#define REG64(addr) (addr), (addr + sizeof(u32))
-
-static const u32 gen7_render_regs[] = {
+static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED),
REG64(HS_INVOCATION_COUNT),
REG64(DS_INVOCATION_COUNT),
@@ -417,15 +439,15 @@ static const u32 gen7_render_regs[] = {
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
- OACONTROL, /* Only allowed for LRI and SRM. See below. */
+ REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
- GEN7_3DPRIM_END_OFFSET,
- GEN7_3DPRIM_START_VERTEX,
- GEN7_3DPRIM_VERTEX_COUNT,
- GEN7_3DPRIM_INSTANCE_COUNT,
- GEN7_3DPRIM_START_INSTANCE,
- GEN7_3DPRIM_BASE_VERTEX,
+ REG32(GEN7_3DPRIM_END_OFFSET),
+ REG32(GEN7_3DPRIM_START_VERTEX),
+ REG32(GEN7_3DPRIM_VERTEX_COUNT),
+ REG32(GEN7_3DPRIM_INSTANCE_COUNT),
+ REG32(GEN7_3DPRIM_START_INSTANCE),
+ REG32(GEN7_3DPRIM_BASE_VERTEX),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
@@ -434,33 +456,41 @@ static const u32 gen7_render_regs[] = {
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
- GEN7_SO_WRITE_OFFSET(0),
- GEN7_SO_WRITE_OFFSET(1),
- GEN7_SO_WRITE_OFFSET(2),
- GEN7_SO_WRITE_OFFSET(3),
- GEN7_L3SQCREG1,
- GEN7_L3CNTLREG2,
- GEN7_L3CNTLREG3,
+ REG32(GEN7_SO_WRITE_OFFSET(0)),
+ REG32(GEN7_SO_WRITE_OFFSET(1)),
+ REG32(GEN7_SO_WRITE_OFFSET(2)),
+ REG32(GEN7_SO_WRITE_OFFSET(3)),
+ REG32(GEN7_L3SQCREG1),
+ REG32(GEN7_L3CNTLREG2),
+ REG32(GEN7_L3CNTLREG3),
+ REG32(HSW_SCRATCH1,
+ .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
+ .value = 0),
+ REG32(HSW_ROW_CHICKEN3,
+ .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
+ HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ .value = 0),
};
-static const u32 gen7_blt_regs[] = {
- BCS_SWCTRL,
+static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+ REG32(BCS_SWCTRL),
};
-static const u32 ivb_master_regs[] = {
- FORCEWAKE_MT,
- DERRMR,
- GEN7_PIPE_DE_LOAD_SL(PIPE_A),
- GEN7_PIPE_DE_LOAD_SL(PIPE_B),
- GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+ REG32(FORCEWAKE_MT),
+ REG32(DERRMR),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+ REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};
-static const u32 hsw_master_regs[] = {
- FORCEWAKE_MT,
- DERRMR,
+static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+ REG32(FORCEWAKE_MT),
+ REG32(DERRMR),
};
#undef REG64
+#undef REG32
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
@@ -550,14 +580,16 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
return ret;
}
-static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id,
+ const struct drm_i915_reg_descriptor *reg_table,
+ int reg_count)
{
int i;
u32 previous = 0;
bool ret = true;
for (i = 0; i < reg_count; i++) {
- u32 curr = reg_table[i];
+ u32 curr = reg_table[i].addr;
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@@ -804,18 +836,20 @@ find_cmd(struct intel_engine_cs *ring,
return default_desc;
}
-static bool valid_reg(const u32 *table, int count, u32 addr)
+static const struct drm_i915_reg_descriptor *
+find_reg(const struct drm_i915_reg_descriptor *table,
+ int count, u32 addr)
{
- if (table && count != 0) {
+ if (table) {
int i;
for (i = 0; i < count; i++) {
- if (table[i] == addr)
- return true;
+ if (table[i].addr == addr)
+ return &table[i];
}
}
- return false;
+ return NULL;
}
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
@@ -869,6 +903,9 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
batch_len + batch_start_offset > src_obj->base.size)
return ERR_PTR(-E2BIG);
+ if (WARN_ON(dest_obj->pages_pin_count == 0))
+ return ERR_PTR(-ENODEV);
+
ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
if (ret) {
DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
@@ -882,13 +919,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
goto unpin_src;
}
- ret = i915_gem_object_get_pages(dest_obj);
- if (ret) {
- DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
- goto unmap_src;
- }
- i915_gem_object_pin_pages(dest_obj);
-
ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
@@ -898,7 +928,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
dst = vmap_batch(dest_obj, 0, batch_len);
if (!dst) {
DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
- i915_gem_object_unpin_pages(dest_obj);
ret = -ENOMEM;
goto unmap_src;
}
@@ -939,7 +968,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
static bool check_cmd(const struct intel_engine_cs *ring,
const struct drm_i915_cmd_descriptor *desc,
- const u32 *cmd,
+ const u32 *cmd, u32 length,
const bool is_master,
bool *oacontrol_set)
{
@@ -955,38 +984,70 @@ static bool check_cmd(const struct intel_engine_cs *ring,
}
if (desc->flags & CMD_DESC_REGISTER) {
- u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
/*
- * OACONTROL requires some special handling for writes. We
- * want to make sure that any batch which enables OA also
- * disables it before the end of the batch. The goal is to
- * prevent one process from snooping on the perf data from
- * another process. To do that, we need to check the value
- * that will be written to the register. Hence, limit
- * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+ * Get the distance between individual register offset
+ * fields if the command can perform more than one
+ * access at a time.
*/
- if (reg_addr == OACONTROL) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+ const u32 step = desc->reg.step ? desc->reg.step : length;
+ u32 offset;
+
+ for (offset = desc->reg.offset; offset < length;
+ offset += step) {
+ const u32 reg_addr = cmd[offset] & desc->reg.mask;
+ const struct drm_i915_reg_descriptor *reg =
+ find_reg(ring->reg_table, ring->reg_count,
+ reg_addr);
+
+ if (!reg && is_master)
+ reg = find_reg(ring->master_reg_table,
+ ring->master_reg_count,
+ reg_addr);
+
+ if (!reg) {
+ DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+ reg_addr, *cmd, ring->id);
return false;
}
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
- *oacontrol_set = (cmd[2] != 0);
- }
+ /*
+ * OACONTROL requires some special handling for
+ * writes. We want to make sure that any batch which
+ * enables OA also disables it before the end of the
+ * batch. The goal is to prevent one process from
+ * snooping on the perf data from another process. To do
+ * that, we need to check the value that will be written
+ * to the register. Hence, limit OACONTROL writes to
+ * only MI_LOAD_REGISTER_IMM commands.
+ */
+ if (reg_addr == OACONTROL) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+ return false;
+ }
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+ *oacontrol_set = (cmd[offset + 1] != 0);
+ }
- if (!valid_reg(ring->reg_table,
- ring->reg_count, reg_addr)) {
- if (!is_master ||
- !valid_reg(ring->master_reg_table,
- ring->master_reg_count,
- reg_addr)) {
- DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr,
- *cmd,
- ring->id);
- return false;
+ /*
+ * Check the value written to the register against the
+ * allowed mask/value pair given in the whitelist entry.
+ */
+ if (reg->mask) {
+ if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
+
+ if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+ (offset + 2 > length ||
+ (cmd[offset + 1] & reg->mask) != reg->value)) {
+ DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
+ reg_addr);
+ return false;
+ }
}
}
}
@@ -1110,7 +1171,8 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
- if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
+ if (!check_cmd(ring, desc, cmd, length, is_master,
+ &oacontrol_set)) {
ret = -EINVAL;
break;
}
@@ -1129,7 +1191,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
}
vunmap(batch_base);
- i915_gem_object_unpin_pages(shadow_batch_obj);
return ret;
}
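
The command-parser rework above replaces the flat u32 whitelists with drm_i915_reg_descriptor entries and walks commands that touch several registers in fixed steps (the .step = 2 added to MI_LOAD_REGISTER_IMM(1) matches its offset/value pair layout). The sketch below re-states that validation loop outside the driver, for illustration only: the descriptor fields and the mask/value semantics follow the patch, while the payload layout assumptions, the standalone table lookup and the function names are invented here.

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct reg_descriptor {
	uint32_t addr;
	uint32_t mask;	/* 0 means: any immediate value is acceptable */
	uint32_t value;
};

static const struct reg_descriptor *find_reg(const struct reg_descriptor *table,
					     size_t count, uint32_t addr)
{
	for (size_t i = 0; i < count; i++)
		if (table[i].addr == addr)
			return &table[i];
	return NULL;
}

/*
 * Validate one MI_LOAD_REGISTER_IMM payload: cmd[1], cmd[3], ... hold register
 * offsets, cmd[2], cmd[4], ... the immediates, hence a step of 2 from offset 1.
 */
static bool check_lri(const uint32_t *cmd, uint32_t length,
		      const struct reg_descriptor *table, size_t count)
{
	for (uint32_t offset = 1; offset < length; offset += 2) {
		const struct reg_descriptor *reg =
			find_reg(table, count, cmd[offset] & 0x007FFFFC);

		if (!reg)
			return false;	/* register not whitelisted */

		/* Masked entries: immediate AND mask must equal value. */
		if (reg->mask &&
		    (offset + 2 > length ||
		     (cmd[offset + 1] & reg->mask) != reg->value))
			return false;
	}
	return true;
}

This mirrors why registers such as HSW_SCRATCH1 and HSW_ROW_CHICKEN3 can be whitelisted only for writes that keep the atomics-disable bits clear, and why LRM to a masked register is rejected outright in the patch.
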
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dc55c51964ab..82bbe3f2a7e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,7 +96,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (i915_gem_obj_is_pinned(obj))
+ if (obj->pin_display)
return "p";
else
return " ";
@@ -120,18 +120,25 @@ static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct intel_engine_cs *ring;
struct i915_vma *vma;
int pin_count = 0;
+ int i;
- seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
+ seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
&obj->base,
+ obj->active ? "*" : " ",
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
- obj->base.write_domain,
- i915_gem_request_get_seqno(obj->last_read_req),
+ obj->base.write_domain);
+ for_each_ring(ring, dev_priv, i)
+ seq_printf(m, "%x ",
+ i915_gem_request_get_seqno(obj->last_read_req[i]));
+ seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
@@ -159,18 +166,18 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->stolen)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
- if (obj->pin_mappable || obj->fault_mappable) {
+ if (obj->pin_display || obj->fault_mappable) {
char s[3], *t = s;
- if (obj->pin_mappable)
+ if (obj->pin_display)
*t++ = 'p';
if (obj->fault_mappable)
*t++ = 'f';
*t = '\0';
seq_printf(m, " (%s mappable)", s);
}
- if (obj->last_read_req != NULL)
+ if (obj->last_write_req != NULL)
seq_printf(m, " (%s)",
- i915_gem_request_get_ring(obj->last_read_req)->name);
+ i915_gem_request_get_ring(obj->last_write_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -361,31 +368,39 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
-#define print_file_stats(m, name, stats) \
- seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
- name, \
- stats.count, \
- stats.total, \
- stats.active, \
- stats.inactive, \
- stats.global, \
- stats.shared, \
- stats.unbound)
+#define print_file_stats(m, name, stats) do { \
+ if (stats.count) \
+ seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+ name, \
+ stats.count, \
+ stats.total, \
+ stats.active, \
+ stats.inactive, \
+ stats.global, \
+ stats.shared, \
+ stats.unbound); \
+} while (0)
static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
+ struct intel_engine_cs *ring;
+ int i, j;
memset(&stats, 0, sizeof(stats));
- list_for_each_entry(obj,
- &dev_priv->mm.batch_pool.cache_list,
- batch_pool_list)
- per_file_stats(0, obj, &stats);
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ list_for_each_entry(obj,
+ &ring->batch_pool.cache_list[j],
+ batch_pool_link)
+ per_file_stats(0, obj, &stats);
+ }
+ }
- print_file_stats(m, "batch pool", stats);
+ print_file_stats(m, "[k]batch pool", stats);
}
#define count_vmas(list, member) do { \
@@ -449,7 +464,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += i915_gem_obj_ggtt_size(obj);
++count;
}
- if (obj->pin_mappable) {
+ if (obj->pin_display) {
mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count;
}
@@ -471,8 +486,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
-
- seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -613,24 +626,39 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- int count = 0;
- int ret;
+ struct intel_engine_cs *ring;
+ int total = 0;
+ int ret, i, j;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_puts(m, "cache:\n");
- list_for_each_entry(obj,
- &dev_priv->mm.batch_pool.cache_list,
- batch_pool_list) {
- seq_puts(m, " ");
- describe_obj(m, obj);
- seq_putc(m, '\n');
- count++;
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ int count;
+
+ count = 0;
+ list_for_each_entry(obj,
+ &ring->batch_pool.cache_list[j],
+ batch_pool_link)
+ count++;
+ seq_printf(m, "%s cache[%d]: %d objects\n",
+ ring->name, j, count);
+
+ list_for_each_entry(obj,
+ &ring->batch_pool.cache_list[j],
+ batch_pool_link) {
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ }
+
+ total += count;
+ }
}
- seq_printf(m, "total: %d\n", count);
+ seq_printf(m, "total: %d\n", total);
mutex_unlock(&dev->struct_mutex);
@@ -643,31 +671,44 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
- struct drm_i915_gem_request *gem_request;
- int ret, count, i;
+ struct drm_i915_gem_request *req;
+ int ret, any, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- count = 0;
+ any = 0;
for_each_ring(ring, dev_priv, i) {
- if (list_empty(&ring->request_list))
+ int count;
+
+ count = 0;
+ list_for_each_entry(req, &ring->request_list, list)
+ count++;
+ if (count == 0)
continue;
- seq_printf(m, "%s requests:\n", ring->name);
- list_for_each_entry(gem_request,
- &ring->request_list,
- list) {
- seq_printf(m, " %x @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ seq_printf(m, "%s requests: %d\n", ring->name, count);
+ list_for_each_entry(req, &ring->request_list, list) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = NULL;
+ if (req->pid)
+ task = pid_task(req->pid, PIDTYPE_PID);
+ seq_printf(m, " %x @ %d: %s [%d]\n",
+ req->seqno,
+ (int) (jiffies - req->emitted_jiffies),
+ task ? task->comm : "<unknown>",
+ task ? task->pid : -1);
+ rcu_read_unlock();
}
- count++;
+
+ any++;
}
mutex_unlock(&dev->struct_mutex);
- if (count == 0)
+ if (any == 0)
seq_puts(m, "No requests\n");
return 0;
@@ -1176,12 +1217,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "Up threshold: %d%%\n",
+ dev_priv->rps.up_threshold);
+
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "Down threshold: %d%%\n",
+ dev_priv->rps.down_threshold);
max_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
@@ -1197,12 +1243,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
-
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ seq_printf(m, "Current freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ seq_printf(m, "Min freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "Max freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1211,6 +1266,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+ seq_printf(m, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+
seq_printf(m, "max GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
@@ -1223,9 +1284,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
-
- seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
@@ -2156,8 +2214,6 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
if (!ppgtt)
return;
- seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
- seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for (i = 0; i < 4; i++) {
@@ -2229,6 +2285,60 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
return 0;
}
+static int count_irq_waiters(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *ring;
+ int count = 0;
+ int i;
+
+ for_each_ring(ring, i915, i)
+ count += ring->irq_refcount;
+
+ return count;
+}
+
+static int i915_rps_boost_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_file *file;
+
+ seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+ seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+ seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
+ seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ spin_lock(&dev_priv->rps.client_lock);
+ list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(file->pid, PIDTYPE_PID);
+ seq_printf(m, "%s [%d]: %d boosts%s\n",
+ task ? task->comm : "<unknown>",
+ task ? task->pid : -1,
+ file_priv->rps.boosts,
+ list_empty(&file_priv->rps.link) ? "" : ", active");
+ rcu_read_unlock();
+ }
+ seq_printf(m, "Semaphore boosts: %d%s\n",
+ dev_priv->rps.semaphores.boosts,
+ list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
+ seq_printf(m, "MMIO flip boosts: %d%s\n",
+ dev_priv->rps.mmioflips.boosts,
+ list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
+ seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
+ spin_unlock(&dev_priv->rps.client_lock);
+
+ return 0;
+}
+
static int i915_llc(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
@@ -2290,9 +2400,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
}
seq_puts(m, "\n");
- seq_printf(m, "Link standby: %s\n",
- yesno((bool)dev_priv->psr.link_standby));
-
/* CHV PSR has no kind of performance counter */
if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
@@ -3529,8 +3636,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_reset(crtc);
}
drm_modeset_unlock_all(dev);
}
@@ -3551,8 +3657,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
if (crtc->config->pch_pfit.force_thru) {
crtc->config->pch_pfit.force_thru = false;
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_reset(crtc);
intel_display_power_put(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3869,6 +3974,212 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct seq_file *m;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ m = file->private_data;
+ if (!m) {
+ status = -ENODEV;
+ return status;
+ }
+ dev = m->private;
+
+ if (!dev) {
+ status = -ENODEV;
+ return status;
+ }
+ connector_list = &dev->mode_config.connector_list;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = kmalloc(len + 1, GFP_KERNEL);
+ if (!input_buffer)
+ return -ENOMEM;
+
+ if (copy_from_user(input_buffer, ubuf, len)) {
+ status = -EFAULT;
+ goto out;
+ }
+
+ input_buffer[len] = '\0';
+ DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->connector_type ==
+ DRM_MODE_CONNECTOR_DisplayPort &&
+ connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ goto out;
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance_test_active = 1;
+ else
+ intel_dp->compliance_test_active = 0;
+ }
+ }
+out:
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ if (intel_dp->compliance_test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_active_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ seq_printf(m, "%lx", intel_dp->compliance_test_data);
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+static int i915_displayport_test_data_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_data_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_data_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_data_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ seq_printf(m, "%02lx", intel_dp->compliance_test_type);
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+
+static int i915_displayport_test_type_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_type_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_type_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
@@ -4473,12 +4784,116 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
i915_cache_sharing_get, i915_cache_sharing_set,
"%llu\n");
+struct sseu_dev_status {
+ unsigned int slice_total;
+ unsigned int subslice_total;
+ unsigned int subslice_per_slice;
+ unsigned int eu_total;
+ unsigned int eu_per_subslice;
+};
+
+static void cherryview_sseu_device_status(struct drm_device *dev,
+ struct sseu_dev_status *stat)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const int ss_max = 2;
+ int ss;
+ u32 sig1[ss_max], sig2[ss_max];
+
+ sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
+ sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
+ sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
+ sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (sig1[ss] & CHV_SS_PG_ENABLE)
+ /* skip disabled subslice */
+ continue;
+
+ stat->slice_total = 1;
+ stat->subslice_per_slice++;
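+ /* Each CHV_EUxx_PG_ENABLE bit power-gates a pair of EUs, so
+  * every bit left clear contributes two enabled EUs.
+  */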
+ eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
+ ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
+ ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
+ stat->eu_total += eu_cnt;
+ stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
+ }
+ stat->subslice_total = stat->subslice_per_slice;
+}
+
+static void gen9_sseu_device_status(struct drm_device *dev,
+ struct sseu_dev_status *stat)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int s_max = 3, ss_max = 4;
+ int s, ss;
+ u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
+
+ /* BXT has a single slice and at most 3 subslices. */
+ if (IS_BROXTON(dev)) {
+ s_max = 1;
+ ss_max = 3;
+ }
+
+ for (s = 0; s < s_max; s++) {
+ s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
+ eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
+ eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
+ }
+
+ eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
+ GEN9_PGCTL_SSA_EU19_ACK |
+ GEN9_PGCTL_SSA_EU210_ACK |
+ GEN9_PGCTL_SSA_EU311_ACK;
+ eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
+ GEN9_PGCTL_SSB_EU19_ACK |
+ GEN9_PGCTL_SSB_EU210_ACK |
+ GEN9_PGCTL_SSB_EU311_ACK;
+
+ for (s = 0; s < s_max; s++) {
+ unsigned int ss_cnt = 0;
+
+ if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
+ /* skip disabled slice */
+ continue;
+
+ stat->slice_total++;
+
+ if (IS_SKYLAKE(dev))
+ ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
+
+ for (ss = 0; ss < ss_max; ss++) {
+ unsigned int eu_cnt;
+
+ if (IS_BROXTON(dev) &&
+ !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
+ /* skip disabled subslice */
+ continue;
+
+ if (IS_BROXTON(dev))
+ ss_cnt++;
+
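+ /* Each ACK bit in the EU power-gate control register
+  * corresponds to an enabled pair of EUs.
+  */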
+ eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
+ eu_mask[ss%2]);
+ stat->eu_total += eu_cnt;
+ stat->eu_per_subslice = max(stat->eu_per_subslice,
+ eu_cnt);
+ }
+
+ stat->subslice_total += ss_cnt;
+ stat->subslice_per_slice = max(stat->subslice_per_slice,
+ ss_cnt);
+ }
+}
+
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
+ struct sseu_dev_status stat;
if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
return -ENODEV;
@@ -4502,79 +4917,22 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
yesno(INTEL_INFO(dev)->has_eu_pg));
seq_puts(m, "SSEU Device Status\n");
+ memset(&stat, 0, sizeof(stat));
if (IS_CHERRYVIEW(dev)) {
- const int ss_max = 2;
- int ss;
- u32 sig1[ss_max], sig2[ss_max];
-
- sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
- sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
- sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
- sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
-
- for (ss = 0; ss < ss_max; ss++) {
- unsigned int eu_cnt;
-
- if (sig1[ss] & CHV_SS_PG_ENABLE)
- /* skip disabled subslice */
- continue;
-
- s_tot = 1;
- ss_per++;
- eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
- ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
- ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
- eu_tot += eu_cnt;
- eu_per = max(eu_per, eu_cnt);
- }
- ss_tot = ss_per;
- } else if (IS_SKYLAKE(dev)) {
- const int s_max = 3, ss_max = 4;
- int s, ss;
- u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
-
- s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
- s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
- s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
- eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
- eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
- eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
- eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
- eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
- eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
- eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
- GEN9_PGCTL_SSA_EU19_ACK |
- GEN9_PGCTL_SSA_EU210_ACK |
- GEN9_PGCTL_SSA_EU311_ACK;
- eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
- GEN9_PGCTL_SSB_EU19_ACK |
- GEN9_PGCTL_SSB_EU210_ACK |
- GEN9_PGCTL_SSB_EU311_ACK;
-
- for (s = 0; s < s_max; s++) {
- if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
- /* skip disabled slice */
- continue;
-
- s_tot++;
- ss_per = INTEL_INFO(dev)->subslice_per_slice;
- ss_tot += ss_per;
- for (ss = 0; ss < ss_max; ss++) {
- unsigned int eu_cnt;
-
- eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
- eu_mask[ss%2]);
- eu_tot += eu_cnt;
- eu_per = max(eu_per, eu_cnt);
- }
- }
+ cherryview_sseu_device_status(dev, &stat);
+ } else if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_sseu_device_status(dev, &stat);
}
- seq_printf(m, " Enabled Slice Total: %u\n", s_tot);
- seq_printf(m, " Enabled Subslice Total: %u\n", ss_tot);
- seq_printf(m, " Enabled Subslice Per Slice: %u\n", ss_per);
- seq_printf(m, " Enabled EU Total: %u\n", eu_tot);
- seq_printf(m, " Enabled EU Per Subslice: %u\n", eu_per);
+ seq_printf(m, " Enabled Slice Total: %u\n",
+ stat.slice_total);
+ seq_printf(m, " Enabled Subslice Total: %u\n",
+ stat.subslice_total);
+ seq_printf(m, " Enabled Subslice Per Slice: %u\n",
+ stat.subslice_per_slice);
+ seq_printf(m, " Enabled EU Total: %u\n",
+ stat.eu_total);
+ seq_printf(m, " Enabled EU Per Subslice: %u\n",
+ stat.eu_per_subslice);
return 0;
}
@@ -4694,6 +5052,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_drrs_status", i915_drrs_status, 0},
+ {"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4716,6 +5075,9 @@ static const struct i915_debugfs_files {
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_fbc_false_color", &i915_fbc_fc_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops}
};
void intel_display_crc_init(struct drm_device *dev)
@@ -4783,3 +5145,102 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
drm_debugfs_remove_files(info_list, 1, minor);
}
}
+
+struct dpcd_block {
+ /* DPCD dump start address. */
+ unsigned int offset;
+ /* DPCD dump end address, inclusive. If unset, .size will be used. */
+ unsigned int end;
+ /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
+ size_t size;
+ /* Only valid for eDP. */
+ bool edp;
+};
+
+static const struct dpcd_block i915_dpcd_debug[] = {
+ { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
+ { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
+ { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
+ { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
+ { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
+ { .offset = DP_SET_POWER },
+ { .offset = DP_EDP_DPCD_REV },
+ { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
+ { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
+ { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
+};
+
+static int i915_dpcd_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ uint8_t buf[16];
+ ssize_t err;
+ int i;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
+ const struct dpcd_block *b = &i915_dpcd_debug[i];
+ size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
+
+ if (b->edp &&
+ connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+ continue;
+
+ /* low tech for now */
+ if (WARN_ON(size > sizeof(buf)))
+ continue;
+
+ err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
+ if (err <= 0) {
+ DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
+ size, b->offset, err);
+ continue;
+ }
+
+ seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+ }
+
+ return 0;
+}
+
+static int i915_dpcd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_dpcd_show, inode->i_private);
+}
+
+static const struct file_operations i915_dpcd_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dpcd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/**
+ * i915_debugfs_connector_add - add i915 specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, negative error codes on error.
+ */
+int i915_debugfs_connector_add(struct drm_connector *connector)
+{
+ struct dentry *root = connector->debugfs_entry;
+
+ /* The connector must have been registered beforehand. */
+ if (!root)
+ return -ENODEV;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
+ &i915_dpcd_fops);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 68e0c85a17cf..d2df321ba634 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -564,6 +564,140 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#undef SEP_COMMA
}
+static void cherryview_sseu_info_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_device_info *info;
+ u32 fuse, eu_dis;
+
+ info = (struct intel_device_info *)&dev_priv->info;
+ fuse = I915_READ(CHV_FUSE_GT);
+
+ info->slice_total = 1;
+
+ if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+ CHV_FGT_EU_DIS_SS0_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+ info->subslice_per_slice++;
+ eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+ CHV_FGT_EU_DIS_SS1_R1_MASK);
+ info->eu_total += 8 - hweight32(eu_dis);
+ }
+
+ info->subslice_total = info->subslice_per_slice;
+ /*
+ * CHV is expected to always have a uniform distribution of EUs
+ * across subslices.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ info->eu_total / info->subslice_total :
+ 0;
+ /*
+ * CHV supports subslice power gating on devices with more than
+ * one subslice, and supports EU power gating on devices with
+ * more than one EU pair per subslice.
+ */
+ info->has_slice_pg = 0;
+ info->has_subslice_pg = (info->subslice_total > 1);
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_device_info *info;
+ int s_max = 3, ss_max = 4, eu_max = 8;
+ int s, ss;
+ u32 fuse2, s_enable, ss_disable, eu_disable;
+ u8 eu_mask = 0xff;
+
+ /*
+ * BXT has a single slice. BXT also has at most 6 EU per subslice,
+ * and therefore only the lowest 6 bits of the 8-bit EU disable
+ * fields are valid.
+ */
+ if (IS_BROXTON(dev)) {
+ s_max = 1;
+ eu_max = 6;
+ eu_mask = 0x3f;
+ }
+
+ info = (struct intel_device_info *)&dev_priv->info;
+ fuse2 = I915_READ(GEN8_FUSE2);
+ s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+ GEN8_F2_S_ENA_SHIFT;
+ ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+ GEN9_F2_SS_DIS_SHIFT;
+
+ info->slice_total = hweight32(s_enable);
+ /*
+ * The subslice disable field is global, i.e. it applies
+ * to each of the enabled slices.
+ */
+ info->subslice_per_slice = ss_max - hweight32(ss_disable);
+ info->subslice_total = info->slice_total *
+ info->subslice_per_slice;
+
+ /*
+ * Iterate through enabled slices and subslices to
+ * count the total enabled EU.
+ */
+ for (s = 0; s < s_max; s++) {
+ if (!(s_enable & (0x1 << s)))
+ /* skip disabled slice */
+ continue;
+
+ eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+ for (ss = 0; ss < ss_max; ss++) {
+ int eu_per_ss;
+
+ if (ss_disable & (0x1 << ss))
+ /* skip disabled subslice */
+ continue;
+
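+ /* Each subslice owns an 8-bit field of the EU disable
+  * register; every set bit is a fused-off EU.
+  */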
+ eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+ eu_mask);
+
+ /*
+ * Record which subslice(s) have 7 EUs. We
+ * can tune the hash used to spread work among
+ * subslices if they are unbalanced.
+ */
+ if (eu_per_ss == 7)
+ info->subslice_7eu[s] |= 1 << ss;
+
+ info->eu_total += eu_per_ss;
+ }
+ }
+
+ /*
+ * SKL is expected to always have a uniform distribution
+ * of EU across subslices with the exception that any one
+ * EU in any one subslice may be fused off for die
+ * recovery. BXT is expected to be perfectly uniform in EU
+ * distribution.
+ */
+ info->eu_per_subslice = info->subslice_total ?
+ DIV_ROUND_UP(info->eu_total,
+ info->subslice_total) : 0;
+ /*
+ * SKL supports slice power gating on devices with more than
+ * one slice, and supports EU power gating on devices with
+ * more than one EU pair per subslice. BXT supports subslice
+ * power gating on devices with more than one subslice, and
+ * supports EU power gating on devices with more than one EU
+ * pair per subslice.
+ */
+ info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1));
+ info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
+ info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
/*
* Determine various intel_device_info fields at runtime.
*
@@ -585,7 +719,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
info = (struct intel_device_info *)&dev_priv->info;
- if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
+ if (IS_BROXTON(dev)) {
+ info->num_sprites[PIPE_A] = 3;
+ info->num_sprites[PIPE_B] = 3;
+ info->num_sprites[PIPE_C] = 2;
+ } else if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
@@ -620,116 +758,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
}
/* Initialize slice/subslice/EU info */
- if (IS_CHERRYVIEW(dev)) {
- u32 fuse, eu_dis;
-
- fuse = I915_READ(CHV_FUSE_GT);
+ if (IS_CHERRYVIEW(dev))
+ cherryview_sseu_info_init(dev);
+ else if (INTEL_INFO(dev)->gen >= 9)
+ gen9_sseu_info_init(dev);
- info->slice_total = 1;
-
- if (!(fuse & CHV_FGT_DISABLE_SS0)) {
- info->subslice_per_slice++;
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
- CHV_FGT_EU_DIS_SS0_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
- }
-
- if (!(fuse & CHV_FGT_DISABLE_SS1)) {
- info->subslice_per_slice++;
- eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
- CHV_FGT_EU_DIS_SS1_R1_MASK);
- info->eu_total += 8 - hweight32(eu_dis);
- }
-
- info->subslice_total = info->subslice_per_slice;
- /*
- * CHV expected to always have a uniform distribution of EU
- * across subslices.
- */
- info->eu_per_subslice = info->subslice_total ?
- info->eu_total / info->subslice_total :
- 0;
- /*
- * CHV supports subslice power gating on devices with more than
- * one subslice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
- */
- info->has_slice_pg = 0;
- info->has_subslice_pg = (info->subslice_total > 1);
- info->has_eu_pg = (info->eu_per_subslice > 2);
- } else if (IS_SKYLAKE(dev)) {
- const int s_max = 3, ss_max = 4, eu_max = 8;
- int s, ss;
- u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
- fuse2 = I915_READ(GEN8_FUSE2);
- s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
- GEN8_F2_S_ENA_SHIFT;
- ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
- GEN9_F2_SS_DIS_SHIFT;
-
- eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
- eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
- eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
-
- info->slice_total = hweight32(s_enable);
- /*
- * The subslice disable field is global, i.e. it applies
- * to each of the enabled slices.
- */
- info->subslice_per_slice = ss_max - hweight32(ss_disable);
- info->subslice_total = info->slice_total *
- info->subslice_per_slice;
-
- /*
- * Iterate through enabled slices and subslices to
- * count the total enabled EU.
- */
- for (s = 0; s < s_max; s++) {
- if (!(s_enable & (0x1 << s)))
- /* skip disabled slice */
- continue;
-
- for (ss = 0; ss < ss_max; ss++) {
- u32 n_disabled;
-
- if (ss_disable & (0x1 << ss))
- /* skip disabled subslice */
- continue;
-
- n_disabled = hweight8(eu_disable[s] >>
- (ss * eu_max));
-
- /*
- * Record which subslice(s) has(have) 7 EUs. we
- * can tune the hash used to spread work among
- * subslices if they are unbalanced.
- */
- if (eu_max - n_disabled == 7)
- info->subslice_7eu[s] |= 1 << ss;
-
- info->eu_total += eu_max - n_disabled;
- }
- }
-
- /*
- * SKL is expected to always have a uniform distribution
- * of EU across subslices with the exception that any one
- * EU in any one subslice may be fused off for die
- * recovery.
- */
- info->eu_per_subslice = info->subslice_total ?
- DIV_ROUND_UP(info->eu_total,
- info->subslice_total) : 0;
- /*
- * SKL supports slice power gating on devices with more than
- * one slice, and supports EU power gating on devices with
- * more than one EU pair per subslice.
- */
- info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
- info->has_subslice_pg = 0;
- info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
- }
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -781,8 +814,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
- mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->csr_lock);
intel_pm_setup(dev);
@@ -828,9 +862,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_init(dev);
+ /* Load CSR Firmware for SKL */
+ intel_csr_ucode_init(dev);
+
ret = i915_gem_gtt_init(dev);
if (ret)
- goto out_regs;
+ goto out_freecsr;
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
@@ -1000,14 +1037,19 @@ out_mtrrfree:
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
i915_global_gtt_cleanup(dev);
-out_regs:
+out_freecsr:
+ intel_csr_ucode_fini(dev);
intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
- if (dev_priv->slab)
- kmem_cache_destroy(dev_priv->slab);
+ if (dev_priv->requests)
+ kmem_cache_destroy(dev_priv->requests);
+ if (dev_priv->vmas)
+ kmem_cache_destroy(dev_priv->vmas);
+ if (dev_priv->objects)
+ kmem_cache_destroy(dev_priv->objects);
kfree(dev_priv);
return ret;
}
@@ -1072,11 +1114,12 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
- i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
+ intel_csr_ucode_fini(dev);
+
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
@@ -1091,8 +1134,12 @@ int i915_driver_unload(struct drm_device *dev)
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
- if (dev_priv->slab)
- kmem_cache_destroy(dev_priv->slab);
+ if (dev_priv->requests)
+ kmem_cache_destroy(dev_priv->requests);
+ if (dev_priv->vmas)
+ kmem_cache_destroy(dev_priv->vmas);
+ if (dev_priv->objects)
+ kmem_cache_destroy(dev_priv->objects);
pci_dev_put(dev_priv->bridge_dev);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a19d2c71e205..884b4f9b81c4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -381,6 +381,18 @@ static const struct intel_device_info intel_skylake_gt3_info = {
IVB_CURSOR_OFFSETS,
};
+static const struct intel_device_info intel_broxton_info = {
+ .is_preliminary = 1,
+ .gen = 9,
+ .need_gfx_hws = 1, .has_hotplug = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .num_pipes = 3,
+ .has_ddi = 1,
+ .has_fbc = 1,
+ GEN_DEFAULT_PIPEOFFSETS,
+ IVB_CURSOR_OFFSETS,
+};
+
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
@@ -420,7 +432,8 @@ static const struct intel_device_info intel_skylake_gt3_info = {
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_GT1_IDS(&intel_skylake_info), \
INTEL_SKL_GT2_IDS(&intel_skylake_info), \
- INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \
+ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \
+ INTEL_BXT_IDS(&intel_broxton_info)
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
@@ -543,6 +556,26 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}
+void i915_firmware_load_error_print(const char *fw_path, int err)
+{
+ DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
+
+ /*
+ * If the reason is not known, assume -ENOENT since that's the most
+ * common failure mode.
+ */
+ if (!err)
+ err = -ENOENT;
+
+ if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
+ return;
+
+ DRM_ERROR(
+ "The driver is built-in, so to load the firmware you need to\n"
+ "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
+ "in your initrd/initramfs image.\n");
+}
+
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -561,6 +594,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
+static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+
static int i915_drm_suspend(struct drm_device *dev)
{
@@ -776,11 +812,16 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_VALLEYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
if (ret)
- DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
+ DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
+ ret);
intel_uncore_early_sanitize(dev, true);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_BROXTON(dev))
+ ret = bxt_resume_prepare(dev_priv);
+ else if (IS_SKYLAKE(dev_priv))
+ ret = skl_resume_prepare(dev_priv);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
intel_uncore_sanitize(dev);
@@ -952,7 +993,7 @@ static int i915_pm_suspend_late(struct device *dev)
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
/*
- * We have a suspedn ordering issue with the snd-hda driver also
+ * We have a suspend ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
* parent/child relationship we currently solve this with a late
* suspend hook.
@@ -996,6 +1037,21 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
+static int skl_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ /* Enabling DC6 is not a hard requirement to enter runtime D3 */
+
+ /*
+ * This is to ensure that the CSR isn't identified as loaded before
+ * the CSR-loading program is called during runtime resume.
+ */
+ intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
+
+ skl_uninit_cdclk(dev_priv);
+
+ return 0;
+}
+
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
@@ -1003,6 +1059,48 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
return 0;
}
+static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* TODO: when DC5 support is added disable DC5 here. */
+
+ broxton_ddi_phy_uninit(dev);
+ broxton_uninit_cdclk(dev);
+ bxt_enable_dc9(dev_priv);
+
+ return 0;
+}
+
+static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ /* TODO: when CSR FW support is added make sure the FW is loaded */
+
+ bxt_disable_dc9(dev_priv);
+
+ /*
+ * TODO: when DC5 support is added enable DC5 here if the CSR FW
+ * is available.
+ */
+ broxton_init_cdclk(dev);
+ broxton_ddi_phy_init(dev);
+ intel_prepare_ddi(dev);
+
+ return 0;
+}
+
+static int skl_resume_prepare(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ skl_init_cdclk(dev_priv);
+ intel_csr_load_program(dev);
+
+ return 0;
+}
+
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1461,6 +1559,11 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
+
+ if (IS_BROXTON(dev))
+ ret = bxt_resume_prepare(dev_priv);
+ else if (IS_SKYLAKE(dev))
+ ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
@@ -1490,12 +1593,15 @@ static int intel_runtime_resume(struct device *device)
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
int ret;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ if (IS_BROXTON(dev_priv))
+ ret = bxt_suspend_complete(dev_priv);
+ else if (IS_SKYLAKE(dev_priv))
+ ret = skl_suspend_complete(dev_priv);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
- else if (IS_VALLEYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ae6f7f06b3a..542fac628b28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150327"
+#define DRIVER_DATE "20150522"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -130,7 +130,7 @@ enum transcoder {
*
* This value doesn't count the cursor plane.
*/
-#define I915_MAX_PLANES 3
+#define I915_MAX_PLANES 4
enum plane {
PLANE_A = 0,
@@ -238,6 +238,11 @@ enum hpd_pin {
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &dev->mode_config.plane_list, \
+ base.head)
+
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@@ -251,7 +256,6 @@ enum hpd_pin {
&dev->mode_config.connector_list, \
base.head)
-
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
@@ -268,6 +272,30 @@ struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
+struct drm_i915_file_private {
+ struct drm_i915_private *dev_priv;
+ struct drm_file *file;
+
+ struct {
+ spinlock_t lock;
+ struct list_head request_list;
+/* 20ms is a fairly arbitrary limit (greater than the average frame time)
+ * chosen to prevent the CPU getting more than a frame ahead of the GPU
+ * (when using lax throttling for the frontbuffer). We also use it to
+ * offer free GPU waitboosts for severely congested workloads.
+ */
+#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
+ } mm;
+ struct idr context_idr;
+
+ struct intel_rps_client {
+ struct list_head link;
+ unsigned boosts;
+ } rps;
+
+ struct intel_engine_cs *bsd_ring;
+};
+
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
@@ -296,13 +324,16 @@ struct intel_dpll_hw_state {
/* skl */
/*
* DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
- * lower part of crtl1 and they get shifted into position when writing
+ * lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
uint32_t ctrl1;
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
+
+ /* bxt */
+ uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
};
struct intel_shared_dpll_config {
@@ -455,6 +486,7 @@ struct drm_i915_error_state {
u32 semaphore_seqno[I915_NUM_RINGS - 1];
/* Register state */
+ u32 start;
u32 tail;
u32 head;
u32 ctl;
@@ -500,7 +532,7 @@ struct drm_i915_error_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno, wseqno;
+ u32 rseqno[I915_NUM_RINGS], wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -666,6 +698,22 @@ struct intel_uncore {
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+enum csr_state {
+ FW_UNINITIALIZED = 0,
+ FW_LOADED,
+ FW_FAILED
+};
+
+struct intel_csr {
+ const char *fw_path;
+ __be32 *dmc_payload;
+ uint32_t dmc_fw_size;
+ uint32_t mmio_count;
+ uint32_t mmioaddr[8];
+ uint32_t mmiodata[8];
+ enum csr_state state;
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
@@ -766,7 +814,7 @@ struct i915_ctx_hang_stats {
* context).
* @hang_stats: information about the role of this context in possible GPU
* hangs.
- * @vm: virtual memory space used by this context.
+ * @ppgtt: virtual memory space used by this context.
* @legacy_hw_ctx: render context backing object and whether it is correctly
* initialized (legacy ring submission mechanism only).
* @link: link in the global list of contexts.
@@ -880,7 +928,8 @@ struct i915_psr {
bool active;
struct delayed_work work;
unsigned busy_frontbuffer_bits;
- bool link_standby;
+ bool psr2_support;
+ bool aux_frame_sync;
};
enum intel_pch {
@@ -1034,18 +1083,30 @@ struct intel_gen6_power_mgmt {
u8 rp0_freq; /* Non-overclocked max frequency. */
u32 cz_freq;
+ u8 up_threshold; /* Current %busy required to upclock */
+ u8 down_threshold; /* Current %busy required to downclock */
+
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+ spinlock_t client_lock;
+ struct list_head clients;
+ bool client_boost;
+
bool enabled;
struct delayed_work delayed_resume_work;
+ unsigned boosts;
+
+ struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */
struct intel_rps_ei up_ei, down_ei;
/*
* Protects RPS/RC6 register access and PCU communication.
- * Must be taken after struct_mutex if nested.
+ * Must be taken after struct_mutex if nested. Note that
+ * this lock may be held for long periods of time when
+ * talking to hw - so only take it when talking to hw!
*/
struct mutex hw_lock;
};
@@ -1136,11 +1197,6 @@ struct intel_l3_parity {
int which_slice;
};
-struct i915_gem_batch_pool {
- struct drm_device *dev;
- struct list_head cache_list;
-};
-
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
@@ -1154,13 +1210,6 @@ struct i915_gem_mm {
*/
struct list_head unbound_list;
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct i915_gem_batch_pool batch_pool;
-
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
@@ -1351,7 +1400,6 @@ struct intel_vbt_data {
bool edp_initialized;
bool edp_support;
int edp_bpp;
- bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
@@ -1451,7 +1499,8 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
struct skl_ddb_allocation {
struct skl_ddb_entry pipe[I915_MAX_PIPES];
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
+ struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
struct skl_ddb_entry cursor[I915_MAX_PIPES];
};
@@ -1563,7 +1612,9 @@ struct i915_virtual_gpu {
struct drm_i915_private {
struct drm_device *dev;
- struct kmem_cache *slab;
+ struct kmem_cache *objects;
+ struct kmem_cache *vmas;
+ struct kmem_cache *requests;
const struct intel_device_info info;
@@ -1575,8 +1626,12 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
- struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+ struct intel_csr csr;
+
+ /* Display CSR-related protection */
+ struct mutex csr_lock;
+ struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
@@ -1611,8 +1666,8 @@ struct drm_i915_private {
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
- /* DPIO indirect register protection */
- struct mutex dpio_lock;
+ /* Sideband mailbox protection */
+ struct mutex sb_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
union {
@@ -1661,7 +1716,8 @@ struct drm_i915_private {
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
- unsigned int vlv_cdclk_freq;
+ unsigned int skl_boot_cdclk;
+ unsigned int cdclk_freq;
unsigned int hpll_freq;
/**
@@ -1759,6 +1815,8 @@ struct drm_i915_private {
u32 fdi_rx_config;
+ u32 chv_phy_control;
+
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
@@ -1815,19 +1873,19 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring,
- struct intel_context *ctx,
- struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
- struct drm_i915_gem_object *batch_obj,
- u64 exec_start, u32 flags);
+ int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags);
int (*init_rings)(struct drm_device *dev);
void (*cleanup_ring)(struct intel_engine_cs *ring);
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
- uint32_t request_uniq;
+ bool edp_low_vswing;
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
@@ -1913,18 +1971,18 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
- struct list_head ring_list;
+ struct list_head ring_list[I915_NUM_RINGS];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
- struct list_head batch_pool_list;
+ struct list_head batch_pool_link;
/**
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
- unsigned int active:1;
+ unsigned int active:I915_NUM_RINGS;
/**
* This is set if the object has been written to since last bound
@@ -1969,8 +2027,6 @@ struct drm_i915_gem_object {
* accurate mappable working set.
*/
unsigned int fault_mappable:1;
- unsigned int pin_mappable:1;
- unsigned int pin_display:1;
/*
* Is the object to be mapped as read-only to the GPU
@@ -1984,15 +2040,30 @@ struct drm_i915_gem_object {
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ unsigned int pin_display;
+
struct sg_table *pages;
int pages_pin_count;
+ struct get_page {
+ struct scatterlist *sg;
+ int last;
+ } get_page;
/* prime dma-buf support */
void *dma_buf_vmapping;
int vmapping_count;
- /** Breadcrumb of last rendering to the buffer. */
- struct drm_i915_gem_request *last_read_req;
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer, that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+ */
+ struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
struct drm_i915_gem_request *last_fenced_req;
@@ -2046,6 +2117,7 @@ struct drm_i915_gem_request {
struct kref ref;
/** On which ring this request was generated */
+ struct drm_i915_private *i915;
struct intel_engine_cs *ring;
/** GEM sequence number associated with this request. */
@@ -2093,8 +2165,6 @@ struct drm_i915_gem_request {
/** process identifier submitting this request */
struct pid *pid;
- uint32_t uniq;
-
/**
* The ELSP only accepts two elements at a time, so we queue
* context/tail pairs on a given queue (ring->execlist_queue) until the
@@ -2116,6 +2186,8 @@ struct drm_i915_gem_request {
};
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+ struct intel_context *ctx);
void i915_gem_request_free(struct kref *req_ref);
static inline uint32_t
@@ -2130,10 +2202,12 @@ i915_gem_request_get_ring(struct drm_i915_gem_request *req)
return req ? req->ring : NULL;
}
-static inline void
+static inline struct drm_i915_gem_request *
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
- kref_get(&req->ref);
+ if (req)
+ kref_get(&req->ref);
+ return req;
}
static inline void
@@ -2143,6 +2217,19 @@ i915_gem_request_unreference(struct drm_i915_gem_request *req)
kref_put(&req->ref, i915_gem_request_free);
}
+static inline void
+i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+{
+ struct drm_device *dev;
+
+ if (!req)
+ return;
+
+ dev = req->ring->dev;
+ if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
+ mutex_unlock(&dev->struct_mutex);
+}
+
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
@@ -2161,21 +2248,6 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
* a later patch when the call to i915_seqno_passed() is obsoleted...
*/
-struct drm_i915_file_private {
- struct drm_i915_private *dev_priv;
- struct drm_file *file;
-
- struct {
- spinlock_t lock;
- struct list_head request_list;
- struct delayed_work idle_work;
- } mm;
- struct idr context_idr;
-
- atomic_t rps_wait_boost;
- struct intel_engine_cs *bsd_ring;
-};
-
/*
* A command that requires special handling by the command parser.
*/
@@ -2228,10 +2300,15 @@ struct drm_i915_cmd_descriptor {
* Describes where to find a register address in the command to check
* against the ring's register whitelist. Only valid if flags has the
* CMD_DESC_REGISTER bit set.
+ *
+ * A non-zero step value implies that the command may access multiple
+ * registers in sequence (e.g. LRI); in that case step gives the
+ * distance in dwords between individual offset fields.
*/
struct {
u32 offset;
u32 mask;
+ u32 step;
} reg;
#define MAX_CMD_DESC_BITMASKS 3
@@ -2307,6 +2384,7 @@ struct drm_i915_cmd_table {
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
+#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2330,6 +2408,11 @@ struct drm_i915_cmd_table {
#define SKL_REVID_C0 (0x2)
#define SKL_REVID_D0 (0x3)
#define SKL_REVID_E0 (0x4)
+#define SKL_REVID_F0 (0x5)
+
+#define BXT_REVID_A0 (0x0)
+#define BXT_REVID_B0 (0x3)
+#define BXT_REVID_C0 (0x6)
/*
* The genX designation typically refers to the render engine, so render
@@ -2396,16 +2479,22 @@ struct drm_i915_cmd_table {
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ INTEL_INFO(dev)->gen >= 9)
+
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
- IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2471,6 +2560,7 @@ struct i915_params {
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
+ int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
@@ -2496,6 +2586,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void i915_firmware_load_error_print(const char *fw_path, int err);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
@@ -2520,6 +2611,13 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+/* Like above but the caller must manage the uncore.lock itself.
+ * Must be used with I915_READ_FW and friends.
+ */
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
@@ -2614,10 +2712,13 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
-#define PIN_MAPPABLE 0x1
-#define PIN_NONBLOCK 0x2
-#define PIN_GLOBAL 0x4
-#define PIN_OFFSET_BIAS 0x8
+/* Flags used by pin/bind&friends. */
+#define PIN_MAPPABLE (1<<0)
+#define PIN_NONBLOCK (1<<1)
+#define PIN_GLOBAL (1<<2)
+#define PIN_OFFSET_BIAS (1<<3)
+#define PIN_USER (1<<4)
+#define PIN_UPDATE (1<<5)
#define PIN_OFFSET_MASK (~4095)
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -2641,15 +2742,32 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+
+static inline int __sg_page_count(struct scatterlist *sg)
{
- struct sg_page_iter sg_iter;
+ return sg->length >> PAGE_SHIFT;
+}
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
- return sg_page_iter_page(&sg_iter);
+static inline struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+ if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
+ return NULL;
+
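+ /* get_page caches the scatterlist position of the last lookup;
+  * rewind to the start if the requested page lies before the
+  * cached element, otherwise walk forward from it below.
+  */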
+ if (n < obj->get_page.last) {
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+ }
- return NULL;
+ while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+ obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+ if (unlikely(sg_is_chain(obj->get_page.sg)))
+ obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+ }
+
+ return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}
+
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages == NULL);
@@ -2739,7 +2857,6 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -2757,10 +2874,13 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct drm_i915_file_private *file_priv);
+ struct intel_rps_client *rps);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly);
+int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
@@ -2993,8 +3113,10 @@ int i915_verify_lists(struct drm_device *dev);
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
+int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
+static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
@@ -3021,13 +3143,6 @@ void i915_destroy_error_state(struct drm_device *dev);
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
-/* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
- struct i915_gem_batch_pool *pool);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object*
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
@@ -3051,13 +3166,11 @@ void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
-static inline bool intel_gmbus_is_port_valid(unsigned port)
-{
- return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
-}
+extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin);
-extern struct i2c_adapter *intel_gmbus_get_adapter(
- struct drm_i915_private *dev_priv, unsigned port);
+extern struct i2c_adapter *
+intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -3203,6 +3316,17 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * criticial sections inside IRQ handlers where forcewake is explicitly
+ * controlled.
+ * Think twice, and think again, before using these.
+ * Note: Should only be used between intel_uncore_forcewake_irqlock() and
+ * intel_uncore_forcewake_irqunlock().
+ */
+#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
+#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
+#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d0995e7afc3..248fd1ac7b3a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,14 +38,14 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#define RQ_BUG_ON(expr)
+
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool readonly);
static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj);
-
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
+static void
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
@@ -378,13 +378,13 @@ out:
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
+ return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- kmem_cache_free(dev_priv->slab, obj);
+ kmem_cache_free(dev_priv->objects, obj);
}
static int
@@ -518,8 +518,6 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
-
- i915_gem_object_retire(obj);
}
ret = i915_gem_object_get_pages(obj);
@@ -939,8 +937,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
-
- i915_gem_object_retire(obj);
}
/* Same trick applies to invalidate partially written cachelines read
* before writing. */
@@ -1181,12 +1177,27 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
-static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+static int __i915_spin_request(struct drm_i915_gem_request *req)
{
- if (file_priv == NULL)
- return true;
+ unsigned long timeout;
- return !atomic_xchg(&file_priv->rps_wait_boost, true);
+ if (i915_gem_request_get_ring(req)->irq_refcount)
+ return -EBUSY;
+
+ timeout = jiffies + 1;
+ while (!need_resched()) {
+ if (i915_gem_request_completed(req, true))
+ return 0;
+
+ if (time_after_eq(jiffies, timeout))
+ break;
+
+ cpu_relax_lowlatency();
+ }
+ if (i915_gem_request_completed(req, false))
+ return 0;
+
+ return -EAGAIN;
}
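__i915_spin_request() above bounds its busy-wait to roughly one scheduler tick before the caller falls back to an interrupt-driven sleep. A standalone userspace sketch of the same spin-with-deadline pattern (illustrative only; the names and the nanosecond budget are made up, not taken from the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

/* Poll *done for at most budget_ns before giving up; the caller is then
 * expected to fall back to a sleeping wait, mirroring the structure of
 * __i915_spin_request(). */
static bool spin_for_completion(atomic_bool *done, long budget_ns)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (atomic_load(done))
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000000L +
		 (now.tv_nsec - start.tv_nsec) < budget_ns);

	return atomic_load(done);
}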
/**
@@ -1210,7 +1221,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
- struct drm_i915_file_private *file_priv)
+ struct intel_rps_client *rps)
{
struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
@@ -1224,26 +1235,32 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+ if (list_empty(&req->list))
+ return 0;
+
if (i915_gem_request_completed(req, true))
return 0;
timeout_expire = timeout ?
jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
- gen6_rps_boost(dev_priv);
- if (file_priv)
- mod_delayed_work(dev_priv->wq,
- &file_priv->mm.idle_work,
- msecs_to_jiffies(100));
- }
-
- if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
- return -ENODEV;
+ if (INTEL_INFO(dev_priv)->gen >= 6)
+ gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
+
+	/* Optimistic spin for the next jiffy before touching IRQs */
+ ret = __i915_spin_request(req);
+ if (ret == 0)
+ goto out;
+
+ if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
for (;;) {
struct timer_list timer;
@@ -1292,14 +1309,15 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
destroy_timer_on_stack(&timer);
}
}
- now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(req);
-
if (!irq_test_in_progress)
ring->irq_put(ring);
finish_wait(&ring->irq_queue, &wait);
+out:
+ now = ktime_get_raw_ns();
+ trace_i915_gem_request_wait_end(req);
+
if (timeout) {
s64 tres = *timeout - (now - before);
@@ -1319,6 +1337,63 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
return ret;
}
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_file_private *file_priv = request->file_priv;
+
+ if (!file_priv)
+ return;
+
+ spin_lock(&file_priv->mm.lock);
+ list_del(&request->client_list);
+ request->file_priv = NULL;
+ spin_unlock(&file_priv->mm.lock);
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+ trace_i915_gem_request_retire(request);
+
+ /* We know the GPU must have read the request to have
+ * sent us the seqno + interrupt, so use the position
+ * of tail of the request to update the last known position
+ * of the GPU head.
+ *
+ * Note this requires that we are always called in request
+ * completion order.
+ */
+ request->ringbuf->last_retired_head = request->postfix;
+
+ list_del_init(&request->list);
+ i915_gem_request_remove_from_client(request);
+
+ put_pid(request->pid);
+
+ i915_gem_request_unreference(request);
+}
+
+static void
+__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *engine = req->ring;
+ struct drm_i915_gem_request *tmp;
+
+ lockdep_assert_held(&engine->dev->struct_mutex);
+
+ if (list_empty(&req->list))
+ return;
+
+ do {
+ tmp = list_first_entry(&engine->request_list,
+ typeof(*tmp), list);
+
+ i915_gem_request_retire(tmp);
+ } while (tmp != req);
+
+ WARN_ON(i915_verify_lists(engine->dev));
+}
+
/**
* Waits for a request to be signaled, and cleans up the
* request and object lists appropriately for that event.
@@ -1329,7 +1404,6 @@ i915_wait_request(struct drm_i915_gem_request *req)
struct drm_device *dev;
struct drm_i915_private *dev_priv;
bool interruptible;
- unsigned reset_counter;
int ret;
BUG_ON(req == NULL);
@@ -1348,29 +1422,13 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
- ret = __i915_wait_request(req, reset_counter,
+ ret = __i915_wait_request(req,
+ atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
- i915_gem_request_unreference(req);
- return ret;
-}
-
-static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
-{
- if (!obj->active)
- return 0;
-
- /* Manually manage the write flush as we may have not yet
- * retired the buffer.
- *
- * Note that the last_write_req is always the earlier of
- * the two (read/write) requests, so if we haved successfully waited,
- * we know we have passed the last write.
- */
- i915_gem_request_assign(&obj->last_write_req, NULL);
+ if (ret)
+ return ret;
+ __i915_gem_request_retire__upto(req);
return 0;
}
@@ -1378,22 +1436,56 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static __must_check int
+int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
- struct drm_i915_gem_request *req;
- int ret;
+ int ret, i;
- req = readonly ? obj->last_write_req : obj->last_read_req;
- if (!req)
+ if (!obj->active)
return 0;
- ret = i915_wait_request(req);
- if (ret)
- return ret;
+ if (readonly) {
+ if (obj->last_write_req != NULL) {
+ ret = i915_wait_request(obj->last_write_req);
+ if (ret)
+ return ret;
+
+ i = obj->last_write_req->ring->id;
+ if (obj->last_read_req[i] == obj->last_write_req)
+ i915_gem_object_retire__read(obj, i);
+ else
+ i915_gem_object_retire__write(obj);
+ }
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (obj->last_read_req[i] == NULL)
+ continue;
+
+ ret = i915_wait_request(obj->last_read_req[i]);
+ if (ret)
+ return ret;
+
+ i915_gem_object_retire__read(obj, i);
+ }
+ RQ_BUG_ON(obj->active);
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_request *req)
+{
+ int ring = req->ring->id;
+
+ if (obj->last_read_req[ring] == req)
+ i915_gem_object_retire__read(obj, ring);
+ else if (obj->last_write_req == req)
+ i915_gem_object_retire__write(obj);
- return i915_gem_object_wait_rendering__tail(obj);
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1401,40 +1493,75 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
*/
static __must_check int
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
- struct drm_i915_file_private *file_priv,
+ struct intel_rps_client *rps,
bool readonly)
{
- struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *requests[I915_NUM_RINGS];
unsigned reset_counter;
- int ret;
+ int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!dev_priv->mm.interruptible);
- req = readonly ? obj->last_write_req : obj->last_read_req;
- if (!req)
+ if (!obj->active)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
+
+ if (readonly) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_write_req;
+ if (req == NULL)
+ return 0;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ goto err;
+
+ requests[n++] = i915_gem_request_reference(req);
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ goto err;
+
+ requests[n++] = i915_gem_request_reference(req);
+ }
+ }
+
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
+ for (i = 0; ret == 0 && i < n; i++)
+ ret = __i915_wait_request(requests[i], reset_counter, true,
+ NULL, rps);
mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(req);
- if (ret)
- return ret;
- return i915_gem_object_wait_rendering__tail(obj);
+err:
+ for (i = 0; i < n; i++) {
+ if (ret == 0)
+ i915_gem_object_retire_request(obj, requests[i]);
+ i915_gem_request_unreference(requests[i]);
+ }
+
+ return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ return &fpriv->rps;
}
/**
@@ -1479,7 +1606,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* to catch cases where we are gazumped.
*/
ret = i915_gem_object_wait_rendering__nonblocking(obj,
- file->driver_priv,
+ to_rps_client(file),
!write_domain);
if (ret)
goto unref;
@@ -1616,6 +1743,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1648,8 +1776,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- /* Now bind it into the GTT if needed */
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ /* Use a partial view if the object is bigger than the aperture. */
+ if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ static const unsigned int chunk_size = 256; // 1 MiB
+
+ memset(&view, 0, sizeof(view));
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.params.partial.offset = rounddown(page_offset, chunk_size);
+ view.params.partial.size =
+ min_t(unsigned int,
+ chunk_size,
+ (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+ view.params.partial.offset);
+ }
+
+ /* Now pin it into the GTT if needed */
+ ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
if (ret)
goto unlock;
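A worked illustration of the partial-view window chosen above (illustrative only, not part of the patch): with 4 KiB pages, a chunk_size of 256 pages is 1 MiB, so a fault at page 1000 of a large untiled object gets the window starting at page 768; the real code additionally clamps the window to the end of the userspace mapping.

#include <stdio.h>

/* Recompute the window picked for a hypothetical fault at page 1000. */
int main(void)
{
	const unsigned int chunk_size = 256;	/* pages; 1 MiB with 4 KiB pages */
	unsigned int page_offset = 1000;	/* hypothetical faulting page */
	unsigned int start = page_offset - (page_offset % chunk_size); /* rounddown() */

	printf("partial view: pages %u-%u\n", start, start + chunk_size - 1);
	return 0;	/* prints "partial view: pages 768-1023" */
}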
@@ -1662,30 +1805,50 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn = dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
- if (!obj->fault_mappable) {
- unsigned long size = min_t(unsigned long,
- vma->vm_end - vma->vm_start,
- obj->base.size);
- int i;
+ if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
+ /* Overriding existing pages in partial view does not cause
+ * us any trouble as TLBs are still valid because the fault
+ * is due to userspace losing part of the mapping or never
+	 * having accessed it before (in this partial view's range).
+ */
+ unsigned long base = vma->vm_start +
+ (view.params.partial.offset << PAGE_SHIFT);
+ unsigned int i;
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- ret = vm_insert_pfn(vma,
- (unsigned long)vma->vm_start + i * PAGE_SIZE,
- pfn + i);
+ for (i = 0; i < view.params.partial.size; i++) {
+ ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
- } else
- ret = vm_insert_pfn(vma,
- (unsigned long)vmf->virtual_address,
- pfn + page_offset);
+ } else {
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
+ }
unpin:
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1864,7 +2027,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
uint32_t handle,
uint64_t *offset)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -1878,11 +2040,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- if (obj->base.size > dev_priv->gtt.mappable_end) {
- ret = -E2BIG;
- goto out;
- }
-
if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT;
@@ -2178,81 +2335,65 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return ret;
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
+ obj->get_page.sg = obj->pages->sgl;
+ obj->get_page.last = 0;
+
return 0;
}
-static void
-i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct intel_engine_cs *ring)
{
- struct drm_i915_gem_request *req;
- struct intel_engine_cs *old_ring;
-
- BUG_ON(ring == NULL);
-
- req = intel_ring_get_request(ring);
- old_ring = i915_gem_request_get_ring(obj->last_read_req);
-
- if (old_ring != ring && obj->last_write_req) {
- /* Keep the request relative to the current ring */
- i915_gem_request_assign(&obj->last_write_req, req);
- }
+ struct drm_i915_gem_object *obj = vma->obj;
/* Add a reference if we're newly entering the active list. */
- if (!obj->active) {
+ if (obj->active == 0)
drm_gem_object_reference(&obj->base);
- obj->active = 1;
- }
+ obj->active |= intel_ring_flag(ring);
- list_move_tail(&obj->ring_list, &ring->active_list);
+ list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
+ i915_gem_request_assign(&obj->last_read_req[ring->id],
+ intel_ring_get_request(ring));
- i915_gem_request_assign(&obj->last_read_req, req);
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct intel_engine_cs *ring)
+static void
+i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
- list_move_tail(&vma->mm_list, &vma->vm->active_list);
- return i915_gem_object_move_to_active(vma->obj, ring);
+ RQ_BUG_ON(obj->last_write_req == NULL);
+ RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+
+ i915_gem_request_assign(&obj->last_write_req, NULL);
+ intel_fb_obj_flush(obj, true);
}
static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;
- BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
- BUG_ON(!obj->active);
+ RQ_BUG_ON(obj->last_read_req[ring] == NULL);
+ RQ_BUG_ON(!(obj->active & (1 << ring)));
+
+ list_del_init(&obj->ring_list[ring]);
+ i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+
+ if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+ i915_gem_object_retire__write(obj);
+
+ obj->active &= ~(1 << ring);
+ if (obj->active)
+ return;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
- intel_fb_obj_flush(obj, true);
-
- list_del_init(&obj->ring_list);
-
- i915_gem_request_assign(&obj->last_read_req, NULL);
- i915_gem_request_assign(&obj->last_write_req, NULL);
- obj->base.write_domain = 0;
-
i915_gem_request_assign(&obj->last_fenced_req, NULL);
-
- obj->active = 0;
drm_gem_object_unreference(&obj->base);
-
- WARN_ON(i915_verify_lists(dev));
-}
-
-static void
-i915_gem_object_retire(struct drm_i915_gem_object *obj)
-{
- if (obj->last_read_req == NULL)
- return;
-
- if (i915_gem_request_completed(obj->last_read_req, true))
- i915_gem_object_move_to_inactive(obj);
}
static int
@@ -2421,7 +2562,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
i915_queue_hangcheck(ring->dev);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
@@ -2430,20 +2570,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
return 0;
}
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
- struct drm_i915_file_private *file_priv = request->file_priv;
-
- if (!file_priv)
- return;
-
- spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
- spin_unlock(&file_priv->mm.lock);
-}
-
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
const struct intel_context *ctx)
{
@@ -2489,16 +2615,6 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
}
}
-static void i915_gem_free_request(struct drm_i915_gem_request *request)
-{
- list_del(&request->list);
- i915_gem_request_remove_from_client(request);
-
- put_pid(request->pid);
-
- i915_gem_request_unreference(request);
-}
-
void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
@@ -2516,7 +2632,45 @@ void i915_gem_request_free(struct kref *req_ref)
i915_gem_context_unreference(ctx);
}
- kfree(req);
+ kmem_cache_free(req->i915->requests, req);
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+ struct intel_context *ctx)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_gem_request *req;
+ int ret;
+
+ if (ring->outstanding_lazy_request)
+ return 0;
+
+ req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
+
+ kref_init(&req->ref);
+ req->i915 = dev_priv;
+
+ ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+ if (ret)
+ goto err;
+
+ req->ring = ring;
+
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_alloc_request_extras(req, ctx);
+ else
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err;
+
+ ring->outstanding_lazy_request = req;
+ return 0;
+
+err:
+ kmem_cache_free(dev_priv->requests, req);
+ return ret;
}
struct drm_i915_gem_request *
@@ -2561,9 +2715,9 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- ring_list);
+ ring_list[ring->id]);
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_retire__read(obj, ring->id);
}
/*
@@ -2578,7 +2732,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
- intel_runtime_pm_put(dev_priv);
if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(ring, submit_req->ctx);
@@ -2600,7 +2753,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
- i915_gem_free_request(request);
+ i915_gem_request_retire(request);
}
/* This may not have been flushed before the reset, so clean it now */
@@ -2648,6 +2801,8 @@ void i915_gem_reset(struct drm_device *dev)
i915_gem_context_reset(dev);
i915_gem_restore_fences(dev);
+
+ WARN_ON(i915_verify_lists(dev));
}
/**
@@ -2656,9 +2811,6 @@ void i915_gem_reset(struct drm_device *dev)
void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{
- if (list_empty(&ring->request_list))
- return;
-
WARN_ON(i915_verify_lists(ring->dev));
/* Retire requests first as we use it above for the early return.
@@ -2676,16 +2828,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
if (!i915_gem_request_completed(request, true))
break;
- trace_i915_gem_request_retire(request);
-
- /* We know the GPU must have read the request to have
- * sent us the seqno + interrupt, so use the position
- * of tail of the request to update the last known position
- * of the GPU head.
- */
- request->ringbuf->last_retired_head = request->postfix;
-
- i915_gem_free_request(request);
+ i915_gem_request_retire(request);
}
/* Move any buffers on the active list that are no longer referenced
@@ -2697,12 +2840,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
- ring_list);
+ ring_list[ring->id]);
- if (!i915_gem_request_completed(obj->last_read_req, true))
+ if (!list_empty(&obj->last_read_req[ring->id]->list))
break;
- i915_gem_object_move_to_inactive(obj);
+ i915_gem_object_retire__read(obj, ring->id);
}
if (unlikely(ring->trace_irq_req &&
@@ -2768,8 +2911,25 @@ i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ if (!list_empty(&ring->request_list))
+ return;
+
+ intel_mark_idle(dev);
+
+ if (mutex_trylock(&dev->struct_mutex)) {
+ struct intel_engine_cs *ring;
+ int i;
- intel_mark_idle(dev_priv->dev);
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_batch_pool_fini(&ring->batch_pool);
+
+ mutex_unlock(&dev->struct_mutex);
+ }
}
/**
@@ -2780,17 +2940,30 @@ i915_gem_idle_work_handler(struct work_struct *work)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- struct intel_engine_cs *ring;
- int ret;
+ int ret, i;
+
+ if (!obj->active)
+ return 0;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
- if (obj->active) {
- ring = i915_gem_request_get_ring(obj->last_read_req);
+ if (list_empty(&req->list))
+ goto retire;
- ret = i915_gem_check_olr(obj->last_read_req);
+ ret = i915_gem_check_olr(req);
if (ret)
return ret;
- i915_gem_retire_requests_ring(ring);
+ if (i915_gem_request_completed(req, true)) {
+ __i915_gem_request_retire__upto(req);
+retire:
+ i915_gem_object_retire__read(obj, i);
+ }
}
return 0;
@@ -2824,9 +2997,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *req;
+ struct drm_i915_gem_request *req[I915_NUM_RINGS];
unsigned reset_counter;
- int ret = 0;
+ int i, n = 0;
+ int ret;
if (args->flags != 0)
return -EINVAL;
@@ -2846,11 +3020,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret)
goto out;
- if (!obj->active || !obj->last_read_req)
+ if (!obj->active)
goto out;
- req = obj->last_read_req;
-
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout == 0 (like busy ioctl)
*/
@@ -2861,15 +3033,23 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- i915_gem_request_reference(req);
- mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_request(req, reset_counter, true,
- args->timeout_ns > 0 ? &args->timeout_ns : NULL,
- file->driver_priv);
- mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(req);
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (obj->last_read_req[i] == NULL)
+ continue;
+
+ req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+ }
+
mutex_unlock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++) {
+ if (ret == 0)
+ ret = __i915_wait_request(req[i], reset_counter, true,
+ args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+ file->driver_priv);
+ i915_gem_request_unreference__unlocked(req[i]);
+ }
return ret;
out:
@@ -2878,6 +3058,59 @@ out:
return ret;
}
+static int
+__i915_gem_object_sync(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *to,
+ struct drm_i915_gem_request *req)
+{
+ struct intel_engine_cs *from;
+ int ret;
+
+ from = i915_gem_request_get_ring(req);
+ if (to == from)
+ return 0;
+
+ if (i915_gem_request_completed(req, true))
+ return 0;
+
+ ret = i915_gem_check_olr(req);
+ if (ret)
+ return ret;
+
+ if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ ret = __i915_wait_request(req,
+ atomic_read(&i915->gpu_error.reset_counter),
+ i915->mm.interruptible,
+ NULL,
+ &i915->rps.semaphores);
+ if (ret)
+ return ret;
+
+ i915_gem_object_retire_request(obj, req);
+ } else {
+ int idx = intel_ring_sync_index(from, to);
+ u32 seqno = i915_gem_request_get_seqno(req);
+
+ if (seqno <= from->semaphore.sync_seqno[idx])
+ return 0;
+
+ trace_i915_gem_ring_sync_to(from, to, req);
+ ret = to->semaphore.sync_to(to, from, seqno);
+ if (ret)
+ return ret;
+
+ /* We use last_read_req because sync_to()
+ * might have just caused seqno wrap under
+ * the radar.
+ */
+ from->semaphore.sync_seqno[idx] =
+ i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+ }
+
+ return 0;
+}
+
/**
* i915_gem_object_sync - sync an object to a ring.
*
@@ -2886,7 +3119,17 @@ out:
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring.
+ * rather than a particular GPU ring. Conceptually we serialise writes
+ * between engines inside the GPU. We only allow one engine to write
+ * into a buffer at any time, but multiple readers. To ensure each has
+ * a coherent view of memory, we must:
+ *
+ * - If there is an outstanding write request to the object, the new
+ * request must wait for it to complete (either CPU or in hw, requests
+ * on the same ring will be naturally ordered).
+ *
+ * - If we are a write request (pending_write_domain is set), the new
+ * request must wait for outstanding read requests to complete.
*
* Returns 0 if successful, else propagates up the lower layer error.
*/
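A hedged usage sketch of the rules documented above (not taken from this patch): before emitting blitter commands that read the object, the caller serialises it against the target ring; passing NULL instead waits for rendering so the CPU can access the buffer.

	/* Illustrative caller, under struct_mutex: 'obj' is about to be read by
	 * the blitter, so wait for (or semaphore against) any outstanding write
	 * before emitting the commands. */
	ret = i915_gem_object_sync(obj, &dev_priv->ring[BCS]);
	if (ret)
		return ret;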
@@ -2894,41 +3137,32 @@ int
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to)
{
- struct intel_engine_cs *from;
- u32 seqno;
- int ret, idx;
-
- from = i915_gem_request_get_ring(obj->last_read_req);
-
- if (from == NULL || to == from)
- return 0;
-
- if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
- return i915_gem_object_wait_rendering(obj, false);
+ const bool readonly = obj->base.pending_write_domain == 0;
+ struct drm_i915_gem_request *req[I915_NUM_RINGS];
+ int ret, i, n;
- idx = intel_ring_sync_index(from, to);
-
- seqno = i915_gem_request_get_seqno(obj->last_read_req);
- /* Optimization: Avoid semaphore sync when we are sure we already
- * waited for an object with higher seqno */
- if (seqno <= from->semaphore.sync_seqno[idx])
+ if (!obj->active)
return 0;
- ret = i915_gem_check_olr(obj->last_read_req);
- if (ret)
- return ret;
+ if (to == NULL)
+ return i915_gem_object_wait_rendering(obj, readonly);
- trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
- ret = to->semaphore.sync_to(to, from, seqno);
- if (!ret)
- /* We use last_read_req because sync_to()
- * might have just caused seqno wrap under
- * the radar.
- */
- from->semaphore.sync_seqno[idx] =
- i915_gem_request_get_seqno(obj->last_read_req);
+ n = 0;
+ if (readonly) {
+ if (obj->last_write_req)
+ req[n++] = obj->last_write_req;
+ } else {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (obj->last_read_req[i])
+ req[n++] = obj->last_read_req[i];
+ }
+ for (i = 0; i < n; i++) {
+ ret = __i915_gem_object_sync(obj, to, req[i]);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
@@ -2974,7 +3208,7 @@ int i915_vma_unbind(struct i915_vma *vma)
BUG_ON(obj->pages == NULL);
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
@@ -2994,7 +3228,8 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- vma->unbind_vma(vma);
+ vma->vm->unbind_vma(vma);
+ vma->bound = 0;
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
@@ -3013,10 +3248,6 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
if (list_empty(&obj->vma_list)) {
- /* Throw away the active reference before
- * moving to the unbound list. */
- i915_gem_object_retire(obj);
-
i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}
@@ -3049,6 +3280,7 @@ int i915_gpu_idle(struct drm_device *dev)
return ret;
}
+ WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -3423,7 +3655,8 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
}
/**
- * Finds free space in the GTT aperture and binds the object there.
+ * Finds free space in the GTT aperture and binds the object or a view of it
+ * there.
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3442,36 +3675,60 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
- return ERR_PTR(-EINVAL);
+ if (i915_is_ggtt(vm)) {
+ u32 view_size;
+
+ if (WARN_ON(!ggtt_view))
+ return ERR_PTR(-EINVAL);
- fence_size = i915_gem_get_gtt_size(dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, true);
- unfenced_alignment =
- i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, false);
+ view_size = i915_ggtt_view_size(obj, ggtt_view);
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ view_size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : view_size;
+ } else {
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+ }
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
- DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+ ggtt_view ? ggtt_view->type : 0,
+ alignment);
return ERR_PTR(-EINVAL);
}
- size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-
- /* If the object is bigger than the entire aperture, reject it early
- * before evicting everything in a vain attempt to find space.
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
*/
- if (obj->base.size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
- obj->base.size,
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ ggtt_view ? ggtt_view->type : 0,
+ size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
@@ -3515,20 +3772,8 @@ search_free:
if (ret)
goto err_remove_node;
- /* allocate before insert / bind */
- if (vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
- VM_TO_TRACE_NAME(vma->vm));
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- goto err_remove_node;
- }
-
trace_i915_vma_bind(vma, flags);
- ret = i915_vma_bind(vma, obj->cache_level,
- flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
goto err_finish_gtt;
@@ -3658,8 +3903,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_retire(obj);
-
/* Flush and acquire obj->pages so that we are coherent through
* direct access in memory with previous cached writes through
* shmemfs and that our cache domain tracking remains valid.
@@ -3735,7 +3978,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
}
if (i915_gem_obj_bound_any(obj)) {
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -3754,7 +3997,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node)) {
ret = i915_vma_bind(vma, cache_level,
- vma->bound & GLOBAL_BIND);
+ PIN_UPDATE);
if (ret)
return ret;
}
@@ -3779,17 +4022,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (&obj->base == NULL)
+ return -ENOENT;
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3806,10 +4042,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
break;
}
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
@@ -3852,24 +4086,6 @@ unlock:
return ret;
}
-static bool is_pin_display(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- vma = i915_gem_obj_to_ggtt(obj);
- if (!vma)
- return false;
-
- /* There are 2 sources that pin objects:
- * 1. The display engine (scanouts, sprites, cursors);
- * 2. Reservations for execbuffer;
- *
- * We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path.
- */
- return vma->pin_count;
-}
-
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3882,20 +4098,16 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
- bool was_pin_display;
int ret;
- if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
- ret = i915_gem_object_sync(obj, pipelined);
- if (ret)
- return ret;
- }
+ ret = i915_gem_object_sync(obj, pipelined);
+ if (ret)
+ return ret;
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
- was_pin_display = obj->pin_display;
- obj->pin_display = true;
+ obj->pin_display++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
@@ -3939,8 +4151,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return 0;
err_unpin_display:
- WARN_ON(was_pin_display != is_pin_display(obj));
- obj->pin_display = was_pin_display;
+ obj->pin_display--;
return ret;
}
@@ -3948,26 +4159,12 @@ void
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- i915_gem_object_ggtt_unpin_view(obj, view);
-
- obj->pin_display = is_pin_display(obj);
-}
-
-int
-i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
-{
- int ret;
-
- if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if (WARN_ON(obj->pin_display == 0))
+ return;
- ret = i915_gem_object_wait_rendering(obj, false);
- if (ret)
- return ret;
+ i915_gem_object_ggtt_unpin_view(obj, view);
- /* Ensure that we invalidate the GPU's caches and TLBs. */
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- return 0;
+ obj->pin_display--;
}
/**
@@ -3989,7 +4186,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_retire(obj);
i915_gem_object_flush_gtt_write_domain(obj);
old_write_domain = obj->base.write_domain;
@@ -4040,7 +4236,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+ unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
int ret;
@@ -4072,9 +4268,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- mutex_lock(&dev->struct_mutex);
- i915_gem_request_unreference(target);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_request_unreference__unlocked(target);
return ret;
}
@@ -4155,23 +4349,18 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
- /* In true PPGTT, bind has possibly changed PDEs, which
- * means we must do a context switch before the GPU can
- * accurately read some of the VMAs.
- */
vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
- }
-
- if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
- ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+ } else {
+ ret = i915_vma_bind(vma, obj->cache_level, flags);
if (ret)
return ret;
}
- if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
+ (bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
@@ -4190,14 +4379,11 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
- }
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ }
vma->pin_count++;
- if (flags & PIN_MAPPABLE)
- obj->pin_mappable |= true;
-
return 0;
}
@@ -4235,8 +4421,7 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
- if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
- obj->pin_mappable = false;
+ --vma->pin_count;
}
bool
@@ -4289,15 +4474,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* necessary flushes here.
*/
ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto unref;
- args->busy = obj->active;
- if (obj->last_read_req) {
- struct intel_engine_cs *ring;
- BUILD_BUG_ON(I915_NUM_RINGS > 16);
- ring = i915_gem_request_get_ring(obj->last_read_req);
- args->busy |= intel_ring_flag(ring) << 16;
- }
+ BUILD_BUG_ON(I915_NUM_RINGS > 16);
+ args->busy = obj->active << 16;
+ if (obj->last_write_req)
+ args->busy |= obj->last_write_req->ring->id;
+unref:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -4371,11 +4556,14 @@ unlock:
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
+ int i;
+
INIT_LIST_HEAD(&obj->global_list);
- INIT_LIST_HEAD(&obj->ring_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ INIT_LIST_HEAD(&obj->ring_list[i]);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
- INIT_LIST_HEAD(&obj->batch_pool_list);
+ INIT_LIST_HEAD(&obj->batch_pool_link);
obj->ops = ops;
@@ -4577,7 +4765,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
list_del(&vma->vma_link);
- kfree(vma);
+ kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
}
static void
@@ -4864,12 +5052,12 @@ int i915_gem_init(struct drm_device *dev)
}
if (!i915.enable_execlists) {
- dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+ dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings;
dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
dev_priv->gt.stop_ring = intel_stop_ring_buffer;
} else {
- dev_priv->gt.do_execbuf = intel_execlists_submission;
+ dev_priv->gt.execbuf_submit = intel_execlists_submission;
dev_priv->gt.init_rings = intel_logical_rings_init;
dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
dev_priv->gt.stop_ring = intel_logical_ring_stop;
@@ -4951,11 +5139,21 @@ i915_gem_load(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- dev_priv->slab =
+ dev_priv->objects =
kmem_cache_create("i915_gem_object",
sizeof(struct drm_i915_gem_object), 0,
SLAB_HWCACHE_ALIGN,
NULL);
+ dev_priv->vmas =
+ kmem_cache_create("i915_gem_vma",
+ sizeof(struct i915_vma), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ dev_priv->requests =
+ kmem_cache_create("i915_gem_request",
+ sizeof(struct drm_i915_gem_request), 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
@@ -4998,8 +5196,6 @@ i915_gem_load(struct drm_device *dev)
i915_gem_shrinker_init(dev_priv);
- i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
-
mutex_init(&dev_priv->fb_tracking.lock);
}
@@ -5007,8 +5203,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- cancel_delayed_work_sync(&file_priv->mm.idle_work);
-
/* Clean up our request list when the client is going away, so that
* later retire_requests won't dereference our soon-to-be-gone
* file_priv.
@@ -5024,15 +5218,12 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
request->file_priv = NULL;
}
spin_unlock(&file_priv->mm.lock);
-}
-static void
-i915_gem_file_idle_work_handler(struct work_struct *work)
-{
- struct drm_i915_file_private *file_priv =
- container_of(work, typeof(*file_priv), mm.idle_work.work);
-
- atomic_set(&file_priv->rps_wait_boost, false);
+ if (!list_empty(&file_priv->rps.link)) {
+ spin_lock(&to_i915(dev)->rps.client_lock);
+ list_del(&file_priv->rps.link);
+ spin_unlock(&to_i915(dev)->rps.client_lock);
+ }
}
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
@@ -5049,11 +5240,10 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
file_priv->dev_priv = dev->dev_private;
file_priv->file = file;
+ INIT_LIST_HEAD(&file_priv->rps.link);
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
- INIT_DELAYED_WORK(&file_priv->mm.idle_work,
- i915_gem_file_idle_work_handler);
ret = i915_gem_context_open(dev, file);
if (ret)
@@ -5123,7 +5313,7 @@ i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
- WARN(1, "global vma for this object not found.\n");
+ WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
return -1;
}
@@ -5192,13 +5382,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->pin_count > 0)
return true;
- }
+
return false;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index c690170a1c4f..7bf2f3f2968e 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -23,6 +23,7 @@
*/
#include "i915_drv.h"
+#include "i915_gem_batch_pool.h"
/**
* DOC: batch pool
@@ -46,8 +47,12 @@
void i915_gem_batch_pool_init(struct drm_device *dev,
struct i915_gem_batch_pool *pool)
{
+ int n;
+
pool->dev = dev;
- INIT_LIST_HEAD(&pool->cache_list);
+
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+ INIT_LIST_HEAD(&pool->cache_list[n]);
}
/**
@@ -58,33 +63,35 @@ void i915_gem_batch_pool_init(struct drm_device *dev,
*/
void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
{
- WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+ int n;
- while (!list_empty(&pool->cache_list)) {
- struct drm_i915_gem_object *obj =
- list_first_entry(&pool->cache_list,
- struct drm_i915_gem_object,
- batch_pool_list);
+ WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
- WARN_ON(obj->active);
+ for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+ while (!list_empty(&pool->cache_list[n])) {
+ struct drm_i915_gem_object *obj =
+ list_first_entry(&pool->cache_list[n],
+ struct drm_i915_gem_object,
+ batch_pool_link);
- list_del_init(&obj->batch_pool_list);
- drm_gem_object_unreference(&obj->base);
+ list_del(&obj->batch_pool_link);
+ drm_gem_object_unreference(&obj->base);
+ }
}
}
/**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
* @pool: the batch buffer pool
* @size: the minimum desired size of the returned buffer
*
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
*
* Note: Callers must hold the struct_mutex
*
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
*/
struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -92,46 +99,53 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
{
struct drm_i915_gem_object *obj = NULL;
struct drm_i915_gem_object *tmp, *next;
+ struct list_head *list;
+ int n;
WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
- list_for_each_entry_safe(tmp, next,
- &pool->cache_list, batch_pool_list) {
-
+	/* Compute a power-of-two bucket, but throw everything of eight pages
+	 * and up into the same bucket: i.e. the buckets hold objects of
+	 * (1 page, 2-3 pages, 4-7 pages, 8+ pages).
+ */
+ n = fls(size >> PAGE_SHIFT) - 1;
+ if (n >= ARRAY_SIZE(pool->cache_list))
+ n = ARRAY_SIZE(pool->cache_list) - 1;
+ list = &pool->cache_list[n];
+
+ list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
+ /* The batches are strictly LRU ordered */
if (tmp->active)
- continue;
+ break;
/* While we're looping, do some clean up */
if (tmp->madv == __I915_MADV_PURGED) {
- list_del(&tmp->batch_pool_list);
+ list_del(&tmp->batch_pool_link);
drm_gem_object_unreference(&tmp->base);
continue;
}
- /*
- * Select a buffer that is at least as big as needed
- * but not 'too much' bigger. A better way to do this
- * might be to bucket the pool objects based on size.
- */
- if (tmp->base.size >= size &&
- tmp->base.size <= (2 * size)) {
+ if (tmp->base.size >= size) {
obj = tmp;
break;
}
}
- if (!obj) {
+ if (obj == NULL) {
+ int ret;
+
obj = i915_gem_alloc_object(pool->dev, size);
- if (!obj)
+ if (obj == NULL)
return ERR_PTR(-ENOMEM);
- list_add_tail(&obj->batch_pool_list, &pool->cache_list);
- }
- else
- /* Keep list in LRU order */
- list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
- obj->madv = I915_MADV_WILLNEED;
+ obj->madv = I915_MADV_DONTNEED;
+ }
+ list_move_tail(&obj->batch_pool_link, list);
+ i915_gem_object_pin_pages(obj);
return obj;
}
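A worked check of the bucket selection in i915_gem_batch_pool_get() above (illustrative only, and it assumes the object is at least one page): fls(pages) - 1 picks the power-of-two bucket, with anything of eight pages or more clamped into the last list. Callers must also unpin the pages on the object they get back, as the command-parser path later in this diff does.

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(pages) - 1, clamped to 4 buckets. */
static int bucket(unsigned int pages)
{
	int n = 31 - __builtin_clz(pages);	/* fls(pages) - 1, pages >= 1 */
	return n > 3 ? 3 : n;
}

int main(void)
{
	/* 1 page -> 0, 3 pages -> 1, 4 pages -> 2, 20 pages -> 3 (clamped) */
	printf("%d %d %d %d\n", bucket(1), bucket(3), bucket(4), bucket(20));
	return 0;
}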
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
new file mode 100644
index 000000000000..848e90703eed
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_BATCH_POOL_H
+#define I915_GEM_BATCH_POOL_H
+
+#include "i915_drv.h"
+
+struct i915_gem_batch_pool {
+ struct drm_device *dev;
+ struct list_head cache_list[4];
+};
+
+/* i915_gem_batch_pool.c */
+void i915_gem_batch_pool_init(struct drm_device *dev,
+ struct i915_gem_batch_pool *pool);
+void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
+struct drm_i915_gem_object*
+i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
+
+#endif /* I915_GEM_BATCH_POOL_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f3e84c44d009..8867818b1401 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,7 +157,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
struct drm_i915_gem_object *obj;
int ret;
- obj = i915_gem_alloc_object(dev, size);
+ obj = i915_gem_object_create_stolen(dev, size);
+ if (obj == NULL)
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -573,20 +575,12 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring,
struct intel_context *from,
struct intel_context *to)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (to->remap_slice)
return false;
- if (to->ppgtt) {
- if (from == to && !test_bit(ring->id,
- &to->ppgtt->pd_dirty_rings))
- return true;
- } else if (dev_priv->mm.aliasing_ppgtt) {
- if (from == to && !test_bit(ring->id,
- &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
- return true;
- }
+ if (to->ppgtt && from == to &&
+ !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
+ return true;
return false;
}
@@ -636,7 +630,6 @@ static int do_switch(struct intel_engine_cs *ring,
struct intel_context *from = ring->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
- struct i915_vma *vma;
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -673,7 +666,7 @@ static int do_switch(struct intel_engine_cs *ring,
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
- clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
+ to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
}
if (ring != &dev_priv->ring[RCS]) {
@@ -694,16 +687,6 @@ static int do_switch(struct intel_engine_cs *ring,
if (ret)
goto unpin_out;
- vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
- if (!(vma->bound & GLOBAL_BIND)) {
- ret = i915_vma_bind(vma,
- to->legacy_hw_ctx.rcs_state->cache_level,
- GLOBAL_BIND);
- /* This shouldn't ever fail. */
- if (WARN_ONCE(ret, "GGTT context bind failed!"))
- goto unpin_out;
- }
-
if (!to->legacy_hw_ctx.initialized) {
hw_flags |= MI_RESTORE_INHIBIT;
/* NB: If we inhibit the restore, the context is not allowed to
@@ -711,12 +694,14 @@ static int do_switch(struct intel_engine_cs *ring,
* space. This means we must enforce that a page table load
* occur when this occurs. */
} else if (to->ppgtt &&
- test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+ (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
+ to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+ }
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(ring, to) &&
- needs_pd_load_post(ring, to, hw_flags));
+ needs_pd_load_post(ring, to, hw_flags));
ret = mi_set_context(ring, to, hw_flags);
if (ret)
@@ -768,8 +753,6 @@ static int do_switch(struct intel_engine_cs *ring,
* swapped, but there is no way to do that yet.
*/
from->legacy_hw_ctx.rcs_state->dirty = 1;
- BUG_ON(i915_gem_request_get_ring(
- from->legacy_hw_ctx.rcs_state->last_read_req) != ring);
/* obj is kept alive until the next request by its active ref */
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index f462d1b51d97..17299d04189f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -34,82 +34,34 @@ int
i915_verify_lists(struct drm_device *dev)
{
static int warned;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *ring;
int err = 0;
+ int i;
if (warned)
return 0;
- list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed render active %p\n", obj);
- err++;
- break;
- } else if (!obj->active ||
- (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
- DRM_ERROR("invalid render active %p (a %d r %x)\n",
- obj,
- obj->active,
- obj->base.read_domains);
- err++;
- } else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
- DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
- obj,
- obj->base.write_domain,
- !list_empty(&obj->gpu_write_list));
- err++;
- }
- }
-
- list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed flushing %p\n", obj);
- err++;
- break;
- } else if (!obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
- list_empty(&obj->gpu_write_list)) {
- DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
- obj,
- obj->active,
- obj->base.write_domain,
- !list_empty(&obj->gpu_write_list));
- err++;
- }
- }
-
- list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed gpu write %p\n", obj);
- err++;
- break;
- } else if (!obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
- DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
- obj,
- obj->active,
- obj->base.write_domain);
- err++;
- }
- }
-
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
- if (obj->base.dev != dev ||
- !atomic_read(&obj->base.refcount.refcount)) {
- DRM_ERROR("freed inactive %p\n", obj);
- err++;
- break;
- } else if (obj->pin_count || obj->active ||
- (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
- DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
- obj,
- obj->pin_count, obj->active,
- obj->base.write_domain);
- err++;
+ for_each_ring(ring, dev_priv, i) {
+ list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+ if (obj->base.dev != dev ||
+ !atomic_read(&obj->base.refcount.refcount)) {
+ DRM_ERROR("%s: freed active obj %p\n",
+ ring->name, obj);
+ err++;
+ break;
+ } else if (!obj->active ||
+ obj->last_read_req[ring->id] == NULL) {
+ DRM_ERROR("%s: invalid active obj %p\n",
+ ring->name, obj);
+ err++;
+ } else if (obj->base.write_domain) {
+ DRM_ERROR("%s: invalid write obj %p (w %x)\n",
+ ring->name,
+ obj, obj->base.write_domain);
+ err++;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index cc552a4c1f3b..a7fa14516cda 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -38,7 +38,6 @@
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
-#define __EXEC_OBJECT_PURGEABLE (1<<27)
#define BATCH_OFFSET_BIAS (256*1024)
@@ -225,12 +224,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
if (entry->flags & __EXEC_OBJECT_HAS_PIN)
vma->pin_count--;
- if (entry->flags & __EXEC_OBJECT_PURGEABLE)
- obj->madv = I915_MADV_DONTNEED;
-
- entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE |
- __EXEC_OBJECT_HAS_PIN |
- __EXEC_OBJECT_PURGEABLE);
+ entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static void eb_destroy(struct eb_vmas *eb)
@@ -407,10 +401,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
* pipe_control writes because the gpu doesn't properly redirect them
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
- reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !(target_vma->bound & GLOBAL_BIND))) {
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
- GLOBAL_BIND);
+ PIN_GLOBAL);
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
return ret;
}
@@ -592,12 +585,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
uint64_t flags;
int ret;
- flags = 0;
+ flags = PIN_USER;
+ if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+ flags |= PIN_GLOBAL;
+
if (!drm_mm_node_allocated(&vma->node)) {
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
flags |= PIN_GLOBAL | PIN_MAPPABLE;
- if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
- flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
}
@@ -607,7 +601,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
only_mappable_for_reloc(entry->flags))
ret = i915_gem_object_pin(obj, vma->vm,
entry->alignment,
- flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
+ flags & ~PIN_MAPPABLE);
if (ret)
return ret;
@@ -896,6 +890,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct list_head *vmas)
{
+ const unsigned other_rings = ~intel_ring_flag(ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -903,9 +898,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
- if (ret)
- return ret;
+
+ if (obj->active & other_rings) {
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+ }
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false);
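[Editorial note, not part of the patch: intel_ring_flag() yields a single per-engine bit, so masking against its complement is how the hunk above avoids syncing an object whose only outstanding work is on the very ring being submitted to. A minimal sketch of that test, assuming intel_ring_flag() returns 1 << ring->id as elsewhere in the driver:]

	/* Sketch only: true when @obj has outstanding work on some engine other
	 * than @ring, i.e. an inter-ring synchronisation is actually needed. */
	static bool needs_cross_ring_sync(struct drm_i915_gem_object *obj,
					  struct intel_engine_cs *ring)
	{
		const unsigned other_rings = ~intel_ring_flag(ring);

		return (obj->active & other_rings) != 0;
	}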
@@ -1143,12 +1141,11 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
u32 batch_len,
bool is_master)
{
- struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
struct drm_i915_gem_object *shadow_batch_obj;
struct i915_vma *vma;
int ret;
- shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+ shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
@@ -1166,11 +1163,13 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
if (ret)
goto err;
+ i915_gem_object_unpin_pages(shadow_batch_obj);
+
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
vma->exec_entry = shadow_exec_entry;
- vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+ vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
drm_gem_object_reference(&shadow_batch_obj->base);
list_add_tail(&vma->exec_list, &eb->vmas);
@@ -1179,6 +1178,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
return shadow_batch_obj;
err:
+ i915_gem_object_unpin_pages(shadow_batch_obj);
if (ret == -EACCES) /* unhandled chained batch */
return batch_obj;
else
@@ -1252,12 +1252,8 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
if (ret)
goto error;
- if (ctx->ppgtt)
- WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
- "%s didn't clear reload\n", ring->name);
- else if (dev_priv->mm.aliasing_ppgtt)
- WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
- (1<<ring->id), "%s didn't clear reload\n", ring->name);
+ WARN(ctx->ppgtt && ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+ "%s didn't clear reload\n", ring->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1549,33 +1545,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (i915_needs_cmd_parser(ring) && args->batch_len) {
- batch_obj = i915_gem_execbuffer_parse(ring,
+ struct drm_i915_gem_object *parsed_batch_obj;
+
+ parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
- if (IS_ERR(batch_obj)) {
- ret = PTR_ERR(batch_obj);
+ if (IS_ERR(parsed_batch_obj)) {
+ ret = PTR_ERR(parsed_batch_obj);
goto err;
}
/*
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically
- * don't want that set when the command parser is
- * enabled.
- *
- * FIXME: with aliasing ppgtt, buffers that should only
- * be in ggtt still end up in the aliasing ppgtt. remove
- * this check when that is fixed.
+ * parsed_batch_obj == batch_obj means batch not fully parsed:
+ * Accept, but don't promote to secure.
*/
- if (USES_FULL_PPGTT(dev))
- dispatch_flags |= I915_DISPATCH_SECURE;
- exec_start = 0;
+ if (parsed_batch_obj != batch_obj) {
+ /*
+ * Batch parsed and accepted:
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in
+ * the dispatch_execbuffer implementations. We
+ * specifically don't want that set on batches the
+ * command parser has accepted.
+ */
+ dispatch_flags |= I915_DISPATCH_SECURE;
+ exec_start = 0;
+ batch_obj = parsed_batch_obj;
+ }
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
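[Editorial note: a compressed restatement of the decision the block above implements; the helper is hypothetical and only summarises the three outcomes of the parse step.]

	/* Hypothetical summary helper, not part of the patch.
	 * IS_ERR(parsed)     -> abort the execbuffer with PTR_ERR(parsed).
	 * parsed == original -> batch accepted but not fully parsed; run the
	 *                       original batch without promoting it to secure.
	 * parsed != original -> the shadow copy was accepted; dispatch it from
	 *                       offset 0 with I915_DISPATCH_SECURE set.
	 */
	static bool use_shadow_batch(struct drm_i915_gem_object *parsed,
				     struct drm_i915_gem_object *original)
	{
		return !IS_ERR(parsed) && parsed != original;
	}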
@@ -1602,9 +1604,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start,
- dispatch_flags);
+ ret = dev_priv->gt.execbuf_submit(dev, file, ring, ctx, args,
+ &eb->vmas, batch_obj, exec_start,
+ dispatch_flags);
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0239fbff7bf7..619dad1b2386 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -92,14 +92,14 @@
*
*/
+static int
+i915_get_ggtt_vma_pages(struct i915_vma *vma);
+
const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_rotated = {
.type = I915_GGTT_VIEW_ROTATED
};
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
-
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
bool has_aliasing_ppgtt;
@@ -146,14 +146,33 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_aliasing_ppgtt ? 1 : 0;
}
-static void ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
-static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ u32 pte_flags = 0;
+
+ /* Currently applicable only to VLV */
+ if (vma->obj->gt_ro)
+ pte_flags |= PTE_READ_ONLY;
+
+ vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
+ cache_level, pte_flags);
-static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- bool valid)
+ return 0;
+}
+
+static void ppgtt_unbind_vma(struct i915_vma *vma)
+{
+ vma->vm->clear_range(vma->vm,
+ vma->node.start,
+ vma->obj->base.size,
+ true);
+}
+
+static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ bool valid)
{
gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
pte |= addr;
@@ -173,9 +192,9 @@ static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+ dma_addr_t addr,
+ enum i915_cache_level level)
{
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
@@ -285,8 +304,8 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
#define i915_dma_unmap_single(px, dev) \
__i915_dma_unmap_single((px)->daddr, dev)
-static inline void __i915_dma_unmap_single(dma_addr_t daddr,
- struct drm_device *dev)
+static void __i915_dma_unmap_single(dma_addr_t daddr,
+ struct drm_device *dev)
{
struct device *device = &dev->pdev->dev;
@@ -307,9 +326,9 @@ static inline void __i915_dma_unmap_single(dma_addr_t daddr,
#define i915_dma_map_single(px, dev) \
i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
-static inline int i915_dma_map_page_single(struct page *page,
- struct drm_device *dev,
- dma_addr_t *daddr)
+static int i915_dma_map_page_single(struct page *page,
+ struct drm_device *dev,
+ dma_addr_t *daddr)
{
struct device *device = &dev->pdev->dev;
@@ -320,7 +339,7 @@ static inline int i915_dma_map_page_single(struct page *page,
return 0;
}
-static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+static void unmap_and_free_pt(struct i915_page_table *pt,
struct drm_device *dev)
{
if (WARN_ON(!pt->page))
@@ -332,9 +351,27 @@ static void unmap_and_free_pt(struct i915_page_table_entry *pt,
kfree(pt);
}
-static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+static void gen8_initialize_pt(struct i915_address_space *vm,
+ struct i915_page_table *pt)
+{
+ gen8_pte_t *pt_vaddr, scratch_pte;
+ int i;
+
+ pt_vaddr = kmap_atomic(pt->page);
+ scratch_pte = gen8_pte_encode(vm->scratch.addr,
+ I915_CACHE_LLC, true);
+
+ for (i = 0; i < GEN8_PTES; i++)
+ pt_vaddr[i] = scratch_pte;
+
+ if (!HAS_LLC(vm->dev))
+ drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
+ kunmap_atomic(pt_vaddr);
+}
+
+static struct i915_page_table *alloc_pt_single(struct drm_device *dev)
{
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
const size_t count = INTEL_INFO(dev)->gen >= 8 ?
GEN8_PTES : GEN6_PTES;
int ret = -ENOMEM;
@@ -369,78 +406,55 @@ fail_bitmap:
return ERR_PTR(ret);
}
-/**
- * alloc_pt_range() - Allocate a multiple page tables
- * @pd: The page directory which will have at least @count entries
- * available to point to the allocated page tables.
- * @pde: First page directory entry for which we are allocating.
- * @count: Number of pages to allocate.
- * @dev: DRM device.
- *
- * Allocates multiple page table pages and sets the appropriate entries in the
- * page table structure within the page directory. Function cleans up after
- * itself on any failures.
- *
- * Return: 0 if allocation succeeded.
- */
-static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
- struct drm_device *dev)
-{
- int i, ret;
-
- /* 512 is the max page tables per page_directory on any platform. */
- if (WARN_ON(pde + count > I915_PDES))
- return -EINVAL;
-
- for (i = pde; i < pde + count; i++) {
- struct i915_page_table_entry *pt = alloc_pt_single(dev);
-
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto err_out;
- }
- WARN(pd->page_table[i],
- "Leaking page directory entry %d (%p)\n",
- i, pd->page_table[i]);
- pd->page_table[i] = pt;
- }
-
- return 0;
-
-err_out:
- while (i-- > pde)
- unmap_and_free_pt(pd->page_table[i], dev);
- return ret;
-}
-
-static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+static void unmap_and_free_pd(struct i915_page_directory *pd,
+ struct drm_device *dev)
{
if (pd->page) {
+ i915_dma_unmap_single(pd, dev);
__free_page(pd->page);
+ kfree(pd->used_pdes);
kfree(pd);
}
}
-static struct i915_page_directory_entry *alloc_pd_single(void)
+static struct i915_page_directory *alloc_pd_single(struct drm_device *dev)
{
- struct i915_page_directory_entry *pd;
+ struct i915_page_directory *pd;
+ int ret = -ENOMEM;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
- pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pd->page) {
- kfree(pd);
- return ERR_PTR(-ENOMEM);
- }
+ pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
+ sizeof(*pd->used_pdes), GFP_KERNEL);
+ if (!pd->used_pdes)
+ goto free_pd;
+
+ pd->page = alloc_page(GFP_KERNEL);
+ if (!pd->page)
+ goto free_bitmap;
+
+ ret = i915_dma_map_single(pd, dev);
+ if (ret)
+ goto free_page;
return pd;
+
+free_page:
+ __free_page(pd->page);
+free_bitmap:
+ kfree(pd->used_pdes);
+free_pd:
+ kfree(pd);
+
+ return ERR_PTR(ret);
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
- uint64_t val)
+static int gen8_write_pdp(struct intel_engine_cs *ring,
+ unsigned entry,
+ dma_addr_t addr)
{
int ret;
@@ -452,10 +466,10 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
- intel_ring_emit(ring, (u32)(val >> 32));
+ intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
- intel_ring_emit(ring, (u32)(val));
+ intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
return 0;
@@ -466,12 +480,12 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
{
int i, ret;
- /* bit of a hack to find the actual last used pd */
- int used_pd = ppgtt->num_pd_entries / I915_PDES;
-
- for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
- ret = gen8_write_pdp(ring, i, addr);
+ for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[i];
+ dma_addr_t pd_daddr = pd ? pd->daddr : ppgtt->scratch_pd->daddr;
+ /* The page directory might be NULL, but we need to clear out
+ * whatever the previous context might have used. */
+ ret = gen8_write_pdp(ring, i, pd_daddr);
if (ret)
return ret;
}
@@ -497,8 +511,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct i915_page_directory_entry *pd;
- struct i915_page_table_entry *pt;
+ struct i915_page_directory *pd;
+ struct i915_page_table *pt;
struct page *page_table;
if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
@@ -559,8 +573,8 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
break;
if (pt_vaddr == NULL) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
- struct i915_page_table_entry *pt = pd->page_table[pde];
+ struct i915_page_directory *pd = ppgtt->pdp.page_directory[pdpe];
+ struct i915_page_table *pt = pd->page_table[pde];
struct page *page_table = pt->page;
pt_vaddr = kmap_atomic(page_table);
@@ -588,14 +602,43 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
-static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
+static void __gen8_do_map_pt(gen8_pde_t * const pde,
+ struct i915_page_table *pt,
+ struct drm_device *dev)
+{
+ gen8_pde_t entry =
+ gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+ *pde = entry;
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ gen8_pde_t *page_directory;
+ struct i915_page_table *pt;
+ int i;
+
+ page_directory = kmap_atomic(pd->page);
+ pt = ppgtt->scratch_pt;
+ for (i = 0; i < I915_PDES; i++)
+ /* Map the PDE to the page table */
+ __gen8_do_map_pt(page_directory + i, pt, vm->dev);
+
+ if (!HAS_LLC(vm->dev))
+ drm_clflush_virt_range(page_directory, PAGE_SIZE);
+ kunmap_atomic(page_directory);
+}
+
+static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
{
int i;
if (!pd->page)
return;
- for (i = 0; i < I915_PDES; i++) {
+ for_each_set_bit(i, pd->used_pdes, I915_PDES) {
if (WARN_ON(!pd->page_table[i]))
continue;
@@ -604,163 +647,287 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d
}
}
-static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
int i;
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
+ for_each_set_bit(i, ppgtt->pdp.used_pdpes, GEN8_LEGACY_PDPES) {
if (WARN_ON(!ppgtt->pdp.page_directory[i]))
continue;
gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
- unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+ unmap_and_free_pd(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
}
+
+ unmap_and_free_pd(ppgtt->scratch_pd, ppgtt->base.dev);
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
}
-static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/**
+ * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
+ * @ppgtt: Master ppgtt structure.
+ * @pd: Page directory for this address range.
+ * @start: Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pts: Bitmap set by function with new allocations. Likely used by the
+ * caller to free on error.
+ *
+ * Allocate the required number of page tables. Extremely similar to
+ * gen8_ppgtt_alloc_page_directories(). The main difference is that here we
+ * are limited by the page directory boundary (instead of the page directory
+ * pointer). That boundary is 1GB virtual. Therefore, unlike
+ * gen8_ppgtt_alloc_page_directories(), it is possible, and likely, that the
+ * caller will need to make multiple calls to this function to achieve the
+ * appropriate allocation.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int gen8_ppgtt_alloc_pagetabs(struct i915_hw_ppgtt *ppgtt,
+ struct i915_page_directory *pd,
+ uint64_t start,
+ uint64_t length,
+ unsigned long *new_pts)
{
- struct pci_dev *hwdev = ppgtt->base.dev->pdev;
- int i, j;
-
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- /* TODO: In the future we'll support sparse mappings, so this
- * will have to change. */
- if (!ppgtt->pdp.page_directory[i]->daddr)
+ struct drm_device *dev = ppgtt->base.dev;
+ struct i915_page_table *pt;
+ uint64_t temp;
+ uint32_t pde;
+
+ gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+ /* Don't reallocate page tables */
+ if (pt) {
+ /* Scratch is never allocated this way */
+ WARN_ON(pt == ppgtt->scratch_pt);
continue;
+ }
- pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ pt = alloc_pt_single(dev);
+ if (IS_ERR(pt))
+ goto unwind_out;
- for (j = 0; j < I915_PDES; j++) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
- struct i915_page_table_entry *pt;
- dma_addr_t addr;
+ gen8_initialize_pt(&ppgtt->base, pt);
+ pd->page_table[pde] = pt;
+ set_bit(pde, new_pts);
+ }
- if (WARN_ON(!pd->page_table[j]))
- continue;
+ return 0;
- pt = pd->page_table[j];
- addr = pt->daddr;
+unwind_out:
+ for_each_set_bit(pde, new_pts, I915_PDES)
+ unmap_and_free_pt(pd->page_table[pde], dev);
- if (addr)
- pci_unmap_page(hwdev, addr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- }
- }
+ return -ENOMEM;
}
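[Editorial note: for scale, a gen8 page table holds 512 PTEs of 4 KiB pages, so one PDE spans 2 MiB and a full page directory spans 1 GiB. A stand-alone sketch of the index arithmetic the loop above walks; the constant is spelled out for illustration and not taken from the driver headers.]

	/* Illustration only: how many page tables a VA range inside one page
	 * directory touches (2 MiB per page table on gen8). */
	static inline unsigned sketch_count_page_tables(u64 start, u64 length)
	{
		const unsigned pde_shift = 21;	/* 512 PTEs * 4 KiB pages */

		return ((start + length - 1) >> pde_shift) - (start >> pde_shift) + 1;
	}
	/* e.g. sketch_count_page_tables(1ULL << 20, 4ULL << 20) == 3: a 4 MiB
	 * range starting at 1 MiB crosses the 2 MiB and 4 MiB boundaries. */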
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
+/**
+ * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
+ * @ppgtt: Master ppgtt structure.
+ * @pdp: Page directory pointer for this address range.
+ * @start: Starting virtual address to begin allocations.
+ * @length:	Size of the allocations.
+ * @new_pds:	Bitmap set by function with new allocations. Likely used by the
+ *		caller to free on error.
+ *
+ * Allocate the required number of page directories starting at the pde index of
+ * @start, and ending at the pde index @start + @length. This function will skip
+ * over already allocated page directories within the range, and only allocate
+ * new ones, setting the appropriate pointer within the pdp as well as the
+ * correct position in the bitmap @new_pds.
+ *
+ * The function will only allocate the pages within the range for a given page
+ * directory pointer. In other words, if @start + @length straddles a virtually
+ * addressed PDP boundary (512GB for 4k pages), there will be more allocations
+ * required by the caller. This is not currently possible, and the BUG in the
+ * code will prevent it.
+ *
+ * Return: 0 if success; negative error code otherwise.
+ */
+static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
+ struct i915_page_directory_pointer *pdp,
+ uint64_t start,
+ uint64_t length,
+ unsigned long *new_pds)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct drm_device *dev = ppgtt->base.dev;
+ struct i915_page_directory *pd;
+ uint64_t temp;
+ uint32_t pdpe;
- gen8_ppgtt_unmap_pages(ppgtt);
- gen8_ppgtt_free(ppgtt);
-}
+ WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
-static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
-{
- int i, ret;
+ /* FIXME: upper bound must not overflow 32 bits */
+ WARN_ON((start + length) > (1ULL << 32));
- for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
- 0, I915_PDES, ppgtt->base.dev);
- if (ret)
+ gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+ if (pd)
+ continue;
+
+ pd = alloc_pd_single(dev);
+ if (IS_ERR(pd))
goto unwind_out;
+
+ gen8_initialize_pd(&ppgtt->base, pd);
+ pdp->page_directory[pdpe] = pd;
+ set_bit(pdpe, new_pds);
}
return 0;
unwind_out:
- while (i--)
- gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+ for_each_set_bit(pdpe, new_pds, GEN8_LEGACY_PDPES)
+ unmap_and_free_pd(pdp->page_directory[pdpe], dev);
return -ENOMEM;
}
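[Editorial note: both allocation helpers above follow the same pattern — skip slots that already exist, record every newly filled slot in a caller-supplied bitmap, and on failure free exactly those recorded slots. A generic sketch of that pattern; the helper and its sizes are invented for illustration.]

	/* Illustration only: fill every empty slot, remembering which ones we
	 * filled so an error path can undo just our own work. */
	static int sketch_alloc_missing(void *slots[], int nr, unsigned long *newly_set)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (slots[i])
				continue;		/* pre-existing, leave alone */

			slots[i] = kzalloc(64, GFP_KERNEL);
			if (!slots[i])
				goto unwind;

			set_bit(i, newly_set);
		}
		return 0;

	unwind:
		for_each_set_bit(i, newly_set, nr) {
			kfree(slots[i]);
			slots[i] = NULL;
		}
		return -ENOMEM;
	}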
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
+static void
+free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long **new_pts)
{
int i;
- for (i = 0; i < max_pdp; i++) {
- ppgtt->pdp.page_directory[i] = alloc_pd_single();
- if (IS_ERR(ppgtt->pdp.page_directory[i]))
- goto unwind_out;
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++)
+ kfree(new_pts[i]);
+ kfree(new_pts);
+ kfree(new_pds);
+}
+
+/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
+ * of these are based on the number of PDPEs in the system.
+ */
+static
+int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
+ unsigned long ***new_pts)
+{
+ int i;
+ unsigned long *pds;
+ unsigned long **pts;
+
+ pds = kcalloc(BITS_TO_LONGS(GEN8_LEGACY_PDPES), sizeof(unsigned long), GFP_KERNEL);
+ if (!pds)
+ return -ENOMEM;
+
+ pts = kcalloc(GEN8_LEGACY_PDPES, sizeof(unsigned long *), GFP_KERNEL);
+ if (!pts) {
+ kfree(pds);
+ return -ENOMEM;
}
- ppgtt->num_pd_pages = max_pdp;
- BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
+ for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
+ pts[i] = kcalloc(BITS_TO_LONGS(I915_PDES),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!pts[i])
+ goto err_out;
+ }
- return 0;
+ *new_pds = pds;
+ *new_pts = pts;
-unwind_out:
- while (i--)
- unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+ return 0;
+err_out:
+ free_gen8_temp_bitmaps(pds, pts);
return -ENOMEM;
}
-static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
- const int max_pdp)
+static int gen8_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ unsigned long *new_page_dirs, **new_page_tables;
+ struct i915_page_directory *pd;
+ const uint64_t orig_start = start;
+ const uint64_t orig_length = length;
+ uint64_t temp;
+ uint32_t pdpe;
int ret;
- ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+ /* Wrap is never okay since we can only represent 48b, and we don't
+ * actually use the other side of the canonical address space.
+ */
+ if (WARN_ON(start + length < start))
+ return -ERANGE;
+
+ ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables);
if (ret)
return ret;
- ret = gen8_ppgtt_allocate_page_tables(ppgtt);
- if (ret)
- goto err_out;
+ /* Do the allocations first so we can easily bail out */
+ ret = gen8_ppgtt_alloc_page_directories(ppgtt, &ppgtt->pdp, start, length,
+ new_page_dirs);
+ if (ret) {
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ return ret;
+ }
- ppgtt->num_pd_entries = max_pdp * I915_PDES;
+ /* For every page directory referenced, allocate page tables */
+ gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+ ret = gen8_ppgtt_alloc_pagetabs(ppgtt, pd, start, length,
+ new_page_tables[pdpe]);
+ if (ret)
+ goto err_out;
+ }
- return 0;
+ start = orig_start;
+ length = orig_length;
-err_out:
- gen8_ppgtt_free(ppgtt);
- return ret;
-}
+ /* Allocations have completed successfully, so set the bitmaps, and do
+ * the mappings. */
+ gen8_for_each_pdpe(pd, &ppgtt->pdp, start, length, temp, pdpe) {
+ gen8_pde_t *const page_directory = kmap_atomic(pd->page);
+ struct i915_page_table *pt;
+ uint64_t pd_len = gen8_clamp_pd(start, length);
+ uint64_t pd_start = start;
+ uint32_t pde;
-static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
- const int pd)
-{
- dma_addr_t pd_addr;
- int ret;
+ /* Every pd should be allocated, we just did that above. */
+ WARN_ON(!pd);
- pd_addr = pci_map_page(ppgtt->base.dev->pdev,
- ppgtt->pdp.page_directory[pd]->page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+ /* Same reasoning as pd */
+ WARN_ON(!pt);
+ WARN_ON(!pd_len);
+ WARN_ON(!gen8_pte_count(pd_start, pd_len));
- ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
- if (ret)
- return ret;
+ /* Set our used ptes within the page table */
+ bitmap_set(pt->used_ptes,
+ gen8_pte_index(pd_start),
+ gen8_pte_count(pd_start, pd_len));
- ppgtt->pdp.page_directory[pd]->daddr = pd_addr;
+ /* Our pde is now pointing to the pagetable, pt */
+ set_bit(pde, pd->used_pdes);
- return 0;
-}
+ /* Map the PDE to the page table */
+ __gen8_do_map_pt(page_directory + pde, pt, vm->dev);
-static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
- const int pd,
- const int pt)
-{
- dma_addr_t pt_addr;
- struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
- struct i915_page_table_entry *ptab = pdir->page_table[pt];
- struct page *p = ptab->page;
- int ret;
+ /* NB: We haven't yet mapped ptes to pages. At this
+ * point we're still relying on insert_entries() */
+ }
- pt_addr = pci_map_page(ppgtt->base.dev->pdev,
- p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
- if (ret)
- return ret;
+ if (!HAS_LLC(vm->dev))
+ drm_clflush_virt_range(page_directory, PAGE_SIZE);
- ptab->daddr = pt_addr;
+ kunmap_atomic(page_directory);
+
+ set_bit(pdpe, ppgtt->pdp.used_pdpes);
+ }
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
return 0;
+
+err_out:
+ while (pdpe--) {
+ for_each_set_bit(temp, new_page_tables[pdpe], I915_PDES)
+ unmap_and_free_pt(ppgtt->pdp.page_directory[pdpe]->page_table[temp], vm->dev);
+ }
+
+ for_each_set_bit(pdpe, new_page_dirs, GEN8_LEGACY_PDPES)
+ unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe], vm->dev);
+
+ free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ return ret;
}
/*
@@ -769,115 +936,57 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
* PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
* space.
*
- * FIXME: split allocation into smaller pieces. For now we only ever do this
- * once, but with full PPGTT, the multiple contiguous allocations will be bad.
- * TODO: Do something with the size parameter
*/
-static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
+static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
- const int min_pt_pages = I915_PDES * max_pdp;
- int i, j, ret;
-
- if (size % (1<<30))
- DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
-
- /* 1. Do all our allocations for page directories and page tables.
- * We allocate more than was asked so that we can point the unused parts
- * to valid entries that point to scratch page. Dynamic page tables
- * will fix this eventually.
- */
- ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
- if (ret)
- return ret;
+ ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
- /*
- * 2. Create DMA mappings for the page directories and page tables.
- */
- for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
- ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
- if (ret)
- goto bail;
+ ppgtt->scratch_pd = alloc_pd_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pd))
+ return PTR_ERR(ppgtt->scratch_pd);
- for (j = 0; j < I915_PDES; j++) {
- ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
- if (ret)
- goto bail;
- }
- }
+ gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+ gen8_initialize_pd(&ppgtt->base, ppgtt->scratch_pd);
- /*
- * 3. Map all the page directory entires to point to the page tables
- * we've allocated.
- *
- * For now, the PPGTT helper functions all require that the PDEs are
- * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
- * will never need to touch the PDEs again.
- */
- for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
- struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
- gen8_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
- for (j = 0; j < I915_PDES; j++) {
- struct i915_page_table_entry *pt = pd->page_table[j];
- dma_addr_t addr = pt->daddr;
- pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
- I915_CACHE_LLC);
- }
- if (!HAS_LLC(ppgtt->base.dev))
- drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
- kunmap_atomic(pd_vaddr);
- }
-
- ppgtt->switch_mm = gen8_mm_switch;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.start = 0;
+ ppgtt->base.total = 1ULL << 32;
+ if (IS_ENABLED(CONFIG_X86_32))
+ /* While we have a proliferation of size_t variables
+ * we cannot represent the full ppgtt size on 32bit,
+ * so limit it to the same size as the GGTT (currently
+ * 2GiB).
+ */
+ ppgtt->base.total = to_i915(ppgtt->base.dev)->gtt.base.total;
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.allocate_va_range = gen8_alloc_va_range;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+ ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.bind_vma = ppgtt_bind_vma;
- /* This is the area that we advertise as usable for the caller */
- ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
-
- /* Set all ptes to a valid scratch page. Also above requested space */
- ppgtt->base.clear_range(&ppgtt->base, 0,
- ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
- true);
+ ppgtt->switch_mm = gen8_mm_switch;
- DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
- ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
- DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
- ppgtt->num_pd_entries,
- (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
return 0;
-
-bail:
- gen8_ppgtt_unmap_pages(ppgtt);
- gen8_ppgtt_free(ppgtt);
- return ret;
}
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
- struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
- gen6_pte_t __iomem *pd_addr;
+ struct i915_page_table *unused;
gen6_pte_t scratch_pte;
uint32_t pd_entry;
- int pte, pde;
+ uint32_t pte, pde, temp;
+ uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
- pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
- ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
-
- seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
- ppgtt->pd.pd_offset,
- ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
- for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+ gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
u32 expected;
gen6_pte_t *pt_vaddr;
dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
- pd_entry = readl(pd_addr + pde);
+ pd_entry = readl(ppgtt->pd_addr + pde);
expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
if (pd_entry != expected)
@@ -914,8 +1023,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static void gen6_write_pde(struct i915_page_directory_entry *pd,
- const int pde, struct i915_page_table_entry *pt)
+static void gen6_write_pde(struct i915_page_directory *pd,
+ const int pde, struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
struct i915_hw_ppgtt *ppgtt =
@@ -931,10 +1040,10 @@ static void gen6_write_pde(struct i915_page_directory_entry *pd,
/* Write all the page tables found in the ppgtt structure to incrementing page
* directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
- struct i915_page_directory_entry *pd,
+ struct i915_page_directory *pd,
uint32_t start, uint32_t length)
{
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
uint32_t pde, temp;
gen6_for_each_pde(pt, pd, start, length, temp, pde)
@@ -1162,14 +1271,14 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
* are switching between contexts with the same LRCA, we also must do a force
* restore.
*/
-static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
+static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
/* If current vm != vm, */
ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}
static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table_entry *pt)
+ struct i915_page_table *pt)
{
gen6_pte_t *pt_vaddr, scratch_pte;
int i;
@@ -1195,7 +1304,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- struct i915_page_table_entry *pt;
+ struct i915_page_table *pt;
const uint32_t start_save = start, length_save = length;
uint32_t pde, temp;
int ret;
@@ -1263,7 +1372,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
unwind_out:
for_each_set_bit(pde, new_page_tables, I915_PDES) {
- struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];
+ struct i915_page_table *pt = ppgtt->pd.page_table[pde];
ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
unmap_and_free_pt(pt, vm->dev);
@@ -1273,29 +1382,23 @@ unwind_out:
return ret;
}
-static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
-{
- int i;
-
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];
-
- if (pt != ppgtt->scratch_pt)
- unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
- }
-
- unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
- unmap_and_free_pd(&ppgtt->pd);
-}
-
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_page_table *pt;
+ uint32_t pde;
+
drm_mm_remove_node(&ppgtt->node);
- gen6_ppgtt_free(ppgtt);
+ gen6_for_all_pdes(pt, ppgtt, pde) {
+ if (pt != ppgtt->scratch_pt)
+ unmap_and_free_pt(pt, ppgtt->base.dev);
+ }
+
+ unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
+ unmap_and_free_pd(&ppgtt->pd, ppgtt->base.dev);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
@@ -1342,7 +1445,6 @@ alloc:
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
- ppgtt->num_pd_entries = I915_PDES;
return 0;
err_out:
@@ -1358,14 +1460,14 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
uint64_t start, uint64_t length)
{
- struct i915_page_table_entry *unused;
+ struct i915_page_table *unused;
uint32_t pde, temp;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}
-static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
+static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1388,23 +1490,14 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
if (ret)
return ret;
- if (aliasing) {
- /* preallocate all pts */
- ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
- ppgtt->base.dev);
-
- if (ret) {
- gen6_ppgtt_cleanup(&ppgtt->base);
- return ret;
- }
- }
-
ppgtt->base.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->base.start = 0;
- ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
+ ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
ppgtt->pd.pd_offset =
@@ -1413,10 +1506,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
- if (aliasing)
- ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
- else
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
@@ -1430,8 +1520,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
return 0;
}
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
- bool aliasing)
+static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1439,16 +1528,16 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
if (INTEL_INFO(dev)->gen < 8)
- return gen6_ppgtt_init(ppgtt, aliasing);
+ return gen6_ppgtt_init(ppgtt);
else
- return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
+ return gen8_ppgtt_init(ppgtt);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
- ret = __hw_ppgtt_init(dev, ppgtt, false);
+ ret = __hw_ppgtt_init(dev, ppgtt);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
@@ -1535,32 +1624,11 @@ void i915_ppgtt_release(struct kref *kref)
kfree(ppgtt);
}
-static void
-ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
-{
- /* Currently applicable only to VLV */
- if (vma->obj->gt_ro)
- flags |= PTE_READ_ONLY;
-
- vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level, flags);
-}
-
-static void ppgtt_unbind_vma(struct i915_vma *vma)
-{
- vma->vm->clear_range(vma->vm,
- vma->node.start,
- vma->obj->base.size,
- true);
-}
-
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
-static inline bool needs_idle_maps(struct drm_device *dev)
+static bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
@@ -1653,67 +1721,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_ggtt_flush(dev_priv);
}
-void i915_gem_restore_gtt_mappings(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
-
- i915_check_and_clear_faults(dev);
-
- /* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj,
- &dev_priv->gtt.base);
- if (!vma)
- continue;
-
- i915_gem_clflush_object(obj, obj->pin_display);
- /* The bind_vma code tries to be smart about tracking mappings.
- * Unfortunately above, we've just wiped out the mappings
- * without telling our object about it. So we need to fake it.
- *
- * Bind is not expected to fail since this is only called on
- * resume and assumption is all requirements exist already.
- */
- vma->bound &= ~GLOBAL_BIND;
- WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
- }
-
-
- if (INTEL_INFO(dev)->gen >= 8) {
- if (IS_CHERRYVIEW(dev))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
-
- return;
- }
-
- if (USES_PPGTT(dev)) {
- list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
-
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt,
- base);
-
- if (i915_is_ggtt(vm))
- ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- gen6_write_page_range(dev_priv, &ppgtt->pd,
- 0, ppgtt->base.total);
- }
- }
-
- i915_ggtt_flush(dev_priv);
-}
-
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
if (obj->has_dma_mapping)
@@ -1727,7 +1734,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
-static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
writeq(pte, addr);
@@ -1872,18 +1879,16 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
readl(gtt_base);
}
-
-static void i915_ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
+ struct sg_table *pages,
+ uint64_t start,
+ enum i915_cache_level cache_level, u32 unused)
{
- const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
- BUG_ON(!i915_is_ggtt(vma->vm));
- intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
- vma->bound = GLOBAL_BIND;
+ intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1896,62 +1901,41 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
intel_gtt_clear_range(first_entry, num_entries);
}
-static void i915_ggtt_unbind_vma(struct i915_vma *vma)
-{
- const unsigned int first = vma->node.start >> PAGE_SHIFT;
- const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
-
- BUG_ON(!i915_is_ggtt(vma->vm));
- vma->bound = 0;
- intel_gtt_clear_range(first, size);
-}
-
-static void ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static int ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
struct sg_table *pages = obj->pages;
+ u32 pte_flags = 0;
+ int ret;
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+ pages = vma->ggtt_view.pages;
/* Currently applicable only to VLV */
if (obj->gt_ro)
- flags |= PTE_READ_ONLY;
+ pte_flags |= PTE_READ_ONLY;
- if (i915_is_ggtt(vma->vm))
- pages = vma->ggtt_view.pages;
- /* If there is no aliasing PPGTT, or the caller needs a global mapping,
- * or we have a global mapping already but the cacheability flags have
- * changed, set the global PTEs.
- *
- * If there is an aliasing PPGTT it is anecdotally faster, so use that
- * instead if none of the above hold true.
- *
- * NB: A global mapping should only be needed for special regions like
- * "gtt mappable", SNB errata, or if specified via special execbuf
- * flags. At all other times, the GPU will use the aliasing PPGTT.
- */
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
- if (!(vma->bound & GLOBAL_BIND) ||
- (cache_level != obj->cache_level)) {
- vma->vm->insert_entries(vma->vm, pages,
- vma->node.start,
- cache_level, flags);
- vma->bound |= GLOBAL_BIND;
- }
+ vma->vm->insert_entries(vma->vm, pages,
+ vma->node.start,
+ cache_level, pte_flags);
}
- if (dev_priv->mm.aliasing_ppgtt &&
- (!(vma->bound & LOCAL_BIND) ||
- (cache_level != obj->cache_level))) {
+ if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base, pages,
vma->node.start,
- cache_level, flags);
- vma->bound |= LOCAL_BIND;
+ cache_level, pte_flags);
}
+
+ return 0;
}
static void ggtt_unbind_vma(struct i915_vma *vma)
@@ -1959,22 +1943,24 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ const uint64_t size = min_t(uint64_t,
+ obj->base.size,
+ vma->node.size);
if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
- obj->base.size,
+ size,
true);
- vma->bound &= ~GLOBAL_BIND;
}
- if (vma->bound & LOCAL_BIND) {
+ if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
- obj->base.size,
+ size,
true);
- vma->bound &= ~LOCAL_BIND;
}
}
@@ -2083,12 +2069,27 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
if (!ppgtt)
return -ENOMEM;
- ret = __hw_ppgtt_init(dev, ppgtt, true);
+ ret = __hw_ppgtt_init(dev, ppgtt);
+ if (ret) {
+ ppgtt->base.cleanup(&ppgtt->base);
+ kfree(ppgtt);
+ return ret;
+ }
+
+ if (ppgtt->base.allocate_va_range)
+ ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
+ ppgtt->base.total);
if (ret) {
+ ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
return ret;
}
+ ppgtt->base.clear_range(&ppgtt->base,
+ ppgtt->base.start,
+ ppgtt->base.total,
+ true);
+
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
@@ -2164,14 +2165,14 @@ static void teardown_scratch_page(struct drm_device *dev)
__free_page(page);
}
-static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
-static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
+static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
@@ -2187,7 +2188,7 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
return bdw_gmch_ctl << 20;
}
-static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
+static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;
@@ -2198,14 +2199,14 @@ static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
-static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
+static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
-static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
+static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
@@ -2253,7 +2254,17 @@ static int ggtt_probe_common(struct drm_device *dev,
gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
(pci_resource_len(dev->pdev, 0) / 2);
- dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+ /*
+ * On BXT, writes larger than 64 bits to the GTT pagetable range will be
+ * dropped. For WC mappings in general we have 64 byte burst writes
+ * when the WC buffer is flushed, so we can't use it, but have to
+ * resort to an uncached mapping. The WC issue is easily caught by the
+ * readback check when writing GTT PTE entries.
+ */
+ if (IS_BROXTON(dev))
+ dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+ else
+ dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
@@ -2375,7 +2386,7 @@ static int gen8_gmch_probe(struct drm_device *dev,
*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev))
+ if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -2384,6 +2395,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
+ dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
return ret;
}
@@ -2424,6 +2437,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
return ret;
}
@@ -2455,7 +2470,10 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
if (unlikely(dev_priv->gtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -2523,6 +2541,59 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ i915_check_and_clear_faults(dev);
+
+ /* First fill our portion of the GTT with scratch pages */
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start,
+ dev_priv->gtt.base.total,
+ true);
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (!vma)
+ continue;
+
+ i915_gem_clflush_object(obj, obj->pin_display);
+ WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
+ }
+
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
+ chv_setup_private_ppat(dev_priv);
+ else
+ bdw_setup_private_ppat(dev_priv);
+
+ return;
+ }
+
+ if (USES_PPGTT(dev)) {
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ /* TODO: Perhaps it shouldn't be gen6 specific */
+
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt,
+ base);
+
+ if (i915_is_ggtt(vm))
+ ppgtt = dev_priv->mm.aliasing_ppgtt;
+
+ gen6_write_page_range(dev_priv, &ppgtt->pd,
+ 0, ppgtt->base.total);
+ }
+ }
+
+ i915_ggtt_flush(dev_priv);
+}
+
static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -2532,7 +2603,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
return ERR_PTR(-EINVAL);
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+ vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -2542,22 +2614,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
vma->vm = vm;
vma->obj = obj;
- if (INTEL_INFO(vm->dev)->gen >= 6) {
- if (i915_is_ggtt(vm)) {
- vma->ggtt_view = *ggtt_view;
-
- vma->unbind_vma = ggtt_unbind_vma;
- vma->bind_vma = ggtt_bind_vma;
- } else {
- vma->unbind_vma = ppgtt_unbind_vma;
- vma->bind_vma = ppgtt_bind_vma;
- }
- } else {
- BUG_ON(!i915_is_ggtt(vm));
+ if (i915_is_ggtt(vm))
vma->ggtt_view = *ggtt_view;
- vma->unbind_vma = i915_ggtt_unbind_vma;
- vma->bind_vma = i915_ggtt_bind_vma;
- }
list_add_tail(&vma->vma_link, &obj->vma_list);
if (!i915_is_ggtt(vm))
@@ -2702,7 +2760,48 @@ err_st_alloc:
return ERR_PTR(ret);
}
-static inline int
+static struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sg_page_iter obj_sg_iter;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ sg = st->sgl;
+ st->nents = 0;
+ for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
+ view->params.partial.offset)
+ {
+ if (st->nents >= view->params.partial.size)
+ break;
+
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
+ sg_dma_len(sg) = PAGE_SIZE;
+
+ sg = sg_next(sg);
+ st->nents++;
+ }
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
+static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
int ret = 0;
@@ -2715,6 +2814,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
+ vma->ggtt_view.pages =
+ intel_partial_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
@@ -2746,14 +2848,66 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags)
{
- if (i915_is_ggtt(vma->vm)) {
- int ret = i915_get_ggtt_vma_pages(vma);
+ int ret;
+ u32 bind_flags;
+
+ if (WARN_ON(flags == 0))
+ return -EINVAL;
+
+ bind_flags = 0;
+ if (flags & PIN_GLOBAL)
+ bind_flags |= GLOBAL_BIND;
+ if (flags & PIN_USER)
+ bind_flags |= LOCAL_BIND;
+
+ if (flags & PIN_UPDATE)
+ bind_flags |= vma->bound;
+ else
+ bind_flags &= ~vma->bound;
+ if (bind_flags == 0)
+ return 0;
+
+ if (vma->bound == 0 && vma->vm->allocate_va_range) {
+ trace_i915_va_alloc(vma->vm,
+ vma->node.start,
+ vma->node.size,
+ VM_TO_TRACE_NAME(vma->vm));
+
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
if (ret)
return ret;
}
- vma->bind_vma(vma, cache_level, flags);
+ ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
+ if (ret)
+ return ret;
+
+ vma->bound |= bind_flags;
return 0;
}
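[Editorial note: the flag handling above can be read as a small truth table. A condensed sketch with a few worked cases; the function itself is illustrative, while the flag names are the ones used by this patch.]

	/* Illustration only: derive the page-table bind work from the caller's
	 * pin flags and the vma's current bound state.
	 *   PIN_USER,              bound = 0           -> LOCAL_BIND
	 *   PIN_GLOBAL,            bound = GLOBAL_BIND -> 0 (nothing left to do)
	 *   PIN_GLOBAL|PIN_UPDATE, bound = GLOBAL_BIND -> GLOBAL_BIND (rewrite PTEs)
	 */
	static u32 sketch_bind_flags(u32 flags, u32 bound)
	{
		u32 bind_flags = 0;

		if (flags & PIN_GLOBAL)
			bind_flags |= GLOBAL_BIND;
		if (flags & PIN_USER)
			bind_flags |= LOCAL_BIND;

		if (flags & PIN_UPDATE)
			bind_flags |= bound;
		else
			bind_flags &= ~bound;

		return bind_flags;
	}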
+
+/**
+ * i915_ggtt_view_size - Get the size of a GGTT view.
+ * @obj: Object the view is of.
+ * @view: The view in question.
+ *
+ * @return The size of the GGTT view in bytes.
+ */
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ if (view->type == I915_GGTT_VIEW_NORMAL ||
+ view->type == I915_GGTT_VIEW_ROTATED) {
+ return obj->base.size;
+ } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ return view->params.partial.size << PAGE_SHIFT;
+ } else {
+ WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
+ return obj->base.size;
+ }
+}
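[Editorial note: a hypothetical caller of the new helper, sizing a partial view of 16 pages; the field names are those added to struct i915_ggtt_view by this patch, and `obj` is assumed to be a bound object in scope.]

	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
		.params.partial = { .offset = 0, .size = 16 },
	};

	/* 16 << PAGE_SHIFT bytes (64 KiB with 4 KiB pages), independent of
	 * the size of the underlying object. */
	size_t view_bytes = i915_ggtt_view_size(obj, &view);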
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index fc03c99317c9..0d46dd20bf71 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -117,7 +117,8 @@ typedef uint64_t gen8_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_PARTIAL,
};
struct intel_rotation_info {
@@ -130,6 +131,13 @@ struct intel_rotation_info {
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
+ union {
+ struct {
+ unsigned long offset;
+ unsigned int size;
+ } partial;
+ } params;
+
struct sg_table *pages;
union {
@@ -158,7 +166,6 @@ struct i915_vma {
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
-#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
/**
@@ -196,36 +203,30 @@ struct i915_vma {
* bits with absolutely no headroom. So use 4 bits. */
unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
- /** Unmap an object from an address space. This usually consists of
- * setting the valid PTE entries to a reserved scratch page. */
- void (*unbind_vma)(struct i915_vma *vma);
- /* Map an object into an address space with the given cache flags. */
- void (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
};
-struct i915_page_table_entry {
+struct i915_page_table {
struct page *page;
dma_addr_t daddr;
unsigned long *used_ptes;
};
-struct i915_page_directory_entry {
+struct i915_page_directory {
struct page *page; /* NULL for GEN6-GEN7 */
union {
uint32_t pd_offset;
dma_addr_t daddr;
};
- struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */
+ unsigned long *used_pdes;
+ struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};
-struct i915_page_directory_pointer_entry {
+struct i915_page_directory_pointer {
/* struct page *page; */
- struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES];
+ DECLARE_BITMAP(used_pdpes, GEN8_LEGACY_PDPES);
+ struct i915_page_directory *page_directory[GEN8_LEGACY_PDPES];
};
struct i915_address_space {
@@ -267,6 +268,8 @@ struct i915_address_space {
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
+ /* flags for pte_encode */
+#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length);
@@ -279,6 +282,13 @@ struct i915_address_space {
uint64_t start,
enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
+ /** Unmap an object from an address space. This usually consists of
+ * setting the valid PTE entries to a reserved scratch page. */
+ void (*unbind_vma)(struct i915_vma *vma);
+ /* Map an object into an address space with the given cache flags. */
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -314,14 +324,13 @@ struct i915_hw_ppgtt {
struct kref ref;
struct drm_mm_node node;
unsigned long pd_dirty_rings;
- unsigned num_pd_entries;
- unsigned num_pd_pages; /* gen8+ */
union {
- struct i915_page_directory_pointer_entry pdp;
- struct i915_page_directory_entry pd;
+ struct i915_page_directory_pointer pdp;
+ struct i915_page_directory pd;
};
- struct i915_page_table_entry *scratch_pt;
+ struct i915_page_table *scratch_pt;
+ struct i915_page_directory *scratch_pd;
struct drm_i915_file_private *file_priv;
@@ -349,6 +358,11 @@ struct i915_hw_ppgtt {
temp = min_t(unsigned, temp, length), \
start += temp, length -= temp)
+#define gen6_for_all_pdes(pt, ppgtt, iter) \
+ for (iter = 0; \
+ pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
+ iter++)
+
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
const uint32_t mask = NUM_PTE(pde_shift) - 1;
@@ -397,6 +411,63 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
+/* Equivalent to the gen6 version. For each pde, iterates over every pde
+ * from start until start + length. On gen8+ it simply iterates
+ * over every page directory entry in a page directory.
+ */
+#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
+ for (iter = gen8_pde_index(start); \
+ pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
+ temp = min(temp, length), \
+ start += temp, length -= temp)
+
+#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
+ for (iter = gen8_pdpe_index(start); \
+ pd = (pdp)->page_directory[iter], length > 0 && iter < GEN8_LEGACY_PDPES; \
+ iter++, \
+ temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
+ temp = min(temp, length), \
+ start += temp, length -= temp)
+
+/* Clamp length to the next page_directory boundary */
+static inline uint64_t gen8_clamp_pd(uint64_t start, uint64_t length)
+{
+ uint64_t next_pd = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT);
+
+ if (next_pd > (start + length))
+ return length;
+
+ return next_pd - start;
+}
+
+static inline uint32_t gen8_pte_index(uint64_t address)
+{
+ return i915_pte_index(address, GEN8_PDE_SHIFT);
+}
+
+static inline uint32_t gen8_pde_index(uint64_t address)
+{
+ return i915_pde_index(address, GEN8_PDE_SHIFT);
+}
+
+static inline uint32_t gen8_pdpe_index(uint64_t address)
+{
+ return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
+}
+
+static inline uint32_t gen8_pml4e_index(uint64_t address)
+{
+ WARN_ON(1); /* For 64B */
+ return 0;
+}
+
+static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
+{
+ return i915_pte_count(address, length, GEN8_PDE_SHIFT);
+}
+
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev);
@@ -432,7 +503,15 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (WARN_ON(!a || !b))
return false;
- return a->type == b->type;
+ if (a->type != b->type)
+ return false;
+ if (a->type == I915_GGTT_VIEW_PARTIAL)
+ return !memcmp(&a->params, &b->params, sizeof(a->params));
+ return true;
}
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+
#endif
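The new gen8 iterator macros and index helpers above all derive from one address split: PTE bits below the PDE boundary, PDE bits below the PDPE boundary, and a small PDPE field on top. A standalone sketch of that decomposition, assuming the usual gen8 layout (4 KiB pages, 512-entry tables, 4 legacy PDPEs); the shift values here are assumptions, not quoted from the header.

/* Userspace sketch of the gen8 address split (not driver code). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define GEN8_PDE_SHIFT	21	/* assumed: 512 PTEs x 4 KiB per page table */
#define GEN8_PDPE_SHIFT	30	/* assumed: 512 PDEs x 2 MiB per page directory */
#define GEN8_PDPE_MASK	0x3	/* legacy 32b PPGTT has 4 PDPEs */

static uint32_t pte_index(uint64_t addr)  { return (addr >> PAGE_SHIFT) & 511; }
static uint32_t pde_index(uint64_t addr)  { return (addr >> GEN8_PDE_SHIFT) & 511; }
static uint32_t pdpe_index(uint64_t addr) { return (addr >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; }

int main(void)
{
	uint64_t addr = 0x47fff000ULL;	/* arbitrary example address */

	/* prints pdpe=1 pde=63 pte=511 for this address */
	printf("pdpe=%u pde=%u pte=%u\n",
	       pdpe_index(addr), pde_index(addr), pte_index(addr));
	return 0;
}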
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7929e769250..f6ecbda2c604 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -184,9 +184,12 @@ static int num_vma_bound(struct drm_i915_gem_object *obj)
struct i915_vma *vma;
int count = 0;
- list_for_each_entry(vma, &obj->vma_list, vma_link)
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (drm_mm_node_allocated(&vma->node))
count++;
+ if (vma->pin_count)
+ count++;
+ }
return count;
}
@@ -210,8 +213,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!i915_gem_obj_is_pinned(obj) &&
- obj->pages_pin_count == num_vma_bound(obj))
+ if (obj->pages_pin_count == num_vma_bound(obj))
count += obj->base.size >> PAGE_SHIFT;
}
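With the change above, the shrinker reports an object's pages as reclaimable only when every page pin is accounted for by a bound or pinned VMA, i.e. nothing else is holding the pages. A toy model of that accounting (not driver code; the struct fields are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct toy_vma { bool node_allocated; int pin_count; };
struct toy_obj { int pages_pin_count; struct toy_vma vma[2]; int nvma; };

/* Count each VMA once for being bound and once more for being pinned. */
static int num_vma_bound(const struct toy_obj *obj)
{
	int i, count = 0;

	for (i = 0; i < obj->nvma; i++) {
		if (obj->vma[i].node_allocated)
			count++;
		if (obj->vma[i].pin_count)
			count++;
	}
	return count;
}

int main(void)
{
	struct toy_obj obj = {
		.pages_pin_count = 1,
		.vma = { { .node_allocated = true, .pin_count = 0 } },
		.nvma = 1,
	};

	printf("reclaimable: %s\n",
	       obj.pages_pin_count == num_vma_bound(&obj) ? "yes" : "no");
	return 0;
}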
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f8da71682c96..348ed5abcdbf 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -209,7 +209,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
dev_priv->fbc.threshold = ret;
- if (HAS_PCH_SPLIT(dev))
+ if (INTEL_INFO(dev_priv)->gen >= 5)
I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6377b22269ad..633bd1fcab69 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -336,7 +336,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
+ if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY;
goto err;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1719078c763a..1f4e5a32a16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -22,8 +22,8 @@
*
*/
-#include "drmP.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
@@ -219,11 +219,14 @@ i915_mmu_notifier_add(struct drm_device *dev,
struct i915_mmu_object *mo)
{
struct interval_tree_node *it;
- int ret;
+ int ret = 0;
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ /* By this point we have already done a lot of expensive setup that
+ * we do not want to repeat just because the caller (e.g. X) has a
+ * signal pending (and partly because of that expensive setup, X
+ * using an interrupt timer is likely to get stuck in an EINTR loop).
+ */
+ mutex_lock(&dev->struct_mutex);
/* Make sure we drop the final active reference (and thereby
* remove the objects from the interval tree) before we do
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 1d4e60df8883..6f4256918f76 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -192,15 +192,20 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
struct drm_i915_error_buffer *err,
int count)
{
+ int i;
+
err_printf(m, " %s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x %8u %02x %02x %x %x",
+ err_printf(m, " %08x %8u %02x %02x [ ",
err->gtt_offset,
err->size,
err->read_domains,
- err->write_domain,
- err->rseqno, err->wseqno);
+ err->write_domain);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ err_printf(m, "%02x ", err->rseqno[i]);
+
+ err_printf(m, "] %02x", err->wseqno);
err_puts(m, pin_flag(err->pinned));
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
@@ -251,10 +256,11 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
return;
err_printf(m, "%s command stream:\n", ring_str(ring_idx));
- err_printf(m, " HEAD: 0x%08x\n", ring->head);
- err_printf(m, " TAIL: 0x%08x\n", ring->tail);
- err_printf(m, " CTL: 0x%08x\n", ring->ctl);
- err_printf(m, " HWS: 0x%08x\n", ring->hws);
+ err_printf(m, " START: 0x%08x\n", ring->start);
+ err_printf(m, " HEAD: 0x%08x\n", ring->head);
+ err_printf(m, " TAIL: 0x%08x\n", ring->tail);
+ err_printf(m, " CTL: 0x%08x\n", ring->ctl);
+ err_printf(m, " HWS: 0x%08x\n", ring->hws);
err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
@@ -553,6 +559,7 @@ static void i915_error_state_free(struct kref *error_ref)
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].wa_batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
@@ -679,10 +686,12 @@ static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
+ int i;
err->size = obj->base.size;
err->name = obj->base.name;
- err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
err->read_domains = obj->base.read_domains;
@@ -695,8 +704,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
- err->ring = obj->last_read_req ?
- i915_gem_request_get_ring(obj->last_read_req)->id : -1;
+ err->ring = obj->last_write_req ?
+ i915_gem_request_get_ring(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -883,6 +892,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
ering->seqno = ring->get_seqno(ring, false);
ering->acthd = intel_ring_get_active_head(ring);
+ ering->start = I915_READ_START(ring);
ering->head = I915_READ_HEAD(ring);
ering->tail = I915_READ_TAIL(ring);
ering->ctl = I915_READ_CTL(ring);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6d494432b19f..e6bb72dca3ff 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -79,7 +79,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
-static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
+static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
@@ -88,6 +88,12 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are th
[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
+/* BXT hpd list */
+static const u32 hpd_bxt[HPD_NUM_PINS] = {
+ [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
+ [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+};
+
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
@@ -985,8 +991,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
return;
}
-static void notify_ring(struct drm_device *dev,
- struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *ring)
{
if (!intel_ring_initialized(ring))
return;
@@ -1049,7 +1054,7 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
if (!vlv_c0_above(dev_priv,
&dev_priv->rps.down_ei, &now,
- VLV_RP_DOWN_EI_THRESHOLD))
+ dev_priv->rps.down_threshold))
events |= GEN6_PM_RP_DOWN_THRESHOLD;
dev_priv->rps.down_ei = now;
}
@@ -1057,7 +1062,7 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
if (vlv_c0_above(dev_priv,
&dev_priv->rps.up_ei, &now,
- VLV_RP_UP_EI_THRESHOLD))
+ dev_priv->rps.up_threshold))
events |= GEN6_PM_RP_UP_THRESHOLD;
dev_priv->rps.up_ei = now;
}
@@ -1065,12 +1070,25 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
return events;
}
+static bool any_waiters(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *ring;
+ int i;
+
+ for_each_ring(ring, dev_priv, i)
+ if (ring->irq_refcount)
+ return true;
+
+ return false;
+}
+
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, rps.work);
+ bool client_boost;
+ int new_delay, adj, min, max;
u32 pm_iir;
- int new_delay, adj;
spin_lock_irq(&dev_priv->irq_lock);
/* Speed up work cancelation during disabling rps interrupts. */
@@ -1082,12 +1100,14 @@ static void gen6_pm_rps_work(struct work_struct *work)
dev_priv->rps.pm_iir = 0;
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ client_boost = dev_priv->rps.client_boost;
+ dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
- if ((pm_iir & dev_priv->pm_rps_events) == 0)
+ if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
return;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1095,21 +1115,28 @@ static void gen6_pm_rps_work(struct work_struct *work)
pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
adj = dev_priv->rps.last_adj;
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ new_delay = dev_priv->rps.cur_freq;
+ min = dev_priv->rps.min_freq_softlimit;
+ max = dev_priv->rps.max_freq_softlimit;
+
+ if (client_boost) {
+ new_delay = dev_priv->rps.max_freq_softlimit;
+ adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
- else {
- /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
- }
- new_delay = dev_priv->rps.cur_freq + adj;
-
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
/*
* For better performance, jump directly
* to RPe if we're below it.
*/
- if (new_delay < dev_priv->rps.efficient_freq)
+ if (new_delay < dev_priv->rps.efficient_freq - adj) {
new_delay = dev_priv->rps.efficient_freq;
+ adj = 0;
+ }
+ } else if (any_waiters(dev_priv)) {
+ adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
@@ -1119,23 +1146,19 @@ static void gen6_pm_rps_work(struct work_struct *work)
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
- else {
- /* CHV needs even encode values */
- adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
- }
- new_delay = dev_priv->rps.cur_freq + adj;
+ else /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
} else { /* unknown event */
- new_delay = dev_priv->rps.cur_freq;
+ adj = 0;
}
+ dev_priv->rps.last_adj = adj;
+
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
- new_delay = clamp_t(int, new_delay,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit);
-
- dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
+ new_delay += adj;
+ new_delay = clamp_t(int, new_delay, min, max);
intel_set_rps(dev_priv->dev, new_delay);
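The reworked RPS stepping above keeps the adjustment and the clamping separate: start from the current frequency, add the signed adjustment, then clamp to the soft limits so sysfs changes made in the meantime are honoured. A minimal model of that arithmetic; the frequency values are purely illustrative.

#include <stdio.h>

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	int cur = 14, adj = 4;	/* illustrative frequency steps */
	int min = 6, max = 16;	/* soft limits */
	int new_delay = clamp_int(cur + adj, min, max);

	printf("new frequency step: %d\n", new_delay);	/* 16, clamped at max */
	return 0;
}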
@@ -1251,9 +1274,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->ring[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->ring[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@@ -1263,11 +1286,11 @@ static void snb_gt_irq_handler(struct drm_device *dev,
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->ring[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->ring[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ notify_ring(&dev_priv->ring[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1278,88 +1301,74 @@ static void snb_gt_irq_handler(struct drm_device *dev,
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
-static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
+static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl)
{
- struct intel_engine_cs *ring;
- u32 rcs, bcs, vcs;
- uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- tmp = I915_READ(GEN8_GT_IIR(0));
+ u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
if (tmp) {
- I915_WRITE(GEN8_GT_IIR(0), tmp);
+ I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
- rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
- ring = &dev_priv->ring[RCS];
- if (rcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, ring);
- if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_lrc_irq_handler(ring);
-
- bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
- ring = &dev_priv->ring[BCS];
- if (bcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, ring);
- if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_lrc_irq_handler(ring);
+ if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+ intel_lrc_irq_handler(&dev_priv->ring[RCS]);
+ if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
+ notify_ring(&dev_priv->ring[RCS]);
+
+ if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+ intel_lrc_irq_handler(&dev_priv->ring[BCS]);
+ if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
+ notify_ring(&dev_priv->ring[BCS]);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- tmp = I915_READ(GEN8_GT_IIR(1));
+ u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
if (tmp) {
- I915_WRITE(GEN8_GT_IIR(1), tmp);
+ I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
- vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
- ring = &dev_priv->ring[VCS];
- if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, ring);
- if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_lrc_irq_handler(ring);
-
- vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
- ring = &dev_priv->ring[VCS2];
- if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, ring);
- if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_lrc_irq_handler(ring);
- } else
- DRM_ERROR("The master control interrupt lied (GT1)!\n");
- }
+ if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+ intel_lrc_irq_handler(&dev_priv->ring[VCS]);
+ if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
+ notify_ring(&dev_priv->ring[VCS]);
- if (master_ctl & GEN8_GT_PM_IRQ) {
- tmp = I915_READ(GEN8_GT_IIR(2));
- if (tmp & dev_priv->pm_rps_events) {
- I915_WRITE(GEN8_GT_IIR(2),
- tmp & dev_priv->pm_rps_events);
- ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, tmp);
+ if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+ intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
+ if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
+ notify_ring(&dev_priv->ring[VCS2]);
} else
- DRM_ERROR("The master control interrupt lied (PM)!\n");
+ DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- tmp = I915_READ(GEN8_GT_IIR(3));
+ u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
if (tmp) {
- I915_WRITE(GEN8_GT_IIR(3), tmp);
+ I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
- vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
- ring = &dev_priv->ring[VECS];
- if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, ring);
- if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
- intel_lrc_irq_handler(ring);
+ if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+ intel_lrc_irq_handler(&dev_priv->ring[VECS]);
+ if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
+ notify_ring(&dev_priv->ring[VECS]);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
+ if (master_ctl & GEN8_GT_PM_IRQ) {
+ u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
+ if (tmp & dev_priv->pm_rps_events) {
+ I915_WRITE_FW(GEN8_GT_IIR(2),
+ tmp & dev_priv->pm_rps_events);
+ ret = IRQ_HANDLED;
+ gen6_rps_irq_handler(dev_priv, tmp);
+ } else
+ DRM_ERROR("The master control interrupt lied (PM)!\n");
+ }
+
return ret;
}
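The rewritten gen8 GT handler tests iir & (BIT << SHIFT) directly instead of shifting the IIR value down into a per-ring temporary first; both forms select the same bit, which is what lets the rcs/bcs/vcs locals go away. A small sketch of the equivalence; the bit and shift values mirror the names used above but are only illustrative here.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GT_RENDER_USER_INTERRUPT	(1 << 0)	/* illustrative bit value */
#define GEN8_BCS_IRQ_SHIFT		16		/* illustrative shift */

int main(void)
{
	uint32_t iir = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;

	/* shift-then-mask and mask-with-shifted-bit agree as a truth value */
	assert(((iir >> GEN8_BCS_IRQ_SHIFT) & GT_RENDER_USER_INTERRUPT) ==
	       !!(iir & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT)));
	printf("both forms agree for iir=%#x\n", iir);
	return 0;
}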
@@ -1398,7 +1407,7 @@ static int i915_port_to_hotplug_shift(enum port port)
}
}
-static inline enum port get_port_from_pin(enum hpd_pin pin)
+static enum port get_port_from_pin(enum hpd_pin pin)
{
switch (pin) {
case HPD_PORT_B:
@@ -1412,10 +1421,10 @@ static inline enum port get_port_from_pin(enum hpd_pin pin)
}
}
-static inline void intel_hpd_irq_handler(struct drm_device *dev,
- u32 hotplug_trigger,
- u32 dig_hotplug_reg,
- const u32 hpd[HPD_NUM_PINS])
+static void intel_hpd_irq_handler(struct drm_device *dev,
+ u32 hotplug_trigger,
+ u32 dig_hotplug_reg,
+ const u32 hpd[HPD_NUM_PINS])
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -1440,7 +1449,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
- if (HAS_PCH_SPLIT(dev)) {
+ if (!HAS_GMCH_DISPLAY(dev_priv)) {
dig_shift = pch_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
@@ -1654,7 +1663,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+ notify_ring(&dev_priv->ring[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1755,7 +1764,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
*/
POSTING_READ(PORT_HOTPLUG_STAT);
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
@@ -1848,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IIR, iir);
}
- gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ gen8_gt_irq_handler(dev_priv, master_ctl);
/* Call regardless, as some status bits might not be
* signalled in iir */
@@ -2164,6 +2173,38 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
return ret;
}
+static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t hp_control;
+ uint32_t hp_trigger;
+
+ /* Get the status */
+ hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
+ hp_control = I915_READ(BXT_HOTPLUG_CTL);
+
+	/* Hotplug not enabled? */
+ if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
+ DRM_ERROR("Interrupt when HPD disabled\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ hp_control & BXT_HOTPLUG_CTL_MASK);
+
+ /* Check for HPD storm and schedule bottom half */
+ intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);
+
+ /*
+ * FIXME: Save the hot plug status for bottom half before
+ * clearing the sticky status bits, else the status will be
+ * lost.
+ */
+
+ /* Clear sticky bits in hpd status */
+ I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
+}
+
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -2181,17 +2222,16 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- master_ctl = I915_READ(GEN8_MASTER_IRQ);
+ master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
return IRQ_NONE;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
/* Find, clear, then process each source of interrupt */
- ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ ret = gen8_gt_irq_handler(dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
@@ -2210,12 +2250,27 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
if (tmp) {
+ bool found = false;
+
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
- if (tmp & aux_mask)
+ if (tmp & aux_mask) {
dp_aux_irq_handler(dev);
- else
+ found = true;
+ }
+
+ if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
+ bxt_hpd_handler(dev, tmp);
+ found = true;
+ }
+
+ if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+ gmbus_irq_handler(dev);
+ found = true;
+ }
+
+ if (!found)
DRM_ERROR("Unexpected DE Port interrupt\n");
}
else
@@ -2268,7 +2323,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
- if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
+ if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+ master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
* scheme also closed the SDE interrupt handling race we've seen
@@ -2284,8 +2340,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ POSTING_READ_FW(GEN8_MASTER_IRQ);
return ret;
}
@@ -3104,7 +3160,8 @@ static void gen8_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_DE_MISC_);
GEN5_IRQ_RESET(GEN8_PCU_);
- ibx_irq_reset(dev);
+ if (HAS_PCH_SPLIT(dev))
+ ibx_irq_reset(dev);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
@@ -3178,6 +3235,42 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void bxt_hpd_irq_setup(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_encoder *intel_encoder;
+ u32 hotplug_port = 0;
+ u32 hotplug_ctrl;
+
+ /* Now, enable HPD */
+ for_each_intel_encoder(dev, intel_encoder) {
+ if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark
+ == HPD_ENABLED)
+ hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
+ }
+
+ /* Mask all HPD control bits */
+ hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
+
+ /* Enable requested port in hotplug control */
+ /* TODO: implement (short) HPD support on port A */
+ WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
+ if (hotplug_port & BXT_DE_PORT_HP_DDIB)
+ hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
+ if (hotplug_port & BXT_DE_PORT_HP_DDIC)
+ hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
+ I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
+
+ /* Unmask DDI hotplug in IMR */
+ hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
+ I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
+
+ /* Enable DDI hotplug in IER */
+ hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
+ I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
+ POSTING_READ(GEN8_DE_PORT_IER);
+}
+
static void ibx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3448,13 +3541,16 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
int pipe;
- u32 aux_en = GEN8_AUX_CHANNEL_A;
+ u32 de_port_en = GEN8_AUX_CHANNEL_A;
if (IS_GEN9(dev_priv)) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
+ de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
+
+ if (IS_BROXTON(dev_priv))
+ de_port_en |= BXT_DE_PORT_GMBUS;
} else
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -3473,19 +3569,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
+ GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
}
static int gen8_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- ibx_irq_pre_postinstall(dev);
+ if (HAS_PCH_SPLIT(dev))
+ ibx_irq_pre_postinstall(dev);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- ibx_irq_postinstall(dev);
+ if (HAS_PCH_SPLIT(dev))
+ ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
@@ -3694,7 +3792,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->ring[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -3883,7 +3981,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->ring[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4110,9 +4208,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->ring[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->ring[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4294,7 +4392,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ if (HAS_PCH_SPLIT(dev))
+ dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
+ else
+ dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index bb64415a1c3e..8ac5a1b29ac0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -53,6 +53,7 @@ struct i915_params i915 __read_mostly = {
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
+ .edp_vswing = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -184,3 +185,10 @@ MODULE_PARM_DESC(verbose_state_checks,
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed. */
+module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
+MODULE_PARM_DESC(edp_vswing,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+	"(0=use value from vbt [default], 1=low power swing(200mV), "
+	"2=default swing(400mV))");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3aaed099e4fe..f5edb3504167 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -595,10 +595,6 @@ enum punit_power_well {
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
- /* FIXME: guesswork below */
- PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
- PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
- PUNIT_POWER_WELL_DPIO_RX2 = 15,
PUNIT_POWER_WELL_NUM,
};
@@ -670,9 +666,13 @@ enum skl_disp_power_wells {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
-#define VLV_RP_UP_EI_THRESHOLD 90
-#define VLV_RP_DOWN_EI_THRESHOLD 70
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
@@ -717,7 +717,7 @@ enum skl_disp_power_wells {
/**
* DOC: DPIO
*
- * VLV and CHV have slightly peculiar display PHYs for driving DP/HDMI
+ * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
* ports. DPIO is the name given to such a display PHY. These PHYs
* don't follow the standard programming model using direct MMIO
* registers, and instead their registers must be accessed trough IOSF
@@ -748,7 +748,7 @@ enum skl_disp_power_wells {
* controlled from the display controller side. No DPIO registers
* need to be accessed during AUX communication,
*
- * Generally the common lane corresponds to the pipe and
+ * Generally on VLV/CHV the common lane corresponds to the pipe and
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
@@ -770,11 +770,17 @@ enum skl_disp_power_wells {
*
* port D == PCS/TX CH0
*
- * Note: digital port B is DDI0, digital port C is DDI1,
- * digital port D is DDI2
+ * On BXT the entire PHY channel corresponds to the port. That means
+ * the PLL is also now associated with the port rather than the pipe,
+ * and so the clock needs to be routed to the appropriate transcoder.
+ * Port A PLL is directly connected to transcoder EDP and port B/C
+ * PLLs can be routed to any transcoder A/B/C.
+ *
+ * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
+ * digital port D (CHV) or port A (BXT).
*/
/*
- * Dual channel PHY (VLV/CHV)
+ * Dual channel PHY (VLV/CHV/BXT)
* ---------------------------------
* | CH0 | CH1 |
* | CMN/PLL/REF | CMN/PLL/REF |
@@ -786,7 +792,7 @@ enum skl_disp_power_wells {
* | DDI0 | DDI1 | DP/HDMI ports
* ---------------------------------
*
- * Single channel PHY (CHV)
+ * Single channel PHY (CHV/BXT)
* -----------------
* | CH0 |
* | CMN/PLL/REF |
@@ -951,6 +957,7 @@ enum skl_disp_power_wells {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
+#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
@@ -963,8 +970,20 @@ enum skl_disp_power_wells {
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+#define _VLV_PCS01_DW12_CH0 0x0230
+#define _VLV_PCS23_DW12_CH0 0x0430
+#define _VLV_PCS01_DW12_CH1 0x2630
+#define _VLV_PCS23_DW12_CH1 0x2830
+#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
+#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
+
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
+#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -1119,6 +1138,245 @@ enum skl_disp_power_wells {
#define DPIO_FRC_LATENCY_SHFIT 8
#define CHV_TX_DW14(ch, lane) _TXLANE(ch, lane, 0xb8)
#define DPIO_UPAR_SHIFT 30
+
+/* BXT PHY registers */
+#define _BXT_PHY(phy, a, b) _PIPE((phy), (a), (b))
+
+#define BXT_P_CR_GT_DISP_PWRON 0x138090
+#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
+
+#define _PHY_CTL_FAMILY_EDP 0x64C80
+#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define COMMON_RESET_DIS (1 << 31)
+#define BXT_PHY_CTL_FAMILY(phy) _BXT_PHY((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP)
+
+/* BXT PHY PLL registers */
+#define _PORT_PLL_A 0x46074
+#define _PORT_PLL_B 0x46078
+#define _PORT_PLL_C 0x4607c
+#define PORT_PLL_ENABLE (1 << 31)
+#define PORT_PLL_LOCK (1 << 30)
+#define PORT_PLL_REF_SEL (1 << 27)
+#define BXT_PORT_PLL_ENABLE(port) _PORT(port, _PORT_PLL_A, _PORT_PLL_B)
+
+#define _PORT_PLL_EBB_0_A 0x162034
+#define _PORT_PLL_EBB_0_B 0x6C034
+#define _PORT_PLL_EBB_0_C 0x6C340
+#define PORT_PLL_P1_MASK (0x07 << 13)
+#define PORT_PLL_P1(x) ((x) << 13)
+#define PORT_PLL_P2_MASK (0x1f << 8)
+#define PORT_PLL_P2(x) ((x) << 8)
+#define BXT_PORT_PLL_EBB_0(port) _PORT3(port, _PORT_PLL_EBB_0_A, \
+ _PORT_PLL_EBB_0_B, \
+ _PORT_PLL_EBB_0_C)
+
+#define _PORT_PLL_EBB_4_A 0x162038
+#define _PORT_PLL_EBB_4_B 0x6C038
+#define _PORT_PLL_EBB_4_C 0x6C344
+#define PORT_PLL_10BIT_CLK_ENABLE (1 << 13)
+#define PORT_PLL_RECALIBRATE (1 << 14)
+#define BXT_PORT_PLL_EBB_4(port) _PORT3(port, _PORT_PLL_EBB_4_A, \
+ _PORT_PLL_EBB_4_B, \
+ _PORT_PLL_EBB_4_C)
+
+#define _PORT_PLL_0_A 0x162100
+#define _PORT_PLL_0_B 0x6C100
+#define _PORT_PLL_0_C 0x6C380
+/* PORT_PLL_0_A */
+#define PORT_PLL_M2_MASK 0xFF
+/* PORT_PLL_1_A */
+#define PORT_PLL_N_MASK (0x0F << 8)
+#define PORT_PLL_N(x) ((x) << 8)
+/* PORT_PLL_2_A */
+#define PORT_PLL_M2_FRAC_MASK 0x3FFFFF
+/* PORT_PLL_3_A */
+#define PORT_PLL_M2_FRAC_ENABLE (1 << 16)
+/* PORT_PLL_6_A */
+#define PORT_PLL_PROP_COEFF_MASK 0xF
+#define PORT_PLL_INT_COEFF_MASK (0x1F << 8)
+#define PORT_PLL_INT_COEFF(x) ((x) << 8)
+#define PORT_PLL_GAIN_CTL_MASK (0x07 << 16)
+#define PORT_PLL_GAIN_CTL(x) ((x) << 16)
+/* PORT_PLL_8_A */
+#define PORT_PLL_TARGET_CNT_MASK 0x3FF
+/* PORT_PLL_9_A */
+#define PORT_PLL_LOCK_THRESHOLD_MASK 0xe
+/* PORT_PLL_10_A */
+#define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27)
+#define PORT_PLL_DCO_AMP_MASK 0x3c00
+#define PORT_PLL_DCO_AMP(x) (x<<10)
+#define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \
+ _PORT_PLL_0_B, \
+ _PORT_PLL_0_C)
+#define BXT_PORT_PLL(port, idx) (_PORT_PLL_BASE(port) + (idx) * 4)
+
+/* BXT PHY common lane registers */
+#define _PORT_CL1CM_DW0_A 0x162000
+#define _PORT_CL1CM_DW0_BC 0x6C000
+#define PHY_POWER_GOOD (1 << 16)
+#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
+ _PORT_CL1CM_DW0_A)
+
+#define _PORT_CL1CM_DW9_A 0x162024
+#define _PORT_CL1CM_DW9_BC 0x6C024
+#define IREF0RC_OFFSET_SHIFT 8
+#define IREF0RC_OFFSET_MASK (0xFF << IREF0RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW9(phy) _BXT_PHY((phy), _PORT_CL1CM_DW9_BC, \
+ _PORT_CL1CM_DW9_A)
+
+#define _PORT_CL1CM_DW10_A 0x162028
+#define _PORT_CL1CM_DW10_BC 0x6C028
+#define IREF1RC_OFFSET_SHIFT 8
+#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
+#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC, \
+ _PORT_CL1CM_DW10_A)
+
+#define _PORT_CL1CM_DW28_A 0x162070
+#define _PORT_CL1CM_DW28_BC 0x6C070
+#define OCL1_POWER_DOWN_EN (1 << 23)
+#define DW28_OLDO_DYN_PWR_DOWN_EN (1 << 22)
+#define SUS_CLK_CONFIG 0x3
+#define BXT_PORT_CL1CM_DW28(phy) _BXT_PHY((phy), _PORT_CL1CM_DW28_BC, \
+ _PORT_CL1CM_DW28_A)
+
+#define _PORT_CL1CM_DW30_A 0x162078
+#define _PORT_CL1CM_DW30_BC 0x6C078
+#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
+#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC, \
+ _PORT_CL1CM_DW30_A)
+
+/* Defined for PHY0 only */
+#define BXT_PORT_CL2CM_DW6_BC 0x6C358
+#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
+
+/* BXT PHY Ref registers */
+#define _PORT_REF_DW3_A 0x16218C
+#define _PORT_REF_DW3_BC 0x6C18C
+#define GRC_DONE (1 << 22)
+#define BXT_PORT_REF_DW3(phy) _BXT_PHY((phy), _PORT_REF_DW3_BC, \
+ _PORT_REF_DW3_A)
+
+#define _PORT_REF_DW6_A 0x162198
+#define _PORT_REF_DW6_BC 0x6C198
+/*
+ * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
+ * after testing.
+ */
+#define GRC_CODE_SHIFT 23
+#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
+#define GRC_CODE_FAST_SHIFT 16
+#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
+#define GRC_CODE_SLOW_SHIFT 8
+#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
+#define GRC_CODE_NOM_MASK 0xFF
+#define BXT_PORT_REF_DW6(phy) _BXT_PHY((phy), _PORT_REF_DW6_BC, \
+ _PORT_REF_DW6_A)
+
+#define _PORT_REF_DW8_A 0x1621A0
+#define _PORT_REF_DW8_BC 0x6C1A0
+#define GRC_DIS (1 << 15)
+#define GRC_RDY_OVRD (1 << 1)
+#define BXT_PORT_REF_DW8(phy) _BXT_PHY((phy), _PORT_REF_DW8_BC, \
+ _PORT_REF_DW8_A)
+
+/* BXT PHY PCS registers */
+#define _PORT_PCS_DW10_LN01_A 0x162428
+#define _PORT_PCS_DW10_LN01_B 0x6C428
+#define _PORT_PCS_DW10_LN01_C 0x6C828
+#define _PORT_PCS_DW10_GRP_A 0x162C28
+#define _PORT_PCS_DW10_GRP_B 0x6CC28
+#define _PORT_PCS_DW10_GRP_C 0x6CE28
+#define BXT_PORT_PCS_DW10_LN01(port) _PORT3(port, _PORT_PCS_DW10_LN01_A, \
+ _PORT_PCS_DW10_LN01_B, \
+ _PORT_PCS_DW10_LN01_C)
+#define BXT_PORT_PCS_DW10_GRP(port) _PORT3(port, _PORT_PCS_DW10_GRP_A, \
+ _PORT_PCS_DW10_GRP_B, \
+ _PORT_PCS_DW10_GRP_C)
+#define TX2_SWING_CALC_INIT (1 << 31)
+#define TX1_SWING_CALC_INIT (1 << 30)
+
+#define _PORT_PCS_DW12_LN01_A 0x162430
+#define _PORT_PCS_DW12_LN01_B 0x6C430
+#define _PORT_PCS_DW12_LN01_C 0x6C830
+#define _PORT_PCS_DW12_LN23_A 0x162630
+#define _PORT_PCS_DW12_LN23_B 0x6C630
+#define _PORT_PCS_DW12_LN23_C 0x6CA30
+#define _PORT_PCS_DW12_GRP_A 0x162c30
+#define _PORT_PCS_DW12_GRP_B 0x6CC30
+#define _PORT_PCS_DW12_GRP_C 0x6CE30
+#define LANESTAGGER_STRAP_OVRD (1 << 6)
+#define LANE_STAGGER_MASK 0x1F
+#define BXT_PORT_PCS_DW12_LN01(port) _PORT3(port, _PORT_PCS_DW12_LN01_A, \
+ _PORT_PCS_DW12_LN01_B, \
+ _PORT_PCS_DW12_LN01_C)
+#define BXT_PORT_PCS_DW12_LN23(port) _PORT3(port, _PORT_PCS_DW12_LN23_A, \
+ _PORT_PCS_DW12_LN23_B, \
+ _PORT_PCS_DW12_LN23_C)
+#define BXT_PORT_PCS_DW12_GRP(port) _PORT3(port, _PORT_PCS_DW12_GRP_A, \
+ _PORT_PCS_DW12_GRP_B, \
+ _PORT_PCS_DW12_GRP_C)
+
+/* BXT PHY TX registers */
+#define _BXT_LANE_OFFSET(lane) (((lane) >> 1) * 0x200 + \
+ ((lane) & 1) * 0x80)
+
+#define _PORT_TX_DW2_LN0_A 0x162508
+#define _PORT_TX_DW2_LN0_B 0x6C508
+#define _PORT_TX_DW2_LN0_C 0x6C908
+#define _PORT_TX_DW2_GRP_A 0x162D08
+#define _PORT_TX_DW2_GRP_B 0x6CD08
+#define _PORT_TX_DW2_GRP_C 0x6CF08
+#define BXT_PORT_TX_DW2_GRP(port) _PORT3(port, _PORT_TX_DW2_GRP_A, \
+ _PORT_TX_DW2_GRP_B, \
+ _PORT_TX_DW2_GRP_C)
+#define BXT_PORT_TX_DW2_LN0(port) _PORT3(port, _PORT_TX_DW2_LN0_A, \
+ _PORT_TX_DW2_LN0_B, \
+ _PORT_TX_DW2_LN0_C)
+#define MARGIN_000_SHIFT 16
+#define MARGIN_000 (0xFF << MARGIN_000_SHIFT)
+#define UNIQ_TRANS_SCALE_SHIFT 8
+#define UNIQ_TRANS_SCALE (0xFF << UNIQ_TRANS_SCALE_SHIFT)
+
+#define _PORT_TX_DW3_LN0_A 0x16250C
+#define _PORT_TX_DW3_LN0_B 0x6C50C
+#define _PORT_TX_DW3_LN0_C 0x6C90C
+#define _PORT_TX_DW3_GRP_A 0x162D0C
+#define _PORT_TX_DW3_GRP_B 0x6CD0C
+#define _PORT_TX_DW3_GRP_C 0x6CF0C
+#define BXT_PORT_TX_DW3_GRP(port) _PORT3(port, _PORT_TX_DW3_GRP_A, \
+ _PORT_TX_DW3_GRP_B, \
+ _PORT_TX_DW3_GRP_C)
+#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
+ _PORT_TX_DW3_LN0_B, \
+ _PORT_TX_DW3_LN0_C)
+#define UNIQE_TRANGE_EN_METHOD (1 << 27)
+
+#define _PORT_TX_DW4_LN0_A 0x162510
+#define _PORT_TX_DW4_LN0_B 0x6C510
+#define _PORT_TX_DW4_LN0_C 0x6C910
+#define _PORT_TX_DW4_GRP_A 0x162D10
+#define _PORT_TX_DW4_GRP_B 0x6CD10
+#define _PORT_TX_DW4_GRP_C 0x6CF10
+#define BXT_PORT_TX_DW4_LN0(port) _PORT3(port, _PORT_TX_DW4_LN0_A, \
+ _PORT_TX_DW4_LN0_B, \
+ _PORT_TX_DW4_LN0_C)
+#define BXT_PORT_TX_DW4_GRP(port) _PORT3(port, _PORT_TX_DW4_GRP_A, \
+ _PORT_TX_DW4_GRP_B, \
+ _PORT_TX_DW4_GRP_C)
+#define DEEMPH_SHIFT 24
+#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
+
+#define _PORT_TX_DW14_LN0_A 0x162538
+#define _PORT_TX_DW14_LN0_B 0x6C538
+#define _PORT_TX_DW14_LN0_C 0x6C938
+#define LATENCY_OPTIM_SHIFT 30
+#define LATENCY_OPTIM (1 << LATENCY_OPTIM_SHIFT)
+#define BXT_PORT_TX_DW14_LN(port, lane) (_PORT3((port), _PORT_TX_DW14_LN0_A, \
+ _PORT_TX_DW14_LN0_B, \
+ _PORT_TX_DW14_LN0_C) + \
+ _BXT_LANE_OFFSET(lane))
+
/*
* Fence registers
*/
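The BXT per-lane TX registers above are addressed by grouping lanes in pairs 0x200 apart, with the odd lane of each pair another 0x80 in, which is exactly what _BXT_LANE_OFFSET() encodes. A quick sketch (not driver code) that prints the resulting per-lane offsets:

#include <stdio.h>

#define BXT_LANE_OFFSET(lane) ((((lane) >> 1) * 0x200u) + (((lane) & 1) * 0x80u))

int main(void)
{
	int lane;

	/* prints offsets 0, 0x80, 0x200 and 0x280 for lanes 0..3 */
	for (lane = 0; lane < 4; lane++)
		printf("lane %d -> +%#x\n", lane, BXT_LANE_OFFSET(lane));
	return 0;
}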
@@ -1150,6 +1408,7 @@ enum skl_disp_power_wells {
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
+#define TILECTL_TLBPF (1 << 1)
#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
#define TILECTL_BACKSNOOP_DIS (1 << 3)
@@ -1198,6 +1457,8 @@ enum skl_disp_power_wells {
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
+#define HSW_GTT_CACHE_EN 0x4024
+#define GTT_CACHE_EN_ALL 0xF0007FFF
#define GEN7_WR_WATERMARK 0x4028
#define GEN7_GFX_PRIO_CTRL 0x402C
#define ARB_MODE 0x4030
@@ -1554,9 +1815,7 @@ enum skl_disp_power_wells {
#define GEN9_F2_SS_DIS_SHIFT 20
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
-#define GEN8_EU_DISABLE0 0x9134
-#define GEN8_EU_DISABLE1 0x9138
-#define GEN8_EU_DISABLE2 0x913c
+#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
@@ -1788,16 +2047,19 @@ enum skl_disp_power_wells {
#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
-#define GMBUS_PORT_DISABLED 0
-#define GMBUS_PORT_SSC 1
-#define GMBUS_PORT_VGADDC 2
-#define GMBUS_PORT_PANEL 3
-#define GMBUS_PORT_DPD_CHV 3 /* HDMID_CHV */
-#define GMBUS_PORT_DPC 4 /* HDMIC */
-#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
-#define GMBUS_PORT_DPD 6 /* HDMID */
-#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
-#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
+#define GMBUS_PIN_DISABLED 0
+#define GMBUS_PIN_SSC 1
+#define GMBUS_PIN_VGADDC 2
+#define GMBUS_PIN_PANEL 3
+#define GMBUS_PIN_DPD_CHV 3 /* HDMID_CHV */
+#define GMBUS_PIN_DPC 4 /* HDMIC */
+#define GMBUS_PIN_DPB 5 /* SDVO, HDMIB */
+#define GMBUS_PIN_DPD 6 /* HDMID */
+#define GMBUS_PIN_RESERVED 7 /* 7 reserved */
+#define GMBUS_PIN_1_BXT 1
+#define GMBUS_PIN_2_BXT 2
+#define GMBUS_PIN_3_BXT 3
+#define GMBUS_NUM_PINS 7 /* including 0 */
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
@@ -1879,7 +2141,14 @@ enum skl_disp_power_wells {
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
-#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define PHY_LDO_DELAY_0NS 0x0
+#define PHY_LDO_DELAY_200NS 0x1
+#define PHY_LDO_DELAY_600NS 0x2
+#define PHY_LDO_SEQ_DELAY(delay, phy) ((delay) << (2*(phy)+23))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
@@ -2689,7 +2958,6 @@ enum skl_disp_power_wells {
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
-#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
@@ -2749,6 +3017,20 @@ enum skl_disp_power_wells {
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+#define EDP_PSR2_CTL 0x6f900
+#define EDP_PSR2_ENABLE (1<<31)
+#define EDP_SU_TRACK_ENABLE (1<<30)
+#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
+#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
+#define EDP_PSR2_TP2_TIME_500 (0<<8)
+#define EDP_PSR2_TP2_TIME_100 (1<<8)
+#define EDP_PSR2_TP2_TIME_2500 (2<<8)
+#define EDP_PSR2_TP2_TIME_50 (3<<8)
+#define EDP_PSR2_TP2_TIME_MASK (3<<8)
+#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
+#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
+#define EDP_PSR2_IDLE_MASK 0xf
+
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
@@ -3228,6 +3510,18 @@ enum skl_disp_power_wells {
#define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_ENABLE (1 << 31)
+/* BXT backlight register definition. */
+#define BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define BXT_BLC_PWM_FREQ1 0xC8254
+#define BXT_BLC_PWM_DUTY1 0xC8258
+
+#define BXT_BLC_PWM_CTL2 0xC8350
+#define BXT_BLC_PWM_FREQ2 0xC8354
+#define BXT_BLC_PWM_DUTY2 0xC8358
+
+
#define PCH_GTC_CTL 0xe7000
#define PCH_GTC_ENABLE (1 << 31)
@@ -4855,7 +5149,9 @@ enum skl_disp_power_wells {
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
+#define PLANE_CTL_ROTATE_90 0x1
#define PLANE_CTL_ROTATE_180 0x2
+#define PLANE_CTL_ROTATE_270 0x3
#define _PLANE_STRIDE_1_A 0x70188
#define _PLANE_STRIDE_2_A 0x70288
#define _PLANE_STRIDE_3_A 0x70388
@@ -4879,6 +5175,8 @@ enum skl_disp_power_wells {
#define _PLANE_KEYMAX_2_A 0x702a0
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
+#define _PLANE_NV12_BUF_CFG_1_A 0x70278
+#define _PLANE_NV12_BUF_CFG_2_A 0x70378
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
@@ -4965,6 +5263,15 @@ enum skl_disp_power_wells {
#define PLANE_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
+#define _PLANE_NV12_BUF_CFG_1_B 0x71278
+#define _PLANE_NV12_BUF_CFG_2_B 0x71378
+#define _PLANE_NV12_BUF_CFG_1(pipe) \
+ _PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B)
+#define _PLANE_NV12_BUF_CFG_2(pipe) \
+ _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B)
+#define PLANE_NV12_BUF_CFG(pipe, plane) \
+ _PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
+
/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
@@ -5097,6 +5404,121 @@ enum skl_disp_power_wells {
#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
+/*
+ * Skylake scalers
+ */
+#define _PS_1A_CTRL 0x68180
+#define _PS_2A_CTRL 0x68280
+#define _PS_1B_CTRL 0x68980
+#define _PS_2B_CTRL 0x68A80
+#define _PS_1C_CTRL 0x69180
+#define PS_SCALER_EN (1 << 31)
+#define PS_SCALER_MODE_MASK (3 << 28)
+#define PS_SCALER_MODE_DYN (0 << 28)
+#define PS_SCALER_MODE_HQ (1 << 28)
+#define PS_PLANE_SEL_MASK (7 << 25)
+#define PS_PLANE_SEL(plane) ((plane + 1) << 25)
+#define PS_FILTER_MASK (3 << 23)
+#define PS_FILTER_MEDIUM (0 << 23)
+#define PS_FILTER_EDGE_ENHANCE (2 << 23)
+#define PS_FILTER_BILINEAR (3 << 23)
+#define PS_VERT3TAP (1 << 21)
+#define PS_VERT_INT_INVERT_FIELD1 (0 << 20)
+#define PS_VERT_INT_INVERT_FIELD0 (1 << 20)
+#define PS_PWRUP_PROGRESS (1 << 17)
+#define PS_V_FILTER_BYPASS (1 << 8)
+#define PS_VADAPT_EN (1 << 7)
+#define PS_VADAPT_MODE_MASK (3 << 5)
+#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
+#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
+#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
+
+#define _PS_PWR_GATE_1A 0x68160
+#define _PS_PWR_GATE_2A 0x68260
+#define _PS_PWR_GATE_1B 0x68960
+#define _PS_PWR_GATE_2B 0x68A60
+#define _PS_PWR_GATE_1C 0x69160
+#define PS_PWR_GATE_DIS_OVERRIDE (1 << 31)
+#define PS_PWR_GATE_SETTLING_TIME_32 (0 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_64 (1 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_96 (2 << 3)
+#define PS_PWR_GATE_SETTLING_TIME_128 (3 << 3)
+#define PS_PWR_GATE_SLPEN_8 0
+#define PS_PWR_GATE_SLPEN_16 1
+#define PS_PWR_GATE_SLPEN_24 2
+#define PS_PWR_GATE_SLPEN_32 3
+
+#define _PS_WIN_POS_1A 0x68170
+#define _PS_WIN_POS_2A 0x68270
+#define _PS_WIN_POS_1B 0x68970
+#define _PS_WIN_POS_2B 0x68A70
+#define _PS_WIN_POS_1C 0x69170
+
+#define _PS_WIN_SZ_1A 0x68174
+#define _PS_WIN_SZ_2A 0x68274
+#define _PS_WIN_SZ_1B 0x68974
+#define _PS_WIN_SZ_2B 0x68A74
+#define _PS_WIN_SZ_1C 0x69174
+
+#define _PS_VSCALE_1A 0x68184
+#define _PS_VSCALE_2A 0x68284
+#define _PS_VSCALE_1B 0x68984
+#define _PS_VSCALE_2B 0x68A84
+#define _PS_VSCALE_1C 0x69184
+
+#define _PS_HSCALE_1A 0x68190
+#define _PS_HSCALE_2A 0x68290
+#define _PS_HSCALE_1B 0x68990
+#define _PS_HSCALE_2B 0x68A90
+#define _PS_HSCALE_1C 0x69190
+
+#define _PS_VPHASE_1A 0x68188
+#define _PS_VPHASE_2A 0x68288
+#define _PS_VPHASE_1B 0x68988
+#define _PS_VPHASE_2B 0x68A88
+#define _PS_VPHASE_1C 0x69188
+
+#define _PS_HPHASE_1A 0x68194
+#define _PS_HPHASE_2A 0x68294
+#define _PS_HPHASE_1B 0x68994
+#define _PS_HPHASE_2B 0x68A94
+#define _PS_HPHASE_1C 0x69194
+
+#define _PS_ECC_STAT_1A 0x681D0
+#define _PS_ECC_STAT_2A 0x682D0
+#define _PS_ECC_STAT_1B 0x689D0
+#define _PS_ECC_STAT_2B 0x68AD0
+#define _PS_ECC_STAT_1C 0x691D0
+
+#define _ID(id, a, b) ((a) + (id)*((b)-(a)))
+#define SKL_PS_CTRL(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_1A_CTRL, _PS_2A_CTRL), \
+ _ID(id, _PS_1B_CTRL, _PS_2B_CTRL))
+#define SKL_PS_PWR_GATE(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_PWR_GATE_1A, _PS_PWR_GATE_2A), \
+ _ID(id, _PS_PWR_GATE_1B, _PS_PWR_GATE_2B))
+#define SKL_PS_WIN_POS(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_WIN_POS_1A, _PS_WIN_POS_2A), \
+ _ID(id, _PS_WIN_POS_1B, _PS_WIN_POS_2B))
+#define SKL_PS_WIN_SZ(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_WIN_SZ_1A, _PS_WIN_SZ_2A), \
+ _ID(id, _PS_WIN_SZ_1B, _PS_WIN_SZ_2B))
+#define SKL_PS_VSCALE(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_VSCALE_1A, _PS_VSCALE_2A), \
+ _ID(id, _PS_VSCALE_1B, _PS_VSCALE_2B))
+#define SKL_PS_HSCALE(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_HSCALE_1A, _PS_HSCALE_2A), \
+ _ID(id, _PS_HSCALE_1B, _PS_HSCALE_2B))
+#define SKL_PS_VPHASE(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_VPHASE_1A, _PS_VPHASE_2A), \
+ _ID(id, _PS_VPHASE_1B, _PS_VPHASE_2B))
+#define SKL_PS_HPHASE(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_HPHASE_1A, _PS_HPHASE_2A), \
+ _ID(id, _PS_HPHASE_1B, _PS_HPHASE_2B))
+#define SKL_PS_ECC_STAT(pipe, id) _PIPE(pipe, \
+ _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A), \
+			_ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
+
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
@@ -5219,9 +5641,11 @@ enum skl_disp_power_wells {
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
+#define GEN9_PIPE_PLANE4_FAULT (1 << 10)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
+#define GEN9_PIPE_PLANE4_FLIP_DONE (1 << 6)
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
@@ -5232,6 +5656,7 @@ enum skl_disp_power_wells {
GEN8_PIPE_PRIMARY_FAULT)
#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_PIPE_CURSOR_FAULT | \
+ GEN9_PIPE_PLANE4_FAULT | \
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
@@ -5240,10 +5665,17 @@ enum skl_disp_power_wells {
#define GEN8_DE_PORT_IMR 0x44444
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
-#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
+#define BXT_DE_PORT_HP_DDIC (1 << 5)
+#define BXT_DE_PORT_HP_DDIB (1 << 4)
+#define BXT_DE_PORT_HP_DDIA (1 << 3)
+#define BXT_DE_PORT_HOTPLUG_MASK (BXT_DE_PORT_HP_DDIA | \
+ BXT_DE_PORT_HP_DDIB | \
+ BXT_DE_PORT_HP_DDIC)
+#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
+#define BXT_DE_PORT_GMBUS (1 << 1)
#define GEN8_AUX_CHANNEL_A (1 << 0)
#define GEN8_DE_MISC_ISR 0x44460
@@ -5257,6 +5689,21 @@ enum skl_disp_power_wells {
#define GEN8_PCU_IIR 0x444e8
#define GEN8_PCU_IER 0x444ec
+/* BXT hotplug control */
+#define BXT_HOTPLUG_CTL 0xC4030
+#define BXT_DDIA_HPD_ENABLE (1 << 28)
+#define BXT_DDIA_HPD_STATUS (3 << 24)
+#define BXT_DDIC_HPD_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_STATUS (3 << 8)
+#define BXT_DDIB_HPD_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_STATUS (3 << 0)
+#define BXT_HOTPLUG_CTL_MASK (BXT_DDIA_HPD_ENABLE | \
+ BXT_DDIB_HPD_ENABLE | \
+ BXT_DDIC_HPD_ENABLE)
+#define BXT_HPD_STATUS_MASK (BXT_DDIA_HPD_STATUS | \
+ BXT_DDIB_HPD_STATUS | \
+ BXT_DDIC_HPD_STATUS)
+
#define ILK_DISPLAY_CHICKEN2 0x42004
/* Required on all Ironlake and Sandybridge according to the B-Spec. */
#define ILK_ELPIN_409_SELECT (1 << 25)
@@ -5297,13 +5744,16 @@ enum skl_disp_power_wells {
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 0x45004
#define DISP_DATA_PARTITION_5_6 (1<<6)
+#define DBUF_CTL 0x45008
+#define DBUF_POWER_REQUEST (1<<31)
+#define DBUF_POWER_STATE (1<<30)
#define GEN7_MSG_CTL 0x45010
#define WAIT_FOR_PCH_RESET_ACK (1<<1)
#define WAIT_FOR_PCH_FLR_ACK (1<<0)
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
-#define FF_SLICE_CS_CHICKEN2 0x02e4
+#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
/* GEN7 chicken */
@@ -5323,6 +5773,9 @@ enum skl_disp_power_wells {
#define GEN7_L3SQCREG1 0xB010
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
+#define GEN8_L3SQCREG1 0xB100
+#define BDW_WA_L3SQCREG1_DEFAULT 0x784000
+
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
@@ -5340,12 +5793,17 @@ enum skl_disp_power_wells {
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
+#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5)
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10)
+/* GEN9 chicken */
+#define SLICE_ECO_CHICKEN0 0x7308
+#define PIXEL_MASK_CAMMING_DISABLE (1 << 14)
+
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
@@ -5984,6 +6442,7 @@ enum skl_disp_power_wells {
#define TRANS_DP_PORT_SEL_D (2<<29)
#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
+#define TRANS_DP_PIPE_TO_PORT(val) ((((val) & TRANS_DP_PORT_SEL_MASK) >> 29) + PORT_B)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
#define TRANS_DP_8BPC (0<<9)
@@ -6088,6 +6547,7 @@ enum skl_disp_power_wells {
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
+# define GEN6_VFUNIT_CLOCK_GATE_DISABLE (1 << 31)
# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
@@ -6106,6 +6566,7 @@ enum skl_disp_power_wells {
#define GEN8_UCGCTL6 0x9430
#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24)
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
+#define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1<<28)
#define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008
@@ -6185,6 +6646,8 @@ enum skl_disp_power_wells {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS 0xA0C4
#define GEN9_RENDER_PG_IDLE_HYSTERESIS 0xA0C8
#define GEN9_PG_ENABLE 0xA210
+#define GEN9_RENDER_PG_ENABLE (1<<0)
+#define GEN9_MEDIA_PG_ENABLE (1<<1)
#define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C)
#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
@@ -6230,15 +6693,24 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
-#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
-#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
+#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
+#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
+#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define SKL_PCODE_CDCLK_CONTROL 0x7
+#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
+#define SKL_CDCLK_READY_FOR_CHANGE 0x1
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
@@ -6246,12 +6718,6 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
-#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
-#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
-#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
-#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
-
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
@@ -6271,17 +6737,12 @@ enum skl_disp_power_wells {
#define CHV_POWER_SS1_SIG2 0xa72c
#define CHV_EU311_PG_ENABLE (1<<1)
-#define GEN9_SLICE0_PGCTL_ACK 0x804c
-#define GEN9_SLICE1_PGCTL_ACK 0x8050
-#define GEN9_SLICE2_PGCTL_ACK 0x8054
+#define GEN9_SLICE_PGCTL_ACK(slice) (0x804c + (slice)*0x4)
#define GEN9_PGCTL_SLICE_ACK (1 << 0)
+#define GEN9_PGCTL_SS_ACK(subslice) (1 << (2 + (subslice)*2))
-#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c
-#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060
-#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064
-#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068
-#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c
-#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070
+#define GEN9_SS01_EU_PGCTL_ACK(slice) (0x805c + (slice)*0x8)
+#define GEN9_SS23_EU_PGCTL_ACK(slice) (0x8060 + (slice)*0x8)
#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0)
#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2)
#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4)
@@ -6317,6 +6778,7 @@ enum skl_disp_power_wells {
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
@@ -6718,6 +7180,13 @@ enum skl_disp_power_wells {
#define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
+#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
+#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
+#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
+#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
+
/* LCPLL_CTL */
#define LCPLL1_CTL 0x46010
#define LCPLL2_CTL 0x46014
@@ -6727,16 +7196,16 @@ enum skl_disp_power_wells {
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
-#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
-#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
-#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
-#define DPLL_CRTL1_LINK_RATE_2700 0
-#define DPLL_CRTL1_LINK_RATE_1350 1
-#define DPLL_CRTL1_LINK_RATE_810 2
-#define DPLL_CRTL1_LINK_RATE_1620 3
-#define DPLL_CRTL1_LINK_RATE_1080 4
-#define DPLL_CRTL1_LINK_RATE_2160 5
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
@@ -6782,6 +7251,31 @@ enum skl_disp_power_wells {
#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
+/* BXT display engine PLL */
+#define BXT_DE_PLL_CTL 0x6d000
+#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
+#define BXT_DE_PLL_RATIO_MASK 0xff
+
+#define BXT_DE_PLL_ENABLE 0x46070
+#define BXT_DE_PLL_PLL_ENABLE (1 << 31)
+#define BXT_DE_PLL_LOCK (1 << 30)
+
+/* GEN9 DC */
+#define DC_STATE_EN 0x45504
+#define DC_STATE_EN_UPTO_DC5 (1<<0)
+#define DC_STATE_EN_DC9 (1<<3)
+
+/*
+* SKL DC
+*/
+#define DC_STATE_EN 0x45504
+#define DC_STATE_EN_UPTO_DC5 (1<<0)
+#define DC_STATE_EN_UPTO_DC6 (2<<0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG 0x45520
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
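The parameterized GEN9 power-gate ACK macros and the RC6 VID helpers above are plain offset arithmetic, so a small standalone C sketch (not part of the patch; it only copies the macro definitions from the hunks above) can check that the new forms reproduce the removed hard-coded addresses:

#include <assert.h>
#include <stdio.h>

/* Copied from the i915_reg.h hunks above. */
#define GEN9_SLICE_PGCTL_ACK(slice)	(0x804c + (slice)*0x4)
#define GEN9_SS01_EU_PGCTL_ACK(slice)	(0x805c + (slice)*0x8)
#define GEN9_SS23_EU_PGCTL_ACK(slice)	(0x8060 + (slice)*0x8)
#define GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids)	(((vids) * 5) + 245)

int main(void)
{
	/* Match the removed GEN9_SLICE1_PGCTL_ACK (0x8050),
	 * GEN9_SLICE1_SS01_EU_PGCTL_ACK (0x8064) and
	 * GEN9_SLICE2_SS23_EU_PGCTL_ACK (0x8070) definitions. */
	assert(GEN9_SLICE_PGCTL_ACK(1) == 0x8050);
	assert(GEN9_SS01_EU_PGCTL_ACK(1) == 0x8064);
	assert(GEN9_SS23_EU_PGCTL_ACK(2) == 0x8070);

	/* RC6 VID encode/decode round-trip for a 450 mV request. */
	assert(GEN6_DECODE_RC6_VID(GEN6_ENCODE_RC6_VID(450)) == 450);
	printf("register and VID macros check out\n");
	return 0;
}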
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 5fda6c70b423..497cba5deb1e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -220,7 +220,7 @@ DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc,
DECLARE_EVENT_CLASS(i915_page_table_entry_update,
TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits),
TP_STRUCT__entry(
@@ -250,7 +250,7 @@ DECLARE_EVENT_CLASS(i915_page_table_entry_update,
DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits),
+ struct i915_page_table *pt, u32 first, u32 count, u32 bits),
TP_ARGS(vm, pde, pt, first, count, bits)
);
@@ -504,7 +504,6 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
- __field(u32, uniq)
__field(u32, seqno)
),
@@ -513,13 +512,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
),
- TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->uniq,
- __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -564,7 +561,6 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
- __field(u32, uniq)
__field(u32, seqno)
__field(bool, blocking)
),
@@ -580,14 +576,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
i915_gem_request_get_ring(req);
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
- __entry->uniq = req ? req->uniq : 0;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
mutex_is_locked(&ring->dev->struct_mutex);
),
- TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s",
- __entry->dev, __entry->ring, __entry->uniq,
+ TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring,
__entry->seqno, __entry->blocking ? "yes (NB)" : "no")
);
@@ -596,33 +591,6 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
TP_ARGS(req)
);
-DECLARE_EVENT_CLASS(i915_ring,
- TP_PROTO(struct intel_engine_cs *ring),
- TP_ARGS(ring),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, ring)
- ),
-
- TP_fast_assign(
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
- ),
-
- TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
-);
-
-DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
- TP_PROTO(struct intel_engine_cs *ring),
- TP_ARGS(ring)
-);
-
-DEFINE_EVENT(i915_ring, i915_ring_wait_end,
- TP_PROTO(struct intel_engine_cs *ring),
- TP_ARGS(ring)
-);
-
TRACE_EVENT(i915_flip_request,
TP_PROTO(int plane, struct drm_i915_gem_object *obj),
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 3903b90fb64e..7ed8033aae60 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -48,6 +48,8 @@ int intel_atomic_check(struct drm_device *dev,
int ncrtcs = dev->mode_config.num_crtc;
int nconnectors = dev->mode_config.num_connector;
enum pipe nuclear_pipe = INVALID_PIPE;
+ struct intel_crtc *nuclear_crtc = NULL;
+ struct intel_crtc_state *crtc_state = NULL;
int ret;
int i;
bool not_nuclear = false;
@@ -76,8 +78,14 @@ int intel_atomic_check(struct drm_device *dev,
state->allow_modeset = false;
for (i = 0; i < ncrtcs; i++) {
struct intel_crtc *crtc = to_intel_crtc(state->crtcs[i]);
+ if (crtc)
+ memset(&crtc->atomic, 0, sizeof(crtc->atomic));
if (crtc && crtc->pipe != nuclear_pipe)
not_nuclear = true;
+ if (crtc && crtc->pipe == nuclear_pipe) {
+ nuclear_crtc = crtc;
+ crtc_state = to_intel_crtc_state(state->crtc_states[i]);
+ }
}
for (i = 0; i < nconnectors; i++)
if (state->connectors[i] != NULL)
@@ -92,6 +100,11 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
+ /* FIXME: move to crtc atomic check function once it is ready */
+ ret = intel_atomic_setup_scalers(dev, nuclear_crtc, crtc_state);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -155,6 +168,21 @@ int intel_atomic_commit(struct drm_device *dev,
swap(state->plane_states[i], plane->state);
plane->state->state = NULL;
}
+
+ /* swap crtc_scaler_state */
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc) {
+ continue;
+ }
+
+ to_intel_crtc(crtc)->config->scaler_state =
+ to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ skl_detach_scalers(to_intel_crtc(crtc));
+ }
+
drm_atomic_helper_commit_planes(dev, state);
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
@@ -222,8 +250,12 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state = kmemdup(intel_crtc->config,
sizeof(*intel_crtc->config), GFP_KERNEL);
- if (crtc_state)
- crtc_state->base.crtc = crtc;
+ if (!crtc_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+
+ crtc_state->base.crtc = crtc;
return &crtc_state->base;
}
@@ -241,3 +273,151 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
{
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
+
+/**
+ * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
+ * @dev: DRM device
+ * @crtc: intel crtc
+ * @crtc_state: incoming crtc_state to validate and setup scalers
+ *
+ * This function sets up scalers based on the staged scaling requests for
+ * a @crtc and its planes. It is called from the crtc-level check path. If the
+ * request is supportable, it attaches scalers to the requested planes and crtc.
+ *
+ * This function takes into account the current scaler(s) in use by any planes
+ * that are not part of this atomic state.
+ *
+ * Returns:
+ * 0 - scalers were set up successfully
+ * error code - otherwise
+ */
+int intel_atomic_setup_scalers(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_plane *plane = NULL;
+ struct intel_plane *intel_plane;
+ struct intel_plane_state *plane_state = NULL;
+ struct intel_crtc_scaler_state *scaler_state;
+ struct drm_atomic_state *drm_state;
+ int num_scalers_need;
+ int i, j;
+
+ if (INTEL_INFO(dev)->gen < 9 || !intel_crtc || !crtc_state)
+ return 0;
+
+ scaler_state = &crtc_state->scaler_state;
+ drm_state = crtc_state->base.state;
+
+ num_scalers_need = hweight32(scaler_state->scaler_users);
+ DRM_DEBUG_KMS("crtc_state = %p need = %d avail = %d scaler_users = 0x%x\n",
+ crtc_state, num_scalers_need, intel_crtc->num_scalers,
+ scaler_state->scaler_users);
+
+ /*
+ * High level flow:
+ * - staged scaler requests are already in scaler_state->scaler_users
+ * - check whether staged scaling requests can be supported
+ * - add planes using scalers that aren't in current transaction
+ * - assign scalers to requested users
+ * - as part of plane commit, scalers will be committed
+ * (i.e., either attached or detached) to respective planes in hw
+ * - as part of crtc_commit, scaler will be either attached or detached
+ * to crtc in hw
+ */
+
+ /* fail if required scalers > available scalers */
+ if (num_scalers_need > intel_crtc->num_scalers) {
+ DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
+ return -EINVAL;
+ }
+
+ /* walk through the scaler_users bits and start assigning scalers */
+ for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
+ int *scaler_id;
+
+ /* skip if scaler not required */
+ if (!(scaler_state->scaler_users & (1 << i)))
+ continue;
+
+ if (i == SKL_CRTC_INDEX) {
+ /* panel fitter case: assign as a crtc scaler */
+ scaler_id = &scaler_state->scaler_id;
+ } else {
+ if (!drm_state)
+ continue;
+
+ /* plane scaler case: assign as a plane scaler */
+ /* find the plane that set the bit as scaler_user */
+ plane = drm_state->planes[i];
+
+ /*
+ * To enable/disable HQ mode, add planes that are using a scaler
+ * into this transaction
+ */
+ if (!plane) {
+ struct drm_plane_state *state;
+ plane = drm_plane_from_index(dev, i);
+ state = drm_atomic_get_plane_state(drm_state, plane);
+ if (IS_ERR(state)) {
+ DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
+ return PTR_ERR(state);
+ }
+ }
+
+ intel_plane = to_intel_plane(plane);
+
+ /* plane on different crtc cannot be a scaler user of this crtc */
+ if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+ continue;
+ }
+
+ plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+ scaler_id = &plane_state->scaler_id;
+ }
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (!scaler_state->scalers[j].in_use) {
+ scaler_state->scalers[j].in_use = 1;
+ *scaler_id = scaler_state->scalers[j].id;
+ DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe,
+ i == SKL_CRTC_INDEX ? scaler_state->scaler_id :
+ plane_state->scaler_id,
+ i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
+ i == SKL_CRTC_INDEX ? intel_crtc->base.base.id :
+ plane->base.id);
+ break;
+ }
+ }
+ }
+
+ if (WARN_ON(*scaler_id < 0)) {
+ DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n",
+ i == SKL_CRTC_INDEX ? "CRTC" : "PLANE",
+ i == SKL_CRTC_INDEX ? intel_crtc->base.base.id : plane->base.id);
+ continue;
+ }
+
+ /* set scaler mode */
+ if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
+ /*
+ * when only 1 scaler is in use on either pipe A or B,
+ * scaler 0 operates in high quality (HQ) mode.
+ * In this case use scaler 0 to take advantage of HQ mode
+ */
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
+ scaler_state->scalers[1].in_use = 0;
+ } else {
+ scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
+ }
+ }
+
+ return 0;
+}
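intel_atomic_setup_scalers() walks the scaler_users bitmask and hands the first free scaler to each requester. The simplified standalone C model below is not the driver code: it ignores the HQ-mode special case and the plane lookup, assumes the two scalers of a SKL pipe A/B, and uses the top bit for the crtc purely as an illustration, but it shows the same bit-walk and first-fit assignment:

#include <stdio.h>

#define NUM_SCALERS 2	/* two scalers per pipe in this model */
#define MAX_USERS   32	/* one bit per scaler user; top bit = crtc here */

/* Returns 0 on success and fills scaler_id[i] for every requesting user i,
 * or -1 when more users request scaling than scalers exist. */
static int assign_scalers(unsigned int scaler_users, int scaler_id[MAX_USERS])
{
	int in_use[NUM_SCALERS] = { 0 };
	int i, j;

	if (__builtin_popcount(scaler_users) > NUM_SCALERS)
		return -1;

	for (i = 0; i < MAX_USERS; i++) {
		if (!(scaler_users & (1u << i)))
			continue;	/* this user staged no scaling */
		for (j = 0; j < NUM_SCALERS; j++) {
			if (!in_use[j]) {
				in_use[j] = 1;
				scaler_id[i] = j;
				break;
			}
		}
	}
	return 0;
}

int main(void)
{
	int ids[MAX_USERS], i;

	for (i = 0; i < MAX_USERS; i++)
		ids[i] = -1;

	/* plane 0 and the crtc (panel fitter) both request scaling */
	if (!assign_scalers((1u << 0) | (1u << 31), ids))
		printf("plane0 -> scaler %d, crtc -> scaler %d\n",
		       ids[0], ids[31]);
	return 0;
}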
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 976b89156570..86ba4b2c3a65 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,8 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
return NULL;
state = &intel_state->base;
- if (state->fb)
- drm_framebuffer_reference(state->fb);
+
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
@@ -111,6 +111,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
@@ -126,6 +127,17 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
+ /* FIXME: temporary hack necessary while we still use the plane update
+ * helper. */
+ if (state->state) {
+ crtc_state =
+ intel_atomic_get_crtc_state(state->state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ } else {
+ crtc_state = intel_crtc->config;
+ }
+
/*
* The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can
@@ -144,9 +156,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
- intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
+ crtc_state->base.active ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
- intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
+ crtc_state->base.active ? crtc_state->pipe_src_h : 0;
/*
* Disabling a plane is always okay; we just need to update
@@ -162,6 +174,30 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
(1 << drm_plane_index(plane));
}
+ if (state->fb && intel_rotation_90_or_270(state->rotation)) {
+ if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
+ state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
+ DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * 90/270 is not allowed with RGB64 16:16:16:16,
+ * RGB 16-bit 5:6:5, and Indexed 8-bit.
+ * TBD: Add RGB64 case once its added in supported format list.
+ */
+ switch (state->fb->pixel_format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB565:
+ DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(state->fb->pixel_format));
+ return -EINVAL;
+
+ default:
+ break;
+ }
+ }
+
return intel_plane->check_plane(plane, intel_state);
}
@@ -172,10 +208,6 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
- /* Don't disable an already disabled plane */
- if (!plane->state->fb && !old_state->fb)
- return;
-
intel_plane->commit_plane(plane, intel_state);
}
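The new 90/270 rotation check in intel_plane_atomic_check() boils down to two constraints: the framebuffer must use a Y or Yf tile modifier, and C8 / RGB565 formats are rejected. A tiny standalone sketch of that predicate follows; the enum values are illustrative stand-ins, not the real drm_fourcc / fb-modifier constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the driver uses DRM_FORMAT_* and
 * I915_FORMAT_MOD_* values instead. */
enum fmt { FMT_C8, FMT_RGB565, FMT_XRGB8888 };
enum mod { MOD_X_TILED, MOD_Y_TILED, MOD_Yf_TILED };

static bool rotation_90_270_allowed(enum mod modifier, enum fmt format)
{
	/* 90/270 rotation requires Y or Yf tiling... */
	if (modifier != MOD_Y_TILED && modifier != MOD_Yf_TILED)
		return false;
	/* ...and is not allowed with C8 or RGB565 framebuffers. */
	if (format == FMT_C8 || format == FMT_RGB565)
		return false;
	return true;
}

int main(void)
{
	printf("Y-tiled XRGB8888: %d, X-tiled XRGB8888: %d, Y-tiled RGB565: %d\n",
	       rotation_90_270_allowed(MOD_Y_TILED, FMT_XRGB8888),
	       rotation_90_270_allowed(MOD_X_TILED, FMT_XRGB8888),
	       rotation_90_270_allowed(MOD_Y_TILED, FMT_RGB565));
	return 0;
}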
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ef342571ae6a..3da9b8409f20 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -28,7 +28,6 @@
#include <drm/drmP.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include "i915_drv.h"
/**
@@ -270,6 +269,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
+ if (WARN_ON(port == PORT_A))
+ return;
+
if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
@@ -291,12 +293,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
- if (WARN_ON(!port)) {
- eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
- IBX_ELD_VALID(PORT_D);
- } else {
- eldv = IBX_ELD_VALID(port);
- }
+ eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
@@ -326,6 +323,9 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ if (WARN_ON(port == PORT_A))
+ return;
+
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the
@@ -350,12 +350,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- if (WARN_ON(!port)) {
- eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
- IBX_ELD_VALID(PORT_D);
- } else {
- eldv = IBX_ELD_VALID(port);
- }
+ eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
@@ -511,7 +506,8 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
return -ENODEV;
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
- ret = intel_ddi_get_cdclk_freq(dev_priv);
+ ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c684085cb56a..198fc3c3291b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -36,10 +36,11 @@
static int panel_type;
-static void *
-find_section(struct bdb_header *bdb, int section_id)
+static const void *
+find_section(const void *_bdb, int section_id)
{
- u8 *base = (u8 *)bdb;
+ const struct bdb_header *bdb = _bdb;
+ const u8 *base = _bdb;
int index = 0;
u16 total, current_size;
u8 current_id;
@@ -53,7 +54,7 @@ find_section(struct bdb_header *bdb, int section_id)
current_id = *(base + index);
index++;
- current_size = *((u16 *)(base + index));
+ current_size = *((const u16 *)(base + index));
index += 2;
if (index + current_size > total)
@@ -69,7 +70,7 @@ find_section(struct bdb_header *bdb, int section_id)
}
static u16
-get_blocksize(void *p)
+get_blocksize(const void *p)
{
u16 *block_ptr, block_size;
@@ -204,7 +205,7 @@ get_lvds_fp_timing(const struct bdb_header *bdb,
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
const struct bdb_lvds_options *lvds_options;
const struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -310,7 +311,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
}
static void
-parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_lfp_backlight(struct drm_i915_private *dev_priv,
+ const struct bdb_header *bdb)
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
@@ -348,9 +350,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
/* Try to find sdvo panel data */
static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
- struct lvds_dvo_timing *dvo_timing;
+ const struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
int index;
@@ -361,7 +363,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
}
if (index == -1) {
- struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+ const struct bdb_sdvo_lvds_options *sdvo_lvds_options;
sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
if (!sdvo_lvds_options)
@@ -402,10 +404,10 @@ static int intel_bios_ssc_frequency(struct drm_device *dev,
static void
parse_general_features(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
- struct bdb_general_features *general;
+ const struct bdb_general_features *general;
general = find_section(bdb, BDB_GENERAL_FEATURES);
if (general) {
@@ -428,9 +430,9 @@ parse_general_features(struct drm_i915_private *dev_priv,
static void
parse_general_definitions(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
- struct bdb_general_definitions *general;
+ const struct bdb_general_definitions *general;
general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (general) {
@@ -438,7 +440,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
- if (intel_gmbus_is_port_valid(bus_pin))
+ if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
@@ -447,13 +449,19 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
}
+static const union child_device_config *
+child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
+{
+ return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
+}
+
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
- struct bdb_general_definitions *p_defs;
- union child_device_config *p_child;
+ const struct bdb_general_definitions *p_defs;
+ const union child_device_config *p_child;
int i, child_device_num, count;
u16 block_size;
@@ -476,10 +484,10 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
block_size = get_blocksize(p_defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
- sizeof(*p_child);
+ p_defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
- p_child = &(p_defs->devices[i]);
+ p_child = child_device_ptr(p_defs, i);
if (!p_child->old.device_type) {
/* skip the device block if device type is invalid */
continue;
@@ -539,9 +547,9 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
static void
parse_driver_features(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
- struct bdb_driver_features *driver;
+ const struct bdb_driver_features *driver;
driver = find_section(bdb, BDB_DRIVER_FEATURES);
if (!driver)
@@ -565,11 +573,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
static void
-parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
- struct bdb_edp *edp;
- struct edp_power_seq *edp_pps;
- struct edp_link_params *edp_link_params;
+ const struct bdb_edp *edp;
+ const struct edp_power_seq *edp_pps;
+ const struct edp_link_params *edp_link_params;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
@@ -666,16 +674,21 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
if (bdb->version >= 173) {
uint8_t vswing;
- vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- dev_priv->vbt.edp_low_vswing = vswing == 0;
+ /* Don't read from VBT if the module parameter has a valid value */
+ if (i915.edp_vswing) {
+ dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+ } else {
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ dev_priv->edp_low_vswing = vswing == 0;
+ }
}
}
static void
-parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
- struct bdb_psr *psr;
- struct psr_table *psr_table;
+ const struct bdb_psr *psr;
+ const struct psr_table *psr_table;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
@@ -783,13 +796,14 @@ static u8 *goto_next_sequence(u8 *data, int *size)
}
static void
-parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
- struct bdb_mipi_config *start;
- struct bdb_mipi_sequence *sequence;
- struct mipi_config *config;
- struct mipi_pps_data *pps;
- u8 *data, *seq_data;
+ const struct bdb_mipi_config *start;
+ const struct bdb_mipi_sequence *sequence;
+ const struct mipi_config *config;
+ const struct mipi_pps_data *pps;
+ u8 *data;
+ const u8 *seq_data;
int i, panel_id, seq_size;
u16 block_size;
@@ -933,7 +947,7 @@ err:
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
union child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
@@ -1035,7 +1049,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
struct drm_device *dev = dev_priv->dev;
enum port port;
@@ -1055,10 +1069,11 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv,
static void
parse_device_mapping(struct drm_i915_private *dev_priv,
- struct bdb_header *bdb)
+ const struct bdb_header *bdb)
{
- struct bdb_general_definitions *p_defs;
- union child_device_config *p_child, *child_dev_ptr;
+ const struct bdb_general_definitions *p_defs;
+ const union child_device_config *p_child;
+ union child_device_config *child_dev_ptr;
int i, child_device_num, count;
u16 block_size;
@@ -1067,25 +1082,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
- /* judge whether the size of child device meets the requirements.
- * If the child device size obtained from general definition block
- * is different with sizeof(struct child_device_config), skip the
- * parsing of sdvo device info
- */
- if (p_defs->child_dev_size != sizeof(*p_child)) {
- /* different child dev size . Ignore it */
- DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ if (p_defs->child_dev_size < sizeof(*p_child)) {
+ DRM_ERROR("General definiton block child device size is too small.\n");
return;
}
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
- sizeof(*p_child);
+ p_defs->child_dev_size;
count = 0;
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
- p_child = &(p_defs->devices[i]);
+ p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
@@ -1105,7 +1114,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
- p_child = &(p_defs->devices[i]);
+ p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
/* skip the device block if device type is invalid */
continue;
@@ -1121,8 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
- memcpy((void *)child_dev_ptr, (void *)p_child,
- sizeof(*p_child));
+ memcpy(child_dev_ptr, p_child, sizeof(*p_child));
}
return;
}
@@ -1133,7 +1141,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
enum port port;
- dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
+ dev_priv->vbt.crt_ddc_pin = GMBUS_PIN_VGADDC;
/* Default to having backlight */
dev_priv->vbt.backlight.present = true;
@@ -1191,19 +1199,22 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
{ }
};
-static struct bdb_header *validate_vbt(char *base, size_t size,
- struct vbt_header *vbt,
- const char *source)
+static const struct bdb_header *validate_vbt(const void __iomem *_base,
+ size_t size,
+ const void __iomem *_vbt,
+ const char *source)
{
- size_t offset;
- struct bdb_header *bdb;
-
- if (vbt == NULL) {
- DRM_DEBUG_DRIVER("VBT signature missing\n");
- return NULL;
- }
+ /*
+ * This is the one place where we explicitly discard the address space
+ * (__iomem) of the BIOS/VBT. (And this will cause a sparse complaint.)
+ * From now on everything is based on 'base', and treated as regular
+ * memory.
+ */
+ const void *base = (const void *) _base;
+ size_t offset = _vbt - _base;
+ const struct vbt_header *vbt = base + offset;
+ const struct bdb_header *bdb;
- offset = (char *)vbt - base;
if (offset + sizeof(struct vbt_header) > size) {
DRM_DEBUG_DRIVER("VBT header incomplete\n");
return NULL;
@@ -1220,7 +1231,7 @@ static struct bdb_header *validate_vbt(char *base, size_t size,
return NULL;
}
- bdb = (struct bdb_header *)(base + offset);
+ bdb = base + offset;
if (offset + bdb->bdb_size > size) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return NULL;
@@ -1231,6 +1242,22 @@ static struct bdb_header *validate_vbt(char *base, size_t size,
return bdb;
}
+static const struct bdb_header *find_vbt(void __iomem *bios, size_t size)
+{
+ const struct bdb_header *bdb = NULL;
+ size_t i;
+
+ /* Scour memory looking for the VBT signature. */
+ for (i = 0; i + 4 < size; i++) {
+ if (ioread32(bios + i) == *((const u32 *) "$VBT")) {
+ bdb = validate_vbt(bios, size, bios + i, "PCI ROM");
+ break;
+ }
+ }
+
+ return bdb;
+}
+
/**
* intel_parse_bios - find VBT and initialize settings from the BIOS
* @dev: DRM device
@@ -1245,7 +1272,7 @@ intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
- struct bdb_header *bdb = NULL;
+ const struct bdb_header *bdb = NULL;
u8 __iomem *bios = NULL;
if (HAS_PCH_NOP(dev))
@@ -1255,27 +1282,17 @@ intel_parse_bios(struct drm_device *dev)
/* XXX Should this validation be moved to intel_opregion.c? */
if (!dmi_check_system(intel_no_opregion_vbt) && dev_priv->opregion.vbt)
- bdb = validate_vbt((char *)dev_priv->opregion.header, OPREGION_SIZE,
- (struct vbt_header *)dev_priv->opregion.vbt,
- "OpRegion");
+ bdb = validate_vbt(dev_priv->opregion.header, OPREGION_SIZE,
+ dev_priv->opregion.vbt, "OpRegion");
if (bdb == NULL) {
- size_t i, size;
+ size_t size;
bios = pci_map_rom(pdev, &size);
if (!bios)
return -1;
- /* Scour memory looking for the VBT signature */
- for (i = 0; i + 4 < size; i++) {
- if (memcmp(bios + i, "$VBT", 4) == 0) {
- bdb = validate_vbt(bios, size,
- (struct vbt_header *)(bios + i),
- "PCI ROM");
- break;
- }
- }
-
+ bdb = find_vbt(bios, size);
if (!bdb) {
pci_unmap_rom(pdev, bios);
return -1;
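find_vbt() above scans the mapped PCI ROM one byte offset at a time, comparing a 32-bit load against the packed "$VBT" signature. A standalone user-space sketch of the same scan over a plain memory buffer (memcpy stands in for ioread32, since there is no io mapping here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Scan an image for the "$VBT" signature with one 32-bit compare per
 * byte offset, mirroring the loop structure of find_vbt(). */
static long find_vbt_offset(const uint8_t *image, size_t size)
{
	uint32_t sig, word;
	size_t i;

	memcpy(&sig, "$VBT", 4);	/* native-endian 32-bit signature */

	for (i = 0; i + 4 < size; i++) {
		memcpy(&word, image + i, 4);	/* unaligned-safe load */
		if (word == sig)
			return (long)i;
	}
	return -1;
}

int main(void)
{
	const uint8_t rom[] = "xxxx$VBT....rest of BIOS image";

	printf("VBT at offset %ld\n", find_vbt_offset(rom, sizeof(rom)));
	return 0;
}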
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 6afd5be33367..af0b47652752 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -277,9 +277,9 @@ struct bdb_general_definitions {
* And the device num is related with the size of general definition
* block. It is obtained by using the following formula:
* number = (block_size - sizeof(bdb_general_definitions))/
- * sizeof(child_device_config);
+ * defs->child_dev_size;
*/
- union child_device_config devices[0];
+ uint8_t devices[0];
} __packed;
/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
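Turning devices[] into a byte array lets the parser honour the child_dev_size advertised by the VBT instead of assuming sizeof(child_device_config). A standalone sketch of that indexing (the structures are simplified stand-ins, keeping only what is needed to show the stride-based walk that child_device_ptr() implements):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the VBT structures. */
struct child_config { uint16_t device_type; uint8_t pad[30]; };	/* 32 bytes */

struct general_defs {
	uint8_t child_dev_size;		/* size of one child entry in this VBT */
	uint8_t devices[];		/* child_dev_size * N bytes follow */
};

static const void *child_device_ptr(const struct general_defs *defs, int i)
{
	return &defs->devices[i * defs->child_dev_size];
}

int main(void)
{
	/* Two 33-byte entries packed back to back: the VBT entry may be larger
	 * than the struct the driver knows, so only sizeof(child) is copied. */
	uint8_t blob[sizeof(struct general_defs) + 2 * 33] = { 0 };
	struct general_defs *defs = (struct general_defs *)blob;
	uint16_t t0 = 2, t1 = 6;
	int block_size = sizeof(blob);
	int i, num;

	defs->child_dev_size = 33;
	memcpy(&blob[sizeof(*defs) + 0 * 33], &t0, sizeof(t0));
	memcpy(&blob[sizeof(*defs) + 1 * 33], &t1, sizeof(t1));

	num = (block_size - sizeof(*defs)) / defs->child_dev_size;
	for (i = 0; i < num; i++) {
		struct child_config child;

		memcpy(&child, child_device_ptr(defs, i), sizeof(child));
		printf("child %d: device_type %u\n", i, child.device_type);
	}
	return 0;
}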
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 515d7123785d..521af2c069cb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -207,6 +207,14 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
+static void pch_disable_crt(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_crt(struct intel_encoder *encoder)
+{
+ intel_disable_crt(encoder);
+}
static void hsw_crt_post_disable(struct intel_encoder *encoder)
{
@@ -747,7 +755,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
goto out;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
- i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
+ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPB);
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
@@ -888,7 +896,12 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
- crt->base.disable = intel_disable_crt;
+ if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev)) {
+ crt->base.disable = pch_disable_crt;
+ crt->base.post_disable = pch_post_disable_crt;
+ } else {
+ crt->base.disable = intel_disable_crt;
+ }
crt->base.enable = intel_enable_crt;
if (I915_HAS_HOTPLUG(dev))
crt->base.hpd_pin = HPD_CRT;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
new file mode 100644
index 000000000000..bcb41e61877d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+/**
+ * DOC: csr support for dmc
+ *
+ * Display Context Save and Restore (CSR) firmware support was added from gen9
+ * onwards to drive the newly added DMC (Display Microcontroller) in the display
+ * engine, which saves and restores the display engine state when it enters a
+ * low-power state and comes back to normal.
+ *
+ * Firmware loading status will be one of the below states: FW_UNINITIALIZED,
+ * FW_LOADED, FW_FAILED.
+ *
+ * Once the firmware is written into the registers the status is moved from
+ * FW_UNINITIALIZED to FW_LOADED; on any erroneous condition the status is
+ * moved to FW_FAILED.
+ */
+
+#define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+
+MODULE_FIRMWARE(I915_CSR_SKL);
+
+/*
+* SKL CSR registers for DC5 and DC6
+*/
+#define CSR_PROGRAM_BASE 0x80000
+#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define CSR_HTP_ADDR_SKL 0x00500034
+#define CSR_SSP_BASE 0x8F074
+#define CSR_HTP_SKL 0x8F004
+#define CSR_LAST_WRITE 0x8F034
+#define CSR_LAST_WRITE_VALUE 0xc003b400
+/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define CSR_MAX_FW_SIZE 0x2FFF
+#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define CSR_MMIO_START_RANGE 0x80000
+#define CSR_MMIO_END_RANGE 0x8FFFF
+
+struct intel_css_header {
+ /* 0x09 for DMC */
+ uint32_t module_type;
+
+ /* Includes the DMC specific header in dwords */
+ uint32_t header_len;
+
+ /* value is always 0x10000 */
+ uint32_t header_ver;
+
+ /* Not used */
+ uint32_t module_id;
+
+ /* Not used */
+ uint32_t module_vendor;
+
+ /* in YYYYMMDD format */
+ uint32_t date;
+
+ /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
+ uint32_t size;
+
+ /* Not used */
+ uint32_t key_size;
+
+ /* Not used */
+ uint32_t modulus_size;
+
+ /* Not used */
+ uint32_t exponent_size;
+
+ /* Not used */
+ uint32_t reserved1[12];
+
+ /* Major Minor */
+ uint32_t version;
+
+ /* Not used */
+ uint32_t reserved2[8];
+
+ /* Not used */
+ uint32_t kernel_header_info;
+} __packed;
+
+struct intel_fw_info {
+ uint16_t reserved1;
+
+ /* Stepping (A, B, C, ..., *). * is a wildcard */
+ char stepping;
+
+ /* Sub-stepping (0, 1, ..., *). * is a wildcard */
+ char substepping;
+
+ uint32_t offset;
+ uint32_t reserved2;
+} __packed;
+
+struct intel_package_header {
+ /* DMC container header length in dwords */
+ unsigned char header_len;
+
+ /* value is always 0x01 */
+ unsigned char header_ver;
+
+ unsigned char reserved[10];
+
+ /* Number of valid entries in the FWInfo array below */
+ uint32_t num_entries;
+
+ struct intel_fw_info fw_info[20];
+} __packed;
+
+struct intel_dmc_header {
+ /* value is always 0x40403E3E */
+ uint32_t signature;
+
+ /* DMC binary header length */
+ unsigned char header_len;
+
+ /* 0x01 */
+ unsigned char header_ver;
+
+ /* Reserved */
+ uint16_t dmcc_ver;
+
+ /* Major, Minor */
+ uint32_t project;
+
+ /* Firmware program size (excluding header) in dwords */
+ uint32_t fw_size;
+
+ /* Major Minor version */
+ uint32_t fw_version;
+
+ /* Number of valid MMIO cycles present. */
+ uint32_t mmio_count;
+
+ /* MMIO address */
+ uint32_t mmioaddr[8];
+
+ /* MMIO data */
+ uint32_t mmiodata[8];
+
+ /* FW filename */
+ unsigned char dfile[32];
+
+ uint32_t reserved1[2];
+} __packed;
+
+struct stepping_info {
+ char stepping;
+ char substepping;
+};
+
+static const struct stepping_info skl_stepping_info[] = {
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'}
+};
+
+static char intel_get_stepping(struct drm_device *dev)
+{
+ if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(skl_stepping_info)))
+ return skl_stepping_info[dev->pdev->revision].stepping;
+ else
+ return -ENODATA;
+}
+
+static char intel_get_substepping(struct drm_device *dev)
+{
+ if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(skl_stepping_info)))
+ return skl_stepping_info[dev->pdev->revision].substepping;
+ else
+ return -ENODATA;
+}
+
+/**
+ * intel_csr_load_status_get() - get the firmware loading status.
+ * @dev_priv: i915 device.
+ *
+ * This function returns the current firmware loading status.
+ *
+ * Return: Firmware loading status.
+ */
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
+{
+ enum csr_state state;
+
+ mutex_lock(&dev_priv->csr_lock);
+ state = dev_priv->csr.state;
+ mutex_unlock(&dev_priv->csr_lock);
+
+ return state;
+}
+
+/**
+ * intel_csr_load_status_set() - set the firmware loading status.
+ * @dev_priv: i915 device.
+ * @state: enumeration of firmware loading status.
+ *
+ * Set the firmware loading status.
+ */
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+ enum csr_state state)
+{
+ mutex_lock(&dev_priv->csr_lock);
+ dev_priv->csr.state = state;
+ mutex_unlock(&dev_priv->csr_lock);
+}
+
+/**
+ * intel_csr_load_program() - write the firmware from memory to register.
+ * @dev: drm device.
+ *
+ * CSR firmware is read from a .bin file and kept in internal memory once.
+ * Every time the display comes back from a low-power state this function is
+ * called to copy the firmware from internal memory to the registers.
+ */
+void intel_csr_load_program(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ __be32 *payload = dev_priv->csr.dmc_payload;
+ uint32_t i, fw_size;
+
+ if (!IS_GEN9(dev)) {
+ DRM_ERROR("No CSR support available for this platform\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->csr_lock);
+ fw_size = dev_priv->csr.dmc_fw_size;
+ for (i = 0; i < fw_size; i++)
+ I915_WRITE(CSR_PROGRAM_BASE + i * 4,
+ (u32 __force)payload[i]);
+
+ for (i = 0; i < dev_priv->csr.mmio_count; i++) {
+ I915_WRITE(dev_priv->csr.mmioaddr[i],
+ dev_priv->csr.mmiodata[i]);
+ }
+
+ dev_priv->csr.state = FW_LOADED;
+ mutex_unlock(&dev_priv->csr_lock);
+}
+
+static void finish_csr_load(const struct firmware *fw, void *context)
+{
+ struct drm_i915_private *dev_priv = context;
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_css_header *css_header;
+ struct intel_package_header *package_header;
+ struct intel_dmc_header *dmc_header;
+ struct intel_csr *csr = &dev_priv->csr;
+ char stepping = intel_get_stepping(dev);
+ char substepping = intel_get_substepping(dev);
+ uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+ uint32_t i;
+ __be32 *dmc_payload;
+ bool fw_loaded = false;
+
+ if (!fw) {
+ i915_firmware_load_error_print(csr->fw_path, 0);
+ goto out;
+ }
+
+ if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
+ DRM_ERROR("Unknown stepping info, firmware loading failed\n");
+ goto out;
+ }
+
+ /* Extract CSS Header information */
+ css_header = (struct intel_css_header *)fw->data;
+ if (sizeof(struct intel_css_header) !=
+ (css_header->header_len * 4)) {
+ DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+ (css_header->header_len * 4));
+ goto out;
+ }
+ readcount += sizeof(struct intel_css_header);
+
+ /* Extract Package Header information */
+ package_header = (struct intel_package_header *)
+ &fw->data[readcount];
+ if (sizeof(struct intel_package_header) !=
+ (package_header->header_len * 4)) {
+ DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+ (package_header->header_len * 4));
+ goto out;
+ }
+ readcount += sizeof(struct intel_package_header);
+
+ /* Search for dmc_offset to find the firmware binary. */
+ for (i = 0; i < package_header->num_entries; i++) {
+ if (package_header->fw_info[i].substepping == '*' &&
+ stepping == package_header->fw_info[i].stepping) {
+ dmc_offset = package_header->fw_info[i].offset;
+ break;
+ } else if (stepping == package_header->fw_info[i].stepping &&
+ substepping == package_header->fw_info[i].substepping) {
+ dmc_offset = package_header->fw_info[i].offset;
+ break;
+ } else if (package_header->fw_info[i].stepping == '*' &&
+ package_header->fw_info[i].substepping == '*')
+ dmc_offset = package_header->fw_info[i].offset;
+ }
+ if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
+ DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+ goto out;
+ }
+ readcount += dmc_offset;
+
+ /* Extract dmc_header information. */
+ dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
+ if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
+ DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+ (dmc_header->header_len));
+ goto out;
+ }
+ readcount += sizeof(struct intel_dmc_header);
+
+ /* Cache the dmc header info. */
+ if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
+ DRM_ERROR("Firmware has wrong mmio count %u\n",
+ dmc_header->mmio_count);
+ goto out;
+ }
+ csr->mmio_count = dmc_header->mmio_count;
+ for (i = 0; i < dmc_header->mmio_count; i++) {
+ if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
+ dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
+ DRM_ERROR("Firmware has wrong mmio address 0x%x\n",
+ dmc_header->mmioaddr[i]);
+ goto out;
+ }
+ csr->mmioaddr[i] = dmc_header->mmioaddr[i];
+ csr->mmiodata[i] = dmc_header->mmiodata[i];
+ }
+
+ /* fw_size is in dwords, so multiply by 4 to convert to bytes. */
+ nbytes = dmc_header->fw_size * 4;
+ if (nbytes > CSR_MAX_FW_SIZE) {
+ DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+ goto out;
+ }
+ csr->dmc_fw_size = dmc_header->fw_size;
+
+ csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
+ if (!csr->dmc_payload) {
+ DRM_ERROR("Memory allocation failed for dmc payload\n");
+ goto out;
+ }
+
+ dmc_payload = csr->dmc_payload;
+ for (i = 0; i < dmc_header->fw_size; i++) {
+ uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
+ /*
+ * The firmware payload is an array of 32 bit words stored in
+ * little-endian format in the firmware image and programmed
+ * as 32 bit big-endian format to memory.
+ */
+ dmc_payload[i] = cpu_to_be32(*tmp);
+ }
+
+ /* load csr program during system boot, as needed for DC states */
+ intel_csr_load_program(dev);
+ fw_loaded = true;
+
+out:
+ if (fw_loaded)
+ intel_runtime_pm_put(dev_priv);
+ else
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+
+ release_firmware(fw);
+}
+
+/**
+ * intel_csr_ucode_init() - initialize the firmware loading.
+ * @dev: drm device.
+ *
+ * This function is called while loading the display driver to read the
+ * firmware from a .bin file and copy it into internal memory.
+ */
+void intel_csr_ucode_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_csr *csr = &dev_priv->csr;
+ int ret;
+
+ if (!HAS_CSR(dev))
+ return;
+
+ if (IS_SKYLAKE(dev))
+ csr->fw_path = I915_CSR_SKL;
+ else {
+ DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ return;
+ }
+
+ /*
+ * Obtain a runtime pm reference, until CSR is loaded,
+ * to avoid entering runtime-suspend.
+ */
+ intel_runtime_pm_get(dev_priv);
+
+ /* CSR supported for platform, load firmware */
+ ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
+ &dev_priv->dev->pdev->dev,
+ GFP_KERNEL, dev_priv,
+ finish_csr_load);
+ if (ret) {
+ i915_firmware_load_error_print(csr->fw_path, ret);
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ }
+}
+
+/**
+ * intel_csr_ucode_fini() - unload the CSR firmware.
+ * @dev: drm device.
+ *
+ * Firmware unloading includes freeing the internal memory and resetting the
+ * firmware loading status.
+ */
+void intel_csr_ucode_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_CSR(dev))
+ return;
+
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ kfree(dev_priv->csr.dmc_payload);
+}
+
+void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+ WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+ WARN(!I915_READ(CSR_PROGRAM_BASE),
+ "CSR program storage start is NULL\n");
+ WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+ WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
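finish_csr_load() picks the firmware blob for the running stepping out of the package header's fw_info table, where '*' acts as a wildcard. Below is a condensed standalone version of that selection logic (not the driver code; the structure is trimmed to the fields used here, and an exact or "stepping + '*'" match wins over the generic '*'/'*' fallback):

#include <stdint.h>
#include <stdio.h>

struct fw_info { char stepping, substepping; uint32_t offset; };

#define FW_OFFSET_INVALID 0xFFFFFFFFu

/* Pick the blob offset for a given stepping/substepping pair. */
static uint32_t pick_offset(const struct fw_info *info, int n,
			    char step, char substep)
{
	uint32_t offset = FW_OFFSET_INVALID;
	int i;

	for (i = 0; i < n; i++) {
		if (info[i].stepping == step &&
		    (info[i].substepping == substep ||
		     info[i].substepping == '*'))
			return info[i].offset;	/* specific match wins */
		if (info[i].stepping == '*' && info[i].substepping == '*')
			offset = info[i].offset;	/* generic fallback */
	}
	return offset;
}

int main(void)
{
	const struct fw_info table[] = {
		{ '*', '*', 0x000 },
		{ 'B', '0', 0x400 },
	};

	printf("A0 -> %#x, B0 -> %#x\n",
	       (unsigned)pick_offset(table, 2, 'A', '0'),
	       (unsigned)pick_offset(table, 2, 'B', '0'));
	return 0;
}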
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 3eb0efc2dd0d..cacb07b7a8f1 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -155,33 +155,100 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = {
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
- /* Idx NT mV T mV db */
- { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */
+ { 0x00000018, 0x000000ac },
+ { 0x00005012, 0x0000009d },
+ { 0x00007011, 0x00000088 },
+ { 0x00000018, 0x000000a1 },
+ { 0x00000018, 0x00000098 },
+ { 0x00004013, 0x00000088 },
+ { 0x00006012, 0x00000087 },
+ { 0x00000018, 0x000000df },
+ { 0x00003015, 0x00000087 },
+ { 0x00003015, 0x000000c7 },
+ { 0x00000018, 0x000000c7 },
};
-enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+struct bxt_ddi_buf_trans {
+ u32 margin; /* swing value */
+ u32 scale; /* scale value */
+ u32 enable; /* scale enable */
+ u32 deemphasis;
+ bool default_index; /* true if the entry represents default value */
+};
+
+/* BSpec does not define separate vswing/pre-emphasis values for eDP.
+ * Using DP values for eDP as well.
+ */
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
+ /* Idx NT mV diff db */
+ { 52, 0, 0, 128, true }, /* 0: 400 0 */
+ { 78, 0, 0, 85, false }, /* 1: 400 3.5 */
+ { 104, 0, 0, 64, false }, /* 2: 400 6 */
+ { 154, 0, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0, 0, 128, false }, /* 4: 600 0 */
+ { 116, 0, 0, 85, false }, /* 5: 600 3.5 */
+ { 154, 0, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0, 0, 128, false }, /* 7: 800 0 */
+ { 154, 0, 0, 85, false }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
+};
+
+/* BSpec has 2 recommended values - entries 0 and 8.
+ * Using the entry with higher vswing.
+ */
+static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
+ /* Idx NT mV diff db */
+ { 52, 0, 0, 128, false }, /* 0: 400 0 */
+ { 52, 0, 0, 85, false }, /* 1: 400 3.5 */
+ { 52, 0, 0, 64, false }, /* 2: 400 6 */
+ { 42, 0, 0, 43, false }, /* 3: 400 9.5 */
+ { 77, 0, 0, 128, false }, /* 4: 600 0 */
+ { 77, 0, 0, 85, false }, /* 5: 600 3.5 */
+ { 77, 0, 0, 64, false }, /* 6: 600 6 */
+ { 102, 0, 0, 128, false }, /* 7: 800 0 */
+ { 102, 0, 0, 85, false }, /* 8: 800 3.5 */
+ { 154, 0x9A, 1, 128, true }, /* 9: 1200 0 */
+};
+
+static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
+ struct intel_digital_port **dig_port,
+ enum port *port)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
if (type == INTEL_OUTPUT_DP_MST) {
- struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
- return intel_dig_port->port;
+ *dig_port = enc_to_mst(encoder)->primary;
+ *port = (*dig_port)->port;
} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- return intel_dig_port->port;
-
+ *dig_port = enc_to_dig_port(encoder);
+ *port = (*dig_port)->port;
} else if (type == INTEL_OUTPUT_ANALOG) {
- return PORT_E;
-
+ *dig_port = NULL;
+ *port = PORT_E;
} else {
DRM_ERROR("Invalid DDI encoder type %d\n", type);
BUG();
}
}
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+{
+ struct intel_digital_port *dig_port;
+ enum port port;
+
+ ddi_get_encoder_port(intel_encoder, &dig_port, &port);
+
+ return port;
+}
+
+static bool
+intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
+{
+ return intel_dig_port->hdmi.hdmi_reg;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. The buffer values are different for FDI and DP modes,
@@ -189,7 +256,8 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
+ bool supports_hdmi)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
@@ -202,11 +270,19 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
- if (IS_SKYLAKE(dev)) {
+ if (IS_BROXTON(dev)) {
+ if (!supports_hdmi)
+ return;
+
+ /* Vswing programming for HDMI */
+ bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+ INTEL_OUTPUT_HDMI);
+ return;
+ } else if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- if (dev_priv->vbt.edp_low_vswing) {
+ if (dev_priv->edp_low_vswing) {
ddi_translations_edp = skl_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else {
@@ -214,16 +290,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
}
- /*
- * On SKL, the recommendation from the hw team is to always use
- * a certain type of level shifter (and thus the corresponding
- * 800mV+2dB entry). Given that's the only validated entry, we
- * override what is in the VBT, at least until further notice.
- */
- hdmi_level = 0;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
- hdmi_default_entry = 0;
+ hdmi_default_entry = 7;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
@@ -290,6 +359,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
reg += 4;
}
+ if (!supports_hdmi)
+ return;
+
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
@@ -307,13 +379,28 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
*/
void intel_prepare_ddi(struct drm_device *dev)
{
- int port;
+ struct intel_encoder *intel_encoder;
+ bool visited[I915_MAX_PORTS] = { 0, };
if (!HAS_DDI(dev))
return;
- for (port = PORT_A; port <= PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port);
+ for_each_intel_encoder(dev, intel_encoder) {
+ struct intel_digital_port *intel_dig_port;
+ enum port port;
+ bool supports_hdmi;
+
+ ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
+
+ if (visited[port])
+ continue;
+
+ supports_hdmi = intel_dig_port &&
+ intel_dig_port_supports_hdmi(intel_dig_port);
+
+ intel_prepare_ddi_buffers(dev, port, supports_hdmi);
+ visited[port] = true;
+ }
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -322,7 +409,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
uint32_t reg = DDI_BUF_CTL(port);
int i;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 16; i++) {
udelay(1);
if (I915_READ(reg) & DDI_BUF_IS_IDLE)
return;
@@ -491,23 +578,24 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
-static struct intel_encoder *
+struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *ret = NULL;
struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int num_encoders = 0;
int i;
state = crtc_state->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i] ||
- state->connector_states[i]->crtc != crtc_state->base.crtc)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
- ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ ret = to_intel_encoder(connector_state->best_encoder);
num_encoders++;
}
@@ -783,26 +871,26 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else {
- link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
- link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+ link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
switch (link_clock) {
- case DPLL_CRTL1_LINK_RATE_810:
+ case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
break;
- case DPLL_CRTL1_LINK_RATE_1080:
+ case DPLL_CTRL1_LINK_RATE_1080:
link_clock = 108000;
break;
- case DPLL_CRTL1_LINK_RATE_1350:
+ case DPLL_CTRL1_LINK_RATE_1350:
link_clock = 135000;
break;
- case DPLL_CRTL1_LINK_RATE_1620:
+ case DPLL_CTRL1_LINK_RATE_1620:
link_clock = 162000;
break;
- case DPLL_CRTL1_LINK_RATE_2160:
+ case DPLL_CTRL1_LINK_RATE_2160:
link_clock = 216000;
break;
- case DPLL_CRTL1_LINK_RATE_2700:
+ case DPLL_CTRL1_LINK_RATE_2700:
link_clock = 270000;
break;
default:
@@ -878,6 +966,32 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
+static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id dpll)
+{
+ /* FIXME formula not available in bspec */
+ return 0;
+}
+
+static void bxt_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ uint32_t dpll = port;
+
+ pipe_config->port_clock =
+ bxt_calc_pll_link(dev_priv, dpll);
+
+ if (pipe_config->has_dp_encoder)
+ pipe_config->base.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+ else
+ pipe_config->base.adjusted_mode.crtc_clock =
+ pipe_config->port_clock;
+}
+
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -885,8 +999,10 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else
+ else if (IS_SKYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_BROXTON(dev))
+ bxt_ddi_clock_get(encoder, pipe_config);
}
static void
@@ -971,6 +1087,9 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
crtc_state->dpll_hw_state.wrpll = val;
pll = intel_get_shared_dpll(intel_crtc, crtc_state);
@@ -1073,69 +1192,69 @@ found:
if (min_dco_index > 2) {
WARN(1, "No valid values found for the given pixel clock\n");
} else {
- wrpll_params->central_freq = dco_central_freq[min_dco_index];
+ wrpll_params->central_freq = dco_central_freq[min_dco_index];
- switch (dco_central_freq[min_dco_index]) {
- case 9600000000ULL:
+ switch (dco_central_freq[min_dco_index]) {
+ case 9600000000ULL:
wrpll_params->central_freq = 0;
break;
- case 9000000000ULL:
+ case 9000000000ULL:
wrpll_params->central_freq = 1;
break;
- case 8400000000ULL:
+ case 8400000000ULL:
wrpll_params->central_freq = 3;
- }
+ }
- switch (candidate_p0[min_dco_index]) {
- case 1:
+ switch (candidate_p0[min_dco_index]) {
+ case 1:
wrpll_params->pdiv = 0;
break;
- case 2:
+ case 2:
wrpll_params->pdiv = 1;
break;
- case 3:
+ case 3:
wrpll_params->pdiv = 2;
break;
- case 7:
+ case 7:
wrpll_params->pdiv = 4;
break;
- default:
+ default:
WARN(1, "Incorrect PDiv\n");
- }
+ }
- switch (candidate_p2[min_dco_index]) {
- case 5:
+ switch (candidate_p2[min_dco_index]) {
+ case 5:
wrpll_params->kdiv = 0;
break;
- case 2:
+ case 2:
wrpll_params->kdiv = 1;
break;
- case 3:
+ case 3:
wrpll_params->kdiv = 2;
break;
- case 1:
+ case 1:
wrpll_params->kdiv = 3;
break;
- default:
+ default:
WARN(1, "Incorrect KDiv\n");
- }
+ }
- wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
- wrpll_params->qdiv_mode =
+ wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+ wrpll_params->qdiv_mode =
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
- dco_freq = candidate_p0[min_dco_index] *
- candidate_p1[min_dco_index] *
- candidate_p2[min_dco_index] * afe_clock;
+ dco_freq = candidate_p0[min_dco_index] *
+ candidate_p1[min_dco_index] *
+ candidate_p2[min_dco_index] * afe_clock;
/*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
- */
- wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
- wrpll_params->dco_fraction =
- div_u64(((div_u64(dco_freq, 24) -
- wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+ wrpll_params->dco_fraction =
+ div_u64(((div_u64(dco_freq, 24) -
+ wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
}
}
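The dco_integer/dco_fraction pair above is just dco_freq / 24 MHz expressed as an integer part plus a 15-bit binary fraction. A minimal worked check, assuming a hypothetical dco_freq of 8.1 GHz (an illustrative value, not one taken from this patch):

	u64 dco_freq = 8100000000ULL;                            /* 8.1 GHz           */
	u32 dco_int  = div_u64(dco_freq, 24 * MHz(1));           /* 337               */
	u32 dco_frac = div_u64((div_u64(dco_freq, 24) -
				dco_int * MHz(1)) * 0x8000,
			       MHz(1));                          /* 0x4000, i.e. 0.5  */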
@@ -1179,13 +1298,13 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
case DP_LINK_BW_2_7:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
break;
case DP_LINK_BW_5_4:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
break;
}
@@ -1193,6 +1312,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
} else /* eDP */
return true;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
@@ -1210,6 +1332,161 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
return true;
}
+/* bxt clock parameters */
+struct bxt_clk_div {
+ uint32_t p1;
+ uint32_t p2;
+ uint32_t m2_int;
+ uint32_t m2_frac;
+ bool m2_frac_en;
+ uint32_t n;
+};
+
+/* pre-calculated values for DP linkrates */
+static struct bxt_clk_div bxt_dp_clk_val[7] = {
+ /* 162 */ {4, 2, 32, 1677722, 1, 1},
+ /* 270 */ {4, 1, 27, 0, 0, 1},
+ /* 540 */ {2, 1, 27, 0, 0, 1},
+ /* 216 */ {3, 2, 32, 1677722, 1, 1},
+ /* 243 */ {4, 1, 24, 1258291, 1, 1},
+ /* 324 */ {4, 1, 32, 1677722, 1, 1},
+ /* 432 */ {3, 1, 32, 1677722, 1, 1}
+};
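A quick sanity check on one row, assuming Broxton's 100 MHz non-SSC reference and the m1 = 2 constraint enforced below (the reference value is an assumption, not stated in this hunk). For the 270 MHz DP entry {p1 = 4, p2 = 1, m2_int = 27, n = 1}, the vco formula used later in this function gives:

	vco = 270000 * 10 / 2 * 4 * 1;     /* 5,400,000 kHz                        */
	                                   /* = 100,000 kHz * m1(2) * m2(27) / n(1) */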
+
+static bool
+bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *intel_encoder,
+ int clock)
+{
+ struct intel_shared_dpll *pll;
+ struct bxt_clk_div clk_div = {0};
+ int vco = 0;
+ uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+ uint32_t dcoampovr_en_h, dco_amp, lanestagger;
+
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ intel_clock_t best_clock;
+
+ /* Calculate HDMI div */
+ /*
+ * FIXME: tie the following calculation into
+ * i9xx_crtc_compute_clock
+ */
+ if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+ clock, pipe_name(intel_crtc->pipe));
+ return false;
+ }
+
+ clk_div.p1 = best_clock.p1;
+ clk_div.p2 = best_clock.p2;
+ WARN_ON(best_clock.m1 != 2);
+ clk_div.n = best_clock.n;
+ clk_div.m2_int = best_clock.m2 >> 22;
+ clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
+ clk_div.m2_frac_en = clk_div.m2_frac != 0;
+
+ vco = best_clock.vco;
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP) {
+ struct drm_encoder *encoder = &intel_encoder->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ switch (intel_dp->link_bw) {
+ case DP_LINK_BW_1_62:
+ clk_div = bxt_dp_clk_val[0];
+ break;
+ case DP_LINK_BW_2_7:
+ clk_div = bxt_dp_clk_val[1];
+ break;
+ case DP_LINK_BW_5_4:
+ clk_div = bxt_dp_clk_val[2];
+ break;
+ default:
+ clk_div = bxt_dp_clk_val[0];
+ DRM_ERROR("Unknown link rate\n");
+ }
+ vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
+ }
+
+ dco_amp = 15;
+ dcoampovr_en_h = 0;
+ if (vco >= 6200000 && vco <= 6480000) {
+ prop_coef = 4;
+ int_coef = 9;
+ gain_ctl = 3;
+ targ_cnt = 8;
+ } else if ((vco > 5400000 && vco < 6200000) ||
+ (vco >= 4800000 && vco < 5400000)) {
+ prop_coef = 5;
+ int_coef = 11;
+ gain_ctl = 3;
+ targ_cnt = 9;
+ if (vco >= 4800000 && vco < 5400000)
+ dcoampovr_en_h = 1;
+ } else if (vco == 5400000) {
+ prop_coef = 3;
+ int_coef = 8;
+ gain_ctl = 1;
+ targ_cnt = 9;
+ } else {
+ DRM_ERROR("Invalid VCO\n");
+ return false;
+ }
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (clock > 270000)
+ lanestagger = 0x18;
+ else if (clock > 135000)
+ lanestagger = 0x0d;
+ else if (clock > 67000)
+ lanestagger = 0x07;
+ else if (clock > 33000)
+ lanestagger = 0x04;
+ else
+ lanestagger = 0x02;
+
+ crtc_state->dpll_hw_state.ebb0 =
+ PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
+ crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
+ crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
+ crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+
+ if (clk_div.m2_frac_en)
+ crtc_state->dpll_hw_state.pll3 =
+ PORT_PLL_M2_FRAC_ENABLE;
+
+ crtc_state->dpll_hw_state.pll6 =
+ prop_coef | PORT_PLL_INT_COEFF(int_coef);
+ crtc_state->dpll_hw_state.pll6 |=
+ PORT_PLL_GAIN_CTL(gain_ctl);
+
+ crtc_state->dpll_hw_state.pll8 = targ_cnt;
+
+ if (dcoampovr_en_h)
+ crtc_state->dpll_hw_state.pll10 = PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ crtc_state->dpll_hw_state.pll10 |= PORT_PLL_DCO_AMP(dco_amp);
+
+ crtc_state->dpll_hw_state.pcsdw12 =
+ LANESTAGGER_STRAP_OVRD | lanestagger;
+
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+ return false;
+ }
+
+ /* shared DPLL id 0 is DPLL A */
+ crtc_state->ddi_pll_sel = pll->id;
+
+ return true;
+}
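In the HDMI path above, best_clock.m2 is carried with a 22-bit binary fraction, so the shift/mask simply splits it into integer and fractional parts. A small sketch (the 32.4 figure is illustrative; it matches the m2 value of the 162 MHz entry in the DP table above):

	u32 m2      = (32 << 22) | 1677722;   /* ~32.4 with a 22-bit fraction   */
	u32 m2_int  = m2 >> 22;               /* 32                             */
	u32 m2_frac = m2 & ((1 << 22) - 1);   /* 1677722 ≈ 0.4 * (1 << 22)      */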
+
/*
* Tries to find a *shared* PLL for the CRTC and store it in
* intel_crtc->ddi_pll_sel.
@@ -1228,6 +1505,9 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder, clock);
+ else if (IS_BROXTON(dev))
+ return bxt_ddi_pll_select(intel_crtc, crtc_state,
+ intel_encoder, clock);
else
return hsw_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder, clock);
@@ -1519,6 +1799,67 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
TRANS_CLK_SEL_DISABLED);
}
+void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct bxt_ddi_buf_trans *ddi_translations;
+ u32 n_entries, i;
+ uint32_t val;
+
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+ n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
+ ddi_translations = bxt_ddi_translations_dp;
+ } else if (type == INTEL_OUTPUT_HDMI) {
+ n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi);
+ ddi_translations = bxt_ddi_translations_hdmi;
+ } else {
+ DRM_DEBUG_KMS("Vswing programming not done for encoder %d\n",
+ type);
+ return;
+ }
+
+ /* Check if default value has to be used */
+ if (level >= n_entries ||
+ (type == INTEL_OUTPUT_HDMI && level == HDMI_LEVEL_SHIFT_UNKNOWN)) {
+ for (i = 0; i < n_entries; i++) {
+ if (ddi_translations[i].default_index) {
+ level = i;
+ break;
+ }
+ }
+ }
+
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers and we pick lanes 0/1 for that.
+ */
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
+ val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+
+ val = I915_READ(BXT_PORT_TX_DW2_LN0(port));
+ val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
+ val |= ddi_translations[level].margin << MARGIN_000_SHIFT |
+ ddi_translations[level].scale << UNIQ_TRANS_SCALE_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);
+
+ val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
+ val &= ~UNIQE_TRANGE_EN_METHOD;
+ if (ddi_translations[level].enable)
+ val |= UNIQE_TRANGE_EN_METHOD;
+ I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);
+
+ val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
+ val &= ~DE_EMPHASIS;
+ val |= ddi_translations[level].deemphasis << DEEMPH_SHIFT;
+ I915_WRITE(BXT_PORT_TX_DW4_GRP(port), val);
+
+ val = I915_READ(BXT_PORT_PCS_DW10_LN01(port));
+ val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
+ I915_WRITE(BXT_PORT_PCS_DW10_GRP(port), val);
+}
+
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -1527,6 +1868,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ int hdmi_level;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1548,7 +1890,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
- DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
@@ -1565,7 +1907,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
I915_WRITE(DPLL_CTRL2, val);
- } else {
+ } else if (INTEL_INFO(dev)->gen < 9) {
WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel);
}
@@ -1583,6 +1925,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ if (IS_BROXTON(dev)) {
+ hdmi_level = dev_priv->vbt.
+ ddi_port_info[port].hdmi_level_shift;
+ bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+ INTEL_OUTPUT_HDMI);
+ }
intel_hdmi->set_infoframes(encoder,
crtc->config->has_hdmi_sink,
&crtc->config->base.adjusted_mode);
@@ -1624,7 +1972,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (IS_SKYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
- else
+ else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
@@ -1689,105 +2037,6 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
-{
- uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
- uint32_t cdctl = I915_READ(CDCLK_CTL);
- uint32_t linkrate;
-
- if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
- WARN(1, "LCPLL1 not enabled\n");
- return 24000; /* 24MHz is the cd freq with NSSC ref */
- }
-
- if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
- return 540000;
-
- linkrate = (I915_READ(DPLL_CTRL1) &
- DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
-
- if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
- linkrate == DPLL_CRTL1_LINK_RATE_1080) {
- /* vco 8640 */
- switch (cdctl & CDCLK_FREQ_SEL_MASK) {
- case CDCLK_FREQ_450_432:
- return 432000;
- case CDCLK_FREQ_337_308:
- return 308570;
- case CDCLK_FREQ_675_617:
- return 617140;
- default:
- WARN(1, "Unknown cd freq selection\n");
- }
- } else {
- /* vco 8100 */
- switch (cdctl & CDCLK_FREQ_SEL_MASK) {
- case CDCLK_FREQ_450_432:
- return 450000;
- case CDCLK_FREQ_337_308:
- return 337500;
- case CDCLK_FREQ_675_617:
- return 675000;
- default:
- WARN(1, "Unknown cd freq selection\n");
- }
- }
-
- /* error case, do as if DPLL0 isn't enabled */
- return 24000;
-}
-
-static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
-{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
- if (lcpll & LCPLL_CD_SOURCE_FCLK)
- return 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_450)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_54O_BDW)
- return 540000;
- else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- return 337500;
- else
- return 675000;
-}
-
-static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
- if (lcpll & LCPLL_CD_SOURCE_FCLK)
- return 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_450)
- return 450000;
- else if (IS_HSW_ULT(dev))
- return 337500;
- else
- return 540000;
-}
-
-int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- if (IS_SKYLAKE(dev))
- return skl_get_cdclk_freq(dev_priv);
-
- if (IS_BROADWELL(dev))
- return bdw_get_cdclk_freq(dev_priv);
-
- /* Haswell */
- return hsw_get_cdclk_freq(dev_priv);
-}
-
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -1887,7 +2136,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
- DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
@@ -1963,22 +2212,325 @@ static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
}
}
+static void broxton_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ enum port port;
+ uint32_t val;
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val |= GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+
+ /* Considering 10ms timeout until BSpec is updated */
+ if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+ DRM_ERROR("timeout during PHY%d power on\n", phy);
+
+ for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A);
+ port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+ int lane;
+
+ for (lane = 0; lane < 4; lane++) {
+ val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+ /*
+ * Note that on CHV this flag is called UPAR, but has
+ * the same function.
+ */
+ val &= ~LATENCY_OPTIM;
+ if (lane != 1)
+ val |= LATENCY_OPTIM;
+
+ I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
+ }
+ }
+
+ /* Program PLL Rcomp code offset */
+ val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val &= ~IREF0RC_OFFSET_MASK;
+ val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+
+ val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val &= ~IREF1RC_OFFSET_MASK;
+ val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
+ I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+
+ /* Program power gating */
+ val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
+ SUS_CLK_CONFIG;
+ I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+
+ if (phy == DPIO_PHY0) {
+ val = I915_READ(BXT_PORT_CL2CM_DW6_BC);
+ val |= DW6_OLDO_DYN_PWR_DOWN_EN;
+ I915_WRITE(BXT_PORT_CL2CM_DW6_BC, val);
+ }
+
+ val = I915_READ(BXT_PORT_CL1CM_DW30(phy));
+ val &= ~OCL2_LDOFUSE_PWR_DIS;
+ /*
+ * On PHY1 disable power on the second channel, since no port is
+ * connected there. On PHY0 both channels have a port, so leave it
+ * enabled.
+ * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
+ * power down the second channel on PHY0 as well.
+ */
+ if (phy == DPIO_PHY1)
+ val |= OCL2_LDOFUSE_PWR_DIS;
+ I915_WRITE(BXT_PORT_CL1CM_DW30(phy), val);
+
+ if (phy == DPIO_PHY0) {
+ uint32_t grc_code;
+ /*
+ * PHY0 isn't connected to an RCOMP resistor so copy over
+ * the corresponding calibrated value from PHY1, and disable
+ * the automatic calibration on PHY0.
+ */
+ if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
+ 10))
+ DRM_ERROR("timeout waiting for PHY1 GRC\n");
+
+ val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
+ val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ grc_code = val << GRC_CODE_FAST_SHIFT |
+ val << GRC_CODE_SLOW_SHIFT |
+ val;
+ I915_WRITE(BXT_PORT_REF_DW6(DPIO_PHY0), grc_code);
+
+ val = I915_READ(BXT_PORT_REF_DW8(DPIO_PHY0));
+ val |= GRC_DIS | GRC_RDY_OVRD;
+ I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
+ }
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val |= COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+}
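Since PHY0 has no RCOMP resistor of its own, the block above replicates PHY1's calibrated code into the fast, slow and nominal fields of PHY0's REF_DW6 and then overrides calibration completion. A sketch with a made-up code value (0x1c is purely illustrative):

	grc_code = 0x1c << GRC_CODE_FAST_SHIFT |
		   0x1c << GRC_CODE_SLOW_SHIFT |
		   0x1c;                           /* same code in all three fields */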
+
+void broxton_ddi_phy_init(struct drm_device *dev)
+{
+ /* Enable PHY1 first since it provides Rcomp for PHY0 */
+ broxton_phy_init(dev->dev_private, DPIO_PHY1);
+ broxton_phy_init(dev->dev_private, DPIO_PHY0);
+}
+
+static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ uint32_t val;
+
+ val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val &= ~COMMON_RESET_DIS;
+ I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+}
+
+void broxton_ddi_phy_uninit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ broxton_phy_uninit(dev_priv, DPIO_PHY1);
+ broxton_phy_uninit(dev_priv, DPIO_PHY0);
+
+ /* FIXME: do this in broxton_phy_uninit per phy */
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
+}
+
+static const char * const bxt_ddi_pll_names[] = {
+ "PORT PLL A",
+ "PORT PLL B",
+ "PORT PLL C",
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t temp;
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_REF_SEL;
+ /* Non-SSC reference */
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ /* Disable 10 bit clock */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Write P1 & P2 */
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+ temp |= pll->config.hw_state.ebb0;
+ I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+
+ /* Write M2 integer */
+ temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp &= ~PORT_PLL_M2_MASK;
+ temp |= pll->config.hw_state.pll0;
+ I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+
+ /* Write N */
+ temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp &= ~PORT_PLL_N_MASK;
+ temp |= pll->config.hw_state.pll1;
+ I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+
+ /* Write M2 fraction */
+ temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp &= ~PORT_PLL_M2_FRAC_MASK;
+ temp |= pll->config.hw_state.pll2;
+ I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+
+ /* Write M2 fraction enable */
+ temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+ temp |= pll->config.hw_state.pll3;
+ I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+
+ /* Write coeff */
+ temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp &= ~PORT_PLL_PROP_COEFF_MASK;
+ temp &= ~PORT_PLL_INT_COEFF_MASK;
+ temp &= ~PORT_PLL_GAIN_CTL_MASK;
+ temp |= pll->config.hw_state.pll6;
+ I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+
+ /* Write calibration val */
+ temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp &= ~PORT_PLL_TARGET_CNT_MASK;
+ temp |= pll->config.hw_state.pll8;
+ I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+ temp |= (5 << 1);
+ I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+ temp &= ~PORT_PLL_DCO_AMP_MASK;
+ temp |= pll->config.hw_state.pll10;
+ I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+
+ /* Recalibrate with new settings */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp |= PORT_PLL_RECALIBRATE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ /* Enable 10 bit clock */
+ temp |= PORT_PLL_10BIT_CLK_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Enable PLL */
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+ if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_LOCK), 200))
+ DRM_ERROR("PLL %d not locked\n", port);
+
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers and we pick lanes 0/1 for that.
+ */
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp &= ~LANE_STAGGER_MASK;
+ temp &= ~LANESTAGGER_STRAP_OVRD;
+ temp |= pll->config.hw_state.pcsdw12;
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t temp;
+
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t val;
+
+ if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ if (!(val & PORT_PLL_ENABLE))
+ return false;
+
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ /*
+ * While we write to the group register to program all lanes at once we
+ * can read only lane registers. We configure all lanes the same way, so
+ * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+ */
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+
+ return true;
+}
+
+static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ dev_priv->num_shared_dpll = 3;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ bxt_ddi_pll_get_hw_state;
+ }
+}
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
+ int cdclk_freq;
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
+ else if (IS_BROXTON(dev))
+ bxt_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);
- DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
- intel_ddi_get_cdclk_freq(dev_priv));
+ cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_KMS("CDCLK running at %dKHz\n", cdclk_freq);
if (IS_SKYLAKE(dev)) {
+ dev_priv->skl_boot_cdclk = cdclk_freq;
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
+ else
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+ } else if (IS_BROXTON(dev)) {
+ broxton_init_cdclk(dev);
+ broxton_ddi_phy_init(dev);
} else {
/*
* The LCPLL register should be turned on by the BIOS. For now
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0f3cbc87474..dcb1d25d6f05 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,29 +45,33 @@
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
-/* Primary plane formats supported by all gen */
-#define COMMON_PRIMARY_FORMATS \
- DRM_FORMAT_C8, \
- DRM_FORMAT_RGB565, \
- DRM_FORMAT_XRGB8888, \
- DRM_FORMAT_ARGB8888
-
/* Primary plane formats for gen <= 3 */
-static const uint32_t intel_primary_formats_gen2[] = {
- COMMON_PRIMARY_FORMATS,
+static const uint32_t i8xx_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB8888,
};
/* Primary plane formats for gen >= 4 */
-static const uint32_t intel_primary_formats_gen4[] = {
- COMMON_PRIMARY_FORMATS, \
+static const uint32_t i965_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+};
+
+static const uint32_t skl_primary_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_ABGR2101010,
};
/* Cursor formats */
@@ -82,9 +86,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb,
- struct drm_atomic_state *state);
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_atomic_state *state,
+ bool force_restore);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -103,6 +107,12 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *crtc);
static void intel_finish_crtc_commit(struct drm_crtc *crtc);
+static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
+static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
+ int num_connectors);
+static void intel_crtc_enable_planes(struct drm_crtc *crtc);
+static void intel_crtc_disable_planes(struct drm_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -400,6 +410,18 @@ static const intel_limit_t intel_limits_chv = {
.p2 = { .p2_slow = 1, .p2_fast = 14 },
};
+static const intel_limit_t intel_limits_bxt = {
+ /* FIXME: find real dot limits */
+ .dot = { .min = 0, .max = INT_MAX },
+ .vco = { .min = 4800000, .max = 6480000 },
+ .n = { .min = 1, .max = 1 },
+ .m1 = { .min = 2, .max = 2 },
+ /* FIXME: find real m2 limits */
+ .m2 = { .min = 2 << 22, .max = 255 << 22 },
+ .p1 = { .min = 2, .max = 4 },
+ .p2 = { .p2_slow = 1, .p2_fast = 20 },
+};
+
static void vlv_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m1 * clock->m2;
@@ -435,15 +457,12 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
int type)
{
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int i, num_connectors = 0;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -511,7 +530,9 @@ intel_limit(struct intel_crtc_state *crtc_state, int refclk)
struct drm_device *dev = crtc_state->base.crtc->dev;
const intel_limit_t *limit;
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROXTON(dev))
+ limit = &intel_limits_bxt;
+ else if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc_state, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc_state);
@@ -596,11 +617,11 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid("m1 out of range\n");
- if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+ if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
- if (!IS_VALLEYVIEW(dev)) {
+ if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -953,6 +974,15 @@ chv_find_best_dpll(const intel_limit_t *limit,
return found;
}
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+ intel_clock_t *best_clock)
+{
+ int refclk = i9xx_get_refclk(crtc_state, 0);
+
+ return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+ target_clock, refclk, NULL, best_clock);
+}
+
bool intel_crtc_active(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -1111,9 +1141,9 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
u32 val;
bool cur_state;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
cur_state = val & DSI_PLL_VCO_EN;
I915_STATE_WARN(cur_state != state,
@@ -1632,13 +1662,15 @@ static void chv_enable_pll(struct intel_crtc *crtc,
BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* Enable back the 10bit clock to display controller */
tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+ mutex_unlock(&dev_priv->sb_lock);
+
/*
* Need to wait > 100ns between dclkp clock enable bit and PLL enable.
*/
@@ -1654,8 +1686,6 @@ static void chv_enable_pll(struct intel_crtc *crtc,
/* not sure when this should be written */
I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
-
- mutex_unlock(&dev_priv->dpio_lock);
}
static int intel_num_dvo_pipes(struct drm_device *dev)
@@ -1797,7 +1827,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* Disable 10bit clock to display controller */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
@@ -1815,11 +1845,12 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
}
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport)
+ struct intel_digital_port *dport,
+ unsigned int expected_mask)
{
u32 port_mask;
int dpll_reg;
@@ -1832,6 +1863,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
dpll_reg = DPLL(0);
+ expected_mask <<= 4;
break;
case PORT_D:
port_mask = DPLL_PORTD_READY_MASK;
@@ -1841,9 +1873,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
- WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- port_name(dport->port), I915_READ(dpll_reg));
+ if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+ WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
+ port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
@@ -2104,7 +2136,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (!HAS_PCH_SPLIT(dev_priv->dev))
+ if (HAS_GMCH_DISPLAY(dev_priv->dev))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -2179,20 +2211,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
intel_wait_for_pipe_off(crtc);
}
-/*
- * Plane regs are double buffered, going from enabled->disabled needs a
- * trigger in order to latch. The display address reg provides this.
- */
-void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
- enum plane plane)
-{
- struct drm_device *dev = dev_priv->dev;
- u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
-
- I915_WRITE(reg, I915_READ(reg));
- POSTING_READ(reg);
-}
-
/**
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
* @plane: plane to be enabled
@@ -2209,45 +2227,7 @@ static void intel_enable_primary_hw_plane(struct drm_plane *plane,
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-
- if (intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-}
-
-/**
- * intel_disable_primary_hw_plane - disable the primary hardware plane
- * @plane: plane to be disabled
- * @crtc: crtc for the plane
- *
- * Disable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_disable_primary_hw_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (WARN_ON(!intel_crtc->active))
- return;
-
- if (!intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = false;
+ to_intel_plane_state(plane->state)->visible = true;
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
@@ -2338,13 +2318,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
info->pitch = fb->pitches[0];
info->fb_modifier = fb->modifier[0];
- if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
- info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
- DRM_DEBUG_KMS(
- "Y or Yf tiling is needed for 90/270 rotation!\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -2667,6 +2640,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
+ bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
@@ -2674,7 +2649,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
u32 reg = DSPCNTR(plane);
int pixel_size;
- if (!intel_crtc->primary_enabled) {
+ if (!visible || !fb) {
I915_WRITE(reg, 0);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
@@ -2718,26 +2693,21 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_8BPP;
break;
case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
@@ -2796,6 +2766,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
+ bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
@@ -2803,7 +2775,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
u32 reg = DSPCNTR(plane);
int pixel_size;
- if (!intel_crtc->primary_enabled) {
+ if (!visible || !fb) {
I915_WRITE(reg, 0);
I915_WRITE(DSPSURF(plane), 0);
POSTING_READ(reg);
@@ -2831,19 +2803,15 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
dspcntr |= DISPPLANE_BGRX565;
break;
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
@@ -2936,92 +2904,221 @@ unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
return i915_gem_obj_ggtt_offset_view(obj, view);
}
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+/*
+ * This function detaches (aka. unbinds) unused scalers in hardware
+ */
+void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
- int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride_div;
- unsigned long surf_addr;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ struct intel_crtc_scaler_state *scaler_state;
+ int i;
- if (!intel_crtc->primary_enabled) {
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_CTL(pipe, 0));
+ if (!intel_crtc || !intel_crtc->config)
return;
- }
- plane_ctl = PLANE_CTL_ENABLE |
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE;
+ dev = intel_crtc->base.dev;
+ dev_priv = dev->dev_private;
+ scaler_state = &intel_crtc->config->scaler_state;
- switch (fb->pixel_format) {
+ /* loop through and disable scalers that aren't in use */
+ for (i = 0; i < intel_crtc->num_scalers; i++) {
+ if (!scaler_state->scalers[i].in_use) {
+ I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
+ I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
+ I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
+ DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
+ intel_crtc->base.base.id, intel_crtc->pipe, i);
+ }
+ }
+}
+
+u32 skl_plane_ctl_format(uint32_t pixel_format)
+{
+ switch (pixel_format) {
+ case DRM_FORMAT_C8:
+ return PLANE_CTL_FORMAT_INDEXED;
case DRM_FORMAT_RGB565:
- plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
- break;
- case DRM_FORMAT_XRGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- break;
- case DRM_FORMAT_ARGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
+ return PLANE_CTL_FORMAT_RGB_565;
case DRM_FORMAT_XBGR8888:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- break;
+ return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB8888:
+ return PLANE_CTL_FORMAT_XRGB_8888;
+ /*
+ * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
case DRM_FORMAT_ABGR8888:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
+ return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_FORMAT_ARGB8888:
+ return PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
case DRM_FORMAT_XRGB2101010:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
- break;
+ return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
- break;
+ return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_YUYV:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ case DRM_FORMAT_YVYU:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ case DRM_FORMAT_UYVY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ case DRM_FORMAT_VYUY:
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
default:
- BUG();
+ MISSING_CASE(pixel_format);
}
- switch (fb->modifier[0]) {
+ return 0;
+}
+
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+{
+ switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
- plane_ctl |= PLANE_CTL_TILED_X;
- break;
+ return PLANE_CTL_TILED_X;
case I915_FORMAT_MOD_Y_TILED:
- plane_ctl |= PLANE_CTL_TILED_Y;
- break;
+ return PLANE_CTL_TILED_Y;
case I915_FORMAT_MOD_Yf_TILED:
- plane_ctl |= PLANE_CTL_TILED_YF;
+ return PLANE_CTL_TILED_YF;
+ default:
+ MISSING_CASE(fb_modifier);
+ }
+
+ return 0;
+}
+
+u32 skl_plane_ctl_rotation(unsigned int rotation)
+{
+ switch (rotation) {
+ case BIT(DRM_ROTATE_0):
break;
+ /*
+ * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
+ * while i915 HW rotation is clockwise; that's why the two are swapped here.
+ */
+ case BIT(DRM_ROTATE_90):
+ return PLANE_CTL_ROTATE_270;
+ case BIT(DRM_ROTATE_180):
+ return PLANE_CTL_ROTATE_180;
+ case BIT(DRM_ROTATE_270):
+ return PLANE_CTL_ROTATE_90;
default:
- MISSING_CASE(fb->modifier[0]);
+ MISSING_CASE(rotation);
}
+ return 0;
+}
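Because DRM_ROTATE_* is counter-clockwise while the PLANE_CTL field is clockwise, 90 and 270 swap while 0 and 180 pass straight through. A quick reference for the mapping implemented above:

	/* DRM (counter-clockwise)      PLANE_CTL (clockwise)
	 * BIT(DRM_ROTATE_90)       ->  PLANE_CTL_ROTATE_270
	 * BIT(DRM_ROTATE_180)      ->  PLANE_CTL_ROTATE_180
	 * BIT(DRM_ROTATE_270)      ->  PLANE_CTL_ROTATE_90
	 */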
+
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *plane = crtc->primary;
+ bool visible = to_intel_plane_state(plane->state)->visible;
+ struct drm_i915_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride_div, stride;
+ u32 tile_height, plane_offset, plane_size;
+ unsigned int rotation;
+ int x_offset, y_offset;
+ unsigned long surf_addr;
+ struct intel_crtc_state *crtc_state = intel_crtc->config;
+ struct intel_plane_state *plane_state;
+ int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
+ int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
+ int scaler_id = -1;
+
+ plane_state = to_intel_plane_state(plane->state);
+
+ if (!visible || !fb) {
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_CTL(pipe, 0));
+ return;
+ }
+
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
+
+ plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
- if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
- plane_ctl |= PLANE_CTL_ROTATE_180;
+
+ rotation = plane->state->rotation;
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
- surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);
+ surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj);
+
+ /*
+ * FIXME: intel_plane_state->src, dst aren't set when transitional
+ * update_plane helpers are called from legacy paths.
+ * Once full atomic crtc is available, below check can be avoided.
+ */
+ if (drm_rect_width(&plane_state->src)) {
+ scaler_id = plane_state->scaler_id;
+ src_x = plane_state->src.x1 >> 16;
+ src_y = plane_state->src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->src) >> 16;
+ src_h = drm_rect_height(&plane_state->src) >> 16;
+ dst_x = plane_state->dst.x1;
+ dst_y = plane_state->dst.y1;
+ dst_w = drm_rect_width(&plane_state->dst);
+ dst_h = drm_rect_height(&plane_state->dst);
+
+ WARN_ON(x != src_x || y != src_y);
+ } else {
+ src_w = intel_crtc->config->pipe_src_w;
+ src_h = intel_crtc->config->pipe_src_h;
+ }
+
+ if (intel_rotation_90_or_270(rotation)) {
+ /* stride = Surface height in tiles */
+ tile_height = intel_tile_height(dev, fb->pixel_format,
+ fb->modifier[0]);
+ stride = DIV_ROUND_UP(fb->height, tile_height);
+ x_offset = stride * tile_height - y - src_h;
+ y_offset = x;
+ plane_size = (src_w - 1) << 16 | (src_h - 1);
+ } else {
+ stride = fb->pitches[0] / stride_div;
+ x_offset = x;
+ y_offset = y;
+ plane_size = (src_h - 1) << 16 | (src_w - 1);
+ }
+ plane_offset = y_offset << 16 | x_offset;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- I915_WRITE(PLANE_POS(pipe, 0), 0);
- I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
- I915_WRITE(PLANE_SIZE(pipe, 0),
- (intel_crtc->config->pipe_src_h - 1) << 16 |
- (intel_crtc->config->pipe_src_w - 1));
- I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
+ I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
+ I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
+ I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+ if (scaler_id >= 0) {
+ uint32_t ps_ctrl = 0;
+
+ WARN_ON(!dst_w || !dst_h);
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
+ crtc_state->scaler_state.scalers[scaler_id].mode;
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
+ I915_WRITE(PLANE_POS(pipe, 0), 0);
+ } else {
+ I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
+ }
+
I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
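For the rotated case above, the hardware takes the stride in tiles and the offsets transposed. A worked example, assuming intel_tile_height() returns 32 rows for this Y-tiled format (an assumption about that helper, not spelled out in this hunk): a 1920x1080 XRGB8888 framebuffer rotated 90 degrees with x = y = 0 gives

	stride   = DIV_ROUND_UP(1080, 32);   /* 34 tiles                            */
	x_offset = 34 * 32 - 0 - 1080;       /* 8, the padding rows of the last tile */
	y_offset = 0;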
@@ -3079,6 +3176,19 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
+void intel_crtc_reset(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!crtc->active)
+ return;
+
+ intel_crtc_disable_planes(&crtc->base);
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_enable_planes(&crtc->base);
+}
+
void intel_prepare_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3099,8 +3209,11 @@ void intel_prepare_reset(struct drm_device *dev)
* g33 docs say we should at least disable all the planes.
*/
for_each_intel_crtc(dev, crtc) {
- if (crtc->active)
- dev_priv->display.crtc_disable(&crtc->base);
+ if (!crtc->active)
+ continue;
+
+ intel_crtc_disable_planes(&crtc->base);
+ dev_priv->display.crtc_disable(&crtc->base);
}
}
@@ -3152,27 +3265,30 @@ void intel_finish_reset(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
-static int
+static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
- * framebuffer.
+ * framebuffer. Note that we rely on userspace rendering
+ * into the buffer attached to the pipe they are waiting
+ * on. If not, userspace generates a GPU hang with IPEHR
+ * pointing to the MI_WAIT_FOR_EVENT.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_finish_gpu(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
dev_priv->mm.interruptible = was_interruptible;
- return ret;
+ WARN_ON(ret);
}
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3824,7 +3940,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
@@ -3901,7 +4017,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
@@ -4039,8 +4155,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
- temp |= (TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_OUTPUT_ENABLE;
temp |= bpc << 9; /* same format but at 11:9 */
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
@@ -4126,6 +4241,26 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
goto found;
}
+ if (IS_BROXTON(dev_priv->dev)) {
+ /* PLL is attached to port in bxt */
+ struct intel_encoder *encoder;
+ struct intel_digital_port *intel_dig_port;
+
+ encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
+ if (WARN_ON(!encoder))
+ return NULL;
+
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+ /* 1:1 mapping between ports and PLLs */
+ i = (enum intel_dpll_id)intel_dig_port->port;
+ pll = &dev_priv->shared_dplls[i];
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+ WARN_ON(pll->new_config->crtc_mask);
+
+ goto found;
+ }
+
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
@@ -4251,16 +4386,180 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
}
}
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+/**
+ * skl_update_scaler_users - Stages update to crtc's scaler state
+ * @intel_crtc: crtc
+ * @crtc_state: crtc_state
+ * @plane: plane (NULL indicates crtc is requesting update)
+ * @plane_state: plane's state
+ * @force_detach: request unconditional detachment of scaler
+ *
+ * This function updates scaler state for requested plane or crtc.
+ * To request scaler usage update for a plane, caller shall pass plane pointer.
+ * To request scaler usage update for crtc, caller shall pass plane pointer
+ * as NULL.
+ *
+ * Return
+ * 0 - scaler_usage updated successfully
+ * error - requested scaling cannot be supported or other error condition
+ */
+int
+skl_update_scaler_users(
+ struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state,
+ struct intel_plane *intel_plane, struct intel_plane_state *plane_state,
+ int force_detach)
+{
+ int need_scaling;
+ int idx;
+ int src_w, src_h, dst_w, dst_h;
+ int *scaler_id;
+ struct drm_framebuffer *fb;
+ struct intel_crtc_scaler_state *scaler_state;
+ unsigned int rotation;
+
+ if (!intel_crtc || !crtc_state)
+ return 0;
+
+ scaler_state = &crtc_state->scaler_state;
+
+ idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX;
+ fb = intel_plane ? plane_state->base.fb : NULL;
+
+ if (intel_plane) {
+ src_w = drm_rect_width(&plane_state->src) >> 16;
+ src_h = drm_rect_height(&plane_state->src) >> 16;
+ dst_w = drm_rect_width(&plane_state->dst);
+ dst_h = drm_rect_height(&plane_state->dst);
+ scaler_id = &plane_state->scaler_id;
+ rotation = plane_state->base.rotation;
+ } else {
+ struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ src_w = crtc_state->pipe_src_w;
+ src_h = crtc_state->pipe_src_h;
+ dst_w = adjusted_mode->hdisplay;
+ dst_h = adjusted_mode->vdisplay;
+ scaler_id = &scaler_state->scaler_id;
+ rotation = DRM_ROTATE_0;
+ }
+
+ need_scaling = intel_rotation_90_or_270(rotation) ?
+ (src_h != dst_w || src_w != dst_h):
+ (src_w != dst_w || src_h != dst_h);
+
+ /*
+ * if the plane is being disabled, the scaler is no longer required, or a
+ * forced detach was requested:
+ * - free the scaler bound to this plane/crtc
+ * - in order to do this, update crtc->scaler_usage
+ *
+ * Here scaler state in crtc_state is set free so that
+ * scaler can be assigned to other user. Actual register
+ * update to free the scaler is done in plane/panel-fit programming.
+ * For this purpose crtc/plane_state->scaler_id isn't reset here.
+ */
+ if (force_detach || !need_scaling || (intel_plane &&
+ (!fb || !plane_state->visible))) {
+ if (*scaler_id >= 0) {
+ scaler_state->scaler_users &= ~(1 << idx);
+ scaler_state->scalers[*scaler_id].in_use = 0;
+
+ DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d "
+ "crtc_state = %p scaler_users = 0x%x\n",
+ intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC",
+ intel_plane ? intel_plane->base.base.id :
+ intel_crtc->base.base.id, crtc_state,
+ scaler_state->scaler_users);
+ *scaler_id = -1;
+ }
+ return 0;
+ }
+
+ /* range checks */
+ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
+ dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
+
+ src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
+ dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
+ DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_plane ? "PLANE" : "CRTC",
+ intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
+ intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ /* check colorkey */
+ if (WARN_ON(intel_plane &&
+ intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) {
+ DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey",
+ intel_plane->base.base.id, src_w, src_h, dst_w, dst_h);
+ return -EINVAL;
+ }
+
+ /* Check src format */
+ if (intel_plane) {
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ break;
+ default:
+ DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+ return -EINVAL;
+ }
+ }
+
+ /* mark this plane as a scaler user in crtc_state */
+ scaler_state->scaler_users |= (1 << idx);
+ DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u "
+ "crtc_state = %p scaler_users = 0x%x\n",
+ intel_plane ? "PLANE" : "CRTC",
+ intel_plane ? intel_plane->base.base.id : intel_crtc->base.base.id,
+ src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users);
+ return 0;
+}
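The staging above reduces to a per-crtc bitmask of scaler users (one bit per plane plus a dedicated crtc slot) together with a per-scaler in_use flag. A minimal standalone sketch of that bookkeeping, outside the patch, with SKL_CRTC_INDEX assumed here to be 31 and the helper names purely illustrative:

#include <stdio.h>

#define SKL_CRTC_INDEX 31	/* assumed value of the crtc's user slot */

static unsigned int scaler_users;	/* one bit per user: planes + crtc */

static void stage_scaler_user(int idx)
{
	scaler_users |= 1u << idx;	/* mirrors scaler_users |= (1 << idx) */
}

static void unstage_scaler_user(int idx)
{
	scaler_users &= ~(1u << idx);	/* mirrors the freeing path above */
}

int main(void)
{
	stage_scaler_user(SKL_CRTC_INDEX);	/* crtc needs panel fitting */
	stage_scaler_user(0);			/* plane 0 needs scaling */
	unstage_scaler_user(0);			/* plane 0 no longer needs it */
	printf("scaler_users = 0x%x\n", scaler_users);	/* prints 0x80000000 */
	return 0;
}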
+
+static void skylake_pfit_update(struct intel_crtc *crtc, int enable)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc->config->scaler_state;
+
+ DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
+
+ /* To update pfit, first update scaler state */
+ skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable);
+ intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config);
+ skl_detach_scalers(crtc);
+ if (!enable)
+ return;
if (crtc->config->pch_pfit.enabled) {
- I915_WRITE(PS_CTL(pipe), PS_ENABLE);
- I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
- I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+ int id;
+
+ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
+ DRM_ERROR("Requesting pfit without getting a scaler first\n");
+ return;
+ }
+
+ id = scaler_state->scaler_id;
+ I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+
+ DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}
@@ -4299,38 +4598,6 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
-/*
- * Disable a plane internally without actually modifying the plane's state.
- * This will allow us to easily restore the plane later by just reprogramming
- * its state.
- */
-static void disable_plane_internal(struct drm_plane *plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_plane_state *state =
- plane->funcs->atomic_duplicate_state(plane);
- struct intel_plane_state *intel_state = to_intel_plane_state(state);
-
- intel_state->visible = false;
- intel_plane->commit_plane(plane, intel_state);
-
- intel_plane_destroy_state(plane, state);
-}
-
-static void intel_disable_sprite_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
-
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
- intel_plane = to_intel_plane(plane);
- if (plane->fb && intel_plane->pipe == pipe)
- disable_plane_internal(plane);
- }
-}
-
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4404,7 +4671,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->state->enable || !intel_crtc->active)
return;
- if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+ if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
@@ -4436,9 +4703,9 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
}
-static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (!enable && intel_crtc->overlay) {
+ if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4454,17 +4721,38 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS. Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+static void
+intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_sprite_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
- intel_crtc_dpms_overlay(intel_crtc, true);
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, pipe);
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
@@ -4472,9 +4760,89 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
/*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So don't enable underrun reporting before at least some planes
+ * are enabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ if (HAS_GMCH_DISPLAY(dev))
+ i9xx_check_fifo_underruns(dev_priv);
+}
+
+/**
+ * intel_pre_disable_primary - Perform operations before disabling primary plane
+ * @crtc: the CRTC whose primary plane is to be disabled
+ *
+ * Performs potentially sleeping operations that must be done before the
+ * primary plane is disabled, such as updating FBC and IPS. Note that this may
+ * be called due to an explicit primary plane update, or due to an implicit
+ * disable that is caused when a sprite plane completely hides the primary
+ * plane.
+ */
+static void
+intel_pre_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So disable underrun reporting before all the planes get disabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH_DISPLAY(dev))
+ intel_set_memory_cxsr(dev_priv, false);
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev_priv->fbc.crtc == intel_crtc)
+ intel_fbc_disable(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+}
+
+static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ intel_enable_primary_hw_plane(crtc->primary, crtc);
+ intel_enable_sprite_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
+
+ intel_post_enable_primary(crtc);
+
+ /*
* FIXME: Once we grow proper nuclear flip support out of this we need
* to compute the mask of flip planes precisely. For the time being
- * consider this a flip from a NULL plane.
+ * consider this a flip to a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
@@ -4482,21 +4850,23 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane;
int pipe = intel_crtc->pipe;
intel_crtc_wait_for_pending_flips(crtc);
- if (dev_priv->fbc.crtc == intel_crtc)
- intel_fbc_disable(dev);
+ intel_pre_disable_primary(crtc);
- hsw_disable_ips(intel_crtc);
+ intel_crtc_dpms_overlay_disable(intel_crtc);
+ for_each_intel_plane(dev, intel_plane) {
+ if (intel_plane->pipe == pipe) {
+ struct drm_crtc *from = intel_plane->base.crtc;
- intel_crtc_dpms_overlay(intel_crtc, false);
- intel_crtc_update_cursor(crtc, false);
- intel_disable_sprite_planes(crtc);
- intel_disable_primary_hw_plane(crtc->primary, crtc);
+ intel_plane->disable_plane(&intel_plane->base,
+ from ?: crtc, true);
+ }
+ }
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4575,8 +4945,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
-
- intel_crtc_enable_planes(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4664,10 +5032,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_ddi_enable_pipe_clock(intel_crtc);
- if (IS_SKYLAKE(dev))
- skylake_pfit_enable(intel_crtc);
- else
+ if (INTEL_INFO(dev)->gen == 9)
+ skylake_pfit_update(intel_crtc, 1);
+ else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_enable(intel_crtc);
+ else
+ MISSING_CASE(INTEL_INFO(dev)->gen);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -4698,22 +5068,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
- intel_crtc_enable_planes(crtc);
-}
-
-static void skylake_pfit_disable(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
-
- /* To avoid upsetting the power well on haswell only disable the pfit if
- * it's in use. The hw state code will make sure we get this right. */
- if (crtc->config->pch_pfit.enabled) {
- I915_WRITE(PS_CTL(pipe), 0);
- I915_WRITE(PS_WIN_POS(pipe), 0);
- I915_WRITE(PS_WIN_SZ(pipe), 0);
- }
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4743,8 +5097,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- intel_crtc_disable_planes(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -4758,13 +5110,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_pfit_disable(intel_crtc);
+ if (intel_crtc->config->has_pch_encoder)
+ ironlake_fdi_disable(crtc);
+
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
if (intel_crtc->config->has_pch_encoder) {
- ironlake_fdi_disable(crtc);
-
ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
@@ -4807,8 +5160,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- intel_crtc_disable_planes(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
@@ -4827,10 +5178,12 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
- if (IS_SKYLAKE(dev))
- skylake_pfit_disable(intel_crtc);
- else
+ if (INTEL_INFO(dev)->gen == 9)
+ skylake_pfit_update(intel_crtc, 0);
+ else if (INTEL_INFO(dev)->gen < 9)
ironlake_pfit_disable(intel_crtc);
+ else
+ MISSING_CASE(INTEL_INFO(dev)->gen);
intel_ddi_disable_pipe_clock(intel_crtc);
@@ -4994,16 +5347,403 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
intel_display_set_init_power(dev_priv, false);
}
+void broxton_set_cdclk(struct drm_device *dev, int frequency)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t divider;
+ uint32_t ratio;
+ uint32_t current_freq;
+ int ret;
+
+ /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
+ switch (frequency) {
+ case 144000:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ ratio = BXT_DE_PLL_RATIO(60);
+ break;
+ case 288000:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+ ratio = BXT_DE_PLL_RATIO(60);
+ break;
+ case 384000:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ ratio = BXT_DE_PLL_RATIO(60);
+ break;
+ case 576000:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ ratio = BXT_DE_PLL_RATIO(60);
+ break;
+ case 624000:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ ratio = BXT_DE_PLL_RATIO(65);
+ break;
+ case 19200:
+ /*
+ * Bypass frequency with DE PLL disabled. Init ratio, divider
+ * to suppress GCC warning.
+ */
+ ratio = 0;
+ divider = 0;
+ break;
+ default:
+ DRM_ERROR("unsupported CDCLK freq %d", frequency);
+
+ return;
+ }
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ /* Inform power controller of upcoming frequency change */
+ ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
+ ret, frequency);
+ return;
+ }
+
+ current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
+ /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
+ current_freq = current_freq * 500 + 1000;
+
+ /*
+ * The DE PLL has to be disabled
+ * - when setting the 19.2MHz bypass frequency (the PLL isn't used)
+ * - before switching to 624MHz (the PLL needs toggling)
+ * - before switching away from 624MHz (the PLL needs toggling)
+ */
+ if (frequency == 19200 || frequency == 624000 ||
+ current_freq == 624000) {
+ I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
+ /* Timeout 200us */
+ if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
+ 1))
+ DRM_ERROR("timout waiting for DE PLL unlock\n");
+ }
+
+ if (frequency != 19200) {
+ uint32_t val;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ val &= ~BXT_DE_PLL_RATIO_MASK;
+ val |= ratio;
+ I915_WRITE(BXT_DE_PLL_CTL, val);
+
+ I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+ /* Timeout 200us */
+ if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
+ DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+ val = I915_READ(CDCLK_CTL);
+ val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
+ val |= divider;
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ if (frequency >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+ val &= ~CDCLK_FREQ_DECIMAL_MASK;
+ /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
+ val |= (frequency - 1000) / 500;
+ I915_WRITE(CDCLK_CTL, val);
+ }
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ DIV_ROUND_UP(frequency, 25000));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, frequency);
+ return;
+ }
+
+ dev_priv->cdclk_freq = frequency;
+}
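The divider table and the CDCLK_CTL decimal field above follow two small formulas: cdclk = 19.2 MHz * ratio / 2 / divider, and a .1 MHz fixed-point encoding with a -1 MHz offset. A standalone sketch checking both, outside the patch (values in kHz; the divider is expressed in tenths so 1.5 can be represented):

#include <stdio.h>

/* cdclk = 19.2 MHz * ratio / 2 / div, with div given in tenths (15 == 1.5) */
static int bxt_cdclk_khz(int ratio, int div_tenths)
{
	return 19200 * ratio / 2 * 10 / div_tenths;
}

int main(void)
{
	printf("%d kHz\n", bxt_cdclk_khz(60, 40));	/* 144000 */
	printf("%d kHz\n", bxt_cdclk_khz(60, 15));	/* 384000 */
	printf("%d kHz\n", bxt_cdclk_khz(65, 10));	/* 624000 */

	/* kHz -> .1 fixpoint MHz with -1 MHz offset, and back again */
	int freq = 624000;
	int decimal = (freq - 1000) / 500;		/* 1246 */
	printf("decimal = %d, back = %d kHz\n", decimal, decimal * 500 + 1000);
	return 0;
}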
+
+void broxton_init_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Move the handshake programming to the initialization sequence;
+ * previously it was left up to the BIOS.
+ */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+ /* Enable PG1 for cdclk */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
+ /* check if cd clock is enabled */
+ if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
+ DRM_DEBUG_KMS("Display already initialized\n");
+ return;
+ }
+
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from the VBT.
+ *   Make this change once the VBT has the required changes for BXT.
+ * - check if setting the max (or any) cdclk freq is really necessary
+ *   here; it belongs in modeset time
+ */
+ broxton_set_cdclk(dev, 624000);
+
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout!\n");
+}
+
+void broxton_uninit_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+ DRM_ERROR("DBuf power disable timeout!\n");
+
+ /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
+ broxton_set_cdclk(dev, 19200);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+}
+
+static const struct skl_cdclk_entry {
+ unsigned int freq;
+ unsigned int vco;
+} skl_cdclk_frequencies[] = {
+ { .freq = 308570, .vco = 8640 },
+ { .freq = 337500, .vco = 8100 },
+ { .freq = 432000, .vco = 8640 },
+ { .freq = 450000, .vco = 8100 },
+ { .freq = 540000, .vco = 8100 },
+ { .freq = 617140, .vco = 8640 },
+ { .freq = 675000, .vco = 8100 },
+};
+
+static unsigned int skl_cdclk_decimal(unsigned int freq)
+{
+ return (freq - 1000) / 500;
+}
+
+static unsigned int skl_cdclk_get_vco(unsigned int freq)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
+ const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
+
+ if (e->freq == freq)
+ return e->vco;
+ }
+
+ return 8100;
+}
+
+static void
+skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+{
+ unsigned int min_freq;
+ u32 val;
+
+ /* select the minimum CDCLK before enabling DPLL 0 */
+ val = I915_READ(CDCLK_CTL);
+ val &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
+ val |= CDCLK_FREQ_337_308;
+
+ if (required_vco == 8640)
+ min_freq = 308570;
+ else
+ min_freq = 337500;
+
+ val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+
+ I915_WRITE(CDCLK_CTL, val);
+ POSTING_READ(CDCLK_CTL);
+
+ /*
+ * We always enable DPLL0 with the lowest link rate possible, but still
+ * taking into account the VCO required to operate the eDP panel at the
+ * desired frequency. The usual DP link rates operate with a VCO of
+ * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
+ * The modeset code is responsible for the selection of the exact link
+ * rate later on, with the constraint of choosing a frequency that
+ * works with required_vco.
+ */
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+ if (required_vco == 8640)
+ val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ else
+ val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+ SKL_DPLL0);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("DPLL0 not locked\n");
+}
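For orientation, the two VCOs select two families of CDCLK values and DP link clocks; the numbers in skl_cdclk_frequencies are consistent with CDCLK being an integer divide of the VCO, and the 810/1080 link rates with VCO/10 and VCO/8. A small illustrative check, outside the patch and based on my reading of the table rather than anything the patch states:

#include <stdio.h>

int main(void)
{
	/* CDCLK candidates as integer divides of the two VCOs (MHz -> kHz) */
	const int div8100[] = { 24, 18, 12 };	/* 337500, 450000, 675000 */
	const int div8640[] = { 28, 20, 14 };	/* ~308570, 432000, ~617140 */
	int i;

	for (i = 0; i < 3; i++)
		printf("8100/%d = %d kHz\n", div8100[i], 8100 * 1000 / div8100[i]);
	for (i = 0; i < 3; i++)
		printf("8640/%d = %d kHz\n", div8640[i], 8640 * 1000 / div8640[i]);

	/* DPLL0 link rates referenced above */
	printf("810 link rate: %d MHz, 1080 link rate: %d MHz\n",
	       8100 / 10, 8640 / 8);
	return 0;
}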
+
+static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 val;
+
+ /* inform PCU we want to change CDCLK */
+ val = SKL_CDCLK_PREPARE_FOR_CHANGE;
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
+}
+
+static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < 15; i++) {
+ if (skl_cdclk_pcu_ready(dev_priv))
+ return true;
+ udelay(10);
+ }
+
+ return false;
+}
+
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+{
+ u32 freq_select, pcu_ack;
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+
+ if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
+ DRM_ERROR("failed to inform PCU about cdclk change\n");
+ return;
+ }
+
+ /* set CDCLK_CTL */
+ switch (freq) {
+ case 450000:
+ case 432000:
+ freq_select = CDCLK_FREQ_450_432;
+ pcu_ack = 1;
+ break;
+ case 540000:
+ freq_select = CDCLK_FREQ_540;
+ pcu_ack = 2;
+ break;
+ case 308570:
+ case 337500:
+ default:
+ freq_select = CDCLK_FREQ_337_308;
+ pcu_ack = 0;
+ break;
+ case 617140:
+ case 675000:
+ freq_select = CDCLK_FREQ_675_617;
+ pcu_ack = 3;
+ break;
+ }
+
+ I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+ POSTING_READ(CDCLK_CTL);
+
+ /* inform PCU of the change */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ /* disable DBUF power */
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+ DRM_ERROR("DBuf power disable timeout\n");
+
+ /* disable DPLL0 */
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+ DRM_ERROR("Couldn't disable DPLL0\n");
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+}
+
+void skl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ unsigned int required_vco;
+
+ /* enable PCH reset handshake */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+
+ /* enable PG1 and Misc I/O */
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
+ /* DPLL0 already enabled !? */
+ if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
+ DRM_DEBUG_DRIVER("DPLL0 already running\n");
+ return;
+ }
+
+ /* enable DPLL0 */
+ required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
+ skl_dpll0_enable(dev_priv, required_vco);
+
+ /* set CDCLK to the frequency the BIOS chose */
+ skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+
+ /* enable DBUF power */
+ I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
+ udelay(10);
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+ DRM_ERROR("DBuf power enable timeout\n");
+}
+
/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
/* Obtain SKU information */
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return vco_freq[hpll_freq] * 1000;
}
@@ -5012,16 +5752,16 @@ static void vlv_update_cdclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->vlv_cdclk_freq);
+ dev_priv->cdclk_freq);
/*
* Program the gmbus_freq based on the cdclk frequency.
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
- I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
+ I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -5030,7 +5770,8 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ != dev_priv->cdclk_freq);
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
@@ -5051,12 +5792,13 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
+ mutex_lock(&dev_priv->sb_lock);
+
if (cdclk == 400000) {
u32 divider;
divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
- mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~DISPLAY_FREQUENCY_VALUES;
@@ -5067,10 +5809,8 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
50))
DRM_ERROR("timed out waiting for CDclk change\n");
- mutex_unlock(&dev_priv->dpio_lock);
}
- mutex_lock(&dev_priv->dpio_lock);
/* adjust self-refresh exit latency value */
val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
val &= ~0x7f;
@@ -5084,7 +5824,8 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
- mutex_unlock(&dev_priv->dpio_lock);
+
+ mutex_unlock(&dev_priv->sb_lock);
vlv_update_cdclk(dev);
}
@@ -5094,7 +5835,8 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev)
+ != dev_priv->cdclk_freq);
switch (cdclk) {
case 333333:
@@ -5159,37 +5901,89 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
-/* compute the max pixel clock for new configuration */
-static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
+static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
+ int max_pixclk)
+{
+ /*
+ * FIXME:
+ * - remove the guardband, it's not needed on BXT
+ * - set 19.2MHz bypass frequency if there are no active pipes
+ */
+ if (max_pixclk > 576000*9/10)
+ return 624000;
+ else if (max_pixclk > 384000*9/10)
+ return 576000;
+ else if (max_pixclk > 288000*9/10)
+ return 384000;
+ else if (max_pixclk > 144000*9/10)
+ return 288000;
+ else
+ return 144000;
+}
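A quick worked example of the 90% guardband above, as a standalone copy of the selection logic rather than part of the patch: a 1080p60 mode at 148.5 MHz fits under the 288 MHz cdclk step, while a roughly 533 MHz 4K mode needs the 624 MHz step.

#include <stdio.h>

/* standalone copy of the selection above, values in kHz */
static int bxt_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 576000 * 9 / 10)
		return 624000;
	else if (max_pixclk > 384000 * 9 / 10)
		return 576000;
	else if (max_pixclk > 288000 * 9 / 10)
		return 384000;
	else if (max_pixclk > 144000 * 9 / 10)
		return 288000;
	return 144000;
}

int main(void)
{
	printf("%d\n", bxt_calc_cdclk(148500));	/* 1080p60 -> 288000 */
	printf("%d\n", bxt_calc_cdclk(533250));	/* 4K60-ish -> 624000 */
	return 0;
}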
+
+/* Compute the max pixel clock for the new configuration. Uses the atomic state
+ * if that's non-NULL, and looks at the current state otherwise. */
+static int intel_mode_max_pixclk(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
int max_pixclk = 0;
for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->new_enabled)
- max_pixclk = max(max_pixclk,
- intel_crtc->new_config->base.adjusted_mode.crtc_clock);
+ if (state)
+ crtc_state =
+ intel_atomic_get_crtc_state(state, intel_crtc);
+ else
+ crtc_state = intel_crtc->config;
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->base.enable)
+ continue;
+
+ max_pixclk = max(max_pixclk,
+ crtc_state->base.adjusted_mode.crtc_clock);
}
return max_pixclk;
}
-static void valleyview_modeset_global_pipes(struct drm_device *dev,
- unsigned *prepare_pipes)
+static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc;
- int max_pixclk = intel_mode_max_pixclk(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int max_pixclk = intel_mode_max_pixclk(state->dev, state);
+ int cdclk, i;
- if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
- dev_priv->vlv_cdclk_freq)
- return;
+ if (max_pixclk < 0)
+ return max_pixclk;
+
+ if (IS_VALLEYVIEW(dev_priv))
+ cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+ else
+ cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+
+ if (cdclk == dev_priv->cdclk_freq)
+ return 0;
+
+ /* add all active pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ if (!crtc->state->enable)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
/* disable/enable all currently active pipes while we change cdclk */
- for_each_intel_crtc(dev, intel_crtc)
- if (intel_crtc->base.state->enable)
- *prepare_pipes |= (1 << intel_crtc->pipe);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ if (crtc_state->enable)
+ crtc_state->mode_changed = true;
+
+ return 0;
}
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
@@ -5201,7 +5995,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);
- if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
+ if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_31;
@@ -5228,14 +6022,21 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
+static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(dev_priv);
- int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+ int max_pixclk = intel_mode_max_pixclk(dev, NULL);
+ int req_cdclk;
+
+ /* The path in intel_mode_max_pixclk() with a NULL atomic state should
+ * never fail. */
+ if (WARN_ON(max_pixclk < 0))
+ return;
- if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
+
+ if (req_cdclk != dev_priv->cdclk_freq) {
/*
* FIXME: We can end up here with all power domains off, yet
* with a CDCLK frequency other than the minimum. To account
@@ -5326,11 +6127,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
-
- intel_crtc_enable_planes(crtc);
-
- /* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -5387,21 +6183,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
-
- intel_crtc_enable_planes(crtc);
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So don't enable underrun reporting before at least some planes
- * are enabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
- /* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -5431,27 +6212,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
return;
/*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So diasble underrun reporting before all the planes get disabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- intel_set_memory_cxsr(dev_priv, false);
- intel_crtc_disable_planes(crtc);
-
- /*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
* We also need to wait on all gmch platforms because of the
@@ -5514,9 +6274,11 @@ void intel_crtc_control(struct drm_crtc *crtc, bool enable)
intel_crtc->enabled_power_domains = domains;
dev_priv->display.crtc_enable(crtc);
+ intel_crtc_enable_planes(crtc);
}
} else {
if (intel_crtc->active) {
+ intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
domains = intel_crtc->enabled_power_domains;
@@ -5540,6 +6302,8 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
+
+ crtc->state->active = enable;
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5551,10 +6315,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->state->enable);
+ intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
- crtc->primary->funcs->disable_plane(crtc->primary);
+ drm_plane_helper_disable(crtc->primary);
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -5695,65 +6460,80 @@ bool intel_connector_get_hw_state(struct intel_connector *connector)
return encoder->get_hw_state(encoder, &pipe);
}
-static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe)
+static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc =
- to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-
- if (crtc->base.state->enable &&
- crtc->config->has_pch_encoder)
- return crtc->config->fdi_lanes;
+ if (crtc_state->base.enable && crtc_state->has_pch_encoder)
+ return crtc_state->fdi_lanes;
return 0;
}
-static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
+static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc_state *pipe_config)
{
+ struct drm_atomic_state *state = pipe_config->base.state;
+ struct intel_crtc *other_crtc;
+ struct intel_crtc_state *other_crtc_state;
+
DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
+ return -EINVAL;
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
pipe_config->fdi_lanes);
- return false;
+ return -EINVAL;
} else {
- return true;
+ return 0;
}
}
if (INTEL_INFO(dev)->num_pipes == 2)
- return true;
+ return 0;
/* Ivybridge 3 pipe is really complicated */
switch (pipe) {
case PIPE_A:
- return true;
+ return 0;
case PIPE_B:
- if (pipe_config->fdi_lanes > 2 &&
- pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
+ if (pipe_config->fdi_lanes <= 2)
+ return 0;
+
+ other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
pipe_name(pipe), pipe_config->fdi_lanes);
- return false;
+ return -EINVAL;
}
- if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
+
+ other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
+ other_crtc_state =
+ intel_atomic_get_crtc_state(state, other_crtc);
+ if (IS_ERR(other_crtc_state))
+ return PTR_ERR(other_crtc_state);
+
+ if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
- return false;
+ return -EINVAL;
}
- return true;
+ return 0;
default:
BUG();
}
@@ -5765,8 +6545,8 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int lane, link_bw, fdi_dotclock;
- bool setup_ok, needs_recompute = false;
+ int lane, link_bw, fdi_dotclock, ret;
+ bool needs_recompute = false;
retry:
/* FDI is a binary signal running at ~2.7GHz, encoding
@@ -5788,9 +6568,9 @@ retry:
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
- setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
- intel_crtc->pipe, pipe_config);
- if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
+ ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
+ intel_crtc->pipe, pipe_config);
+ if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
pipe_config->pipe_bpp);
@@ -5803,7 +6583,7 @@ retry:
if (needs_recompute)
return RETRY;
- return setup_ok ? 0 : -EINVAL;
+ return ret;
}
static void hsw_compute_ips_config(struct intel_crtc *crtc,
@@ -5820,6 +6600,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int ret;
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
@@ -5860,21 +6641,107 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
return -EINVAL;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
- pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
- } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
- /* only a 8bpc pipe, with 6bpc dither through the panel fitter
- * for lvds. */
- pipe_config->pipe_bpp = 8*3;
- }
-
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
- return 0;
+ /* FIXME: remove the call below once atomic mode set is in place and all
+ * crtc-related checks are called from the atomic_crtc_check function */
+ ret = 0;
+ DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
+ crtc, pipe_config->base.state);
+ ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);
+
+ return ret;
+}
+
+static int skylake_get_display_clock_speed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+ uint32_t cdctl = I915_READ(CDCLK_CTL);
+ uint32_t linkrate;
+
+ if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+ WARN(1, "LCPLL1 not enabled\n");
+ return 24000; /* 24MHz is the cd freq with NSSC ref */
+ }
+
+ if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+ return 540000;
+
+ linkrate = (I915_READ(DPLL_CTRL1) &
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+ if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
+ linkrate == DPLL_CTRL1_LINK_RATE_1080) {
+ /* vco 8640 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 432000;
+ case CDCLK_FREQ_337_308:
+ return 308570;
+ case CDCLK_FREQ_675_617:
+ return 617140;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ } else {
+ /* vco 8100 */
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ return 450000;
+ case CDCLK_FREQ_337_308:
+ return 337500;
+ case CDCLK_FREQ_675_617:
+ return 675000;
+ default:
+ WARN(1, "Unknown cd freq selection\n");
+ }
+ }
+
+ /* error case, do as if DPLL0 isn't enabled */
+ return 24000;
+}
+
+static int broadwell_get_display_clock_speed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ return 800000;
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ return 450000;
+ else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ return 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ return 337500;
+ else
+ return 675000;
+}
+
+static int haswell_get_display_clock_speed(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ return 800000;
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ return 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ return 450000;
+ else if (IS_HSW_ULT(dev))
+ return 337500;
+ else
+ return 540000;
}
static int valleyview_get_display_clock_speed(struct drm_device *dev)
@@ -5886,9 +6753,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
divider = val & DISPLAY_FREQUENCY_VALUES;
@@ -5899,6 +6766,11 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
+static int ilk_get_display_clock_speed(struct drm_device *dev)
+{
+ return 450000;
+}
+
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
@@ -5906,7 +6778,7 @@ static int i945_get_display_clock_speed(struct drm_device *dev)
static int i915_get_display_clock_speed(struct drm_device *dev)
{
- return 333000;
+ return 333333;
}
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
@@ -5922,19 +6794,19 @@ static int pnv_get_display_clock_speed(struct drm_device *dev)
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
- return 267000;
+ return 266667;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
- return 333000;
+ return 333333;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
- return 444000;
+ return 444444;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
return 200000;
default:
DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
- return 133000;
+ return 133333;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
- return 167000;
+ return 166667;
}
}
@@ -5945,11 +6817,11 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
- return 133000;
+ return 133333;
else {
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_MHZ:
- return 333000;
+ return 333333;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
return 190000;
@@ -5959,7 +6831,7 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev)
static int i865_get_display_clock_speed(struct drm_device *dev)
{
- return 266000;
+ return 266667;
}
static int i855_get_display_clock_speed(struct drm_device *dev)
@@ -5975,7 +6847,7 @@ static int i855_get_display_clock_speed(struct drm_device *dev)
case GC_CLOCK_166_250:
return 250000;
case GC_CLOCK_100_133:
- return 133000;
+ return 133333;
}
/* Shouldn't happen */
@@ -5984,7 +6856,7 @@ static int i855_get_display_clock_speed(struct drm_device *dev)
static int i830_get_display_clock_speed(struct drm_device *dev)
{
- return 133000;
+ return 133333;
}
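The 133000 -> 133333 style changes in this block switch the reported clocks to the kHz-exact values of the underlying fractional-MHz rates (mostly thirds of a round MHz figure), presumably so later divisions see the exact number. A small illustrative check of the arithmetic, outside the patch; the specific source clocks are my assumption:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	printf("%d\n", DIV_ROUND_CLOSEST(400000, 3));	/* 133333 */
	printf("%d\n", DIV_ROUND_CLOSEST(500000, 3));	/* 166667 */
	printf("%d\n", DIV_ROUND_CLOSEST(800000, 3));	/* 266667 */
	printf("%d\n", DIV_ROUND_CLOSEST(1000000, 3));	/* 333333 */
	return 0;
}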
static void
@@ -6037,7 +6909,7 @@ static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
WARN_ON(!crtc_state->base.state);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
refclk = 100000;
} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
@@ -6225,7 +7097,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
bestn = pipe_config->dpll.n;
bestm1 = pipe_config->dpll.m1;
@@ -6303,7 +7175,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void chv_update_pll(struct intel_crtc *crtc,
@@ -6348,7 +7220,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
I915_WRITE(dpll_reg,
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* p1 and p2 divider */
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
@@ -6421,7 +7293,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
/**
@@ -6792,14 +7664,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -6922,9 +7794,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
return;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
clock.m2 = mdiv & DPIO_M2DIV_MASK;
@@ -7018,12 +7890,12 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
int refclk = 100000;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
@@ -7389,7 +8261,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
@@ -7415,7 +8287,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
/* Sequence to disable CLKOUT_DP */
@@ -7424,7 +8296,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg, tmp;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
SBI_GEN0 : SBI_DBUFF0;
@@ -7443,7 +8315,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
}
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void lpt_init_pch_refclk(struct drm_device *dev)
@@ -7483,16 +8355,13 @@ static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int num_connectors = 0, i;
bool is_lvds = false;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -7746,17 +8615,14 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
uint32_t dpll;
int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -7846,6 +8712,9 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
bool is_lvds = false;
struct intel_shared_dpll *pll;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
@@ -7980,14 +8849,28 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
+ struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
+ uint32_t ps_ctrl = 0;
+ int id = -1;
+ int i;
- tmp = I915_READ(PS_CTL(crtc->pipe));
+ /* find scaler attached to this pipe */
+ for (i = 0; i < crtc->num_scalers; i++) {
+ ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+ if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
+ id = i;
+ pipe_config->pch_pfit.enabled = true;
+ pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
+ pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+ break;
+ }
+ }
- if (tmp & PS_ENABLE) {
- pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
+ scaler_state->scaler_id = id;
+ if (id >= 0) {
+ scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
+ } else {
+ scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
}
@@ -8472,6 +9355,23 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
+static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int max_pixclk = intel_mode_max_pixclk(dev, NULL);
+ int req_cdclk;
+
+ /* see the comment in valleyview_modeset_global_resources */
+ if (WARN_ON(max_pixclk < 0))
+ return;
+
+ req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);
+
+ if (req_cdclk != dev_priv->cdclk_freq)
+ broxton_set_cdclk(dev, req_cdclk);
+}
+
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -8483,6 +9383,28 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
+static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_state *pipe_config)
+{
+ switch (port) {
+ case PORT_A:
+ pipe_config->ddi_pll_sel = SKL_DPLL0;
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ break;
+ case PORT_B:
+ pipe_config->ddi_pll_sel = SKL_DPLL1;
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ break;
+ case PORT_C:
+ pipe_config->ddi_pll_sel = SKL_DPLL2;
+ pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ break;
+ default:
+ DRM_ERROR("Incorrect port type\n");
+ }
+}
+
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
@@ -8545,6 +9467,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
if (IS_SKYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
+ else if (IS_BROXTON(dev))
+ bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -8621,12 +9545,24 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_timings(crtc, pipe_config);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ skl_init_scalers(dev, crtc, pipe_config);
+ }
+
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+
+ if (INTEL_INFO(dev)->gen >= 9) {
+ pipe_config->scaler_state.scaler_id = -1;
+ pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
+ }
+
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
- if (IS_SKYLAKE(dev))
+ if (INTEL_INFO(dev)->gen == 9)
skylake_get_pfit_config(crtc, pipe_config);
- else
+ else if (INTEL_INFO(dev)->gen < 9)
ironlake_get_pfit_config(crtc, pipe_config);
+ else
+ MISSING_CASE(INTEL_INFO(dev)->gen);
}
if (IS_HASWELL(dev))
@@ -8978,6 +9914,41 @@ mode_fits_in_fbdev(struct drm_device *dev,
#endif
}
+static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_plane_state *plane_state;
+ int hdisplay, vdisplay;
+ int ret;
+
+ plane_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ if (mode)
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ else
+ hdisplay = vdisplay = 0;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ if (ret)
+ return ret;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_w = hdisplay;
+ plane_state->crtc_h = vdisplay;
+ plane_state->src_x = x << 16;
+ plane_state->src_y = y << 16;
+ plane_state->src_w = hdisplay << 16;
+ plane_state->src_h = vdisplay << 16;
+
+ return 0;
+}
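The src_* fields above are in 16.16 fixed point, which is why whole-pixel values are shifted left by 16 here and shifted back down by 16 in skl_update_scaler_users. A tiny standalone sketch of that round trip, outside the patch:

#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920, vdisplay = 1080;

	/* whole pixels -> 16.16 fixed point, as done for plane_state->src_w/h */
	unsigned int src_w = hdisplay << 16;
	unsigned int src_h = vdisplay << 16;

	printf("src_w = 0x%x, src_h = 0x%x\n", src_w, src_h);
	/* and back to pixels, as done when computing the scaler source size */
	printf("%ux%u pixels\n", src_w >> 16, src_h >> 16);
	return 0;
}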
+
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
@@ -8994,6 +9965,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -9070,7 +10042,6 @@ retry:
intel_crtc = to_intel_crtc(crtc);
intel_crtc->new_enabled = true;
- intel_crtc->new_config = intel_crtc->config;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
@@ -9090,6 +10061,14 @@ retry:
connector_state->crtc = crtc;
connector_state->best_encoder = &intel_encoder->base;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ crtc_state->base.active = crtc_state->base.enable = true;
+
if (!mode)
mode = &load_detect_mode;
@@ -9112,7 +10091,13 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
+ ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+ if (ret)
+ goto fail;
+
+ drm_mode_copy(&crtc_state->base.mode, mode);
+
+ if (intel_set_mode(crtc, state, true)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -9126,15 +10111,9 @@ retry:
fail:
intel_crtc->new_enabled = crtc->state->enable;
- if (intel_crtc->new_enabled)
- intel_crtc->new_config = intel_crtc->config;
- else
- intel_crtc->new_config = NULL;
fail_unlock:
- if (state) {
- drm_atomic_state_free(state);
- state = NULL;
- }
+ drm_atomic_state_free(state);
+ state = NULL;
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -9156,6 +10135,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_atomic_state *state;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
@@ -9172,17 +10153,27 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (IS_ERR(connector_state))
goto fail;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
- intel_crtc->new_config = NULL;
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+ crtc_state->base.enable = crtc_state->base.active = false;
- drm_atomic_state_free(state);
+ ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
+ 0, 0);
+ if (ret)
+ goto fail;
+
+ ret = intel_set_mode(crtc, state, true);
+ if (ret)
+ goto fail;
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -9466,14 +10457,6 @@ void intel_mark_idle(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
}
-static void intel_crtc_set_state(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- kfree(crtc->config);
- crtc->config = crtc_state;
- crtc->base.state = &crtc_state->base;
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -9490,7 +10473,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_set_state(intel_crtc, NULL);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -9907,7 +10889,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
else if (i915.enable_execlists)
return true;
else
- return ring != i915_gem_request_get_ring(obj->last_read_req);
+ return ring != i915_gem_request_get_ring(obj->last_write_req);
}
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
@@ -9915,23 +10897,34 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
const enum pipe pipe = intel_crtc->pipe;
u32 ctl, stride;
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
- if (obj->tiling_mode == I915_TILING_X)
+ switch (fb->modifier[0]) {
+ case DRM_FORMAT_MOD_NONE:
+ break;
+ case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
+ break;
+ case I915_FORMAT_MOD_Y_TILED:
+ ctl |= PLANE_CTL_TILED_Y;
+ break;
+ case I915_FORMAT_MOD_Yf_TILED:
+ ctl |= PLANE_CTL_TILED_YF;
+ break;
+ default:
+ MISSING_CASE(fb->modifier[0]);
+ }
/*
 * The stride is either expressed as a multiple of 64-byte chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- stride = fb->pitches[0] >> 6;
- if (obj->tiling_mode == I915_TILING_X)
- stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */
+ stride = fb->pitches[0] /
+ intel_fb_stride_alignment(dev, fb->modifier[0],
+ fb->pixel_format);
/*
* Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
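
With the change above, the value programmed into PLANE_STRIDE is the pitch divided by the per-modifier unit returned by intel_fb_stride_alignment(). A quick worked sketch, assuming that unit is 64 bytes for linear buffers and 512 bytes for X-tiled ones (X tiles are 512 bytes wide, as the removed comment noted):

    #include <stdio.h>

    int main(void)
    {
            unsigned int pitch = 1920 * 4;      /* hypothetical XRGB8888 pitch in bytes */
            unsigned int linear_unit = 64;      /* linear: 64-byte chunks */
            unsigned int x_tile_width = 512;    /* X tile row width in bytes */

            printf("PLANE_STRIDE, linear : %u\n", pitch / linear_unit);  /* 120 */
            printf("PLANE_STRIDE, X-tiled: %u\n", pitch / x_tile_width); /* 15 */
            return 0;
    }
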
@@ -9996,22 +10989,19 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
static void intel_mmio_flip_work_func(struct work_struct *work)
{
- struct intel_crtc *crtc =
- container_of(work, struct intel_crtc, mmio_flip.work);
- struct intel_mmio_flip *mmio_flip;
+ struct intel_mmio_flip *mmio_flip =
+ container_of(work, struct intel_mmio_flip, work);
- mmio_flip = &crtc->mmio_flip;
if (mmio_flip->req)
WARN_ON(__i915_wait_request(mmio_flip->req,
- crtc->reset_counter,
- false, NULL, NULL) != 0);
+ mmio_flip->crtc->reset_counter,
+ false, NULL,
+ &mmio_flip->i915->rps.mmioflips));
- intel_do_mmio_flip(crtc);
- if (mmio_flip->req) {
- mutex_lock(&crtc->base.dev->struct_mutex);
- i915_gem_request_assign(&mmio_flip->req, NULL);
- mutex_unlock(&crtc->base.dev->struct_mutex);
- }
+ intel_do_mmio_flip(mmio_flip->crtc);
+
+ i915_gem_request_unreference__unlocked(mmio_flip->req);
+ kfree(mmio_flip);
}
static int intel_queue_mmio_flip(struct drm_device *dev,
@@ -10021,12 +11011,18 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
struct intel_engine_cs *ring,
uint32_t flags)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
+ if (mmio_flip == NULL)
+ return -ENOMEM;
- i915_gem_request_assign(&intel_crtc->mmio_flip.req,
- obj->last_write_req);
+ mmio_flip->i915 = to_i915(dev);
+ mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
+ mmio_flip->crtc = to_intel_crtc(crtc);
- schedule_work(&intel_crtc->mmio_flip.work);
+ INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
+ schedule_work(&mmio_flip->work);
return 0;
}
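
The allocation/worker pair above is a detached ("fire and forget") work item: the caller hands over its request reference, and the worker both drops that reference and frees the container. A stripped-down sketch of the same pattern, with hypothetical names:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct async_task {
            struct work_struct work;
            void *payload;              /* e.g. a referenced request */
    };

    static void async_task_fn(struct work_struct *work)
    {
            struct async_task *task = container_of(work, struct async_task, work);

            /* ... wait on / consume task->payload, then drop its reference ... */

            kfree(task);                /* the worker owns and frees the task */
    }

    static int queue_async_task(void *payload)
    {
            struct async_task *task = kmalloc(sizeof(*task), GFP_KERNEL);

            if (!task)
                    return -ENOMEM;

            task->payload = payload;    /* caller transfers its reference here */
            INIT_WORK(&task->work, async_task_fn);
            schedule_work(&task->work);
            return 0;
    }
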
@@ -10085,6 +11081,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
WARN_ON(!in_interrupt());
@@ -10092,12 +11089,16 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
return;
spin_lock(&dev->event_lock);
- if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
+ work = intel_crtc->unpin_work;
+ if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
- intel_crtc->unpin_work->flip_queued_vblank,
- drm_vblank_count(dev, pipe));
+ work->flip_queued_vblank, drm_vblank_count(dev, pipe));
page_flip_completed(intel_crtc);
+ work = NULL;
}
+ if (work != NULL &&
+ drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
+ intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
@@ -10115,6 +11116,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
+ bool mmio_flip;
int ret;
/*
@@ -10205,22 +11207,30 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = i915_gem_request_get_ring(obj->last_read_req);
+ ring = i915_gem_request_get_ring(obj->last_write_req);
if (ring == NULL || ring->id != RCS)
ring = &dev_priv->ring[BCS];
} else {
ring = &dev_priv->ring[RCS];
}
+ mmio_flip = use_mmio_flip(ring, obj);
+
+ /* When using CS flips, we want to emit semaphores between rings.
+ * However, when using mmio flips we will create a task to do the
+ * synchronisation, so all we want here is to pin the framebuffer
+ * into the display plane and skip any waits.
+ */
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
- crtc->primary->state, ring);
+ crtc->primary->state,
+ mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
if (ret)
goto cleanup_pending;
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
+ intel_crtc->dspaddr_offset;
- if (use_mmio_flip(ring, obj)) {
+ if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
@@ -10229,6 +11239,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
i915_gem_request_assign(&work->flip_queued_req,
obj->last_write_req);
} else {
+ if (obj->last_write_req) {
+ ret = i915_gem_check_olr(obj->last_write_req);
+ if (ret)
+ goto cleanup_unpin;
+ }
+
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
@@ -10284,7 +11300,7 @@ out_hang:
return ret;
}
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.atomic_begin = intel_begin_crtc_commit,
@@ -10315,11 +11331,6 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = crtc->base.state->enable;
-
- if (crtc->new_enabled)
- crtc->new_config = crtc->config;
- else
- crtc->new_config = NULL;
}
}
@@ -10344,31 +11355,37 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
}
}
-/**
- * intel_modeset_commit_output_state
- *
- * This function copies the stage display pipe configuration to the real one.
+/* Fixup legacy state after an atomic state swap.
*/
-static void intel_modeset_commit_output_state(struct drm_device *dev)
+static void intel_modeset_fixup_state(struct drm_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
- for_each_intel_connector(dev, connector) {
- connector->base.encoder = &connector->new_encoder->base;
+ for_each_intel_connector(state->dev, connector) {
+ connector->base.encoder = connector->base.state->best_encoder;
+ if (connector->base.encoder)
+ connector->base.encoder->crtc =
+ connector->base.state->crtc;
}
- for_each_intel_encoder(dev, encoder) {
- encoder->base.crtc = &encoder->new_crtc->base;
- }
+ /* Update crtc of disabled encoders */
+ for_each_intel_encoder(state->dev, encoder) {
+ int num_connectors = 0;
- for_each_intel_crtc(dev, crtc) {
- crtc->base.state->enable = crtc->new_enabled;
- crtc->base.enabled = crtc->new_enabled;
+ for_each_intel_connector(state->dev, connector)
+ if (connector->base.encoder == &encoder->base)
+ num_connectors++;
+
+ if (num_connectors == 0)
+ encoder->base.crtc = NULL;
}
- intel_modeset_update_connector_atomic_state(dev);
+ for_each_intel_crtc(state->dev, crtc) {
+ crtc->base.enabled = crtc->base.state->enable;
+ crtc->config = to_intel_crtc_state(crtc->base.state);
+ }
}
static void
@@ -10399,64 +11416,33 @@ connected_sink_compute_bpp(struct intel_connector *connector,
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
- struct drm_framebuffer *fb,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_atomic_state *state;
- struct intel_connector *connector;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int bpp, i;
- switch (fb->pixel_format) {
- case DRM_FORMAT_C8:
- bpp = 8*3; /* since we go through a colormap */
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- /* checked in intel_framebuffer_init already */
- if (WARN_ON(INTEL_INFO(dev)->gen > 3))
- return -EINVAL;
- case DRM_FORMAT_RGB565:
- bpp = 6*3; /* min is 18bpp */
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- /* checked in intel_framebuffer_init already */
- if (WARN_ON(INTEL_INFO(dev)->gen < 4))
- return -EINVAL;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- bpp = 8*3;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- /* checked in intel_framebuffer_init already */
- if (WARN_ON(INTEL_INFO(dev)->gen < 4))
- return -EINVAL;
+	if (IS_G4X(dev) || IS_VALLEYVIEW(dev))
bpp = 10*3;
- break;
- /* TODO: gen4+ supports 16 bpc floating point, too. */
- default:
- DRM_DEBUG_KMS("unsupported depth\n");
- return -EINVAL;
- }
+ else if (INTEL_INFO(dev)->gen >= 5)
+ bpp = 12*3;
+ else
+ bpp = 8*3;
+
pipe_config->pipe_bpp = bpp;
state = pipe_config->base.state;
/* Clamp display bpp to EDID value */
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector = to_intel_connector(state->connectors[i]);
- if (state->connector_states[i]->crtc != &crtc->base)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
continue;
- connected_sink_compute_bpp(connector, pipe_config);
+ connected_sink_compute_bpp(to_intel_connector(connector),
+ pipe_config);
}
return bpp;
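
As a worked example of the new defaults above: a gen9 pipe now starts from 12 bpc per channel (36 bpp); if the attached sink's EDID only reports 8 bpc, the per-connector clamp in connected_sink_compute_bpp() reduces pipe_bpp to 24, and because 24 differs from the 36 bpp baseline the dither flag is set further down (pipe_config->dither = pipe_bpp != base_bpp). The exact numbers are illustrative only.
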
@@ -10477,8 +11463,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
{
- DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
- context, pipe_name(crtc->pipe));
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ struct intel_plane_state *state;
+ struct drm_framebuffer *fb;
+
+ DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+ context, pipe_config, pipe_name(crtc->pipe));
DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
@@ -10515,6 +11507,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
DRM_DEBUG_KMS("pipe src size: %dx%d\n",
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
pipe_config->gmch_pfit.control,
pipe_config->gmch_pfit.pgm_ratios,
@@ -10525,6 +11521,73 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
+
+ if (IS_BROXTON(dev)) {
+ DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
+ pipe_config->ddi_pll_sel,
+ pipe_config->dpll_hw_state.ebb0,
+ pipe_config->dpll_hw_state.pll0,
+ pipe_config->dpll_hw_state.pll1,
+ pipe_config->dpll_hw_state.pll2,
+ pipe_config->dpll_hw_state.pll3,
+ pipe_config->dpll_hw_state.pll6,
+ pipe_config->dpll_hw_state.pll8,
+ pipe_config->dpll_hw_state.pcsdw12);
+ } else if (IS_SKYLAKE(dev)) {
+ DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
+ "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ pipe_config->ddi_pll_sel,
+ pipe_config->dpll_hw_state.ctrl1,
+ pipe_config->dpll_hw_state.cfgcr1,
+ pipe_config->dpll_hw_state.cfgcr2);
+ } else if (HAS_DDI(dev)) {
+ DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+ pipe_config->ddi_pll_sel,
+ pipe_config->dpll_hw_state.wrpll);
+ } else {
+ DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ pipe_config->dpll_hw_state.dpll,
+ pipe_config->dpll_hw_state.dpll_md,
+ pipe_config->dpll_hw_state.fp0,
+ pipe_config->dpll_hw_state.fp1);
+ }
+
+ DRM_DEBUG_KMS("planes on this crtc\n");
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ intel_plane = to_intel_plane(plane);
+ if (intel_plane->pipe != crtc->pipe)
+ continue;
+
+ state = to_intel_plane_state(plane->state);
+ fb = state->base.fb;
+ if (!fb) {
+ DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
+ "disabled, scaler_id = %d\n",
+ plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+ plane->base.id, intel_plane->pipe,
+ (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
+ drm_plane_index(plane), state->scaler_id);
+ continue;
+ }
+
+ DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
+ plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
+ plane->base.id, intel_plane->pipe,
+ crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
+ drm_plane_index(plane));
+ DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
+ fb->base.id, fb->width, fb->height, fb->pixel_format);
+ DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
+ state->scaler_id,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16,
+ state->dst.x1, state->dst.y1,
+ drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+ }
}
static bool encoders_cloneable(const struct intel_encoder *a,
@@ -10535,16 +11598,21 @@ static bool encoders_cloneable(const struct intel_encoder *a,
b->cloneable & (1 << a->type));
}
-static bool check_single_encoder_cloning(struct intel_crtc *crtc,
+static bool check_single_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
- struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, source_encoder) {
- if (source_encoder->new_crtc != crtc)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
continue;
+ source_encoder =
+ to_intel_encoder(connector_state->best_encoder);
if (!encoders_cloneable(encoder, source_encoder))
return false;
}
@@ -10552,39 +11620,47 @@ static bool check_single_encoder_cloning(struct intel_crtc *crtc,
return true;
}
-static bool check_encoder_cloning(struct intel_crtc *crtc)
+static bool check_encoder_cloning(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
+ int i;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc != crtc)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
continue;
- if (!check_single_encoder_cloning(crtc, encoder))
+ encoder = to_intel_encoder(connector_state->best_encoder);
+ if (!check_single_encoder_cloning(state, crtc, encoder))
return false;
}
return true;
}
-static bool check_digital_port_conflicts(struct drm_device *dev)
+static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
- struct intel_connector *connector;
+ struct drm_device *dev = state->dev;
+ struct intel_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
unsigned int used_ports = 0;
+ int i;
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- for_each_intel_connector(dev, connector) {
- struct intel_encoder *encoder = connector->new_encoder;
-
- if (!encoder)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (!connector_state->best_encoder)
continue;
- WARN_ON(!encoder->new_crtc);
+ encoder = to_intel_encoder(connector_state->best_encoder);
+
+ WARN_ON(!connector_state->crtc);
switch (encoder->type) {
unsigned int port_mask;
@@ -10613,51 +11689,57 @@ static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
struct drm_crtc_state tmp_state;
+ struct intel_crtc_scaler_state scaler_state;
+ struct intel_dpll_hw_state dpll_hw_state;
+ enum intel_dpll_id shared_dpll;
+ uint32_t ddi_pll_sel;
+
+ /* FIXME: before the switch to atomic started, a new pipe_config was
+ * kzalloc'd. Code that depends on any field being zero should be
+ * fixed, so that the crtc_state can be safely duplicated. For now,
+	 * only fields that are known to not cause problems are preserved. */
- /* Clear only the intel specific part of the crtc state */
tmp_state = crtc_state->base;
+ scaler_state = crtc_state->scaler_state;
+ shared_dpll = crtc_state->shared_dpll;
+ dpll_hw_state = crtc_state->dpll_hw_state;
+ ddi_pll_sel = crtc_state->ddi_pll_sel;
+
memset(crtc_state, 0, sizeof *crtc_state);
+
crtc_state->base = tmp_state;
+ crtc_state->scaler_state = scaler_state;
+ crtc_state->shared_dpll = shared_dpll;
+ crtc_state->dpll_hw_state = dpll_hw_state;
+ crtc_state->ddi_pll_sel = ddi_pll_sel;
}
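
clear_intel_crtc_state() uses the familiar save/memset/restore idiom: zero the whole structure while whitelisting the few fields that must survive. A generic sketch of the idiom, with a hypothetical structure:

    #include <string.h>

    struct example_state {
            int keep_a;          /* whitelisted: survives the reset */
            long keep_b;         /* whitelisted: survives the reset */
            int scratch[16];     /* everything else is zeroed */
    };

    static void clear_example_state(struct example_state *s)
    {
            int keep_a = s->keep_a;
            long keep_b = s->keep_b;

            memset(s, 0, sizeof(*s));

            s->keep_a = keep_a;
            s->keep_b = keep_b;
    }
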
-static struct intel_crtc_state *
+static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
- struct intel_crtc_state *pipe_config;
- int plane_bpp, ret = -EINVAL;
+ int base_bpp, ret = -EINVAL;
int i;
bool retry = true;
- if (!check_encoder_cloning(to_intel_crtc(crtc))) {
+ if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- if (!check_digital_port_conflicts(dev)) {
+ if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(pipe_config))
- return pipe_config;
-
clear_intel_crtc_state(pipe_config);
- pipe_config->base.crtc = crtc;
- drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
- drm_mode_copy(&pipe_config->base.mode, mode);
-
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
/*
* Sanitize sync polarity flags based on requested ones. If neither
@@ -10676,9 +11758,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
* after encoders and crtc also have had their say. */
- plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- fb, pipe_config);
- if (plane_bpp < 0)
+ base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ pipe_config);
+ if (base_bpp < 0)
goto fail;
/*
@@ -10706,12 +11788,7 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for (i = 0; i < state->num_connector; i++) {
- connector = to_intel_connector(state->connectors[i]);
- if (!connector)
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc)
continue;
@@ -10746,101 +11823,13 @@ encoder_retry:
goto encoder_retry;
}
- pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
+ pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
- plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
- return pipe_config;
+ return 0;
fail:
- return ERR_PTR(ret);
-}
-
-/* Computes which crtcs are affected and sets the relevant bits in the mask. For
- * simplicity we use the crtc's pipe number (because it's easier to obtain). */
-static void
-intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
- unsigned *prepare_pipes, unsigned *disable_pipes)
-{
- struct intel_crtc *intel_crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_crtc *tmp_crtc;
-
- *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
-
- /* Check which crtcs have changed outputs connected to them, these need
- * to be part of the prepare_pipes mask. We don't (yet) support global
- * modeset across multiple crtcs, so modeset_pipes will only have one
- * bit set at most. */
- for_each_intel_connector(dev, connector) {
- if (connector->base.encoder == &connector->new_encoder->base)
- continue;
-
- if (connector->base.encoder) {
- tmp_crtc = connector->base.encoder->crtc;
-
- *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
- }
-
- if (connector->new_encoder)
- *prepare_pipes |=
- 1 << connector->new_encoder->new_crtc->pipe;
- }
-
- for_each_intel_encoder(dev, encoder) {
- if (encoder->base.crtc == &encoder->new_crtc->base)
- continue;
-
- if (encoder->base.crtc) {
- tmp_crtc = encoder->base.crtc;
-
- *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
- }
-
- if (encoder->new_crtc)
- *prepare_pipes |= 1 << encoder->new_crtc->pipe;
- }
-
- /* Check for pipes that will be enabled/disabled ... */
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
- continue;
-
- if (!intel_crtc->new_enabled)
- *disable_pipes |= 1 << intel_crtc->pipe;
- else
- *prepare_pipes |= 1 << intel_crtc->pipe;
- }
-
-
- /* set_mode is also used to update properties on life display pipes. */
- intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->new_enabled)
- *prepare_pipes |= 1 << intel_crtc->pipe;
-
- /*
- * For simplicity do a full modeset on any pipe where the output routing
- * changed. We could be more clever, but that would require us to be
- * more careful with calling the relevant encoder->mode_set functions.
- */
- if (*prepare_pipes)
- *modeset_pipes = *prepare_pipes;
-
- /* ... and mask these out. */
- *modeset_pipes &= ~(*disable_pipes);
- *prepare_pipes &= ~(*disable_pipes);
-
- /*
- * HACK: We don't (yet) fully support global modesets. intel_set_config
- * obies this rule, but the modeset restore mode of
- * intel_modeset_setup_hw_state does not.
- */
- *modeset_pipes &= 1 << intel_crtc->pipe;
- *prepare_pipes &= 1 << intel_crtc->pipe;
-
- DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
- *modeset_pipes, *prepare_pipes, *disable_pipes);
+ return ret;
}
static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -10855,13 +11844,22 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
return false;
}
+static bool
+needs_modeset(struct drm_crtc_state *state)
+{
+ return state->mode_changed || state->active_changed;
+}
+
static void
-intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+intel_modeset_update_state(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
+ int i;
intel_shared_dpll_commit(dev_priv);
@@ -10869,39 +11867,47 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
if (!intel_encoder->base.crtc)
continue;
- intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc != intel_encoder->base.crtc)
+ continue;
- if (prepare_pipes & (1 << intel_crtc->pipe))
- intel_encoder->connectors_active = false;
+ if (crtc_state->enable && needs_modeset(crtc_state))
+ intel_encoder->connectors_active = false;
+
+ break;
+ }
}
- intel_modeset_commit_output_state(dev);
+ drm_atomic_helper_swap_state(state->dev, state);
+ intel_modeset_fixup_state(state);
/* Double check state. */
- for_each_intel_crtc(dev, intel_crtc) {
- WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
- WARN_ON(intel_crtc->new_config &&
- intel_crtc->new_config != intel_crtc->config);
- WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
+ for_each_crtc(dev, crtc) {
+ WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
- intel_crtc = to_intel_crtc(connector->encoder->crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc != connector->encoder->crtc)
+ continue;
- if (prepare_pipes & (1 << intel_crtc->pipe)) {
- struct drm_property *dpms_property =
- dev->mode_config.dpms_property;
+ if (crtc->state->enable && needs_modeset(crtc->state)) {
+ struct drm_property *dpms_property =
+ dev->mode_config.dpms_property;
- connector->dpms = DRM_MODE_DPMS_ON;
- drm_object_property_set_value(&connector->base,
- dpms_property,
- DRM_MODE_DPMS_ON);
+ connector->dpms = DRM_MODE_DPMS_ON;
+ drm_object_property_set_value(&connector->base,
+ dpms_property,
+ DRM_MODE_DPMS_ON);
- intel_encoder = to_intel_encoder(connector->encoder);
- intel_encoder->connectors_active = true;
+ intel_encoder = to_intel_encoder(connector->encoder);
+ intel_encoder->connectors_active = true;
+ }
+
+ break;
}
}
@@ -11087,6 +12093,8 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pch_pfit.size);
}
+ PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+
/* BDW+ don't expose a synchronous way to read the state */
if (IS_HASWELL(dev))
PIPE_CONF_CHECK_I(ips_enabled);
@@ -11428,32 +12436,18 @@ static void update_scanline_offset(struct intel_crtc *crtc)
static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_framebuffer *fb,
- struct drm_atomic_state *state,
- unsigned *modeset_pipes,
- unsigned *prepare_pipes,
- unsigned *disable_pipes)
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc_state *pipe_config = NULL;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *pipe_config;
int ret = 0;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ERR_PTR(ret);
- intel_modeset_affected_pipes(crtc, modeset_pipes,
- prepare_pipes, disable_pipes);
-
- for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
- pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(pipe_config))
- return pipe_config;
-
- pipe_config->base.enable = false;
- }
+ ret = drm_atomic_helper_check_modeset(state->dev, state);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Note this needs changes when we start tracking multiple modes
@@ -11461,43 +12455,76 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
- /* FIXME: For now we still expect modeset_pipes has at most
- * one bit set. */
- if (WARN_ON(&intel_crtc->base != crtc))
- continue;
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ if (!pipe_config->base.enable)
+ return pipe_config;
- pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state);
- if (IS_ERR(pipe_config))
- return pipe_config;
+ ret = intel_modeset_pipe_config(crtc, state, pipe_config);
+ if (ret)
+ return ERR_PTR(ret);
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
- }
+ /* Check things that can only be changed through modeset */
+ if (pipe_config->has_audio !=
+ to_intel_crtc(crtc)->config->has_audio)
+ pipe_config->base.mode_changed = true;
- return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));;
+ /*
+ * Note we have an issue here with infoframes: current code
+ * only updates them on the full mode set path per hw
+ * requirements. So here we should be checking for any
+ * required changes and forcing a mode set.
+ */
+
+	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, "[modeset]");
+
+ ret = drm_atomic_helper_check_planes(state->dev, state);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pipe_config;
}
-static int __intel_set_mode_setup_plls(struct drm_device *dev,
- unsigned modeset_pipes,
- unsigned disable_pipes)
+static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned clear_pipes = modeset_pipes | disable_pipes;
+ unsigned clear_pipes = 0;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *intel_crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
+ int i;
if (!dev_priv->display.crtc_compute_clock)
return 0;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ intel_crtc = to_intel_crtc(crtc);
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+
+ if (needs_modeset(crtc_state)) {
+ clear_pipes |= 1 << intel_crtc->pipe;
+ intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+ }
+ }
+
ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
if (ret)
goto done;
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct intel_crtc_state *state = intel_crtc->new_config;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc_state) || !crtc_state->enable)
+ continue;
+
+ intel_crtc = to_intel_crtc(crtc);
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+
ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- state);
+ intel_crtc_state);
if (ret) {
intel_shared_dpll_abort_config(dev_priv);
goto done;
@@ -11508,35 +12535,11 @@ done:
return ret;
}
-static int __intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_state *pipe_config,
- unsigned modeset_pipes,
- unsigned prepare_pipes,
- unsigned disable_pipes)
+/* Code that should eventually be part of atomic_check() */
+static int __intel_set_mode_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *saved_mode;
- struct intel_crtc_state *crtc_state_copy = NULL;
- struct intel_crtc *intel_crtc;
- int ret = 0;
-
- saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
- if (!saved_mode)
- return -ENOMEM;
-
- crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
- if (!crtc_state_copy) {
- ret = -ENOMEM;
- goto done;
- }
-
- *saved_mode = crtc->mode;
-
- if (modeset_pipes)
- to_intel_crtc(crtc)->new_config = pipe_config;
+ struct drm_device *dev = state->dev;
+ int ret;
/*
* See if the config requires any additional preparation, e.g.
@@ -11545,23 +12548,48 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* mode set on this crtc. For other crtcs we need to use the
* adjusted_mode bits in the crtc directly.
*/
- if (IS_VALLEYVIEW(dev)) {
- valleyview_modeset_global_pipes(dev, &prepare_pipes);
-
- /* may have added more to prepare_pipes than we should */
- prepare_pipes &= ~disable_pipes;
+ if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
+ ret = valleyview_modeset_global_pipes(state);
+ if (ret)
+ return ret;
}
- ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
+ ret = __intel_set_mode_setup_plls(state);
if (ret)
- goto done;
+ return ret;
+
+ return 0;
+}
+
+static int __intel_set_mode(struct drm_crtc *modeset_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = modeset_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+ int i;
- for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
- intel_crtc_disable(&intel_crtc->base);
+ ret = __intel_set_mode_checks(state);
+ if (ret < 0)
+ return ret;
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- if (intel_crtc->base.state->enable)
- dev_priv->display.crtc_disable(&intel_crtc->base);
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc_state))
+ continue;
+
+ if (!crtc_state->enable) {
+ intel_crtc_disable(crtc);
+ } else if (crtc->state->enable) {
+ intel_crtc_disable_planes(crtc);
+ dev_priv->display.crtc_disable(crtc);
+ }
}
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
@@ -11571,115 +12599,79 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* pipes; here we assume a single modeset_pipe and only track the
* single crtc and mode.
*/
- if (modeset_pipes) {
- crtc->mode = *mode;
- /* mode_set/enable/disable functions rely on a correct pipe
- * config. */
- intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
+ if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
+ modeset_crtc->mode = pipe_config->base.mode;
/*
* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
- drm_calc_timestamping_constants(crtc,
+ drm_calc_timestamping_constants(modeset_crtc,
&pipe_config->base.adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
 * update the output configuration. */
- intel_modeset_update_state(dev, prepare_pipes);
+ intel_modeset_update_state(state);
- modeset_update_crtc_power_domains(pipe_config->base.state);
+	/* The state has been swapped above, so state actually contains the
+ * old state now. */
- /* Set up the DPLL and any encoders state that needs to adjust or depend
- * on the DPLL.
- */
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_plane *primary = intel_crtc->base.primary;
- int vdisplay, hdisplay;
+ modeset_update_crtc_power_domains(state);
- drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
- ret = primary->funcs->update_plane(primary, &intel_crtc->base,
- fb, 0, 0,
- hdisplay, vdisplay,
- x << 16, y << 16,
- hdisplay << 16, vdisplay << 16);
- }
+ drm_atomic_helper_commit_planes(dev, state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- update_scanline_offset(intel_crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ continue;
+
+ update_scanline_offset(to_intel_crtc(crtc));
- dev_priv->display.crtc_enable(&intel_crtc->base);
+ dev_priv->display.crtc_enable(crtc);
+ intel_crtc_enable_planes(crtc);
}
/* FIXME: add subpixel order */
-done:
- if (ret && crtc->state->enable)
- crtc->mode = *saved_mode;
-
- if (ret == 0 && pipe_config) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- /* The pipe_config will be freed with the atomic state, so
- * make a copy. */
- memcpy(crtc_state_copy, intel_crtc->config,
- sizeof *crtc_state_copy);
- intel_crtc->config = crtc_state_copy;
- intel_crtc->base.state = &crtc_state_copy->base;
+ drm_atomic_helper_cleanup_planes(dev, state);
- if (modeset_pipes)
- intel_crtc->new_config = intel_crtc->config;
- } else {
- kfree(crtc_state_copy);
- }
+ drm_atomic_state_free(state);
- kfree(saved_mode);
- return ret;
+ return 0;
}
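
The function above fixes the ordering of the new atomic flow: check, prepare planes, disable the pipes that change, swap and fix up state, update power domains, commit planes, then re-enable pipes. A minimal sketch of that skeleton using only the generic DRM helpers (hypothetical driver function; the i915-specific steps are reduced to comments):

    #include <drm/drm_atomic.h>
    #include <drm/drm_atomic_helper.h>

    static int example_atomic_commit(struct drm_device *dev,
                                     struct drm_atomic_state *state)
    {
            int ret;

            ret = drm_atomic_helper_prepare_planes(dev, state);
            if (ret)
                    return ret;

            /* ... disable CRTCs that need a full modeset (driver specific) ... */

            drm_atomic_helper_swap_state(dev, state);  /* state now holds the old state */

            /* ... update power domains and clocks (driver specific) ... */

            drm_atomic_helper_commit_planes(dev, state);

            /* ... enable CRTCs that were brought up (driver specific) ... */

            drm_atomic_helper_cleanup_planes(dev, state);
            drm_atomic_state_free(state);

            return 0;
    }
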
-static int intel_set_mode_pipes(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_state *pipe_config,
- unsigned modeset_pipes,
- unsigned prepare_pipes,
- unsigned disable_pipes)
+static int intel_set_mode_with_config(struct drm_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ bool force_restore)
{
int ret;
- ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
- prepare_pipes, disable_pipes);
+ ret = __intel_set_mode(crtc, pipe_config);
- if (ret == 0)
+ if (ret == 0 && force_restore) {
+ intel_modeset_update_staged_output_state(crtc->dev);
intel_modeset_check_state(crtc->dev);
+ }
return ret;
}
static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ bool force_restore)
{
struct intel_crtc_state *pipe_config;
- unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, fb, state,
- &modeset_pipes,
- &prepare_pipes,
- &disable_pipes);
-
+ pipe_config = intel_modeset_compute_config(crtc, state);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto out;
}
- ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+ ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
if (ret)
goto out;
@@ -11694,6 +12686,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
state = drm_atomic_state_alloc(dev);
if (!state) {
@@ -11731,186 +12725,52 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
}
}
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
- state);
-
- drm_atomic_state_free(state);
-}
-
-#undef for_each_intel_crtc_masked
-
-static void intel_set_config_free(struct intel_set_config *config)
-{
- if (!config)
+ crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(crtc_state)) {
+ DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
+ crtc->base.id, PTR_ERR(crtc_state));
+ drm_atomic_state_free(state);
return;
-
- kfree(config->save_connector_encoders);
- kfree(config->save_encoder_crtcs);
- kfree(config->save_crtc_enabled);
- kfree(config);
-}
-
-static int intel_set_config_save_state(struct drm_device *dev,
- struct intel_set_config *config)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int count;
-
- config->save_crtc_enabled =
- kcalloc(dev->mode_config.num_crtc,
- sizeof(bool), GFP_KERNEL);
- if (!config->save_crtc_enabled)
- return -ENOMEM;
-
- config->save_encoder_crtcs =
- kcalloc(dev->mode_config.num_encoder,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!config->save_encoder_crtcs)
- return -ENOMEM;
-
- config->save_connector_encoders =
- kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_encoder *), GFP_KERNEL);
- if (!config->save_connector_encoders)
- return -ENOMEM;
-
- /* Copy data. Note that driver private data is not affected.
- * Should anything bad happen only the expected state is
- * restored, not the drivers personal bookkeeping.
- */
- count = 0;
- for_each_crtc(dev, crtc) {
- config->save_crtc_enabled[count++] = crtc->state->enable;
}
- count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- config->save_encoder_crtcs[count++] = encoder->crtc;
- }
+ crtc_state->base.active = crtc_state->base.enable =
+ to_intel_crtc(crtc)->new_enabled;
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- config->save_connector_encoders[count++] = connector->encoder;
- }
-
- return 0;
-}
+ drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
-static void intel_set_config_restore_state(struct drm_device *dev,
- struct intel_set_config *config)
-{
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- int count;
+ intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
+ crtc->primary->fb, crtc->x, crtc->y);
- count = 0;
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = config->save_crtc_enabled[count++];
-
- if (crtc->new_enabled)
- crtc->new_config = crtc->config;
- else
- crtc->new_config = NULL;
- }
-
- count = 0;
- for_each_intel_encoder(dev, encoder) {
- encoder->new_crtc =
- to_intel_crtc(config->save_encoder_crtcs[count++]);
- }
-
- count = 0;
- for_each_intel_connector(dev, connector) {
- connector->new_encoder =
- to_intel_encoder(config->save_connector_encoders[count++]);
- }
+ ret = intel_set_mode(crtc, state, false);
+ if (ret)
+ drm_atomic_state_free(state);
}
-static bool
-is_crtc_connector_off(struct drm_mode_set *set)
-{
- int i;
-
- if (set->num_connectors == 0)
- return false;
+#undef for_each_intel_crtc_masked
- if (WARN_ON(set->connectors == NULL))
- return false;
+static bool intel_connector_in_mode_set(struct intel_connector *connector,
+ struct drm_mode_set *set)
+{
+ int ro;
- for (i = 0; i < set->num_connectors; i++)
- if (set->connectors[i]->encoder &&
- set->connectors[i]->encoder->crtc == set->crtc &&
- set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
+ for (ro = 0; ro < set->num_connectors; ro++)
+ if (set->connectors[ro] == &connector->base)
return true;
return false;
}
-static void
-intel_set_config_compute_mode_changes(struct drm_mode_set *set,
- struct intel_set_config *config)
-{
-
- /* We should be able to check here if the fb has the same properties
- * and then just flip_or_move it */
- if (is_crtc_connector_off(set)) {
- config->mode_changed = true;
- } else if (set->crtc->primary->fb != set->fb) {
- /*
- * If we have no fb, we can only flip as long as the crtc is
- * active, otherwise we need a full mode set. The crtc may
- * be active if we've only disabled the primary plane, or
- * in fastboot situations.
- */
- if (set->crtc->primary->fb == NULL) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(set->crtc);
-
- if (intel_crtc->active) {
- DRM_DEBUG_KMS("crtc has no fb, will flip\n");
- config->fb_changed = true;
- } else {
- DRM_DEBUG_KMS("inactive crtc, full mode set\n");
- config->mode_changed = true;
- }
- } else if (set->fb == NULL) {
- config->mode_changed = true;
- } else if (set->fb->pixel_format !=
- set->crtc->primary->fb->pixel_format) {
- config->mode_changed = true;
- } else {
- config->fb_changed = true;
- }
- }
-
- if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
- config->fb_changed = true;
-
- if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
- DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
- config->mode_changed = true;
- }
-
- DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
- set->crtc->base.id, config->mode_changed, config->fb_changed);
-}
-
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config,
struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector *drm_connector;
struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- int ro;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
@@ -11918,169 +12778,129 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(set->fb && (set->num_connectors == 0));
for_each_intel_connector(dev, connector) {
- /* Otherwise traverse passed in connector list and get encoders
- * for them. */
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
- break;
- }
+ bool in_mode_set = intel_connector_in_mode_set(connector, set);
+
+ if (!in_mode_set && connector->base.state->crtc != set->crtc)
+ continue;
+
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (in_mode_set) {
+ int pipe = to_intel_crtc(set->crtc)->pipe;
+ connector_state->best_encoder =
+ &intel_find_encoder(connector, pipe)->base;
}
+ if (connector->base.state->crtc != set->crtc)
+ continue;
+
/* If we disable the crtc, disable all its connectors. Also, if
* the connector is on the changing crtc but not on the new
* connector list, disable it. */
- if ((!set->fb || ro == set->num_connectors) &&
- connector->base.encoder &&
- connector->base.encoder->crtc == set->crtc) {
- connector->new_encoder = NULL;
+ if (!set->fb || !in_mode_set) {
+ connector_state->best_encoder = NULL;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
connector->base.name);
}
-
-
- if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
- connector->base.base.id,
- connector->base.name);
- config->mode_changed = true;
- }
}
/* connector->new_encoder is now updated for all connectors. */
- /* Update crtc of enabled connectors. */
- for_each_intel_connector(dev, connector) {
- struct drm_crtc *new_crtc;
+ for_each_connector_in_state(state, drm_connector, connector_state, i) {
+ connector = to_intel_connector(drm_connector);
+
+ if (!connector_state->best_encoder) {
+ ret = drm_atomic_set_crtc_for_connector(connector_state,
+ NULL);
+ if (ret)
+ return ret;
- if (!connector->new_encoder)
continue;
+ }
- new_crtc = connector->new_encoder->base.crtc;
+ if (intel_connector_in_mode_set(connector, set)) {
+ struct drm_crtc *crtc = connector->base.state->crtc;
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == &connector->base)
- new_crtc = set->crtc;
+ /* If this connector was in a previous crtc, add it
+ * to the state. We might need to disable it. */
+ if (crtc) {
+ crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ ret = drm_atomic_set_crtc_for_connector(connector_state,
+ set->crtc);
+ if (ret)
+ return ret;
}
/* Make sure the new CRTC will work with the encoder */
- if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
- new_crtc)) {
+ if (!drm_encoder_crtc_ok(connector_state->best_encoder,
+ connector_state->crtc)) {
return -EINVAL;
}
- connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
-
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
-
- connector_state->crtc = new_crtc;
- connector_state->best_encoder = &connector->new_encoder->base;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
- new_crtc->base.id);
+ connector_state->crtc->base.id);
+
+ if (connector_state->best_encoder != &connector->encoder->base)
+ connector->encoder =
+ to_intel_encoder(connector_state->best_encoder);
}
- /* Check for any encoders that needs to be disabled. */
- for_each_intel_encoder(dev, encoder) {
- int num_connectors = 0;
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder == encoder) {
- WARN_ON(!connector->new_encoder->new_crtc);
- num_connectors++;
- }
- }
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ bool has_connectors;
- if (num_connectors == 0)
- encoder->new_crtc = NULL;
- else if (num_connectors > 1)
- return -EINVAL;
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
- /* Only now check for crtc changes so we don't miss encoders
- * that will be disabled. */
- if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
- encoder->base.base.id,
- encoder->base.name);
- config->mode_changed = true;
- }
+ has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
+ if (has_connectors != crtc_state->enable)
+ crtc_state->enable =
+ crtc_state->active = has_connectors;
}
- /* Now we've also updated encoder->new_crtc for all encoders. */
- for_each_intel_connector(dev, connector) {
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
- if (connector->new_encoder) {
- if (connector->new_encoder != connector->encoder)
- connector->encoder = connector->new_encoder;
- } else {
- connector_state->crtc = NULL;
- }
- }
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = false;
+ ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
+ set->fb, set->x, set->y);
+ if (ret)
+ return ret;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc == crtc) {
- crtc->new_enabled = true;
- break;
- }
- }
+ crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- if (crtc->new_enabled != crtc->base.state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
- crtc->base.base.id,
- crtc->new_enabled ? "en" : "dis");
- config->mode_changed = true;
- }
+ if (set->mode)
+ drm_mode_copy(&crtc_state->mode, set->mode);
- if (crtc->new_enabled)
- crtc->new_config = crtc->config;
- else
- crtc->new_config = NULL;
- }
+ if (set->num_connectors)
+ crtc_state->active = true;
return 0;
}
-static void disable_crtc_nofb(struct intel_crtc *crtc)
+static bool primary_plane_visible(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
-
- DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
- pipe_name(crtc->pipe));
-
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder &&
- connector->new_encoder->new_crtc == crtc)
- connector->new_encoder = NULL;
- }
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(crtc->primary->state);
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc == crtc)
- encoder->new_crtc = NULL;
- }
-
- crtc->new_enabled = false;
- crtc->new_config = NULL;
+ return plane_state->visible;
}
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_mode_set save_set;
struct drm_atomic_state *state = NULL;
- struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
- unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12101,85 +12921,42 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
- ret = -ENOMEM;
- config = kzalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- goto out_config;
-
- ret = intel_set_config_save_state(dev, config);
- if (ret)
- goto out_config;
-
- save_set.crtc = set->crtc;
- save_set.mode = &set->crtc->mode;
- save_set.x = set->crtc->x;
- save_set.y = set->crtc->y;
- save_set.fb = set->crtc->primary->fb;
-
- /* Compute whether we need a full modeset, only an fb base update or no
- * change at all. In the future we might also check whether only the
- * mode changed, e.g. for LVDS where we only change the panel fitter in
- * such cases. */
- intel_set_config_compute_mode_changes(set, config);
-
state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out_config;
- }
+ if (!state)
+ return -ENOMEM;
state->acquire_ctx = dev->mode_config.acquire_ctx;
- ret = intel_modeset_stage_output_state(dev, set, config, state);
+ ret = intel_modeset_stage_output_state(dev, set, state);
if (ret)
- goto fail;
+ goto out;
- pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- set->fb, state,
- &modeset_pipes,
- &prepare_pipes,
- &disable_pipes);
+ pipe_config = intel_modeset_compute_config(set->crtc, state);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
- goto fail;
- } else if (pipe_config) {
- if (pipe_config->has_audio !=
- to_intel_crtc(set->crtc)->config->has_audio)
- config->mode_changed = true;
-
- /*
- * Note we have an issue here with infoframes: current code
- * only updates them on the full mode set path per hw
- * requirements. So here we should be checking for any
- * required changes and forcing a mode set.
- */
+ goto out;
}
intel_update_pipe_size(to_intel_crtc(set->crtc));
- if (config->mode_changed) {
- ret = intel_set_mode_pipes(set->crtc, set->mode,
- set->x, set->y, set->fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
- } else if (config->fb_changed) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
- struct drm_plane *primary = set->crtc->primary;
- int vdisplay, hdisplay;
+ primary_plane_was_visible = primary_plane_visible(set->crtc);
+
+ ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
- drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
- ret = primary->funcs->update_plane(primary, set->crtc, set->fb,
- 0, 0, hdisplay, vdisplay,
- set->x << 16, set->y << 16,
- hdisplay << 16, vdisplay << 16);
+ if (ret == 0 &&
+ pipe_config->base.enable &&
+ pipe_config->base.planes_changed &&
+ !needs_modeset(&pipe_config->base)) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
/*
* We need to make sure the primary plane is re-enabled if it
* has previously been turned off.
*/
- if (!intel_crtc->primary_enabled && ret == 0) {
+ if (ret == 0 && !primary_plane_was_visible &&
+ primary_plane_visible(set->crtc)) {
WARN_ON(!intel_crtc->active);
- intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
+ intel_post_enable_primary(set->crtc);
}
/*
@@ -12197,33 +12974,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
-fail:
- intel_set_config_restore_state(dev, config);
-
- drm_atomic_state_clear(state);
-
- /*
- * HACK: if the pipe was on, but we didn't have a framebuffer,
- * force the pipe off to avoid oopsing in the modeset code
- * due to fb==NULL. This should only happen during boot since
- * we don't yet reconstruct the FB from the hardware state.
- */
- if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
- disable_crtc_nofb(to_intel_crtc(save_set.crtc));
-
- /* Try to restore the config */
- if (config->mode_changed &&
- intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb,
- state))
- DRM_ERROR("failed to restore config after modeset failure\n");
}
-out_config:
- if (state)
+out:
+ if (ret)
drm_atomic_state_free(state);
-
- intel_set_config_free(config);
return ret;
}
@@ -12444,6 +13199,36 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
}
}
+int
+skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
+{
+ int max_scale;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ int crtc_clock, cdclk;
+
+ if (!intel_crtc || !crtc_state)
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ dev = intel_crtc->base.dev;
+ dev_priv = dev->dev_private;
+ crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
+ cdclk = dev_priv->display.get_display_clock_speed(dev);
+
+ if (!crtc_clock || !cdclk)
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ /*
+	 * The SKL max scale is the lower of:
+	 *  - just under 3.0 (16.16 fixed point; the -1 keeps it below 3.0), or
+	 *  - cdclk / crtc_clock, also in 16.16 fixed point
+ */
+ max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
+
+ return max_scale;
+}
+
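As a sanity check on the fixed-point math above, here is a minimal standalone sketch of the same computation with assumed clocks (337500 kHz cdclk, 148500 kHz pixel clock); the numbers are illustrative only, not taken from this patch.

    /*
     * Both terms are 16.16 fixed point: (1 << 16) * 3 - 1 is "just under
     * 3.0", and (1 << 8) * ((cdclk << 8) / crtc_clock) is cdclk/crtc_clock.
     */
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            int cdclk = 337500;      /* kHz, assumed value */
            int crtc_clock = 148500; /* kHz, assumed value */
            int max_scale = MIN((1 << 16) * 3 - 1,
                                (1 << 8) * ((cdclk << 8) / crtc_clock));

            /* prints ~2.27: the plane may be downscaled by at most ~2.27x */
            printf("max downscale = %.2f\n", max_scale / 65536.0);
            return 0;
    }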
static int
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
@@ -12452,24 +13237,43 @@ intel_check_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
+ bool can_position = false;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
+ crtc_state = state->base.state ?
+ intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
+
+ if (INTEL_INFO(dev)->gen >= 9) {
+ /* use scaler when colorkey is not required */
+ if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
+ min_scale = 1;
+ max_scale = skl_max_scale(intel_crtc, crtc_state);
+ }
+ can_position = true;
+ }
ret = drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true, &state->visible);
+ min_scale,
+ max_scale,
+ can_position, true,
+ &state->visible);
if (ret)
return ret;
if (intel_crtc->active) {
+ struct intel_plane_state *old_state =
+ to_intel_plane_state(plane->state);
+
intel_crtc->atomic.wait_for_flips = true;
/*
@@ -12482,20 +13286,20 @@ intel_check_primary_plane(struct drm_plane *plane,
* one is done too late. We eventually need to unify
* this.
*/
- if (intel_crtc->primary_enabled &&
+ if (state->visible &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.crtc == intel_crtc &&
state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
- if (state->visible) {
+ if (state->visible && !old_state->visible) {
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
- if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
+ if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
}
@@ -12508,6 +13312,13 @@ intel_check_primary_plane(struct drm_plane *plane,
intel_crtc->atomic.update_wm = true;
}
+ if (INTEL_INFO(dev)->gen >= 9) {
+ ret = skl_update_scaler_users(intel_crtc, crtc_state,
+ to_intel_plane(plane), state, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -12530,27 +13341,26 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->y = src->y1 >> 16;
if (intel_crtc->active) {
- if (state->visible) {
+ if (state->visible)
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
- intel_crtc->primary_enabled = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
- } else {
- /*
- * If clipping results in a non-visible primary plane,
- * we'll disable the primary plane. Note that this is
- * a bit different than what happens if userspace
- * explicitly disables the plane by passing fb=0
- * because plane->fb still gets set and pinned.
- */
- intel_disable_primary_hw_plane(plane, crtc);
- }
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
}
}
+static void
+intel_disable_primary_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ bool force)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
+}
+
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -12655,8 +13465,8 @@ void intel_plane_destroy(struct drm_plane *plane)
}
const struct drm_plane_funcs intel_plane_funcs = {
- .update_plane = drm_plane_helper_update,
- .disable_plane = drm_plane_helper_disable,
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
.set_property = drm_atomic_helper_plane_set_property,
.atomic_get_property = intel_plane_atomic_get_property,
@@ -12687,19 +13497,28 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->can_scale = false;
primary->max_downscale = 1;
+ if (INTEL_INFO(dev)->gen >= 9) {
+ primary->can_scale = true;
+ state->scaler_id = -1;
+ }
primary->pipe = pipe;
primary->plane = pipe;
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
+ primary->disable_plane = intel_disable_primary_plane;
+ primary->ckey.flags = I915_SET_COLORKEY_NONE;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
- if (INTEL_INFO(dev)->gen <= 3) {
- intel_primary_formats = intel_primary_formats_gen2;
- num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ intel_primary_formats = skl_primary_formats;
+ num_formats = ARRAY_SIZE(skl_primary_formats);
+ } else if (INTEL_INFO(dev)->gen >= 4) {
+ intel_primary_formats = i965_primary_formats;
+ num_formats = ARRAY_SIZE(i965_primary_formats);
} else {
- intel_primary_formats = intel_primary_formats_gen4;
- num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+ intel_primary_formats = i8xx_primary_formats;
+ num_formats = ARRAY_SIZE(i8xx_primary_formats);
}
drm_universal_plane_init(dev, &primary->base, 0,
@@ -12707,23 +13526,32 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
- if (INTEL_INFO(dev)->gen >= 4) {
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_180));
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&primary->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
- }
+ if (INTEL_INFO(dev)->gen >= 4)
+ intel_create_rotation_property(dev, primary);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
return &primary->base;
}
+void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
+{
+ if (!dev->mode_config.rotation_property) {
+ unsigned long flags = BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_180);
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
+
+ dev->mode_config.rotation_property =
+ drm_mode_create_rotation_property(dev, flags);
+ }
+ if (dev->mode_config.rotation_property)
+ drm_object_attach_property(&plane->base.base,
+ dev->mode_config.rotation_property,
+ plane->base.state->rotation);
+}
+
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
@@ -12786,6 +13614,22 @@ finish:
}
static void
+intel_disable_cursor_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ bool force)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!force) {
+ plane->fb = NULL;
+ intel_crtc->cursor_bo = NULL;
+ intel_crtc->cursor_addr = 0;
+ }
+
+ intel_crtc_update_cursor(crtc, false);
+}
+
+static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
@@ -12843,6 +13687,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->plane = pipe;
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
+ cursor->disable_plane = intel_disable_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_plane_funcs,
@@ -12862,11 +13707,32 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
state->base.rotation);
}
+	if (INTEL_INFO(dev)->gen >= 9)
+ state->scaler_id = -1;
+
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return &cursor->base;
}
+static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int i;
+ struct intel_scaler *intel_scaler;
+ struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
+
+ for (i = 0; i < intel_crtc->num_scalers; i++) {
+ intel_scaler = &scaler_state->scalers[i];
+ intel_scaler->in_use = 0;
+ intel_scaler->id = i;
+
+ intel_scaler->mode = PS_SCALER_MODE_DYN;
+ }
+
+ scaler_state->scaler_id = -1;
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12883,9 +13749,20 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
goto fail;
- intel_crtc_set_state(intel_crtc, crtc_state);
+ intel_crtc->config = crtc_state;
+ intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
+ /* initialize shared scalers */
+ if (INTEL_INFO(dev)->gen >= 9) {
+ if (pipe == PIPE_C)
+ intel_crtc->num_scalers = 1;
+ else
+ intel_crtc->num_scalers = SKL_NUM_SCALERS;
+
+ skl_init_scalers(dev, intel_crtc, crtc_state);
+ }
+
primary = intel_primary_plane_create(dev, pipe);
if (!primary)
goto fail;
@@ -12926,8 +13803,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
- INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
-
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -13038,7 +13913,16 @@ static void intel_setup_outputs(struct drm_device *dev)
if (intel_crt_present(dev))
intel_crt_init(dev);
- if (HAS_DDI(dev)) {
+ if (IS_BROXTON(dev)) {
+ /*
+ * FIXME: Broxton doesn't support port detection via the
+ * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
+ * detect the ports.
+ */
+ intel_ddi_init(dev, PORT_A);
+ intel_ddi_init(dev, PORT_B);
+ intel_ddi_init(dev, PORT_C);
+ } else if (HAS_DDI(dev)) {
int found;
/*
@@ -13315,25 +14199,35 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
- case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
+ if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
+ case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("unsupported pixel format: %s\n",
drm_get_format_name(mode_cmd->pixel_format));
return -EINVAL;
}
break;
+ case DRM_FORMAT_ABGR2101010:
+ if (!IS_VALLEYVIEW(dev)) {
+ DRM_DEBUG("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format));
+ return -EINVAL;
+ }
+ break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
@@ -13474,10 +14368,23 @@ static void intel_init_display(struct drm_device *dev)
}
/* Returns the core display clock speed */
- if (IS_VALLEYVIEW(dev))
+ if (IS_SKYLAKE(dev))
+ dev_priv->display.get_display_clock_speed =
+ skylake_get_display_clock_speed;
+ else if (IS_BROADWELL(dev))
+ dev_priv->display.get_display_clock_speed =
+ broadwell_get_display_clock_speed;
+ else if (IS_HASWELL(dev))
+ dev_priv->display.get_display_clock_speed =
+ haswell_get_display_clock_speed;
+ else if (IS_VALLEYVIEW(dev))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
- else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ else if (IS_GEN5(dev))
+ dev_priv->display.get_display_clock_speed =
+ ilk_get_display_clock_speed;
+ else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
+ IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
@@ -13514,6 +14421,9 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
+ } else if (IS_BROXTON(dev)) {
+ dev_priv->display.modeset_global_resources =
+ broxton_modeset_global_resources;
}
switch (INTEL_INFO(dev)->gen) {
@@ -13922,8 +14832,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
+ to_intel_plane_state(crtc->base.primary->state)->visible = true;
crtc->plane = !plane;
- crtc->primary_enabled = true;
+ intel_crtc_disable_planes(&crtc->base);
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
@@ -13945,6 +14856,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
WARN_ON(crtc->active);
crtc->base.state->enable = false;
+ crtc->base.state->active = false;
crtc->base.enabled = false;
}
@@ -13973,6 +14885,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
crtc->active ? "enabled" : "disabled");
crtc->base.state->enable = crtc->active;
+ crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
/* Because we only establish the connector -> encoder ->
@@ -14100,6 +15013,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
+ struct drm_plane *primary = crtc->base.primary;
+ struct intel_plane_state *plane_state;
+
memset(crtc->config, 0, sizeof(*crtc->config));
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
@@ -14108,8 +15024,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->config);
crtc->base.state->enable = crtc->active;
+ crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active;
- crtc->primary_enabled = primary_get_hw_state(crtc);
+
+ plane_state = to_intel_plane_state(primary->state);
+ plane_state->visible = primary_get_hw_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d714a4b5711e..76afc62373d7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -41,6 +41,12 @@
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+/* Compliance test status bits */
+#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
+#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+
struct dp_link_dpll {
int link_bw;
struct dpll dpll;
@@ -84,8 +90,8 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
-/* Skylake supports following rates */
-static const int gen9_rates[] = { 162000, 216000, 270000,
+
+static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
243000, 270000, 324000, 405000,
@@ -696,15 +702,13 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (index)
return 0;
if (intel_dig_port->port == PORT_A) {
- if (IS_GEN6(dev) || IS_GEN7(dev))
- return 200; /* SNB & IVB eDP input clock at 400Mhz */
- else
- return 225; /* eDP input clock at 450Mhz */
+ return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
} else {
return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
}
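The DIV_ROUND_UP(cdclk, 2000) form reproduces the constants the removed branch hard-coded (200 for a 400 MHz clock, 225 for 450 MHz). A tiny sketch, with the macro defined locally rather than pulled from the kernel headers, makes the arithmetic explicit:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* AUX clock divider for a 2 MHz target, cdclk given in kHz */
            printf("%d\n", DIV_ROUND_UP(400000, 2000)); /* 200 (SNB/IVB eDP) */
            printf("%d\n", DIV_ROUND_UP(450000, 2000)); /* 225 */
            return 0;
    }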
@@ -719,7 +723,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
if (intel_dig_port->port == PORT_A) {
if (index)
return 0;
- return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
switch (index) {
@@ -876,9 +880,18 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
- if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR))
+ if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
+ continue;
+
+ /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
+	 * A 400us delay is required after errors and timeouts.
+	 * Timeout errors from the HW already meet this
+	 * requirement, so skip to the next iteration.
+ */
+ if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
+ usleep_range(400, 500);
continue;
+ }
if (status & DP_AUX_CH_CTL_DONE)
goto done;
}
@@ -1083,6 +1096,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
u32 ctrl1;
+ memset(&pipe_config->dpll_hw_state, 0,
+ sizeof(pipe_config->dpll_hw_state));
+
pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->dpll_hw_state.cfgcr1 = 0;
pipe_config->dpll_hw_state.cfgcr2 = 0;
@@ -1090,30 +1106,30 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
switch (link_clock / 2) {
case 81000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
break;
case 135000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
SKL_DPLL0);
break;
case 270000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
SKL_DPLL0);
break;
case 162000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
SKL_DPLL0);
break;
/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
results in CDCLK change. Need to handle the change of CDCLK by
disabling pipes and re-enabling them */
case 108000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
break;
case 216000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
SKL_DPLL0);
break;
@@ -1153,9 +1169,9 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
- if (INTEL_INFO(dev)->gen >= 9) {
- *source_rates = gen9_rates;
- return ARRAY_SIZE(gen9_rates);
+ if (IS_SKYLAKE(dev)) {
+ *source_rates = skl_rates;
+ return ARRAY_SIZE(skl_rates);
} else if (IS_CHERRYVIEW(dev)) {
*source_rates = chv_rates;
return ARRAY_SIZE(chv_rates);
@@ -1252,7 +1268,7 @@ static void snprintf_int_array(char *str, size_t len,
str[0] = '\0';
for (i = 0; i < nelem; i++) {
- int r = snprintf(str, len, "%d,", array[i]);
+ int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
if (r >= len)
return;
str += r;
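The new "%s%d" format drops the trailing separator and inserts ", " between entries. A small standalone sketch with assumed rate values shows the resulting string ("162000, 270000, 540000" instead of the old "162000,270000,540000,"):

    #include <stdio.h>

    int main(void)
    {
            const int rates[] = { 162000, 270000, 540000 };
            char str[64] = "";
            char *p = str;
            size_t left = sizeof(str);
            int i;

            for (i = 0; i < 3; i++) {
                    int r = snprintf(p, left, "%s%d", i ? ", " : "", rates[i]);
                    if (r < 0 || (size_t)r >= left)
                            break;
                    p += r;
                    left -= r;
            }
            printf("%s\n", str);
            return 0;
    }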
@@ -1352,6 +1368,14 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
+
+ if (INTEL_INFO(dev)->gen >= 9) {
+ int ret;
+ ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
+ if (ret)
+ return ret;
+ }
+
if (!HAS_PCH_SPLIT(dev))
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
@@ -1464,6 +1488,8 @@ found:
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config, common_rates[clock]);
+ else if (IS_BROXTON(dev))
+ /* handled in ddi */;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
@@ -1543,7 +1569,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
/* Split out the IBX/CPU vs CPT settings */
- if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (IS_GEN7(dev) && port == PORT_A) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1554,7 +1580,18 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= crtc->pipe << 29;
- } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
+ } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ u32 trans_dp;
+
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ trans_dp |= TRANS_DP_ENH_FRAMING;
+ else
+ trans_dp &= ~TRANS_DP_ENH_FRAMING;
+ I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+ } else {
if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
intel_dp->DP |= intel_dp->color_range;
@@ -1567,14 +1604,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
intel_dp->DP |= DP_ENHANCED_FRAMING;
- if (!IS_CHERRYVIEW(dev)) {
- if (crtc->pipe == 1)
- intel_dp->DP |= DP_PIPEB_SELECT;
- } else {
+ if (IS_CHERRYVIEW(dev))
intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
- }
- } else {
- intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+ else if (crtc->pipe == PIPE_B)
+ intel_dp->DP |= DP_PIPEB_SELECT;
}
}
@@ -2158,41 +2191,25 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
if (!(tmp & DP_PORT_EN))
return false;
- if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
+ if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
- } else if (IS_CHERRYVIEW(dev)) {
- *pipe = DP_PORT_TO_PIPE_CHV(tmp);
- } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
- *pipe = PORT_TO_PIPE(tmp);
- } else {
- u32 trans_sel;
- u32 trans_dp;
- int i;
+ } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ enum pipe p;
- switch (intel_dp->output_reg) {
- case PCH_DP_B:
- trans_sel = TRANS_DP_PORT_SEL_B;
- break;
- case PCH_DP_C:
- trans_sel = TRANS_DP_PORT_SEL_C;
- break;
- case PCH_DP_D:
- trans_sel = TRANS_DP_PORT_SEL_D;
- break;
- default:
- return true;
- }
-
- for_each_pipe(dev_priv, i) {
- trans_dp = I915_READ(TRANS_DP_CTL(i));
- if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
- *pipe = i;
+ for_each_pipe(dev_priv, p) {
+ u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
+ if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
+ *pipe = p;
return true;
}
}
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg);
+ } else if (IS_CHERRYVIEW(dev)) {
+ *pipe = DP_PORT_TO_PIPE_CHV(tmp);
+ } else {
+ *pipe = PORT_TO_PIPE(tmp);
}
return true;
@@ -2213,24 +2230,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
- if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
- if (tmp & DP_SYNC_HS_HIGH)
+ if (HAS_PCH_CPT(dev) && port != PORT_A) {
+ tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (tmp & DP_SYNC_VS_HIGH)
+ if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
} else {
- tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
- if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
+ if (tmp & DP_SYNC_HS_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
flags |= DRM_MODE_FLAG_NHSYNC;
- if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
+ if (tmp & DP_SYNC_VS_HIGH)
flags |= DRM_MODE_FLAG_PVSYNC;
else
flags |= DRM_MODE_FLAG_NVSYNC;
@@ -2337,7 +2354,7 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
intel_dp_link_down(intel_dp);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
@@ -2356,7 +2373,7 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void
@@ -2395,7 +2412,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
I915_WRITE(DP_TP_CTL(port), temp);
- } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ } else if ((IS_GEN7(dev) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev) && port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -2473,6 +2491,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ unsigned int lane_mask = 0x0;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
@@ -2491,7 +2510,8 @@ static void intel_enable_dp(struct intel_encoder *encoder)
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev))
- vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+ lane_mask);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -2650,7 +2670,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
int pipe = intel_crtc->pipe;
u32 val;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
@@ -2663,7 +2683,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
intel_enable_dp(encoder);
}
@@ -2681,7 +2701,7 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
intel_dp_prepare(encoder);
/* Program Tx lane resets to default */
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -2695,7 +2715,7 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void chv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2708,10 +2728,10 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- int data, i;
+ int data, i, stagger;
u32 val;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
@@ -2748,9 +2768,40 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- /* FIXME: Fix up value only after power analysis */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
- mutex_unlock(&dev_priv->dpio_lock);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
+
+ mutex_unlock(&dev_priv->sb_lock);
intel_enable_dp(encoder);
}
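The stagger value depends only on the port clock; restated as an illustrative helper (the function name is made up, the thresholds mirror the hunk above). For example a 540000 kHz HBR2 link falls in the first bucket (0x18) while a 270000 kHz HBR link gets 0xd:

    /* Sketch of the port_clock (kHz) -> lane stagger mapping used above. */
    int chv_dp_lane_stagger(int port_clock)
    {
            if (port_clock > 270000)
                    return 0x18;
            else if (port_clock > 135000)
                    return 0xd;
            else if (port_clock > 67500)
                    return 0x7;
            else if (port_clock > 33750)
                    return 0x4;
            else
                    return 0x2;
    }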
@@ -2768,7 +2819,7 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
intel_dp_prepare(encoder);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -2818,7 +2869,7 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
/*
@@ -2873,8 +2924,10 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
- if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+ if (IS_BROXTON(dev))
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ else if (INTEL_INFO(dev)->gen >= 9) {
+ if (dev_priv->edp_low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev))
@@ -2955,7 +3008,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
}
}
-static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
+static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3041,7 +3094,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
@@ -3050,12 +3103,12 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return 0;
}
-static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
+static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3137,7 +3190,7 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
@@ -3224,7 +3277,7 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return 0;
}
@@ -3262,7 +3315,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
}
static uint32_t
-intel_gen4_signal_levels(uint8_t train_set)
+gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
@@ -3301,7 +3354,7 @@ intel_gen4_signal_levels(uint8_t train_set)
/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
-intel_gen6_edp_signal_levels(uint8_t train_set)
+gen6_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3329,7 +3382,7 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
-intel_gen7_edp_signal_levels(uint8_t train_set)
+gen7_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3360,7 +3413,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
-intel_hsw_signal_levels(uint8_t train_set)
+hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3395,6 +3448,55 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
+static void bxt_signal_levels(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+ enum port port = dport->port;
+ struct drm_device *dev = dport->base.base.dev;
+ struct intel_encoder *encoder = &dport->base;
+ uint8_t train_set = intel_dp->train_set[0];
+ uint32_t level = 0;
+
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 0;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 1;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 2;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
+ level = 3;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 4;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 5;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
+ level = 6;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 7;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
+ level = 8;
+ break;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
+ level = 9;
+ break;
+ }
+
+ bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+}
+
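The switch above is effectively a lookup from the requested (voltage swing, pre-emphasis) pair to a Broxton vswing table entry. A sketch of the same mapping as a table, for reference only; -1 marks combinations the switch warns about and then falls back to entry 0:

    /* bxt_vswing_index[swing][pre-emphasis], levels as in bxt_signal_levels() */
    static const int bxt_vswing_index[4][4] = {
            [0] = { 0,  1,  2,  3 },        /* swing level 0 */
            [1] = { 4,  5,  6, -1 },        /* swing level 1 */
            [2] = { 7,  8, -1, -1 },        /* swing level 2 */
            [3] = { 9, -1, -1, -1 },        /* swing level 3 */
    };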
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -3405,27 +3507,38 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
- signal_levels = intel_hsw_signal_levels(train_set);
+ if (IS_BROXTON(dev)) {
+ signal_levels = 0;
+ bxt_signal_levels(intel_dp);
+ mask = 0;
+ } else if (HAS_DDI(dev)) {
+ signal_levels = hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
- signal_levels = intel_chv_signal_levels(intel_dp);
+ signal_levels = chv_signal_levels(intel_dp);
mask = 0;
} else if (IS_VALLEYVIEW(dev)) {
- signal_levels = intel_vlv_signal_levels(intel_dp);
+ signal_levels = vlv_signal_levels(intel_dp);
mask = 0;
} else if (IS_GEN7(dev) && port == PORT_A) {
- signal_levels = intel_gen7_edp_signal_levels(train_set);
+ signal_levels = gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && port == PORT_A) {
- signal_levels = intel_gen6_edp_signal_levels(train_set);
+ signal_levels = gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
- signal_levels = intel_gen4_signal_levels(train_set);
+ signal_levels = gen4_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
- DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+ if (mask)
+ DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
+
+ DRM_DEBUG_KMS("Using vswing level %d\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
+ DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT);
*DP = (*DP & ~mask) | signal_levels;
}
@@ -3467,7 +3580,8 @@ static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ if (!intel_dp->train_set_valid)
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
@@ -3580,6 +3694,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
}
+ /*
+ * if we used previously trained voltage and pre-emphasis values
+ * and we don't get clock recovery, reset link training values
+ */
+ if (intel_dp->train_set_valid) {
+		DRM_DEBUG_KMS("clock recovery not ok, reset\n");
+ /* clear the flag as we are not reusing train set */
+ intel_dp->train_set_valid = false;
+ if (!intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to enable link training\n");
+ return;
+ }
+ continue;
+ }
+
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -3657,6 +3788,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3672,6 +3804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
+ intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3693,9 +3826,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
- if (channel_eq)
+ if (channel_eq) {
+ intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-
+ }
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
@@ -3708,6 +3842,7 @@ static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum port port = intel_dig_port->port;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3721,36 +3856,41 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("\n");
- if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
+ if ((IS_GEN7(dev) && port == PORT_A) ||
+ (HAS_PCH_CPT(dev) && port != PORT_A)) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
+ DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
if (IS_CHERRYVIEW(dev))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
- I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
+ DP |= DP_LINK_TRAIN_PAT_IDLE;
}
+ I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
- if (HAS_PCH_IBX(dev) &&
- I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
- /* Hardware workaround: leaving our transcoder select
- * set to transcoder B while it's off will prevent the
- * corresponding HDMI output on transcoder A.
- *
- * Combine this with another hardware workaround:
- * transcoder select bit can only be cleared while the
- * port is enabled.
- */
- DP &= ~DP_PIPEB_SELECT;
+ DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching HDMI port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
+ /* always enable with pattern 1 (as per spec) */
+ DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
+ DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
+ I915_WRITE(intel_dp->output_reg, DP);
+ POSTING_READ(intel_dp->output_reg);
+
+ DP &= ~DP_PORT_EN;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
}
- DP &= ~DP_AUDIO_OUTPUT_ENABLE;
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
}
@@ -3781,6 +3921,21 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
}
+
+ if (INTEL_INFO(dev)->gen >= 9 &&
+ (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
+ uint8_t frame_sync_cap;
+
+ dev_priv->psr.sink_support = true;
+ intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap, 1);
+ dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
+ /* PSR2 needs frame sync as well */
+ dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
+		DRM_DEBUG_KMS("PSR2 %s on sink\n",
+ dev_priv->psr.psr2_support ? "supported" : "not supported");
+ }
}
/* Training Pattern 3 support, both source and sink */
@@ -3885,46 +4040,70 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
u8 buf;
int test_crc_count;
int attempts = 6;
+ int ret = 0;
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
+ hsw_disable_ips(intel_crtc);
- if (!(buf & DP_TEST_CRC_SUPPORTED))
- return -ENOTTY;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!(buf & DP_TEST_CRC_SUPPORTED)) {
+ ret = -ENOTTY;
+ goto out;
+ }
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
- return -EIO;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+ ret = -EIO;
+ goto out;
+ }
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf | DP_TEST_SINK_START) < 0)
- return -EIO;
+ buf | DP_TEST_SINK_START) < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
+ ret = -EIO;
+ goto out;
+ }
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
test_crc_count = buf & DP_TEST_COUNT_MASK;
do {
if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_TEST_SINK_MISC, &buf) < 0)
- return -EIO;
+ DP_TEST_SINK_MISC, &buf) < 0) {
+ ret = -EIO;
+ goto out;
+ }
intel_wait_for_vblank(dev, intel_crtc->pipe);
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
if (attempts == 0) {
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
- return -EIO;
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
+ ret = -EIO;
+ goto out;
+ }
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
- return -EIO;
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
+ ret = -EIO;
+ goto out;
+ }
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
- buf & ~DP_TEST_SINK_START) < 0)
- return -EIO;
-
- return 0;
+ buf & ~DP_TEST_SINK_START) < 0) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ hsw_enable_ips(intel_crtc);
+ return ret;
}
static bool
@@ -3949,11 +4128,114 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
return true;
}
-static void
-intel_dp_handle_test_request(struct intel_dp *intel_dp)
+static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+{
+ uint8_t test_result = DP_TEST_ACK;
+ return test_result;
+}
+
+static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+{
+ uint8_t test_result = DP_TEST_NAK;
+ return test_result;
+}
+
+static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
+{
+ uint8_t test_result = DP_TEST_NAK;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+
+ if (intel_connector->detect_edid == NULL ||
+ connector->edid_corrupt ||
+ intel_dp->aux.i2c_defer_count > 6) {
+ /* Check EDID read for NACKs, DEFERs and corruption
+ * (DP CTS 1.2 Core r1.1)
+ * 4.2.2.4 : Failed EDID read, I2C_NAK
+ * 4.2.2.5 : Failed EDID read, I2C_DEFER
+ * 4.2.2.6 : EDID corruption detected
+ * Use failsafe mode for all cases
+ */
+ if (intel_dp->aux.i2c_nack_count > 0 ||
+ intel_dp->aux.i2c_defer_count > 0)
+ DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
+ intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+ } else {
+ if (!drm_dp_dpcd_write(&intel_dp->aux,
+ DP_TEST_EDID_CHECKSUM,
+ &intel_connector->detect_edid->checksum,
+ 1))
+ DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+
+ test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
+ intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance_test_active = 1;
+
+ return test_result;
+}
+
+static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+{
+ uint8_t test_result = DP_TEST_NAK;
+ return test_result;
+}
+
+static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
- /* NAK by default */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
+ uint8_t response = DP_TEST_NAK;
+ uint8_t rxdata = 0;
+ int status = 0;
+
+ intel_dp->compliance_test_active = 0;
+ intel_dp->compliance_test_type = 0;
+ intel_dp->compliance_test_data = 0;
+
+ intel_dp->aux.i2c_nack_count = 0;
+ intel_dp->aux.i2c_defer_count = 0;
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Could not read test request from sink\n");
+ goto update_status;
+ }
+
+ switch (rxdata) {
+ case DP_TEST_LINK_TRAINING:
+ DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
+ intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+ response = intel_dp_autotest_link_training(intel_dp);
+ break;
+ case DP_TEST_LINK_VIDEO_PATTERN:
+ DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
+ intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+ response = intel_dp_autotest_video_pattern(intel_dp);
+ break;
+ case DP_TEST_LINK_EDID_READ:
+ DRM_DEBUG_KMS("EDID test requested\n");
+ intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+ response = intel_dp_autotest_edid(intel_dp);
+ break;
+ case DP_TEST_LINK_PHY_TEST_PATTERN:
+ DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+ intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+ response = intel_dp_autotest_phy_pattern(intel_dp);
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
+ break;
+ }
+
+update_status:
+ status = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_TEST_RESPONSE,
+ &response, 1);
+ if (status <= 0)
+ DRM_DEBUG_KMS("Could not write test response to sink\n");
}
static int
@@ -4059,7 +4341,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
+ DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
@@ -4289,6 +4571,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
bool ret;
+ u8 sink_irq_vector;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -4331,6 +4614,20 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected;
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
out:
intel_dp_power_put(intel_dp, power_domain);
return status;
@@ -4661,6 +4958,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
+ /* indicate that we need to restart link training */
+ intel_dp->train_set_valid = false;
if (HAS_PCH_SPLIT(dev)) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
@@ -5539,12 +5838,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
/* init MST on ports that can support it */
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
- if (port == PORT_B || port == PORT_C || port == PORT_D) {
- intel_dp_mst_encoder_init(intel_dig_port,
- intel_connector->base.base.id);
- }
- }
+ if (HAS_DP_MST(dev) &&
+ (port == PORT_B || port == PORT_C || port == PORT_D))
+ intel_dp_mst_encoder_init(intel_dig_port,
+ intel_connector->base.base.id);
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
@@ -5574,6 +5871,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+ i915_debugfs_connector_add(connector);
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5cb47482d29f..6e4cc5334f47 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -40,7 +40,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int bpp, i;
int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL;
+ struct drm_connector *drm_connector;
+ struct intel_connector *connector, *found = NULL;
+ struct drm_connector_state *connector_state;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -70,12 +72,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
+ for_each_connector_in_state(state, drm_connector, connector_state, i) {
+ connector = to_intel_connector(drm_connector);
- if (state->connector_states[i]->best_encoder == &encoder->base) {
- found = to_intel_connector(state->connectors[i]);
+ if (connector_state->best_encoder == &encoder->base) {
+ found = connector;
break;
}
}
@@ -150,14 +151,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
- struct intel_connector *found = NULL, *intel_connector;
+ struct intel_connector *found = NULL, *connector;
int slots;
struct drm_crtc *crtc = encoder->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- for_each_intel_connector(dev, intel_connector) {
- if (intel_connector->new_encoder == encoder) {
- found = intel_connector;
+ for_each_intel_connector(dev, connector) {
+ if (connector->base.state->best_encoder == &encoder->base) {
+ found = connector;
break;
}
}
@@ -173,8 +174,10 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder);
- I915_WRITE(PORT_CLK_SEL(port),
- intel_crtc->config->ddi_pll_sel);
+ /* FIXME: add support for SKL */
+ if (INTEL_INFO(dev)->gen < 9)
+ I915_WRITE(PORT_CLK_SEL(port),
+ intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 897f17db08af..2afb31a46275 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -249,10 +249,24 @@ struct intel_plane_state {
bool visible;
/*
- * used only for sprite planes to determine when to implicitly
- * enable/disable the primary plane
+ * scaler_id
+ * = -1 : not using a scaler
+	 *   >= 0 : using a scaler
+ *
+ * plane requiring a scaler:
+ * - During check_plane, its bit is set in
+ * crtc_state->scaler_state.scaler_users by calling helper function
+ * update_scaler_users.
+ * - scaler_id indicates the scaler it got assigned.
+ *
+ * plane doesn't require a scaler:
+	 *   - this can happen when scaling is no longer required or the
+	 *     plane is simply disabled.
+ * - During check_plane, corresponding bit is reset in
+ * crtc_state->scaler_state.scaler_users by calling helper function
+ * update_scaler_users.
*/
- bool hides_primary;
+ int scaler_id;
};
struct intel_initial_plane_config {
@@ -262,6 +276,49 @@ struct intel_initial_plane_config {
u32 base;
};
+#define SKL_MIN_SRC_W 8
+#define SKL_MAX_SRC_W 4096
+#define SKL_MIN_SRC_H 8
+#define SKL_MAX_SRC_H 4096
+#define SKL_MIN_DST_W 8
+#define SKL_MAX_DST_W 4096
+#define SKL_MIN_DST_H 8
+#define SKL_MAX_DST_H 4096
+
+struct intel_scaler {
+ int id;
+ int in_use;
+ uint32_t mode;
+};
+
+struct intel_crtc_scaler_state {
+#define SKL_NUM_SCALERS 2
+ struct intel_scaler scalers[SKL_NUM_SCALERS];
+
+ /*
+ * scaler_users: keeps track of users requesting scalers on this crtc.
+ *
+ * If a bit is set, a user is using a scaler.
+ * Here user can be a plane or crtc as defined below:
+ * bits 0-30 - plane (bit position is index from drm_plane_index)
+ * bit 31 - crtc
+ *
+	 * Instead of creating a new index to cover planes and crtc, we use
+	 * the existing drm_plane_index for planes (well below 31) and
+	 * bit 31 for the crtc. This should be enough to cover all our
+	 * platforms.
+ *
+	 * intel_atomic_setup_scalers will assign the available scalers to the
+	 * users requesting them. It will gracefully fail if the request
+	 * exceeds availability.
+ */
+#define SKL_CRTC_INDEX 31
+ unsigned scaler_users;
+
+ /* scaler used by crtc for panel fitting purpose */
+ int scaler_id;
+};
+
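A minimal sketch of the bookkeeping the scaler_users comment above describes; the helper name is made up for illustration, only SKL_CRTC_INDEX and the drm_plane_index convention come from the patch:

    #include <stdbool.h>

    /* Illustrative only: claim or release one scaler-user bit in
     * crtc_state->scaler_state.scaler_users. Planes pass
     * drm_plane_index(plane), the crtc passes SKL_CRTC_INDEX (31). */
    static inline void mark_scaler_user(unsigned int *scaler_users,
                                        int user_index, bool needs_scaler)
    {
            if (needs_scaler)
                    *scaler_users |= 1u << user_index;
            else
                    *scaler_users &= ~(1u << user_index);
    }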
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -388,6 +445,8 @@ struct intel_crtc_state {
bool dp_encoder_is_mst;
int pbn;
+
+ struct intel_crtc_scaler_state scaler_state;
};
struct intel_pipe_wm {
@@ -400,8 +459,10 @@ struct intel_pipe_wm {
};
struct intel_mmio_flip {
- struct drm_i915_gem_request *req;
struct work_struct work;
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_request *req;
+ struct intel_crtc *crtc;
};
struct skl_pipe_wm {
@@ -448,7 +509,6 @@ struct intel_crtc {
*/
bool active;
unsigned long enabled_power_domains;
- bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -468,7 +528,6 @@ struct intel_crtc {
struct intel_initial_plane_config plane_config;
struct intel_crtc_state *config;
- struct intel_crtc_state *new_config;
bool new_enabled;
/* reset counter value when the last flip was submitted */
@@ -487,15 +546,25 @@ struct intel_crtc {
} wm;
int scanline_offset;
- struct intel_mmio_flip mmio_flip;
struct intel_crtc_atomic_commit atomic;
+
+ /* scalers available on this crtc */
+ int num_scalers;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
uint32_t vert_pixels;
+ /*
+ * For packed pixel formats:
+ * bytes_per_pixel - holds bytes per pixel
+ * For planar pixel formats:
+ * bytes_per_pixel - holds bytes per pixel for uv-plane
+ * y_bytes_per_pixel - holds bytes per pixel for y-plane
+ */
uint8_t bytes_per_pixel;
+ uint8_t y_bytes_per_pixel;
bool enabled;
bool scaled;
u64 tiling;
@@ -533,7 +602,7 @@ struct intel_plane {
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc);
+ struct drm_crtc *crtc, bool force);
int (*check_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
@@ -669,6 +738,12 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
+ bool train_set_valid;
+
+ /* Displayport compliance testing */
+ unsigned long compliance_test_type;
+ unsigned long compliance_test_data;
+ bool compliance_test_active;
};
struct intel_digital_port {
@@ -747,15 +822,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_set_config {
- struct drm_encoder **save_connector_encoders;
- struct drm_crtc **save_encoder_crtcs;
- bool *save_crtc_enabled;
-
- bool fb_changed;
- bool mode_changed;
-};
-
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
@@ -852,7 +918,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
@@ -867,11 +932,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
+ enum port port, int type);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
@@ -925,6 +994,7 @@ void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
+void intel_crtc_reset(struct intel_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
@@ -952,7 +1022,8 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport);
+ struct intel_digital_port *dport,
+ unsigned int expected_mask);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
@@ -997,6 +1068,9 @@ intel_rotation_90_or_270(unsigned int rotation)
return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
}
+void intel_create_rotation_property(struct drm_device *dev,
+ struct intel_plane *plane);
+
bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -1037,6 +1111,15 @@ void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
+void broxton_init_cdclk(struct drm_device *dev);
+void broxton_uninit_cdclk(struct drm_device *dev);
+void broxton_set_cdclk(struct drm_device *dev, int frequency);
+void broxton_ddi_phy_init(struct drm_device *dev);
+void broxton_ddi_phy_uninit(struct drm_device *dev);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void skl_init_cdclk(struct drm_i915_private *dev_priv);
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
@@ -1044,6 +1127,8 @@ int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
int dotclock);
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+ intel_clock_t *best_clock);
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
@@ -1053,9 +1138,26 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
+void skl_detach_scalers(struct intel_crtc *intel_crtc);
+int skl_update_scaler_users(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
+ struct intel_plane_state *plane_state, int force_detach);
+int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj);
+u32 skl_plane_ctl_format(uint32_t pixel_format);
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
+u32 skl_plane_ctl_rotation(unsigned int rotation);
+
+/* intel_csr.c */
+void intel_csr_ucode_init(struct drm_device *dev);
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+ enum csr_state state);
+void intel_csr_load_program(struct drm_device *dev);
+void intel_csr_ucode_fini(struct drm_device *dev);
+void assert_csr_loaded(struct drm_i915_private *dev_priv);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -1215,6 +1317,7 @@ void intel_psr_invalidate(struct drm_device *dev,
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_init(struct drm_device *dev);
+void intel_psr_single_frame_update(struct drm_device *dev);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1263,7 +1366,11 @@ void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+ struct intel_rps_client *rps,
+ unsigned long submitted);
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+ struct drm_i915_gem_request *req);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -1276,16 +1383,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
/* intel_sprite.c */
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
- enum plane plane);
int intel_plane_restore(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
-void intel_post_enable_primary(struct drm_crtc *crtc);
-void intel_pre_disable_primary(struct drm_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
@@ -1310,10 +1413,13 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
if (IS_ERR(crtc_state))
- return ERR_PTR(PTR_ERR(crtc_state));
+ return ERR_CAST(crtc_state);
return to_intel_crtc_state(crtc_state);
}
+int intel_atomic_setup_scalers(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 51966426addf..b5a5558ecd63 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -239,7 +239,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
static void band_gap_reset(struct drm_i915_private *dev_priv)
{
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_flisdsi_write(dev_priv, 0x08, 0x0001);
vlv_flisdsi_write(dev_priv, 0x0F, 0x0005);
@@ -248,7 +248,7 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
vlv_flisdsi_write(dev_priv, 0x0F, 0x0000);
vlv_flisdsi_write(dev_priv, 0x08, 0x0000);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
@@ -346,11 +346,11 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
* needed everytime after power gate */
vlv_flisdsi_write(dev_priv, 0x04, 0x0004);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
/* bandgap reset is needed after everytime we do power gate */
band_gap_reset(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index d2cd8d5b27a1..a5e99ac305da 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -212,7 +212,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
if (!gtable[gpio].init) {
/* program the function */
/* FIXME: remove constant below */
@@ -224,7 +224,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
vlv_gpio_nc_write(dev_priv, pad, val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return data;
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 3622d0bafdf8..d20cf37b6901 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -162,59 +162,41 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
#endif
-static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+static int dsi_calc_mnp(int target_dsi_clk, struct dsi_mnp *dsi_mnp)
{
- u32 m, n, p;
- u32 ref_clk;
- u32 error;
- u32 tmp_error;
- int target_dsi_clk;
- int calc_dsi_clk;
- u32 calc_m;
- u32 calc_p;
+ unsigned int calc_m = 0, calc_p = 0;
+ unsigned int m, n = 1, p;
+ int ref_clk = 25000;
+ int delta = target_dsi_clk;
u32 m_seed;
- /* dsi_clk is expected in KHZ */
- if (dsi_clk < 300000 || dsi_clk > 1150000) {
+ /* target_dsi_clk is expected in kHz */
+ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
DRM_ERROR("DSI CLK Out of Range\n");
return -ECHRNG;
}
- ref_clk = 25000;
- target_dsi_clk = dsi_clk;
- error = 0xFFFFFFFF;
- tmp_error = 0xFFFFFFFF;
- calc_m = 0;
- calc_p = 0;
-
- for (m = 62; m <= 92; m++) {
- for (p = 2; p <= 6; p++) {
- /* Find the optimal m and p divisors
- with minimal error +/- the required clock */
- calc_dsi_clk = (m * ref_clk) / p;
- if (calc_dsi_clk == target_dsi_clk) {
- calc_m = m;
- calc_p = p;
- error = 0;
- break;
- } else
- tmp_error = abs(target_dsi_clk - calc_dsi_clk);
-
- if (tmp_error < error) {
- error = tmp_error;
+ for (m = 62; m <= 92 && delta; m++) {
+ for (p = 2; p <= 6 && delta; p++) {
+ /*
+ * Find the optimal m and p divisors with minimal delta
+ * +/- the required clock
+ */
+ int calc_dsi_clk = (m * ref_clk) / (p * n);
+ int d = abs(target_dsi_clk - calc_dsi_clk);
+ if (d < delta) {
+ delta = d;
calc_m = m;
calc_p = p;
}
}
-
- if (error == 0)
- break;
}
+ /* register has log2(N1), this works fine for powers of two */
+ n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62];
- n = 1;
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
- dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+ dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0;
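A quick worked example of the search above (values chosen for illustration): with the 25000 kHz reference and a target of 500000 kHz, the loop settles on m = 80, p = 4, and the N1 field is written in its log2 encoding:

    /*
     * target_dsi_clk = 500000 kHz, ref_clk = 25000 kHz, n = 1:
     *   (80 * 25000) / (4 * 1) = 500000 kHz  ->  delta = 0
     *   N1 register field = ffs(1) - 1 = 0   ->  log2(1)
     */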
@@ -262,7 +244,7 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_configure_dsi_pll(encoder);
@@ -276,11 +258,11 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
DRM_ERROR("DSI PLL lock failed\n");
return;
}
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
DRM_DEBUG_KMS("DSI PLL locked\n");
}
@@ -292,14 +274,14 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp &= ~DSI_PLL_VCO_EN;
tmp |= DSI_PLL_LDO_GATE;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
@@ -331,21 +313,25 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
- u32 m = 0, p = 0;
+ u32 m = 0, p = 0, n;
int refclk = 25000;
int i;
DRM_DEBUG_KMS("\n");
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+ /* N1 divisor */
+ n = (pll_div & DSI_PLL_N1_DIV_MASK) >> DSI_PLL_N1_DIV_SHIFT;
+ n = 1 << n; /* register has log2(N1) */
+
/* mask out the other bits and extract the M1 divisor */
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
@@ -373,7 +359,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
m = i + 62;
- dsi_clock = (m * refclk) / p;
+ dsi_clock = (m * refclk) / (p * n);
/* pixel_format and pipe_bpp should agree */
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 770040ff486e..ece5bd754f85 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -80,7 +80,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.name = "ch7017",
.dvo_reg = DVOC,
.slave_addr = 0x75,
- .gpio = GMBUS_PORT_DPB,
+ .gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
},
{
@@ -364,7 +364,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* that's not the case.
*/
intel_ddc_get_modes(connector,
- intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
+ intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
@@ -495,17 +495,19 @@ void intel_dvo_init(struct drm_device *dev)
struct i2c_adapter *i2c;
int gpio;
bool dvoinit;
+ enum pipe pipe;
+ uint32_t dpll[I915_MAX_PIPES];
/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
* in the spec.
*/
- if (intel_gmbus_is_port_valid(dvo->gpio))
+ if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GMBUS_PORT_SSC;
+ gpio = GMBUS_PIN_SSC;
else
- gpio = GMBUS_PORT_DPB;
+ gpio = GMBUS_PIN_DPB;
/* Set up the I2C bus necessary for the chip we're probing.
* It appears that everything is on GPIOE except for panels
@@ -520,8 +522,23 @@ void intel_dvo_init(struct drm_device *dev)
*/
intel_gmbus_force_bit(i2c, true);
+ /* ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we have
+ * have the clock enabled before we attempt to
+ * initialize the device.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ dpll[pipe] = I915_READ(DPLL(pipe));
+ I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+ }
+
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+ /* restore the DVO 2x clock state to original */
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(DPLL(pipe), dpll[pipe]);
+ }
+
intel_gmbus_force_bit(i2c, false);
if (!dvoinit)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 4165ce0644f7..6abb83432d4d 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -457,7 +457,7 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
+ to_intel_plane_state(tmp_crtc->primary->state)->visible) {
if (one_pipe_only && crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4e7e7da2e03b..6372cfc7d053 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -96,6 +96,32 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
return ret;
}
+static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+
+ int ret;
+ ret = drm_fb_helper_pan_display(var, info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
@@ -103,7 +129,7 @@ static struct fb_ops intelfb_ops = {
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_pan_display = intel_fbdev_pan_display,
.fb_blank = intel_fbdev_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index a20cffb78c0f..57095f54c1f2 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -243,6 +243,8 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_psr_single_frame_update(dev);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bfbe07b6ddce..e97731aab6dc 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -223,10 +223,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- return val & VIDEO_DIP_ENABLE;
+ if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+ return val & VIDEO_DIP_ENABLE;
+
+ return false;
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -324,10 +328,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- return val & VIDEO_DIP_ENABLE;
+ if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
+ return val & VIDEO_DIP_ENABLE;
+
+ return false;
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -865,59 +873,59 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
- u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
-
- if (crtc->config->has_audio)
- intel_audio_codec_disable(encoder);
temp = I915_READ(intel_hdmi->hdmi_reg);
- /* HW workaround for IBX, we need to move the port to transcoder A
- * before disabling it. */
- if (HAS_PCH_IBX(dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(dev, pipe);
- else
- msleep(50);
- }
- }
-
- /* HW workaround, need to toggle enable bit off and on for 12bpc, but
- * we do this anyway which shows more stable in testing.
- */
- if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->hdmi_reg);
- }
-
- temp &= ~enable_bits;
-
+ temp &= ~(SDVO_ENABLE | SDVO_AUDIO_ENABLE);
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
- /* HW workaround, need to write this twice for issue that may result
- * in first write getting masked.
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching DP port to be enabled on transcoder A.
*/
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ temp |= SDVO_ENABLE;
+ /*
+ * HW workaround, need to write this twice for an issue
+ * that may result in the first write getting masked.
+ */
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+ I915_WRITE(intel_hdmi->hdmi_reg, temp);
+ POSTING_READ(intel_hdmi->hdmi_reg);
+
+ temp &= ~SDVO_ENABLE;
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
}
+static void g4x_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->config->has_audio)
+ intel_audio_codec_disable(encoder);
+
+ intel_disable_hdmi(encoder);
+}
+
+static void pch_disable_hdmi(struct intel_encoder *encoder)
+{
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->config->has_audio)
+ intel_audio_codec_disable(encoder);
+}
+
+static void pch_post_disable_hdmi(struct intel_encoder *encoder)
+{
+ intel_disable_hdmi(encoder);
+}
+
static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
@@ -956,6 +964,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
int i;
@@ -965,11 +974,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
state = crtc_state->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -1031,7 +1036,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
clock_12bpc <= portclock_limit &&
- hdmi_12bpc_possible(pipe_config)) {
+ hdmi_12bpc_possible(pipe_config) &&
+ 0 /* FIXME 12bpc support totally broken */) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1288,7 +1294,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
u32 val;
/* Enable clock channels for this port */
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
@@ -1311,7 +1317,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
/* Program lane clock */
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -1319,7 +1325,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, dport);
+ vlv_wait_port_ready(dev_priv, dport, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1335,7 +1341,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
intel_hdmi_prepare(encoder);
/* Program Tx lane resets to default */
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1352,7 +1358,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1368,7 +1374,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
intel_hdmi_prepare(encoder);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* program left/right clock distribution */
if (pipe != PIPE_B) {
@@ -1418,7 +1424,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
val |= CHV_CMN_USEDCLKCHANNEL;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1431,10 +1437,10 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
int pipe = intel_crtc->pipe;
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1448,7 +1454,7 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
enum pipe pipe = intel_crtc->pipe;
u32 val;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* Propagate soft reset to data lane reset */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
@@ -1467,7 +1473,7 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
@@ -1482,10 +1488,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
&intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- int data, i;
+ int data, i, stagger;
u32 val;
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
@@ -1522,7 +1528,38 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- /* FIXME: Fix up value only after power analysis */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
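For reference, the thresholds above map a few common port clocks to stagger values as follows (illustrative, not exhaustive):

    /*
     * port_clock (kHz) -> stagger:
     *   297000 (> 270000) -> 0x18
     *   148500 (> 135000) -> 0xd
     *    74250 (>  67500) -> 0x7
     *    27000 (<= 33750) -> 0x2
     */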
@@ -1597,7 +1634,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config->has_hdmi_sink,
@@ -1605,7 +1642,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, dport);
+ vlv_wait_port_ready(dev_priv, dport, 0x0);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -1676,18 +1713,26 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
- intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
+ if (IS_BROXTON(dev_priv))
+ intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
+ else
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
- intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
+ if (IS_BROXTON(dev_priv))
+ intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
+ else
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
intel_encoder->hpd_pin = HPD_PORT_C;
break;
case PORT_D:
- if (IS_CHERRYVIEW(dev))
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD_CHV;
+ if (WARN_ON(IS_BROXTON(dev_priv)))
+ intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
+ else if (IS_CHERRYVIEW(dev_priv))
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
else
- intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
+ intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
intel_encoder->hpd_pin = HPD_PORT_D;
break;
case PORT_A:
@@ -1762,7 +1807,12 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
DRM_MODE_ENCODER_TMDS);
intel_encoder->compute_config = intel_hdmi_compute_config;
- intel_encoder->disable = intel_disable_hdmi;
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_encoder->disable = pch_disable_hdmi;
+ intel_encoder->post_disable = pch_post_disable_hdmi;
+ } else {
+ intel_encoder->disable = g4x_disable_hdmi;
+ }
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index ae628001fd97..a64f26c670af 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,20 +34,71 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-struct gmbus_port {
+struct gmbus_pin {
const char *name;
int reg;
};
-static const struct gmbus_port gmbus_ports[] = {
- { "ssc", GPIOB },
- { "vga", GPIOA },
- { "panel", GPIOC },
- { "dpc", GPIOD },
- { "dpb", GPIOE },
- { "dpd", GPIOF },
+/* Map gmbus pin pairs to names and registers. */
+static const struct gmbus_pin gmbus_pins[] = {
+ [GMBUS_PIN_SSC] = { "ssc", GPIOB },
+ [GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+ [GMBUS_PIN_PANEL] = { "panel", GPIOC },
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
};
+static const struct gmbus_pin gmbus_pins_bdw[] = {
+ [GMBUS_PIN_VGADDC] = { "vga", GPIOA },
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_skl[] = {
+ [GMBUS_PIN_DPC] = { "dpc", GPIOD },
+ [GMBUS_PIN_DPB] = { "dpb", GPIOE },
+ [GMBUS_PIN_DPD] = { "dpd", GPIOF },
+};
+
+static const struct gmbus_pin gmbus_pins_bxt[] = {
+ [GMBUS_PIN_1_BXT] = { "dpb", PCH_GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpc", PCH_GPIOC },
+ [GMBUS_PIN_3_BXT] = { "misc", PCH_GPIOD },
+};
+
+/* pin is expected to be valid */
+static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin)
+{
+ if (IS_BROXTON(dev_priv))
+ return &gmbus_pins_bxt[pin];
+ else if (IS_SKYLAKE(dev_priv))
+ return &gmbus_pins_skl[pin];
+ else if (IS_BROADWELL(dev_priv))
+ return &gmbus_pins_bdw[pin];
+ else
+ return &gmbus_pins[pin];
+}
+
+bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
+ unsigned int pin)
+{
+ unsigned int size;
+
+ if (IS_BROXTON(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_bxt);
+ else if (IS_SKYLAKE(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_skl);
+ else if (IS_BROADWELL(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_bdw);
+ else
+ size = ARRAY_SIZE(gmbus_pins);
+
+ return pin < size && get_gmbus_pin(dev_priv, pin)->reg;
+}
+
/* Intel GPIO access functions */
#define I2C_RISEFALL_TIME 10
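A minimal usage sketch of the new pin-based lookup (hypothetical caller; only the helpers added above are assumed):

    static void example_walk_gmbus_pins(struct drm_i915_private *dev_priv)
    {
            unsigned int pin;

            for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
                    struct i2c_adapter *adapter;

                    if (!intel_gmbus_is_valid_pin(dev_priv, pin))
                            continue;

                    adapter = intel_gmbus_get_adapter(dev_priv, pin);
                    DRM_DEBUG_KMS("gmbus pin %u: %s\n", pin, adapter->name);
            }
    }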
@@ -182,15 +233,15 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
}
static void
-intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct i2c_algo_bit_data *algo;
algo = &bus->bit_algo;
- /* -1 to map pin pair to gmbus index */
- bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
+ bus->gpio_reg = dev_priv->gpio_mmio_base +
+ get_gmbus_pin(dev_priv, pin)->reg;
bus->adapter.algo_data = algo;
algo->setsda = set_data;
@@ -577,7 +628,9 @@ static const struct i2c_algorithm gmbus_algorithm = {
int intel_setup_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
+ struct intel_gmbus *bus;
+ unsigned int pin;
+ int ret;
if (HAS_PCH_NOP(dev))
return 0;
@@ -591,16 +644,18 @@ int intel_setup_gmbus(struct drm_device *dev)
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
- for (i = 0; i < GMBUS_NUM_PORTS; i++) {
- struct intel_gmbus *bus = &dev_priv->gmbus[i];
- u32 port = i + 1; /* +1 to map gmbus index to pin pair */
+ for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ continue;
+
+ bus = &dev_priv->gmbus[pin];
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
- gmbus_ports[i].name);
+ get_gmbus_pin(dev_priv, pin)->name);
bus->adapter.dev.parent = &dev->pdev->dev;
bus->dev_priv = dev_priv;
@@ -608,13 +663,13 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->adapter.algo = &gmbus_algorithm;
/* By default use a conservative clock rate */
- bus->reg0 = port | GMBUS_RATE_100KHZ;
+ bus->reg0 = pin | GMBUS_RATE_100KHZ;
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = 1;
- intel_gpio_setup(bus, port);
+ intel_gpio_setup(bus, pin);
ret = i2c_add_adapter(&bus->adapter);
if (ret)
@@ -626,20 +681,23 @@ int intel_setup_gmbus(struct drm_device *dev)
return 0;
err:
- while (--i) {
- struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ while (--pin) {
+ if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ continue;
+
+ bus = &dev_priv->gmbus[pin];
i2c_del_adapter(&bus->adapter);
}
return ret;
}
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
- unsigned port)
+ unsigned int pin)
{
- WARN_ON(!intel_gmbus_is_port_valid(port));
- /* -1 to map pin pair to gmbus index */
- return (intel_gmbus_is_port_valid(port)) ?
- &dev_priv->gmbus[port - 1].adapter : NULL;
+ if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+ return NULL;
+
+ return &dev_priv->gmbus[pin].adapter;
}
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
@@ -662,10 +720,14 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void intel_teardown_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ struct intel_gmbus *bus;
+ unsigned int pin;
+
+ for (pin = 0; pin < ARRAY_SIZE(dev_priv->gmbus); pin++) {
+ if (!intel_gmbus_is_valid_pin(dev_priv, pin))
+ continue;
- for (i = 0; i < GMBUS_NUM_PORTS; i++) {
- struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ bus = &dev_priv->gmbus[pin];
i2c_del_adapter(&bus->adapter);
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 424e62197787..9b74ffae5f5a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -188,6 +188,15 @@
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
+
+#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) { \
+ const u64 _addr = test_bit(n, ppgtt->pdp.used_pdpes) ? \
+ ppgtt->pdp.page_directory[n]->daddr : \
+ ppgtt->scratch_pd->daddr; \
+ reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
+ reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
+}
+
enum {
ADVANCED_CONTEXT = 0,
LEGACY_CONTEXT,
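For clarity, ASSIGN_CTX_PDP(ppgtt, reg_state, 3) expands to roughly the following (hand-expanded here for illustration):

    {
            const u64 _addr = test_bit(3, ppgtt->pdp.used_pdpes) ?
                              ppgtt->pdp.page_directory[3]->daddr :
                              ppgtt->scratch_pd->daddr;
            reg_state[CTX_PDP3_UDW + 1] = upper_32_bits(_addr);
            reg_state[CTX_PDP3_LDW + 1] = lower_32_bits(_addr);
    }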
@@ -265,7 +274,8 @@ static uint64_t execlists_ctx_descriptor(struct intel_engine_cs *ring,
desc = GEN8_CTX_VALID;
desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
- desc |= GEN8_CTX_L3LLC_COHERENT;
+ if (IS_GEN8(ctx_obj->base.dev))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
@@ -305,21 +315,24 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- I915_WRITE(RING_ELSP(ring), desc[1]);
- I915_WRITE(RING_ELSP(ring), desc[0]);
- I915_WRITE(RING_ELSP(ring), desc[3]);
+ spin_lock(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+ I915_WRITE_FW(RING_ELSP(ring), desc[1]);
+ I915_WRITE_FW(RING_ELSP(ring), desc[0]);
+ I915_WRITE_FW(RING_ELSP(ring), desc[3]);
/* The context is automatically loaded after the following */
- I915_WRITE(RING_ELSP(ring), desc[2]);
+ I915_WRITE_FW(RING_ELSP(ring), desc[2]);
/* ELSP is a wo register, so use another nearby reg for posting instead */
- POSTING_READ(RING_EXECLIST_STATUS(ring));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ POSTING_READ_FW(RING_EXECLIST_STATUS(ring));
+ intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ spin_unlock(&dev_priv->uncore.lock);
}
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
struct drm_i915_gem_object *ring_obj,
+ struct i915_hw_ppgtt *ppgtt,
u32 tail)
{
struct page *page;
@@ -331,6 +344,16 @@ static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
reg_state[CTX_RING_TAIL+1] = tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ /* True PPGTT with dynamic page allocation: update PDP registers and
+ * point the unallocated PDPs to the scratch page
+ */
+ if (ppgtt) {
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+ }
+
kunmap_atomic(reg_state);
return 0;
@@ -349,7 +372,7 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
- execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
+ execlists_update_context(ctx_obj0, ringbuf0->obj, to0->ppgtt, tail0);
if (to1) {
ringbuf1 = to1->engine[ring->id].ringbuf;
@@ -358,7 +381,7 @@ static void execlists_submit_contexts(struct intel_engine_cs *ring,
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
- execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
+ execlists_update_context(ctx_obj1, ringbuf1->obj, to1->ppgtt, tail1);
}
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -371,6 +394,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
assert_spin_locked(&ring->execlist_lock);
+ /*
+ * If irqs are not active generate a warning as batches that finish
+ * without the irqs may get lost and a GPU Hang may occur.
+ */
+ WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
+
if (list_empty(&ring->execlist_queue))
return;
@@ -398,7 +427,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* WaIdleLiteRestore: make sure we never cause a lite
* restore with HEAD==TAIL
*/
- if (req0 && req0->elsp_submitted) {
+ if (req0->elsp_submitted) {
/*
* Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
* as we resubmit the request. See gen8_emit_request()
@@ -520,8 +549,6 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
struct drm_i915_gem_request *request)
{
struct drm_i915_gem_request *cursor;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- unsigned long flags;
int num_elements = 0;
if (to != ring->default_context)
@@ -538,7 +565,6 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
request->ring = ring;
request->ctx = to;
kref_init(&request->ref);
- request->uniq = dev_priv->request_uniq++;
i915_gem_context_reference(request->ctx);
} else {
i915_gem_request_reference(request);
@@ -546,9 +572,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
}
request->tail = tail;
- intel_runtime_pm_get(dev_priv);
-
- spin_lock_irqsave(&ring->execlist_lock, flags);
+ spin_lock_irq(&ring->execlist_lock);
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
if (++num_elements > 2)
@@ -574,7 +598,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
if (num_elements == 0)
execlists_context_unqueue(ring);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ spin_unlock_irq(&ring->execlist_lock);
return 0;
}
@@ -604,6 +628,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
+ const unsigned other_rings = ~intel_ring_flag(ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -612,9 +637,11 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
- if (ret)
- return ret;
+ if (obj->active & other_rings) {
+ ret = i915_gem_object_sync(obj, ring);
+ if (ret)
+ return ret;
+ }
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false);
@@ -631,6 +658,170 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
return logical_ring_invalidate_all_caches(ringbuf, ctx);
}
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
+ struct intel_context *ctx)
+{
+ int ret;
+
+ if (ctx != request->ring->default_context) {
+ ret = intel_lr_context_pin(request->ring, ctx);
+ if (ret)
+ return ret;
+ }
+
+ request->ringbuf = ctx->engine[request->ring->id].ringbuf;
+ request->ctx = ctx;
+ i915_gem_context_reference(request->ctx);
+
+ return 0;
+}
+
+static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ int bytes)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_i915_gem_request *request;
+ unsigned space;
+ int ret;
+
+ if (intel_ring_space(ringbuf) >= bytes)
+ return 0;
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ /*
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
+ */
+ if (request->ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ space = __intel_ring_space(request->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= bytes)
+ break;
+ }
+
+ if (WARN_ON(&request->list == &ring->request_list))
+ return -ENOSPC;
+
+ ret = i915_wait_request(request);
+ if (ret)
+ return ret;
+
+ ringbuf->space = space;
+ return 0;
+}
+
+/*
+ * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * @ringbuf: Logical Ringbuffer to advance.
+ *
+ * The tail is updated in our logical ringbuffer struct, not in the actual context. What
+ * really happens during submission is that the context and current tail will be placed
+ * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
+ * point, the tail *inside* the context is updated and the ELSP written to.
+ */
+static void
+intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx,
+ struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+
+ intel_logical_ring_advance(ringbuf);
+
+ if (intel_ring_stopped(ring))
+ return;
+
+ execlists_context_queue(ring, ctx, ringbuf->tail, request);
+}
+
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx)
+{
+ uint32_t __iomem *virt;
+ int rem = ringbuf->size - ringbuf->tail;
+
+ if (ringbuf->space < rem) {
+ int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
+
+ if (ret)
+ return ret;
+ }
+
+ virt = ringbuf->virtual_start + ringbuf->tail;
+ rem /= 4;
+ while (rem--)
+ iowrite32(MI_NOOP, virt++);
+
+ ringbuf->tail = 0;
+ intel_ring_update_space(ringbuf);
+
+ return 0;
+}
+
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int bytes)
+{
+ int ret;
+
+ if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
+ ret = logical_ring_wrap_buffer(ringbuf, ctx);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ringbuf->space < bytes)) {
+ ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
+ *
+ * @ringbuf: Logical ringbuffer.
+ * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
+ *
+ * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
+ * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
+ * and also preallocates a request (every workload submission is still mediated through
+ * requests, same as it did with legacy ringbuffer submission).
+ *
+ * Return: non-zero if the ringbuffer is not ready to be written to.
+ */
+static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+ struct intel_context *ctx, int num_dwords)
+{
+ struct intel_engine_cs *ring = ringbuf->ring;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = i915_gem_check_wedge(&dev_priv->gpu_error,
+ dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
+ ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
+ if (ret)
+ return ret;
+
+ /* Preallocate the olr before touching the ring */
+ ret = i915_gem_request_alloc(ring, ctx);
+ if (ret)
+ return ret;
+
+ ringbuf->space -= num_dwords * sizeof(uint32_t);
+ return 0;
+}
+
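The relocated helper keeps the usual reserve/emit/advance flow; a minimal (hypothetical) emitter using only functions visible in this file might look like:

    static int example_emit_noops(struct intel_ringbuffer *ringbuf,
                                  struct intel_context *ctx)
    {
            int ret;

            /* Reserve space for two dwords (and the backing request). */
            ret = intel_logical_ring_begin(ringbuf, ctx, 2);
            if (ret)
                    return ret;

            intel_logical_ring_emit(ringbuf, MI_NOOP);
            intel_logical_ring_emit(ringbuf, MI_NOOP);
            intel_logical_ring_advance(ringbuf);

            return 0;
    }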
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
@@ -742,8 +933,6 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
struct drm_i915_gem_request *req, *tmp;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- unsigned long flags;
struct list_head retired_list;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
@@ -751,9 +940,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
return;
INIT_LIST_HEAD(&retired_list);
- spin_lock_irqsave(&ring->execlist_lock, flags);
+ spin_lock_irq(&ring->execlist_lock);
list_replace_init(&ring->execlist_retired_req_list, &retired_list);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ spin_unlock_irq(&ring->execlist_lock);
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
@@ -762,7 +951,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
- intel_runtime_pm_put(dev_priv);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
@@ -807,30 +995,6 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-/*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
- *
- * The tail is updated in our logical ringbuffer struct, not in the actual context. What
- * really happens during submission is that the context and current tail will be placed
- * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
- * point, the tail *inside* the context is updated and the ELSP written to.
- */
-static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- struct drm_i915_gem_request *request)
-{
- struct intel_engine_cs *ring = ringbuf->ring;
-
- intel_logical_ring_advance(ringbuf);
-
- if (intel_ring_stopped(ring))
- return;
-
- execlists_context_queue(ring, ctx, ringbuf->tail, request);
-}
-
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
@@ -875,219 +1039,6 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
}
}
-static int logical_ring_alloc_request(struct intel_engine_cs *ring,
- struct intel_context *ctx)
-{
- struct drm_i915_gem_request *request;
- struct drm_i915_private *dev_private = ring->dev->dev_private;
- int ret;
-
- if (ring->outstanding_lazy_request)
- return 0;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- if (ctx != ring->default_context) {
- ret = intel_lr_context_pin(ring, ctx);
- if (ret) {
- kfree(request);
- return ret;
- }
- }
-
- kref_init(&request->ref);
- request->ring = ring;
- request->uniq = dev_private->request_uniq++;
-
- ret = i915_gem_get_seqno(ring->dev, &request->seqno);
- if (ret) {
- intel_lr_context_unpin(ring, ctx);
- kfree(request);
- return ret;
- }
-
- request->ctx = ctx;
- i915_gem_context_reference(request->ctx);
- request->ringbuf = ctx->engine[ring->id].ringbuf;
-
- ring->outstanding_lazy_request = request;
- return 0;
-}
-
-static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
- int bytes)
-{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_i915_gem_request *request;
- int ret;
-
- if (intel_ring_space(ringbuf) >= bytes)
- return 0;
-
- list_for_each_entry(request, &ring->request_list, list) {
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- struct intel_context *ctx = request->ctx;
- if (ctx->engine[ring->id].ringbuf != ringbuf)
- continue;
-
- /* Would completion of this request free enough space? */
- if (__intel_ring_space(request->tail, ringbuf->tail,
- ringbuf->size) >= bytes) {
- break;
- }
- }
-
- if (&request->list == &ring->request_list)
- return -ENOSPC;
-
- ret = i915_wait_request(request);
- if (ret)
- return ret;
-
- i915_gem_retire_requests_ring(ring);
-
- return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
-}
-
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- int bytes)
-{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long end;
- int ret;
-
- ret = logical_ring_wait_request(ringbuf, bytes);
- if (ret != -ENOSPC)
- return ret;
-
- /* Force the context submission in case we have been skipping it */
- intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
-
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
-
- ret = 0;
- do {
- if (intel_ring_space(ringbuf) >= bytes)
- break;
-
- msleep(1);
-
- if (dev_priv->mm.interruptible && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- break;
-
- if (time_after(jiffies, end)) {
- ret = -EBUSY;
- break;
- }
- } while (1);
-
- return ret;
-}
-
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- if (ringbuf->space < rem) {
- int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
-
- if (ret)
- return ret;
- }
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-
- return 0;
-}
-
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int bytes)
-{
- int ret;
-
- if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
- ret = logical_ring_wrap_buffer(ringbuf, ctx);
- if (unlikely(ret))
- return ret;
- }
-
- if (unlikely(ringbuf->space < bytes)) {
- ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
- if (unlikely(ret))
- return ret;
- }
-
- return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @ringbuf: Logical ringbuffer.
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx, int num_dwords)
-{
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- /* Preallocate the olr before touching the ring */
- ret = logical_ring_alloc_request(ring, ctx);
- if (ret)
- return ret;
-
- ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
-}
-
static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
@@ -1288,6 +1239,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ bool vf_flush_wa;
u32 flags = 0;
int ret;
@@ -1309,10 +1261,26 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
- ret = intel_logical_ring_begin(ringbuf, ctx, 6);
+ /*
+ * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
+ * control.
+ */
+ vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
+ flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+
+ ret = intel_logical_ring_begin(ringbuf, ctx, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
+ if (vf_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
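
The vf_flush_wa reservation above is plain dword accounting: GFX_OP_PIPE_CONTROL(6) is a 6-dword packet, and on gen9+ the workaround prepends a second, all-zero packet of the same size. A minimal sketch of that accounting (helper name hypothetical):

/* Sketch: dwords reserved via intel_logical_ring_begin() for the render flush. */
static int flush_render_dwords(bool vf_flush_wa)
{
	int dwords = 6;			/* the real GFX_OP_PIPE_CONTROL(6) packet */

	if (vf_flush_wa)		/* gen9+ with VF_CACHE_INVALIDATE requested */
		dwords += 6;		/* preceding NULL PIPE_CONTROL workaround */

	return dwords;			/* 12 with the workaround, 6 without */
}
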
@@ -1443,6 +1411,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
ring->cleanup(ring);
i915_cmd_parser_fini_ring(ring);
+ i915_gem_batch_pool_fini(&ring->batch_pool);
if (ring->status_page.obj) {
kunmap(sg_page(ring->status_page.obj->pages->sgl));
@@ -1460,6 +1429,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
+ i915_gem_batch_pool_init(dev, &ring->batch_pool);
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
@@ -1812,14 +1782,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[3]->daddr);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[3]->daddr);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[2]->daddr);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[2]->daddr);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[1]->daddr);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[1]->daddr);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.page_directory[0]->daddr);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.page_directory[0]->daddr);
+
+ /* With dynamic page allocation, PDPs may not be allocated at this point.
+ * Point the unallocated PDPs to the scratch page.
+ */
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = GEN8_R_PWR_CLK_STATE;
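
ASSIGN_CTX_PDP replaces the eight explicit UDW/LDW assignments removed above. A hedged sketch of what such a macro plausibly expands to; the real definition lives in i915_gem_gtt.h, and the way an unallocated PDP is detected (and the scratch_pd fallback) is assumed here:

/* Sketch only: write one PDP entry pair into the context image, falling back
 * to the scratch page when dynamic page allocation has not populated that
 * page directory yet. */
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 addr = (ppgtt)->pdp.page_directory[n] ? \
		(ppgtt)->pdp.page_directory[n]->daddr : \
		(ppgtt)->scratch_pd->daddr; \
	(reg_state)[CTX_PDP##n##_UDW + 1] = upper_32_bits(addr); \
	(reg_state)[CTX_PDP##n##_LDW + 1] = lower_32_bits(addr); \
} while (0)
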
@@ -1936,11 +1906,10 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
context_size = round_up(get_lr_context_size(ring), 4096);
- ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
- if (IS_ERR(ctx_obj)) {
- ret = PTR_ERR(ctx_obj);
- DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
- return ret;
+ ctx_obj = i915_gem_alloc_object(dev, context_size);
+ if (!ctx_obj) {
+ DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
+ return -ENOMEM;
}
if (is_global_default_ctx) {
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index adb731e49c57..04d3a6d8b207 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -36,6 +36,8 @@
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */
+int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
+ struct intel_context *ctx);
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
@@ -63,9 +65,6 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
- int num_dwords);
/* Logical Ring Contexts */
void intel_lr_context_free(struct intel_context *ctx);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index fbcc7dff0d63..161ab26f81fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -781,7 +781,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
child->device_type != DEVICE_TYPE_LFP)
continue;
- if (intel_gmbus_is_port_valid(child->i2c_pin))
+ if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
*i2c_pin = child->i2c_pin;
/* However, we cannot trust the BIOS writers to populate
@@ -942,7 +942,7 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- pin = GMBUS_PORT_PANEL;
+ pin = GMBUS_PIN_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index dd92122ed95c..25c8ec697da1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,10 +172,11 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct drm_i915_gem_object *vid_bo;
struct drm_i915_gem_object *old_vid_bo;
- int active;
- int pfit_active;
+ bool active;
+ bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
- u32 color_key;
+ u32 color_key:24;
+ u32 color_key_enabled:1;
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
/* register access */
@@ -216,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- BUG_ON(overlay->last_flip_req);
+ WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req,
ring->outstanding_lazy_request);
ret = i915_add_request(ring);
@@ -227,7 +228,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(dev);
i915_gem_request_assign(&overlay->last_flip_req, NULL);
return 0;
@@ -241,15 +241,15 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
int ret;
- BUG_ON(overlay->active);
- overlay->active = 1;
-
+ WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
+ overlay->active = true;
+
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -270,7 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
u32 tmp;
int ret;
- BUG_ON(!overlay->active);
+ WARN_ON(!overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
@@ -309,7 +309,8 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
- BUG_ON(!overlay->vid_bo);
+ if (WARN_ON(!obj))
+ return;
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
@@ -317,7 +318,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
overlay->crtc->overlay = NULL;
overlay->crtc = NULL;
- overlay->active = 0;
+ overlay->active = false;
}
/* overlay needs to be disabled in OCMD reg */
@@ -329,7 +330,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
u32 flip_addr = overlay->flip_addr;
int ret;
- BUG_ON(!overlay->active);
+ WARN_ON(!overlay->active);
/* According to intel docs the overlay hw may hang (when switching
* off) without loading the filter coeffs. It is however unclear whether
@@ -374,7 +375,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
ret = i915_wait_request(overlay->last_flip_req);
if (ret)
return ret;
- i915_gem_retire_requests(overlay->dev);
if (overlay->flip_tail)
overlay->flip_tail(overlay);
@@ -629,31 +629,36 @@ static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
u32 key = overlay->color_key;
+ u32 flags;
+
+ flags = 0;
+ if (overlay->color_key_enabled)
+ flags |= DST_KEY_ENABLE;
switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
case 8:
- iowrite32(0, &regs->DCLRKV);
- iowrite32(CLK_RGB8I_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+ key = 0;
+ flags |= CLK_RGB8I_MASK;
break;
case 16:
if (overlay->crtc->base.primary->fb->depth == 15) {
- iowrite32(RGB15_TO_COLORKEY(key), &regs->DCLRKV);
- iowrite32(CLK_RGB15_MASK | DST_KEY_ENABLE,
- &regs->DCLRKM);
+ key = RGB15_TO_COLORKEY(key);
+ flags |= CLK_RGB15_MASK;
} else {
- iowrite32(RGB16_TO_COLORKEY(key), &regs->DCLRKV);
- iowrite32(CLK_RGB16_MASK | DST_KEY_ENABLE,
- &regs->DCLRKM);
+ key = RGB16_TO_COLORKEY(key);
+ flags |= CLK_RGB16_MASK;
}
break;
case 24:
case 32:
- iowrite32(key, &regs->DCLRKV);
- iowrite32(CLK_RGB24_MASK | DST_KEY_ENABLE, &regs->DCLRKM);
+ flags |= CLK_RGB24_MASK;
break;
}
+
+ iowrite32(key, &regs->DCLRKV);
+ iowrite32(flags, &regs->DCLRKM);
}
static u32 overlay_cmd_reg(struct put_image_params *params)
@@ -712,9 +717,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- BUG_ON(!overlay);
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -824,8 +828,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_device *dev = overlay->dev;
int ret;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
- BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -1131,10 +1135,10 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
/* line too wide, i.e. one-line-mode */
if (mode->hdisplay > 1024 &&
intel_panel_fitter_pipe(dev) == crtc->pipe) {
- overlay->pfit_active = 1;
+ overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
- overlay->pfit_active = 0;
+ overlay->pfit_active = false;
}
ret = check_overlay_dst(overlay, put_image_rec);
@@ -1329,6 +1333,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
I915_WRITE(OGAMC5, attrs->gamma5);
}
}
+ overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
ret = 0;
out_unlock:
@@ -1392,6 +1397,7 @@ void intel_setup_overlay(struct drm_device *dev)
/* init all values */
overlay->color_key = 0x0101fe;
+ overlay->color_key_enabled = true;
overlay->brightness = -19;
overlay->contrast = 75;
overlay->saturation = 146;
@@ -1432,7 +1438,7 @@ void intel_cleanup_overlay(struct drm_device *dev)
/* The bo's should be free'd by the generic code already.
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already */
- BUG_ON(dev_priv->overlay->active);
+ WARN_ON(dev_priv->overlay->active);
drm_gem_object_unreference_unlocked(&dev_priv->overlay->reg_bo->base);
kfree(dev_priv->overlay);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 08532d4ffe0a..7d83527f95f7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -536,6 +536,14 @@ static u32 vlv_get_backlight(struct intel_connector *connector)
return _vlv_get_backlight(dev, pipe);
}
+static u32 bxt_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(BXT_BLC_PWM_DUTY1);
+}
+
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -616,6 +624,14 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
+static void bxt_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(BXT_BLC_PWM_DUTY1, level);
+}
+
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
@@ -741,6 +757,18 @@ static void vlv_disable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
+static void bxt_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BXT_BLC_PWM_CTL1);
+ I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
+}
+
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -947,6 +975,33 @@ static void vlv_enable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
+static void bxt_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ DRM_DEBUG_KMS("backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
+ }
+
+ I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max);
+
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
+ POSTING_READ(BXT_BLC_PWM_CTL1);
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1299,6 +1354,30 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
return 0;
}
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+ pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+
+ panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = bxt_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
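
The Broxton hooks above use BXT_BLC_PWM_FREQ1 as the PWM period (and thus the maximum duty value) and BXT_BLC_PWM_DUTY1 as the current duty, so perceived brightness is simply their ratio. A small illustrative helper, not part of the patch, with hypothetical register values:

/* Sketch: BXT backlight level as a percentage of the PWM period. */
static u32 bxt_backlight_percent(u32 duty, u32 freq)
{
	if (freq == 0)			/* unprogrammed period, treat as off */
		return 0;

	return duty * 100 / freq;	/* e.g. duty 0x4000 of period 0x8000 -> 50 */
}
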
@@ -1350,7 +1429,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
+ if (IS_BROXTON(dev)) {
+ dev_priv->display.setup_backlight = bxt_setup_backlight;
+ dev_priv->display.enable_backlight = bxt_enable_backlight;
+ dev_priv->display.disable_backlight = bxt_disable_backlight;
+ dev_priv->display.set_backlight = bxt_set_backlight;
+ dev_priv->display.get_backlight = bxt_get_backlight;
+ } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 555b896d2bda..eadc15cddbeb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -67,7 +67,7 @@ static void skl_init_clock_gating(struct drm_device *dev)
gen9_init_clock_gating(dev);
- if (INTEL_REVID(dev) == SKL_REVID_A0) {
+ if (INTEL_REVID(dev) <= SKL_REVID_B0) {
/*
* WaDisableSDEUnitClockGating:skl
* WaSetGAPSunitClckGateDisable:skl
@@ -75,6 +75,10 @@ static void skl_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableVFUnitClockGating:skl */
+ I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
+ GEN6_VFUNIT_CLOCK_GATE_DISABLE);
}
if (INTEL_REVID(dev) <= SKL_REVID_D0) {
@@ -84,8 +88,7 @@ static void skl_init_clock_gating(struct drm_device *dev)
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
- I915_READ(FF_SLICE_CS_CHICKEN2) |
- GEN9_TSG_BARRIER_ACK_DISABLE);
+ _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
if (INTEL_REVID(dev) <= SKL_REVID_E0)
@@ -94,6 +97,26 @@ static void skl_init_clock_gating(struct drm_device *dev)
GEN8_LQSC_RO_PERF_DIS);
}
+static void bxt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /*
+ * FIXME:
+ * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
+ * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
+ */
+ /* WaDisableSDEUnitClockGating:bxt */
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
+ GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
+
+ /* FIXME: apply on A0 only */
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+}
+
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1792,7 +1815,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
- intel_ddi_get_cdclk_freq(dev_priv));
+ dev_priv->display.get_display_clock_speed(dev_priv->dev));
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
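
Only the clock source for the IPS line time changes in the hunk above; the arithmetic itself is untouched and easy to check by hand. A worked example assuming a common 1080p timing (htotal 2200, pixel clock 148500 kHz):

/* Sketch: line time in 1/8 us units, exactly as computed above. */
static unsigned int linetime_eighth_us(unsigned int htotal, unsigned int clock_khz)
{
	/* DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) == 119, i.e. ~14.8 us/line */
	return DIV_ROUND_CLOSEST(htotal * 1000 * 8, clock_khz);
}
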
@@ -1923,7 +1946,7 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
- if (IS_GEN9(dev))
+ if (INTEL_INFO(dev)->gen >= 9)
return 7;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
@@ -2536,6 +2559,7 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
*/
#define SKL_DDB_SIZE 896 /* in blocks */
+#define BXT_DDB_SIZE 512
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
@@ -2554,7 +2578,10 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
return;
}
- ddb_size = SKL_DDB_SIZE;
+ if (IS_BROXTON(dev))
+ ddb_size = BXT_DDB_SIZE;
+ else
+ ddb_size = SKL_DDB_SIZE;
ddb_size -= 4; /* 4 blocks for bypass path allocation */
@@ -2610,8 +2637,18 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
}
static unsigned int
-skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
+skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y)
{
+
+ /* for planar format */
+ if (p->y_bytes_per_pixel) {
+ if (y) /* y-plane data rate */
+ return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel;
+ else /* uv-plane data rate */
+ return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel;
+ }
+
+ /* for packed formats */
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}
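
A quick sanity check of the planar split above, using a hypothetical 1920x1080 NV12 plane (the Y plane is 1 byte per pixel; the interleaved UV plane is 2 bytes per 2x2-subsampled sample):

/* Sketch: relative data rates for a 1920x1080 NV12 plane. */
unsigned int y_rate  = 1920 * 1080 * 1;			/* 2073600 */
unsigned int uv_rate = (1920 / 2) * (1080 / 2) * 2;	/* 1036800 */
/* y_rate + uv_rate == 3110400, i.e. 1.5 bytes per pixel as expected for NV12 */
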
@@ -2634,7 +2671,10 @@ skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
if (!p->enabled)
continue;
- total_data_rate += skl_plane_relative_data_rate(p);
+ total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */
+ if (p->y_bytes_per_pixel) {
+ total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */
+ }
}
return total_data_rate;
@@ -2653,6 +2693,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
uint16_t minimum[I915_MAX_PLANES];
+ uint16_t y_minimum[I915_MAX_PLANES];
unsigned int total_data_rate;
int plane;
@@ -2681,6 +2722,8 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
minimum[plane] = 8;
alloc_size -= minimum[plane];
+ y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0;
+ alloc_size -= y_minimum[plane];
}
/*
@@ -2694,16 +2737,17 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
- unsigned int data_rate;
- uint16_t plane_blocks;
+ unsigned int data_rate, y_data_rate;
+ uint16_t plane_blocks, y_plane_blocks = 0;
p = &params->plane[plane];
if (!p->enabled)
continue;
- data_rate = skl_plane_relative_data_rate(p);
+ data_rate = skl_plane_relative_data_rate(p, 0);
/*
+ * allocation for (packed formats) or (uv-plane part of planar format):
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
@@ -2715,6 +2759,22 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
ddb->plane[pipe][plane].end = start + plane_blocks;
start += plane_blocks;
+
+ /*
+ * allocation for y_plane part of planar format:
+ */
+ if (p->y_bytes_per_pixel) {
+ y_data_rate = skl_plane_relative_data_rate(p, 1);
+ y_plane_blocks = y_minimum[plane];
+ y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+ total_data_rate);
+
+ ddb->y_plane[pipe][plane].start = start;
+ ddb->y_plane[pipe][plane].end = start + y_plane_blocks;
+
+ start += y_plane_blocks;
+ }
+
}
}
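
The loop above hands out the remaining DDB blocks in proportion to each plane's data rate, on top of its fixed 8-block minimum, and repeats the exercise for the Y plane of an NV12 surface. A worked example with made-up numbers:

/* Sketch: two enabled planes sharing alloc_size = 800 blocks after minimums,
 * plane A contributing 3/4 of total_data_rate and plane B the remaining 1/4. */
uint16_t blocks_a = 8 + div_u64(800ULL * 3, 4);	/* 8 + 600 = 608 */
uint16_t blocks_b = 8 + div_u64(800ULL * 1, 4);	/* 8 + 200 = 208 */
/* contiguous [start, end) ranges are then written out starting at alloc->start */
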
@@ -2827,13 +2887,18 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config);
fb = crtc->primary->state->fb;
+ /* For planar: Bpp is for uv plane, y_Bpp is for y plane */
if (fb) {
p->plane[0].enabled = true;
- p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8;
+ p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
+ drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
+ p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
+ drm_format_plane_cpp(fb->pixel_format, 0) : 0;
p->plane[0].tiling = fb->modifier[0];
} else {
p->plane[0].enabled = false;
p->plane[0].bytes_per_pixel = 0;
+ p->plane[0].y_bytes_per_pixel = 0;
p->plane[0].tiling = DRM_FORMAT_MOD_NONE;
}
p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w;
@@ -2841,6 +2906,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
p->plane[0].rotation = crtc->primary->state->rotation;
fb = crtc->cursor->state->fb;
+ p->cursor.y_bytes_per_pixel = 0;
if (fb) {
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
@@ -2876,22 +2942,25 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint32_t plane_bytes_per_line, plane_blocks_per_line;
uint32_t res_blocks, res_lines;
uint32_t selected_result;
+ uint8_t bytes_per_pixel;
if (latency == 0 || !p->active || !p_params->enabled)
return false;
+ bytes_per_pixel = p_params->y_bytes_per_pixel ?
+ p_params->y_bytes_per_pixel :
+ p_params->bytes_per_pixel;
method1 = skl_wm_method1(p->pixel_rate,
- p_params->bytes_per_pixel,
+ bytes_per_pixel,
latency);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
- p_params->bytes_per_pixel,
+ bytes_per_pixel,
p_params->tiling,
latency);
- plane_bytes_per_line = p_params->horiz_pixels *
- p_params->bytes_per_pixel;
+ plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel;
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
if (p_params->tiling == I915_FORMAT_MOD_Y_TILED ||
@@ -3108,10 +3177,14 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
new->plane_trans[pipe][i]);
I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
- for (i = 0; i < intel_num_planes(crtc); i++)
+ for (i = 0; i < intel_num_planes(crtc); i++) {
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, i),
&new->ddb.plane[pipe][i]);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, i),
+ &new->ddb.y_plane[pipe][i]);
+ }
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
&new->ddb.cursor[pipe]);
@@ -3176,7 +3249,7 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
{
struct drm_device *dev = dev_priv->dev;
struct skl_ddb_allocation *cur_ddb, *new_ddb;
- bool reallocated[I915_MAX_PIPES] = {false, false, false};
+ bool reallocated[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
enum pipe pipe;
@@ -3269,6 +3342,7 @@ static bool skl_update_pipe_wm(struct drm_crtc *crtc,
return false;
intel_crtc->wm.skl_active = *pipe_wm;
+
return true;
}
@@ -3362,8 +3436,16 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
- intel_plane->wm.bytes_per_pixel = pixel_size;
intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE;
+
+ /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */
+ intel_plane->wm.bytes_per_pixel =
+ (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size;
+ intel_plane->wm.y_bytes_per_pixel =
+ (fb && fb->pixel_format == DRM_FORMAT_NV12) ?
+ drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0;
+
/*
* Framebuffer can be NULL on plane disable, but it does not
* matter for watermarks if we assume no tiling in that case.
@@ -3928,6 +4010,8 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_DOWN_IDLE_AVG);
dev_priv->rps.power = new_power;
+ dev_priv->rps.up_threshold = threshold_up;
+ dev_priv->rps.down_threshold = threshold_down;
dev_priv->rps.last_adj = 0;
}
@@ -3999,8 +4083,11 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
"Odd GPU freq value\n"))
val &= ~1;
- if (val != dev_priv->rps.cur_freq)
+ if (val != dev_priv->rps.cur_freq) {
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ if (!IS_CHERRYVIEW(dev_priv))
+ gen6_set_rps_thresholds(dev_priv, val);
+ }
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
@@ -4008,50 +4095,25 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
-/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
+/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
*
* * If Gfx is Idle, then
- * 1. Mask Turbo interrupts
- * 2. Bring up Gfx clock
- * 3. Change the freq to Rpn and wait till P-Unit updates freq
- * 4. Clear the Force GFX CLK ON bit so that Gfx can down
- * 5. Unmask Turbo interrupts
+ * 1. Forcewake Media well.
+ * 2. Request idle freq.
+ * 3. Release Forcewake of Media well.
*/
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
u32 val = dev_priv->rps.idle_freq;
- /* CHV and latest VLV don't need to force the gfx clock */
- if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) {
- valleyview_set_rps(dev_priv->dev, val);
- return;
- }
-
- /*
- * When we are idle. Drop to min voltage state.
- */
-
if (dev_priv->rps.cur_freq <= val)
return;
- /* Mask turbo interrupt so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_sanitize_rps_pm_mask(dev_priv, ~0));
-
- vlv_force_gfx_clock(dev_priv, true);
-
- dev_priv->rps.cur_freq = val;
-
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
- if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
- & GENFREQSTATUS) == 0, 100))
- DRM_ERROR("timed out waiting for Punit\n");
-
- vlv_force_gfx_clock(dev_priv, false);
-
- I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+ /* Wake up the media well, as that takes a lot less
+ * power than the Render well. */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+ valleyview_set_rps(dev_priv->dev, val);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
@@ -4080,21 +4142,47 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
}
mutex_unlock(&dev_priv->rps.hw_lock);
+
+ spin_lock(&dev_priv->rps.client_lock);
+ while (!list_empty(&dev_priv->rps.clients))
+ list_del_init(dev_priv->rps.clients.next);
+ spin_unlock(&dev_priv->rps.client_lock);
}
-void gen6_rps_boost(struct drm_i915_private *dev_priv)
+void gen6_rps_boost(struct drm_i915_private *dev_priv,
+ struct intel_rps_client *rps,
+ unsigned long submitted)
{
- u32 val;
+ /* This is intentionally racy! We peek at the state here, then
+ * validate inside the RPS worker.
+ */
+ if (!(dev_priv->mm.busy &&
+ dev_priv->rps.enabled &&
+ dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
+ return;
- mutex_lock(&dev_priv->rps.hw_lock);
- val = dev_priv->rps.max_freq_softlimit;
- if (dev_priv->rps.enabled &&
- dev_priv->mm.busy &&
- dev_priv->rps.cur_freq < val) {
- intel_set_rps(dev_priv->dev, val);
- dev_priv->rps.last_adj = 0;
+ /* Force an RPS boost (and don't count it against the client) if
+ * the GPU is severely congested.
+ */
+ if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
+ rps = NULL;
+
+ spin_lock(&dev_priv->rps.client_lock);
+ if (rps == NULL || list_empty(&rps->link)) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->rps.interrupts_enabled) {
+ dev_priv->rps.client_boost = true;
+ queue_work(dev_priv->wq, &dev_priv->rps.work);
+ }
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ if (rps != NULL) {
+ list_add(&rps->link, &dev_priv->rps.clients);
+ rps->boosts++;
+ } else
+ dev_priv->rps.boosts++;
}
- mutex_unlock(&dev_priv->rps.hw_lock);
+ spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_device *dev, u8 val)
@@ -4248,8 +4336,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
if (dev_priv->rps.min_freq_softlimit == 0) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dev_priv->rps.min_freq_softlimit =
- /* max(RPe, 450 MHz) */
- max(dev_priv->rps.efficient_freq, (u8) 9);
+ max_t(int, dev_priv->rps.efficient_freq,
+ intel_freq_opcode(dev_priv, 450));
else
dev_priv->rps.min_freq_softlimit =
dev_priv->rps.min_freq;
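
The replacement above derives the old hardcoded value instead of spelling it out. Assuming GT_FREQUENCY_MULTIPLIER is 50 (MHz per RPS step on HSW/BDW), the 450 MHz floor still resolves to the literal 9 that was removed:

/* Sketch: the 450 MHz softlimit floor expressed as an RPS opcode on HSW/BDW.
 *   intel_freq_opcode(dev_priv, 450) == 450 / GT_FREQUENCY_MULTIPLIER
 *                                    == 450 / 50
 *                                    == 9
 */
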
@@ -4323,8 +4411,13 @@ static void gen9_enable_rc6(struct drm_device *dev)
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
- /* 3b: Enable Coarse Power Gating only when RC6 is enabled */
- I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 3 : 0);
+ /*
+ * 3b: Enable Coarse Power Gating only when RC6 is enabled.
+ * WaDisableRenderPowerGating:skl,bxt - Render PG needs to be disabled with RC6.
+ */
+ I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+ GEN9_MEDIA_PG_ENABLE : 0);
+
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4663,24 +4756,6 @@ static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
return rp1;
}
-static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
- u32 val, rpn;
-
- if (dev->pdev->revision >= 0x20) {
- val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
- rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
- FB_GFX_FREQ_FUSE_MASK);
- } else { /* For pre-production hardware */
- val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
- rpn = ((val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) &
- PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK);
- }
-
- return rpn;
-}
-
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;
@@ -4887,9 +4962,9 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
mutex_lock(&dev_priv->rps.hw_lock);
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
switch ((val >> 2) & 0x7) {
case 0:
@@ -4932,7 +5007,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
- dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ /* PUnit validated range is only [RPe, RP0] */
+ dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
@@ -4994,8 +5070,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */
- I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
+ /* TO threshold set to 500 us ( 0x186 * 1.28 us) */
+ I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
@@ -5030,6 +5106,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN |
+ VLV_SOC_TDP_EN |
+ CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
@@ -5114,6 +5196,12 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN |
+ VLV_SOC_TDP_EN |
+ VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
@@ -5796,13 +5884,15 @@ static void ibx_init_clock_gating(struct drm_device *dev)
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
- intel_flush_primary_plane(dev_priv, pipe);
+
+ I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
+ POSTING_READ(DSPSURF(pipe));
}
}
@@ -6092,10 +6182,9 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
+ uint32_t misccpctl;
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ ilk_init_lp_watermarks(dev);
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -6124,6 +6213,22 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+ /*
+ * WaProgramL3SqcReg1Default:bdw
+ * WaTempDisableDOPClkGating:bdw
+ */
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ /*
+ * WaGttCachingOffByDefault:bdw
+ * GTT cache may not work with big pages, so if those
+ * are ever enabled GTT cache may need to be disabled.
+ */
+ I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
+
lpt_init_clock_gating(dev);
}
@@ -6399,6 +6504,12 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * GTT cache may not work with big pages, so if those
+ * are ever enabled GTT cache may need to be disabled.
+ */
+ I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6542,7 +6653,12 @@ void intel_init_pm(struct drm_device *dev)
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
- dev_priv->display.init_clock_gating = skl_init_clock_gating;
+ if (IS_BROXTON(dev))
+ dev_priv->display.init_clock_gating =
+ bxt_init_clock_gating;
+ else if (IS_SKYLAKE(dev))
+ dev_priv->display.init_clock_gating =
+ skl_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -6760,14 +6876,58 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return val / GT_FREQUENCY_MULTIPLIER;
}
+struct request_boost {
+ struct work_struct work;
+ struct drm_i915_gem_request *req;
+};
+
+static void __intel_rps_boost_work(struct work_struct *work)
+{
+ struct request_boost *boost = container_of(work, struct request_boost, work);
+ struct drm_i915_gem_request *req = boost->req;
+
+ if (!i915_gem_request_completed(req, true))
+ gen6_rps_boost(to_i915(req->ring->dev), NULL,
+ req->emitted_jiffies);
+
+ i915_gem_request_unreference__unlocked(req);
+ kfree(boost);
+}
+
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+ struct drm_i915_gem_request *req)
+{
+ struct request_boost *boost;
+
+ if (req == NULL || INTEL_INFO(dev)->gen < 6)
+ return;
+
+ if (i915_gem_request_completed(req, true))
+ return;
+
+ boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
+ if (boost == NULL)
+ return;
+
+ i915_gem_request_reference(req);
+ boost->req = req;
+
+ INIT_WORK(&boost->work, __intel_rps_boost_work);
+ queue_work(to_i915(dev)->wq, &boost->work);
+}
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_init(&dev_priv->rps.hw_lock);
+ spin_lock_init(&dev_priv->rps.client_lock);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
+ INIT_LIST_HEAD(&dev_priv->rps.clients);
+ INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
+ INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
dev_priv->pm.suspended = false;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a8f9348259ae..5ee0fa57ed19 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -117,6 +117,19 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
I915_WRITE(VLV_VSCSDP(pipe), val);
}
+static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
+{
+ struct edp_vsc_psr psr_vsc;
+
+ /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x3;
+ psr_vsc.sdp_header.HB3 = 0xb;
+ intel_psr_write_vsc(intel_dp, &psr_vsc);
+}
+
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
@@ -133,7 +146,7 @@ static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
{
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
+ DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
}
static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
@@ -157,13 +170,14 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
- /* Enable PSR in sink */
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Enable AUX frame sync at sink */
+ if (dev_priv->psr.aux_frame_sync)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+ DP_AUX_FRAME_SYNC_ENABLE);
aux_data_reg = (INTEL_INFO(dev)->gen >= 9) ?
DPA_AUX_CH_DATA1 : EDP_PSR_AUX_DATA1(dev);
@@ -183,8 +197,10 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
- /* Use hardcoded data values for PSR */
+ /* Use hardcoded data values for PSR, frame sync and GTC */
val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
+ val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
+ val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
I915_WRITE(aux_ctl_reg, val);
} else {
I915_WRITE(aux_ctl_reg,
@@ -193,6 +209,8 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -232,6 +250,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+
uint32_t max_sleep_time = 0x1f;
/* Lately it was identified that, depending on the panel, the idle frame
* count calculated at HW can be off by 1. So let's use what came
@@ -242,19 +261,25 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby) {
- val |= EDP_PSR_LINK_STANDBY;
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ /* It doesn't mean we shouldn't send TPS patterns, so let's
+ * send the minimal TP1 possible and skip TP2. */
+ val |= EDP_PSR_TP1_TIME_100us;
val |= EDP_PSR_TP2_TP3_TIME_0us;
- val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
- } else
- val |= EDP_PSR_LINK_DISABLE;
+ /* Sink should be able to train with the 5 or 6 idle patterns */
+ idle_frames += 4;
+ }
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
+
+ if (dev_priv->psr.psr2_support)
+ I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
+ EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
@@ -294,6 +319,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ if (!IS_VALLEYVIEW(dev) && ((dev_priv->vbt.psr.full_link) ||
+ (dig_port->port != PORT_A))) {
+ DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
@@ -332,6 +363,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -352,18 +384,20 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
- /* First we check VBT, but we must respect sink and source
- * known restrictions */
- dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
- if ((intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ||
- (IS_BROADWELL(dev) && intel_dig_port->port != PORT_A))
- dev_priv->psr.link_standby = true;
-
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev)) {
hsw_psr_setup_vsc(intel_dp);
+ if (dev_priv->psr.psr2_support) {
+ /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
+ if (crtc->config->pipe_src_w > 3200 ||
+ crtc->config->pipe_src_h > 2000)
+ dev_priv->psr.psr2_support = false;
+ else
+ skl_psr_setup_su_vsc(intel_dp);
+ }
+
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
@@ -560,6 +594,48 @@ static void intel_psr_exit(struct drm_device *dev)
}
/**
+ * intel_psr_single_frame_update - Single Frame Update
+ * @dev: DRM device
+ *
+ * Some platforms support a single frame update feature that is used to
+ * send and update only one frame on the Remote Frame Buffer.
+ * So far it is only implemented for Valleyview and Cherryview because
+ * the hardware requires this to be done before a page flip.
+ */
+void intel_psr_single_frame_update(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+ u32 val;
+
+ /*
+ * Single frame update is already supported on BDW+ but it requires
+ * many W/A and it isn't really needed.
+ */
+ if (!IS_VALLEYVIEW(dev))
+ return;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ val = I915_READ(VLV_PSRCTL(pipe));
+
+ /*
+ * We need to set this bit before writing registers for a flip.
+ * This bit will self-clear when it gets to the PSR active state.
+ */
+ I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
+
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+/**
* intel_psr_invalidate - Invalidate PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 005b5e04de4d..3817a6f00d9e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -908,57 +908,63 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
- /* WaDisablePartialInstShootdown:skl */
+ /* WaDisablePartialInstShootdown:skl,bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics */
+ /* Syncing dependencies between camera and graphics:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- if (INTEL_REVID(dev) == SKL_REVID_A0 ||
- INTEL_REVID(dev) == SKL_REVID_B0) {
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
+ INTEL_REVID(dev) == SKL_REVID_B0)) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
}
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
DISABLE_PIXEL_MASK_CAMMING);
}
- if (INTEL_REVID(dev) >= SKL_REVID_C0) {
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
+ IS_BROXTON(dev)) {
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
}
- if (INTEL_REVID(dev) <= SKL_REVID_D0) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
- }
-
- /* Wa4x4STCOptimizationDisable:skl */
+ /* Wa4x4STCOptimizationDisable:skl,bxt */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
- /* WaDisablePartialResolveInVc:skl */
+ /* WaDisablePartialResolveInVc:skl,bxt */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
- /* WaCcsTlbPrefetchDisable:skl */
+ /* WaCcsTlbPrefetchDisable:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
+ /* WaDisableMaskBasedCammingInRCC:skl,bxt */
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_C0) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0))
+ WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+ PIXEL_MASK_CAMMING_DISABLE);
+
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
+ tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) == SKL_REVID_F0) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) >= BXT_REVID_B0))
+ tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
+ WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+
return 0;
}
@@ -1024,9 +1030,41 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /*
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+ }
+
return skl_tune_iz_hashing(ring);
}
+static int bxt_init_workarounds(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_workarounds(ring);
+
+ /* WaDisableThreadStallDopClockGating:bxt */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:bxt */
+ if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
+ return 0;
+}
+
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@@ -1044,8 +1082,9 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
if (IS_SKYLAKE(dev))
return skl_init_workarounds(ring);
- else if (IS_GEN9(dev))
- return gen9_init_workarounds(ring);
+
+ if (IS_BROXTON(dev))
+ return bxt_init_workarounds(ring);
return 0;
}
@@ -1972,6 +2011,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue);
+ i915_gem_batch_pool_init(dev, &ring->batch_pool);
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->ring = ring;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
@@ -2050,91 +2090,40 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
cleanup_status_page(ring);
i915_cmd_parser_fini_ring(ring);
+ i915_gem_batch_pool_fini(&ring->batch_pool);
kfree(ringbuf);
ring->buffer = NULL;
}
-static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
+static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_request *request;
+ unsigned space;
int ret;
if (intel_ring_space(ringbuf) >= n)
return 0;
list_for_each_entry(request, &ring->request_list, list) {
- if (__intel_ring_space(request->postfix, ringbuf->tail,
- ringbuf->size) >= n) {
+ space = __intel_ring_space(request->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= n)
break;
- }
}
- if (&request->list == &ring->request_list)
+ if (WARN_ON(&request->list == &ring->request_list))
return -ENOSPC;
ret = i915_wait_request(request);
if (ret)
return ret;
- i915_gem_retire_requests_ring(ring);
-
+ ringbuf->space = space;
return 0;
}
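
The rewritten ring_wait_for_space() no longer polls: it walks the request list for the first request whose retirement frees at least n bytes and waits on just that one. The circular-buffer arithmetic it relies on, roughly (the real __intel_ring_space() also keeps back a small reserve):

/* Sketch: bytes writable at 'tail' once the hardware head reaches 'head'. */
static int ring_space_after(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;		/* head is behind tail, wrap around */

	return space;
}
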
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = ring->buffer;
- unsigned long end;
- int ret;
-
- ret = intel_ring_wait_request(ring, n);
- if (ret != -ENOSPC)
- return ret;
-
- /* force the tail write in case we have been skipping them */
- __intel_ring_advance(ring);
-
- /* With GEM the hangcheck timer should kick us out of the loop,
- * leaving it early runs the risk of corrupting GEM state (due
- * to running on almost untested codepaths). But on resume
- * timers don't work yet, so prevent a complete hang in that
- * case by choosing an insanely large timeout. */
- end = jiffies + 60 * HZ;
-
- ret = 0;
- trace_i915_ring_wait_begin(ring);
- do {
- if (intel_ring_space(ringbuf) >= n)
- break;
- ringbuf->head = I915_READ_HEAD(ring);
- if (intel_ring_space(ringbuf) >= n)
- break;
-
- msleep(1);
-
- if (dev_priv->mm.interruptible && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- break;
-
- if (time_after(jiffies, end)) {
- ret = -EBUSY;
- break;
- }
- } while (1);
- trace_i915_ring_wait_end(ring);
- return ret;
-}
-
static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
uint32_t __iomem *virt;
@@ -2175,38 +2164,19 @@ int intel_ring_idle(struct intel_engine_cs *ring)
return 0;
req = list_entry(ring->request_list.prev,
- struct drm_i915_gem_request,
- list);
+ struct drm_i915_gem_request,
+ list);
- return i915_wait_request(req);
+ /* Make sure we do not trigger any retires */
+ return __i915_wait_request(req,
+ atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
+ to_i915(ring->dev)->mm.interruptible,
+ NULL, NULL);
}
-static int
-intel_ring_alloc_request(struct intel_engine_cs *ring)
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- int ret;
- struct drm_i915_gem_request *request;
- struct drm_i915_private *dev_private = ring->dev->dev_private;
-
- if (ring->outstanding_lazy_request)
- return 0;
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return -ENOMEM;
-
- kref_init(&request->ref);
- request->ring = ring;
- request->ringbuf = ring->buffer;
- request->uniq = dev_private->request_uniq++;
-
- ret = i915_gem_get_seqno(ring->dev, &request->seqno);
- if (ret) {
- kfree(request);
- return ret;
- }
-
- ring->outstanding_lazy_request = request;
+ request->ringbuf = request->ring->buffer;
return 0;
}
@@ -2247,7 +2217,7 @@ int intel_ring_begin(struct intel_engine_cs *ring,
return ret;
/* Preallocate the olr before touching the ring */
- ret = intel_ring_alloc_request(ring);
+ ret = i915_gem_request_alloc(ring, ring->default_context);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c761fe05ad6f..e539314ae87e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,6 +2,7 @@
#define _INTEL_RINGBUFFER_H_
#include <linux/hashtable.h>
+#include "i915_gem_batch_pool.h"
#define I915_CMD_HASH_ORDER 9
@@ -117,6 +118,7 @@ struct intel_ringbuffer {
};
struct intel_context;
+struct drm_i915_reg_descriptor;
struct intel_engine_cs {
const char *name;
@@ -133,6 +135,13 @@ struct intel_engine_cs {
struct drm_device *dev;
struct intel_ringbuffer *buffer;
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
struct intel_hw_status_page status_page;
unsigned irq_refcount; /* protected by dev_priv->irq_lock */
@@ -292,14 +301,14 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
- const u32 *reg_table;
+ const struct drm_i915_reg_descriptor *reg_table;
int reg_count;
/*
* Table of registers allowed in commands that read/write registers, but
* only from the DRM master.
*/
- const u32 *master_reg_table;
+ const struct drm_i915_reg_descriptor *master_reg_table;
int master_reg_count;
/*
@@ -390,6 +399,8 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
+
int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
static inline void intel_ring_emit(struct intel_engine_cs *ring,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ce00e6994eeb..1a45385f4d66 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,6 +49,9 @@
* present for a given platform.
*/
+#define GEN9_ENABLE_DC5(dev) 0
+#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
+
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
@@ -62,6 +65,9 @@
i--) \
if ((power_well)->domains & (domain_mask))
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ int power_well_id);
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -308,7 +314,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+ SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
@@ -319,9 +327,246 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
SKL_DISPLAY_MISC_IO_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
+ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
+#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
+ (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
+ BIT(POWER_DOMAIN_INIT))
+
+static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
+ WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+ WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+
+ /*
+ * TODO: check for the following to verify the conditions to enter DC9
+ * state are satisfied:
+ * 1] Check relevant display engine registers to verify if mode set
+ * disable sequence was followed.
+ * 2] Check if the display uninitialize sequence has completed.
+ */
+}
+
+static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+ WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be disabled.\n");
+ WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
+
+ /*
+ * TODO: check for the following to verify DC9 state was indeed
+ * entered before programming to disable it:
+ * 1] Check relevant display engine registers to verify if mode
+ * set disable sequence was followed.
+ * 2] Check if the display uninitialize sequence has completed.
+ */
+}
+
+void bxt_enable_dc9(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_enable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC9\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val |= DC_STATE_EN_DC9;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+void bxt_disable_dc9(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_disable_dc9(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC9\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_DC9;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_set_dc_state_debugmask_memory_up(
+ struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ /* Once set, this bit never needs to be cleared again */
+ val = I915_READ(DC_STATE_DEBUG);
+ if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
+ val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+ I915_WRITE(DC_STATE_DEBUG, val);
+ POSTING_READ(DC_STATE_DEBUG);
+ }
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+
+ WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+ WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+ WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+ WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ WARN(dev_priv->pm.suspended,
+ "DC5 cannot be enabled, if platform is runtime-suspended.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
+{
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+ /*
+ * During initialization, the firmware may not be loaded yet.
+ * We still want to make sure that the DC enabling flag is cleared.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
+ WARN(dev_priv->pm.suspended,
+ "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
+}
+
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_enable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC5\n");
+
+ gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+ val |= DC_STATE_EN_UPTO_DC5;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_disable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC5\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+ WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+{
+ /*
+ * During initialization, the firmware may not be loaded yet.
+ * We still want to make sure that the DC enabling flag is cleared.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ assert_csr_loaded(dev_priv);
+ WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be disabled.\n");
+}
+
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+ val |= DC_STATE_EN_UPTO_DC6;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void skl_disable_dc6(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_disable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC6\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC6;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
+ struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
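Editor's note: the DC5/DC6/DC9 enable and disable helpers above all perform the same read-modify-write of DC_STATE_EN followed by a posting read. A hypothetical common helper (not part of this patch; the name and the mask/value split are purely illustrative) would look like:

static void gen9_set_dc_state(struct drm_i915_private *dev_priv,
			      u32 mask, u32 state)
{
	u32 val;

	val = I915_READ(DC_STATE_EN);
	val &= ~mask;		/* clear the bits this path owns */
	val |= state;		/* program the requested max DC state */
	I915_WRITE(DC_STATE_EN, val);
	POSTING_READ(DC_STATE_EN);	/* flush before relying on the new state */
}

With such a helper, gen9_enable_dc5() reduces to gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5_DC6_MASK, DC_STATE_EN_UPTO_DC5), and skl_disable_dc6() to gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6, 0).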
@@ -361,6 +606,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (enable) {
if (!enable_requested) {
+ WARN((tmp & state_mask) &&
+ !I915_READ(HSW_PWR_WELL_BIOS),
+ "Invalid for power well status to be enabled, unless done by the BIOS, \
+ when request is to disable!\n");
+ if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+ power_well->data == SKL_DISP_PW_2) {
+ if (SKL_ENABLE_DC6(dev)) {
+ skl_disable_dc6(dev_priv);
+ /*
+ * DDI buffer programming is unnecessary during driver load and
+ * resume, as it's already done during modeset initialization.
+ * It's also invalid here, as the encoder list is still uninitialized.
+ */
+ if (!dev_priv->power_domains.initializing)
+ intel_prepare_ddi(dev);
+ } else {
+ gen9_disable_dc5(dev_priv);
+ }
+ }
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
@@ -377,6 +641,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+
+ if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+ power_well->data == SKL_DISP_PW_2) {
+ enum csr_state state;
+ /* TODO: wait for a completion event or
+ * similar here instead of busy
+ * waiting using wait_for function.
+ */
+ wait_for((state = intel_csr_load_status_get(dev_priv)) !=
+ FW_UNINITIALIZED, 1000);
+ if (state != FW_LOADED)
+ DRM_ERROR("CSR firmware not ready (%d)\n",
+ state);
+ else
+ if (SKL_ENABLE_DC6(dev))
+ skl_enable_dc6(dev_priv);
+ else
+ gen9_enable_dc5(dev_priv);
+ }
}
}
@@ -488,7 +771,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
@@ -666,8 +949,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
- PHY_COM_LANE_RESET_DEASSERT(phy));
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -687,8 +970,8 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
- ~PHY_COM_LANE_RESET_DEASSERT(phy));
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -746,7 +1029,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timout setting power well state %08x (%08x)\n",
+ DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
@@ -950,18 +1233,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
-#define CHV_PIPE_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_PIPE_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_INIT))
-
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
@@ -977,17 +1248,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_INIT))
-#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_INIT))
-
-#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_INIT))
-
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -1145,110 +1405,33 @@ static struct i915_power_well chv_power_wells[] = {
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
-#if 0
{
.name = "display",
- .domains = VLV_DISPLAY_POWER_DOMAINS,
- .data = PUNIT_POWER_WELL_DISP2D,
- .ops = &vlv_display_power_well_ops,
- },
-#endif
- {
- .name = "pipe-a",
/*
- * FIXME: pipe A power well seems to be the new disp2d well.
- * At least all registers seem to be housed there. Figure
- * out if this a a temporary situation in pre-production
- * hardware or a permanent state of affairs.
+ * Pipe A power well is the new disp2d well. Pipe B and C
+ * power wells don't actually exist. Pipe A power well is
+ * required for any pipe to work.
*/
- .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
+ .domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
-#if 0
- {
- .name = "pipe-b",
- .domains = CHV_PIPE_B_POWER_DOMAINS,
- .data = PIPE_B,
- .ops = &chv_pipe_power_well_ops,
- },
- {
- .name = "pipe-c",
- .domains = CHV_PIPE_C_POWER_DOMAINS,
- .data = PIPE_C,
- .ops = &chv_pipe_power_well_ops,
- },
-#endif
{
.name = "dpio-common-bc",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
-#if 0
- {
- .name = "dpio-tx-b-01",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
- },
- {
- .name = "dpio-tx-b-23",
- .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
- },
- {
- .name = "dpio-tx-c-01",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
- },
- {
- .name = "dpio-tx-c-23",
- .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
- VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
- },
- {
- .name = "dpio-tx-d-01",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
- },
- {
- .name = "dpio-tx-d-23",
- .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
- CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
- .ops = &vlv_dpio_power_well_ops,
- .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
- },
-#endif
};
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id)
+ int power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
@@ -1262,6 +1445,18 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ int power_well_id)
+{
+ struct i915_power_well *power_well;
+ bool ret;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+ ret = power_well->ops->is_enabled(dev_priv, power_well);
+
+ return ret;
+}
+
static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
@@ -1313,6 +1508,27 @@ static struct i915_power_well skl_power_wells[] = {
},
};
+static struct i915_power_well bxt_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_1,
+ },
+ {
+ .name = "power well 2",
+ .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .data = SKL_DISP_PW_2,
+ }
+};
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -1341,6 +1557,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, bdw_power_wells);
} else if (IS_SKYLAKE(dev_priv->dev)) {
set_power_wells(power_domains, skl_power_wells);
+ } else if (IS_BROXTON(dev_priv->dev)) {
+ set_power_wells(power_domains, bxt_power_wells);
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -1401,6 +1619,32 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+ * workaround never ever read DISPLAY_PHY_CONTROL, and
+ * instead maintain a shadow copy ourselves. Use the actual
+ * power well state to reconstruct the expected initial
+ * value.
+ */
+ dev_priv->chv_phy_control =
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
+ PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
+ if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+}
+
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
@@ -1443,7 +1687,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
power_domains->initializing = true;
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ chv_phy_control_init(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 987b81f31b0e..aa2fd751609c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -242,7 +242,15 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
- I915_READ(intel_sdvo->sdvo_reg);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+ /*
+ * HW workaround: the register must be written twice, since the
+ * first write can occasionally be masked.
+ */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(intel_sdvo->sdvo_reg, val);
+ POSTING_READ(intel_sdvo->sdvo_reg);
+ }
return;
}
@@ -259,9 +267,9 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
for (i = 0; i < 2; i++)
{
I915_WRITE(GEN3_SDVOB, bval);
- I915_READ(GEN3_SDVOB);
+ POSTING_READ(GEN3_SDVOB);
I915_WRITE(GEN3_SDVOC, cval);
- I915_READ(GEN3_SDVOC);
+ POSTING_READ(GEN3_SDVOC);
}
}
@@ -1429,6 +1437,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1437,35 +1446,34 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
DRM_MODE_DPMS_OFF);
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) != 0) {
- /* HW workaround for IBX, we need to move the port to
- * transcoder A before disabling it. */
- if (HAS_PCH_IBX(encoder->base.dev)) {
- struct drm_crtc *crtc = encoder->base.crtc;
- int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
-
- if (temp & SDVO_PIPE_B_SELECT) {
- temp &= ~SDVO_PIPE_B_SELECT;
- I915_WRITE(intel_sdvo->sdvo_reg, temp);
- POSTING_READ(intel_sdvo->sdvo_reg);
-
- /* Again we need to write this twice. */
- I915_WRITE(intel_sdvo->sdvo_reg, temp);
- POSTING_READ(intel_sdvo->sdvo_reg);
-
- /* Transcoder selection bits only update
- * effectively on vblank. */
- if (crtc)
- intel_wait_for_vblank(encoder->base.dev, pipe);
- else
- msleep(50);
- }
- }
- intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
+ temp &= ~SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ /*
+ * HW workaround for IBX, we need to move the port
+ * to transcoder A after disabling it to allow the
+ * matching DP port to be enabled on transcoder A.
+ */
+ if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ temp |= SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
+
+ temp &= ~SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
}
}
+static void pch_disable_sdvo(struct intel_encoder *encoder)
+{
+}
+
+static void pch_post_disable_sdvo(struct intel_encoder *encoder)
+{
+ intel_disable_sdvo(encoder);
+}
+
static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -1478,14 +1486,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
bool success;
temp = I915_READ(intel_sdvo->sdvo_reg);
- if ((temp & SDVO_ENABLE) == 0) {
- /* HW workaround for IBX, we need to move the port
- * to transcoder A before disabling it, so restore it here. */
- if (HAS_PCH_IBX(dev))
- temp |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ temp |= SDVO_ENABLE;
+ intel_sdvo_write_sdvox(intel_sdvo, temp);
- intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
- }
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
@@ -2291,10 +2294,11 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
- if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
+ if (mapping->initialized &&
+ intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
pin = mapping->i2c_pin;
else
- pin = GMBUS_PORT_DPB;
+ pin = GMBUS_PIN_DPB;
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
@@ -2987,7 +2991,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
intel_encoder->compute_config = intel_sdvo_compute_config;
- intel_encoder->disable = intel_disable_sdvo;
+ if (HAS_PCH_SPLIT(dev)) {
+ intel_encoder->disable = pch_disable_sdvo;
+ intel_encoder->post_disable = pch_post_disable_sdvo;
+ } else {
+ intel_encoder->disable = intel_disable_sdvo;
+ }
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 693ce8281970..8831fc579ade 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -49,7 +49,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
(port << IOSF_PORT_SHIFT) | (be << IOSF_BYTE_ENABLES_SHIFT) |
(bar << IOSF_BAR_SHIFT);
- WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -81,10 +81,10 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return val;
}
@@ -93,10 +93,10 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
SB_CRWRDA_NP, addr, &val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
}
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
@@ -121,10 +121,10 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- mutex_lock(&dev_priv->dpio_lock);
+ mutex_lock(&dev_priv->sb_lock);
vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
SB_CRRDDA_NP, addr, &val);
- mutex_unlock(&dev_priv->dpio_lock);
+ mutex_unlock(&dev_priv->sb_lock);
return val;
}
@@ -213,7 +213,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
u32 value = 0;
- WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
@@ -243,7 +243,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
{
u32 tmp;
- WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
+ WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
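Editor's note: the dpio_lock to sb_lock rename does not change the locking rules. The Punit accessors still assert that the caller holds rps.hw_lock, and they take sb_lock internally for the IOSF transaction itself. A minimal caller sketch (illustrative only; the register offset is left as a parameter):

static u32 example_punit_rmw(struct drm_i915_private *dev_priv, u32 addr,
			     u32 clear, u32 set)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);	/* required by the accessors */
	val = vlv_punit_read(dev_priv, addr);	/* takes sb_lock internally */
	val &= ~clear;
	val |= set;
	vlv_punit_write(dev_priv, addr, val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return val;
}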
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a4c0a04b5044..8193a35388d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -165,17 +166,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
-static void intel_update_primary_plane(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- int reg = DSPCNTR(crtc->plane);
-
- if (crtc->primary_enabled)
- I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
- else
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
-}
-
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -190,72 +180,24 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
- u32 plane_ctl, stride_div;
+ u32 plane_ctl, stride_div, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey;
unsigned long surf_addr;
+ u32 tile_height, plane_offset, plane_size;
+ unsigned int rotation;
+ int x_offset, y_offset;
+ struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
+ int scaler_id;
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- switch (fb->pixel_format) {
- case DRM_FORMAT_RGB565:
- plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
- break;
- case DRM_FORMAT_XBGR8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
- break;
- case DRM_FORMAT_XRGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- break;
- /*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
- case DRM_FORMAT_ABGR8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ORDER_RGBX |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
- case DRM_FORMAT_ARGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
- case DRM_FORMAT_YUYV:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
- break;
- case DRM_FORMAT_YVYU:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
- break;
- case DRM_FORMAT_UYVY:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
- break;
- case DRM_FORMAT_VYUY:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
- break;
- default:
- BUG();
- }
-
- switch (fb->modifier[0]) {
- case DRM_FORMAT_MOD_NONE:
- break;
- case I915_FORMAT_MOD_X_TILED:
- plane_ctl |= PLANE_CTL_TILED_X;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- plane_ctl |= PLANE_CTL_TILED_Y;
- break;
- case I915_FORMAT_MOD_Yf_TILED:
- plane_ctl |= PLANE_CTL_TILED_YF;
- break;
- default:
- MISSING_CASE(fb->modifier[0]);
- }
+ plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
- if (drm_plane->state->rotation == BIT(DRM_ROTATE_180))
- plane_ctl |= PLANE_CTL_ROTATE_180;
+ rotation = drm_plane->state->rotation;
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
@@ -264,6 +206,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
+ scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -283,31 +227,65 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
surf_addr = intel_plane_obj_offset(intel_plane, obj);
- I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div);
- I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
- I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
+ if (intel_rotation_90_or_270(rotation)) {
+ /* stride: Surface height in tiles */
+ tile_height = intel_tile_height(dev, fb->pixel_format,
+ fb->modifier[0]);
+ stride = DIV_ROUND_UP(fb->height, tile_height);
+ plane_size = (src_w << 16) | src_h;
+ x_offset = stride * tile_height - y - (src_h + 1);
+ y_offset = x;
+ } else {
+ stride = fb->pitches[0] / stride_div;
+ plane_size = (src_h << 16) | src_w;
+ x_offset = x;
+ y_offset = y;
+ }
+ plane_offset = y_offset << 16 | x_offset;
+
+ I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
+ I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
+ I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+
+ /* program plane scaler */
+ if (scaler_id >= 0) {
+ uint32_t ps_ctrl = 0;
+
+ DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
+ PS_PLANE_SEL(plane));
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
+ crtc_state->scaler_state.scalers[scaler_id].mode;
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
+ ((crtc_w + 1) << 16)|(crtc_h + 1));
+
+ I915_WRITE(PLANE_POS(pipe, plane), 0);
+ } else {
+ I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ }
+
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
static void
-skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
- struct drm_device *dev = drm_plane->dev;
+ struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
I915_WRITE(PLANE_CTL(pipe, plane), 0);
- /* Activate double buffered register update */
I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));
- intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static void
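Editor's note: for the 90/270 degree case above, the stride is re-expressed in tiles of the rotated surface and the x/y offsets are swapped. A worked example (the numbers are assumptions for illustration only: 32-row tiles, a 1920x1080 Y-tiled framebuffer, full-height source, no pan offset):

	/* tile_height = 32 (assumed), fb->height = 1080 */
	stride       = DIV_ROUND_UP(1080, 32);		/* = 34 tiles */
	/* src_h was already made 0-based above, so src_h = 1079, x = 0, y = 0 */
	x_offset     = 34 * 32 - 0 - (1079 + 1);	/* = 8 rows of tile padding */
	y_offset     = 0;				/* the old x coordinate */
	plane_offset = (y_offset << 16) | x_offset;	/* = 0x00000008 */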
@@ -360,7 +338,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
@@ -448,8 +425,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@@ -476,33 +451,26 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
-
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ POSTING_READ(SPSURF(pipe, plane));
}
static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(SPCNTR(pipe, plane), 0);
- /* Activate double buffered register update */
I915_WRITE(SPSURF(pipe, plane), 0);
-
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ POSTING_READ(SPSURF(pipe, plane));
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
-
static void
ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -514,7 +482,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
@@ -595,8 +562,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(SPRKEYVAL(pipe), key->min_value);
I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@@ -626,29 +591,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
-
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ POSTING_READ(SPRSURF(pipe));
}
static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
- /* Activate double buffered register update */
- I915_WRITE(SPRSURF(pipe), 0);
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ I915_WRITE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
}
static void
@@ -662,7 +622,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
@@ -735,8 +694,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(DVSKEYVAL(pipe), key->min_value);
I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@@ -761,109 +718,32 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
-
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+ POSTING_READ(DVSSURF(pipe));
}
static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
- /* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
-
- intel_flush_primary_plane(dev_priv, intel_crtc->plane);
-}
-
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS. Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-void
-intel_post_enable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_enable_ips(intel_crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * intel_pre_disable_primary - Perform operations before disabling primary plane
- * @crtc: the CRTC whose primary plane is to be disabled
- *
- * Performs potentially sleeping operations that must be done before the
- * primary plane is enabled, such as updating FBC and IPS. Note that this may
- * be called due to an explicit primary plane update, or due to an implicit
- * disable that is caused when a sprite plane completely hides the primary
- * plane.
- */
-void
-intel_pre_disable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.crtc == intel_crtc)
- intel_fbc_disable(dev);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
-}
-
-static bool colorkey_enabled(struct intel_plane *intel_plane)
-{
- return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
+ POSTING_READ(DVSSURF(pipe));
}
static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
+ struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+ struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
@@ -874,9 +754,13 @@ intel_check_sprite_plane(struct drm_plane *plane,
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
+ bool can_scale;
int pixel_size;
+ int ret;
intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
+ crtc_state = state->base.state ?
+ intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
if (!fb) {
state->visible = false;
@@ -895,13 +779,29 @@ intel_check_sprite_plane(struct drm_plane *plane,
return -EINVAL;
}
+ /* setup can_scale, min_scale, max_scale */
+ if (INTEL_INFO(dev)->gen >= 9) {
+ /* use scaler when colorkey is not required */
+ if (intel_plane->ckey.flags == I915_SET_COLORKEY_NONE) {
+ can_scale = 1;
+ min_scale = 1;
+ max_scale = skl_max_scale(intel_crtc, crtc_state);
+ } else {
+ can_scale = 0;
+ min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ }
+ } else {
+ can_scale = intel_plane->can_scale;
+ max_scale = intel_plane->max_downscale << 16;
+ min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+ }
+
/*
* FIXME the following code does a bunch of fuzzy adjustments to the
* coordinates and sizes. We probably need some way to decide whether
* more strict checking should be done instead.
*/
- max_scale = intel_plane->max_downscale << 16;
- min_scale = intel_plane->can_scale ? 1 : (1 << 16);
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
state->base.rotation);
@@ -972,7 +872,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
* Must keep src and dst the
* same if we can't scale.
*/
- if (!intel_plane->can_scale)
+ if (!can_scale)
crtc_w &= ~1;
if (crtc_w == 0)
@@ -984,7 +884,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
- WARN_ON(!intel_plane->can_scale);
+ WARN_ON(!can_scale);
/* FIXME interlacing min height is 6 */
@@ -998,18 +898,18 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
- if (src_w > 2048 || src_h > 2048 ||
- width_bytes > 4096 || fb->pitches[0] > 4096) {
+ if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
}
if (state->visible) {
- src->x1 = src_x;
- src->x2 = src_x + src_w;
- src->y1 = src_y;
- src->y2 = src_y + src_h;
+ src->x1 = src_x << 16;
+ src->x2 = (src_x + src_w) << 16;
+ src->y1 = src_y << 16;
+ src->y2 = (src_y + src_h) << 16;
}
dst->x1 = crtc_x;
@@ -1022,23 +922,10 @@ finish:
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
- state->hides_primary = fb != NULL && drm_rect_equals(dst, clip) &&
- !colorkey_enabled(intel_plane);
- WARN_ON(state->hides_primary && !state->visible && intel_crtc->active);
-
if (intel_crtc->active) {
- if (intel_crtc->primary_enabled == state->hides_primary)
- intel_crtc->atomic.wait_for_flips = true;
-
- if (intel_crtc->primary_enabled && state->hides_primary)
- intel_crtc->atomic.pre_disable_primary = true;
-
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
- if (!intel_crtc->primary_enabled && !state->hides_primary)
- intel_crtc->atomic.post_enable_primary = true;
-
if (intel_wm_need_update(plane, &state->base))
intel_crtc->atomic.update_wm = true;
@@ -1053,6 +940,13 @@ finish:
}
}
+ if (INTEL_INFO(dev)->gen >= 9) {
+ ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
+ state, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1074,22 +968,20 @@ intel_commit_sprite_plane(struct drm_plane *plane,
plane->fb = fb;
if (intel_crtc->active) {
- intel_crtc->primary_enabled = !state->hides_primary;
-
if (state->visible) {
crtc_x = state->dst.x1;
crtc_y = state->dst.y1;
crtc_w = drm_rect_width(&state->dst);
crtc_h = drm_rect_height(&state->dst);
- src_x = state->src.x1;
- src_y = state->src.y1;
- src_w = drm_rect_width(&state->src);
- src_h = drm_rect_height(&state->src);
+ src_x = state->src.x1 >> 16;
+ src_y = state->src.y1 >> 16;
+ src_w = drm_rect_width(&state->src) >> 16;
+ src_h = drm_rect_height(&state->src) >> 16;
intel_plane->update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
- intel_plane->disable_plane(plane, crtc);
+ intel_plane->disable_plane(plane, crtc, false);
}
}
}
@@ -1119,6 +1011,16 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}
intel_plane = to_intel_plane(plane);
+
+ if (INTEL_INFO(dev)->gen >= 9) {
+ /* plane scaling and colorkey are mutually exclusive */
+ if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
+ DRM_ERROR("colorkey not allowed with scaler\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
intel_plane->ckey = *set;
/*
@@ -1139,14 +1041,14 @@ int intel_plane_restore(struct drm_plane *plane)
if (!plane->crtc || !plane->state->fb)
return 0;
- return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb,
- plane->state->crtc_x, plane->state->crtc_y,
- plane->state->crtc_w, plane->state->crtc_h,
- plane->state->src_x, plane->state->src_y,
- plane->state->src_w, plane->state->src_h);
+ return drm_plane_helper_update(plane, plane->crtc, plane->state->fb,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h,
+ plane->state->src_x, plane->state->src_y,
+ plane->state->src_w, plane->state->src_h);
}
-static uint32_t ilk_plane_formats[] = {
+static const uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1154,7 +1056,7 @@ static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static uint32_t snb_plane_formats[] = {
+static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1163,7 +1065,7 @@ static uint32_t snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static uint32_t vlv_plane_formats[] = {
+static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
@@ -1255,14 +1157,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
break;
case 9:
- /*
- * FIXME: Skylake planes can be scaled (with some restrictions),
- * but this is for another time.
- */
- intel_plane->can_scale = false;
- intel_plane->max_downscale = 1;
+ intel_plane->can_scale = true;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
+ state->scaler_id = -1;
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
@@ -1276,6 +1174,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->plane = plane;
intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane;
+ intel_plane->ckey.flags = I915_SET_COLORKEY_NONE;
possible_crtcs = (1 << pipe);
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
@@ -1286,16 +1185,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
goto out;
}
- if (!dev->mode_config.rotation_property)
- dev->mode_config.rotation_property =
- drm_mode_create_rotation_property(dev,
- BIT(DRM_ROTATE_0) |
- BIT(DRM_ROTATE_180));
-
- if (dev->mode_config.rotation_property)
- drm_object_attach_property(&intel_plane->base.base,
- dev->mode_config.rotation_property,
- state->base.rotation);
+ intel_create_rotation_property(dev, intel_plane);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ff2a74651dd4..a6d8a3ee7750 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -383,6 +383,26 @@ void intel_uncore_sanitize(struct drm_device *dev)
intel_disable_gt_powersave(dev);
}
+static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ struct intel_uncore_forcewake_domain *domain;
+ enum forcewake_domain_id id;
+
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ fw_domains &= dev_priv->uncore.fw_domains;
+
+ for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ if (domain->wake_count++)
+ fw_domains &= ~(1 << id);
+ }
+
+ if (fw_domains)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+}
+
/**
* intel_uncore_forcewake_get - grab forcewake domain references
* @dev_priv: i915 device instance
@@ -400,41 +420,39 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
WARN_ON(dev_priv->pm.suspended);
- fw_domains &= dev_priv->uncore.fw_domains;
-
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
- if (domain->wake_count++)
- fw_domains &= ~(1 << id);
- }
-
- if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
-
+ __intel_uncore_forcewake_get(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/**
- * intel_uncore_forcewake_put - release a forcewake domain reference
+ * intel_uncore_forcewake_get__locked - grab forcewake domain references
* @dev_priv: i915 device instance
- * @fw_domains: forcewake domains to put references
+ * @fw_domains: forcewake domains to get references on
*
- * This function drops the device-level forcewakes for specified
- * domains obtained by intel_uncore_forcewake_get().
+ * See intel_uncore_forcewake_get(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ assert_spin_locked(&dev_priv->uncore.lock);
+
+ if (!dev_priv->uncore.funcs.force_wake_get)
+ return;
+
+ __intel_uncore_forcewake_get(dev_priv, fw_domains);
+}
+
+static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
- unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
@@ -443,8 +461,6 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
fw_domains &= dev_priv->uncore.fw_domains;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -455,10 +471,48 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
domain->wake_count++;
fw_domain_arm_timer(domain);
}
+}
+/**
+ * intel_uncore_forcewake_put - release a forcewake domain reference
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references
+ *
+ * This function drops the device-level forcewakes for specified
+ * domains obtained by intel_uncore_forcewake_get().
+ */
+void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ unsigned long irqflags;
+
+ if (!dev_priv->uncore.funcs.force_wake_put)
+ return;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ __intel_uncore_forcewake_put(dev_priv, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
+/**
+ * intel_uncore_forcewake_put__locked - release forcewake domain references
+ * @dev_priv: i915 device instance
+ * @fw_domains: forcewake domains to put references on
+ *
+ * See intel_uncore_forcewake_put(). This variant places the onus
+ * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
+ */
+void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
+{
+ assert_spin_locked(&dev_priv->uncore.lock);
+
+ if (!dev_priv->uncore.funcs.force_wake_put)
+ return;
+
+ __intel_uncore_forcewake_put(dev_priv, fw_domains);
+}
+
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain;
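Editor's note: the new __locked variants are for paths that already hold dev_priv->uncore.lock and need to bracket a few register accesses with forcewake without dropping the spinlock. A usage sketch (illustrative only; the domain choice and the work done under the lock are placeholders):

	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_RENDER);
	/* raw register accesses that must stay atomic w.r.t. uncore.lock */
	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_RENDER);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);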