51 files changed, 2375 insertions, 983 deletions
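Before the diff itself: a minimal userspace sketch of the pipe CRC debugfs interface that the i915_debugfs.c hunks below introduce (the i915_display_crc_ctl control file plus the per-pipe i915_pipe_*_crc files). This is illustrative only, not part of the patch; the debugfs mount point and DRM minor number are assumptions, while the control-command grammar and the CRC line layout are taken from the added code.

/*
 * Illustrative sketch only -- not part of the diff.  Exercises the pipe CRC
 * debugfs interface added in the i915_debugfs.c hunks below.  Paths assume
 * debugfs is mounted at /sys/kernel/debug and the i915 device is DRM minor 0.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define CTL "/sys/kernel/debug/dri/0/i915_display_crc_ctl"
#define CRC "/sys/kernel/debug/dri/0/i915_pipe_A_crc"

int main(void)
{
	char line[64];
	ssize_t n;
	int fd;

	/* command grammar from the patch: "pipe <A|B|C> <none|plane1|plane2|pf|pipe>" */
	fd = open(CTL, O_WRONLY);
	if (fd < 0 || write(fd, "pipe A plane1", strlen("pipe A plane1")) < 0)
		return 1;
	close(fd);

	/* each line read back is "frame crc[0] crc[1] crc[2] crc[3] crc[4]";
	 * the read blocks until an entry is available unless O_NONBLOCK is set */
	fd = open(CRC, O_RDONLY);
	if (fd >= 0) {
		if ((n = read(fd, line, sizeof(line) - 1)) > 0) {
			line[n] = '\0';
			printf("%s", line);
		}
		close(fd);
	}

	/* stop CRC generation again */
	fd = open(CTL, O_WRONLY);
	if (fd >= 0) {
		write(fd, "pipe A none", strlen("pipe A none"));
		close(fd);
	}

	return 0;
}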
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 3b5176dd1c86..e41a2d0311f8 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -29,11 +29,17 @@ config DRM_USB config DRM_KMS_HELPER tristate depends on DRM + help + CRTC helpers for KMS drivers. + +config DRM_KMS_FB_HELPER + bool + depends on DRM_KMS_HELPER select FB select FRAMEBUFFER_CONSOLE if !EXPERT select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE help - FB and CRTC helpers for KMS drivers. + FBDEV helpers for KMS drivers. config DRM_LOAD_EDID_FIRMWARE bool "Allow to specify an EDID data set instead of probing for it" @@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER config DRM_KMS_CMA_HELPER bool select DRM_GEM_CMA_HELPER + select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT @@ -96,6 +103,7 @@ config DRM_RADEON select FB_CFB_IMAGEBLIT select FW_LOADER select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select DRM_TTM select POWER_SUPPLY select HWMON @@ -120,64 +128,7 @@ config DRM_I810 selected, the module will be called i810. AGP support is required for this driver to work. -config DRM_I915 - tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" - depends on DRM - depends on AGP - depends on AGP_INTEL - # we need shmfs for the swappable backing store, and in particular - # the shmem_readpage() which depends upon tmpfs - select SHMEM - select TMPFS - select DRM_KMS_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - # i915 depends on ACPI_VIDEO when ACPI is enabled - # but for select to work, need to select ACPI_VIDEO's dependencies, ick - select BACKLIGHT_LCD_SUPPORT if ACPI - select BACKLIGHT_CLASS_DEVICE if ACPI - select VIDEO_OUTPUT_CONTROL if ACPI - select INPUT if ACPI - select THERMAL if ACPI - select ACPI_VIDEO if ACPI - select ACPI_BUTTON if ACPI - help - Choose this option if you have a system that has "Intel Graphics - Media Accelerator" or "HD Graphics" integrated graphics, - including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G, - G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3, - Core i5, Core i7 as well as Atom CPUs with integrated graphics. - If M is selected, the module will be called i915. AGP support - is required for this driver to work. This driver is used by - the Intel driver in X.org 6.8 and XFree86 4.4 and above. It - replaces the older i830 module that supported a subset of the - hardware in older X.org releases. - - Note that the older i810/i815 chipsets require the use of the - i810 driver instead, and the Atom z5xx series has an entirely - different implementation. - -config DRM_I915_KMS - bool "Enable modesetting on intel by default" - depends on DRM_I915 - help - Choose this option if you want kernel modesetting enabled by default, - and you have a new enough userspace to support this. Running old - userspaces with this enabled will cause pain. Note that this causes - the driver to bind to PCI devices, which precludes loading things - like intelfb. - -config DRM_I915_PRELIMINARY_HW_SUPPORT - bool "Enable preliminary support for prerelease Intel hardware by default" - depends on DRM_I915 - help - Choose this option if you have prerelease Intel hardware and want the - i915 driver to support it by default. You can enable such support at - runtime with the module option i915.preliminary_hw_support=1; this - option changes the default for that module option. - - If in doubt, say "N". 
+source "drivers/gpu/drm/i915/Kconfig" config DRM_MGA tristate "Matrox g200/g400" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 385f460459d4..5266e9fae216 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o drm-usb-y := drm_usb.o -drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o +drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index da4a51eae824..8a784c460c89 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -6,6 +6,7 @@ config DRM_AST select FB_SYS_FILLRECT select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select DRM_TTM help Say yes for experimental AST GPU driver. Do not enable diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig index bf67b22723f9..9864559e5fb9 100644 --- a/drivers/gpu/drm/cirrus/Kconfig +++ b/drivers/gpu/drm/cirrus/Kconfig @@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select DRM_TTM help This is a KMS driver for emulated cirrus device in qemu. diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 55ea4ac6fd50..dbcd68709ab7 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -39,6 +39,10 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_edid.h> +MODULE_AUTHOR("David Airlie, Jesse Barnes"); +MODULE_DESCRIPTION("DRM KMS helper"); +MODULE_LICENSE("GPL and additional rights"); + /** * drm_helper_move_panel_connectors_to_head() - move panels to the front in the * connector list diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9e81609b1e29..f1764ec5818b 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2652,6 +2652,50 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic) return 1; } +static int add_3d_struct_modes(struct drm_connector *connector, u16 structure, + const u8 *video_db, u8 video_len, u8 video_index) +{ + struct drm_device *dev = connector->dev; + struct drm_display_mode *newmode; + int modes = 0; + u8 cea_mode; + + if (video_db == NULL || video_index > video_len) + return 0; + + /* CEA modes are numbered 1..127 */ + cea_mode = (video_db[video_index] & 127) - 1; + if (cea_mode >= ARRAY_SIZE(edid_cea_modes)) + return 0; + + if (structure & (1 << 0)) { + newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); + if (newmode) { + newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + if (structure & (1 << 6)) { + newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); + if (newmode) { + newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + if (structure & (1 << 8)) { + newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]); + if (newmode) { + newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF; + drm_mode_probed_add(connector, newmode); + modes++; + } + } + + return modes; +} + /* * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block * @connector: connector corresponding to the HDMI sink 
@@ -2662,10 +2706,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic) * also adds the stereo 3d modes when applicable. */ static int -do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len) +do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len, + const u8 *video_db, u8 video_len) { - int modes = 0, offset = 0, i; - u8 vic_len; + int modes = 0, offset = 0, i, multi_present = 0; + u8 vic_len, hdmi_3d_len = 0; + u16 mask; + u16 structure_all; if (len < 8) goto out; @@ -2689,11 +2736,16 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len) /* 3D_Present */ offset++; - if (db[8 + offset] & (1 << 7)) + if (db[8 + offset] & (1 << 7)) { modes += add_hdmi_mandatory_stereo_modes(connector); + /* 3D_Multi_present */ + multi_present = (db[8 + offset] & 0x60) >> 5; + } + offset++; vic_len = db[8 + offset] >> 5; + hdmi_3d_len = db[8 + offset] & 0x1f; for (i = 0; i < vic_len && len >= (9 + offset + i); i++) { u8 vic; @@ -2701,6 +2753,35 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len) vic = db[9 + offset + i]; modes += add_hdmi_mode(connector, vic); } + offset += 1 + vic_len; + + if (!(multi_present == 1 || multi_present == 2)) + goto out; + + if ((multi_present == 1 && len < (9 + offset)) || + (multi_present == 2 && len < (11 + offset))) + goto out; + + if ((multi_present == 1 && hdmi_3d_len < 2) || + (multi_present == 2 && hdmi_3d_len < 4)) + goto out; + + /* 3D_Structure_ALL */ + structure_all = (db[8 + offset] << 8) | db[9 + offset]; + + /* check if 3D_MASK is present */ + if (multi_present == 2) + mask = (db[10 + offset] << 8) | db[11 + offset]; + else + mask = 0xffff; + + for (i = 0; i < 16; i++) { + if (mask & (1 << i)) + modes += add_3d_struct_modes(connector, + structure_all, + video_db, + video_len, i); + } out: return modes; @@ -2759,8 +2840,8 @@ static int add_cea_modes(struct drm_connector *connector, struct edid *edid) { const u8 *cea = drm_find_cea_extension(edid); - const u8 *db, *hdmi = NULL; - u8 dbl, hdmi_len; + const u8 *db, *hdmi = NULL, *video = NULL; + u8 dbl, hdmi_len, video_len = 0; int modes = 0; if (cea && cea_revision(cea) >= 3) { @@ -2773,8 +2854,11 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) db = &cea[i]; dbl = cea_db_payload_len(db); - if (cea_db_tag(db) == VIDEO_BLOCK) - modes += do_cea_modes(connector, db + 1, dbl); + if (cea_db_tag(db) == VIDEO_BLOCK) { + video = db + 1; + video_len = dbl; + modes += do_cea_modes(connector, video, dbl); + } else if (cea_db_is_hdmi_vsdb(db)) { hdmi = db; hdmi_len = dbl; @@ -2787,7 +2871,8 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid) * be patching their flags when the sink supports stereo 3D. 
*/ if (hdmi) - modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len); + modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video, + video_len); return modes; } diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 21742a81cb9c..720352345452 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -39,10 +39,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> -MODULE_AUTHOR("David Airlie, Jesse Barnes"); -MODULE_DESCRIPTION("DRM KMS helper"); -MODULE_LICENSE("GPL and additional rights"); - static LIST_HEAD(kernel_fb_helper_list); /** diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 45b6ef595965..f227f544aa36 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,6 +2,7 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig index 1f6e2dfaaeae..508cf99a292d 100644 --- a/drivers/gpu/drm/gma500/Kconfig +++ b/drivers/gpu/drm/gma500/Kconfig @@ -5,6 +5,7 @@ config DRM_GMA500 select FB_CFB_FILLRECT select FB_CFB_IMAGEBLIT select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select DRM_TTM # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 select ACPI_VIDEO if ACPI diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig new file mode 100644 index 000000000000..6199d0b5b958 --- /dev/null +++ b/drivers/gpu/drm/i915/Kconfig @@ -0,0 +1,67 @@ +config DRM_I915 + tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" + depends on DRM + depends on AGP + depends on AGP_INTEL + # we need shmfs for the swappable backing store, and in particular + # the shmem_readpage() which depends upon tmpfs + select SHMEM + select TMPFS + select DRM_KMS_HELPER + # i915 depends on ACPI_VIDEO when ACPI is enabled + # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select BACKLIGHT_LCD_SUPPORT if ACPI + select BACKLIGHT_CLASS_DEVICE if ACPI + select VIDEO_OUTPUT_CONTROL if ACPI + select INPUT if ACPI + select ACPI_VIDEO if ACPI + select ACPI_BUTTON if ACPI + help + Choose this option if you have a system that has "Intel Graphics + Media Accelerator" or "HD Graphics" integrated graphics, + including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G, + G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3, + Core i5, Core i7 as well as Atom CPUs with integrated graphics. + If M is selected, the module will be called i915. AGP support + is required for this driver to work. This driver is used by + the Intel driver in X.org 6.8 and XFree86 4.4 and above. It + replaces the older i830 module that supported a subset of the + hardware in older X.org releases. + + Note that the older i810/i815 chipsets require the use of the + i810 driver instead, and the Atom z5xx series has an entirely + different implementation. + +config DRM_I915_KMS + bool "Enable modesetting on intel by default" + depends on DRM_I915 + help + Choose this option if you want kernel modesetting enabled by default, + and you have a new enough userspace to support this. Running old + userspaces with this enabled will cause pain. Note that this causes + the driver to bind to PCI devices, which precludes loading things + like intelfb. 
+ +config DRM_I915_FBDEV + bool "Enable legacy fbdev support for the modesettting intel driver" + depends on DRM_I915 + select DRM_KMS_FB_HELPER + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + default y + help + Choose this option if you have a need for the legacy fbdev + support. Note that this support also provide the linux console + support on top of the intel modesetting driver. + +config DRM_I915_PRELIMINARY_HW_SUPPORT + bool "Enable preliminary support for prerelease Intel hardware by default" + depends on DRM_I915 + help + Choose this option if you have prerelease Intel hardware and want the + i915 driver to support it by default. You can enable such support at + runtime with the module option i915.preliminary_hw_support=1; this + option changes the default for that module option. + + If in doubt, say "N". diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 65e60d26891b..41838eaa799c 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -33,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ intel_panel.o \ intel_pm.o \ intel_i2c.o \ - intel_fb.o \ intel_tv.o \ intel_dvo.o \ intel_ringbuffer.o \ @@ -54,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_ACPI) += intel_acpi.o +i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 61fd61969e21..061182a0ce1b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -27,6 +27,8 @@ */ #include <linux/seq_file.h> +#include <linux/circ_buf.h> +#include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/export.h> @@ -38,9 +40,6 @@ #include <drm/i915_drm.h> #include "i915_drv.h" -#define DRM_I915_RING_DEBUG 1 - - #if defined(CONFIG_DEBUG_FS) enum { @@ -54,6 +53,32 @@ static const char *yesno(int v) return v ? "yes" : "no"; } +/* As the drm_debugfs_init() routines are called before dev->dev_private is + * allocated we need to hook into the minor for release. 
*/ +static int +drm_add_fake_info_node(struct drm_minor *minor, + struct dentry *ent, + const void *key) +{ + struct drm_info_node *node; + + node = kmalloc(sizeof(*node), GFP_KERNEL); + if (node == NULL) { + debugfs_remove(ent); + return -ENOMEM; + } + + node->minor = minor; + node->dent = ent; + node->info_ent = (void *) key; + + mutex_lock(&minor->debugfs_lock); + list_add(&node->list, &minor->debugfs_list); + mutex_unlock(&minor->debugfs_lock); + + return 0; +} + static int i915_capabilities(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -850,6 +875,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; int ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + if (IS_GEN5(dev)) { u16 rgvswctl = I915_READ16(MEMSWCTL); u16 rgvstat = I915_READ16(MEMSTAT_ILK); @@ -1328,6 +1355,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) return 0; } + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); if (ret) return ret; @@ -1402,12 +1431,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_fbdev *ifbdev; + struct intel_fbdev *ifbdev = NULL; struct intel_framebuffer *fb; - int ret; - ret = mutex_lock_interruptible(&dev->mode_config.mutex); +#ifdef CONFIG_DRM_I915_FBDEV + struct drm_i915_private *dev_priv = dev->dev_private; + int ret = mutex_lock_interruptible(&dev->mode_config.mutex); if (ret) return ret; @@ -1423,10 +1452,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) describe_obj(m, fb->obj); seq_putc(m, '\n'); mutex_unlock(&dev->mode_config.mutex); +#endif mutex_lock(&dev->mode_config.fb_lock); list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { - if (&fb->base == ifbdev->helper.fb) + if (ifbdev && &fb->base == ifbdev->helper.fb) continue; seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ", @@ -1730,6 +1760,467 @@ static int i915_pc8_status(struct seq_file *m, void *unused) return 0; } +struct pipe_crc_info { + const char *name; + struct drm_device *dev; + enum pipe pipe; +}; + +static int i915_pipe_crc_open(struct inode *inode, struct file *filep) +{ + struct pipe_crc_info *info = inode->i_private; + struct drm_i915_private *dev_priv = info->dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + + if (!atomic_dec_and_test(&pipe_crc->available)) { + atomic_inc(&pipe_crc->available); + return -EBUSY; /* already open */ + } + + filep->private_data = inode->i_private; + + return 0; +} + +static int i915_pipe_crc_release(struct inode *inode, struct file *filep) +{ + struct pipe_crc_info *info = inode->i_private; + struct drm_i915_private *dev_priv = info->dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + + atomic_inc(&pipe_crc->available); /* release the device */ + + return 0; +} + +/* (6 fields, 8 chars each, space separated (5) + '\n') */ +#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1) +/* account for \'0' */ +#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1) + +static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc) +{ + int head, tail; + + head = atomic_read(&pipe_crc->head); + tail = atomic_read(&pipe_crc->tail); + + return CIRC_CNT(head, tail, 
INTEL_PIPE_CRC_ENTRIES_NR); +} + +static ssize_t +i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count, + loff_t *pos) +{ + struct pipe_crc_info *info = filep->private_data; + struct drm_device *dev = info->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; + char buf[PIPE_CRC_BUFFER_LEN]; + int head, tail, n_entries, n; + ssize_t bytes_read; + + /* + * Don't allow user space to provide buffers not big enough to hold + * a line of data. + */ + if (count < PIPE_CRC_LINE_LEN) + return -EINVAL; + + if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE) + return 0; + + /* nothing to read */ + while (pipe_crc_data_count(pipe_crc) == 0) { + if (filep->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(pipe_crc->wq, + pipe_crc_data_count(pipe_crc))) + return -ERESTARTSYS; + } + + /* We now have one or more entries to read */ + head = atomic_read(&pipe_crc->head); + tail = atomic_read(&pipe_crc->tail); + n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR), + count / PIPE_CRC_LINE_LEN); + bytes_read = 0; + n = 0; + do { + struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail]; + int ret; + + bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN, + "%8u %8x %8x %8x %8x %8x\n", + entry->frame, entry->crc[0], + entry->crc[1], entry->crc[2], + entry->crc[3], entry->crc[4]); + + ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN, + buf, PIPE_CRC_LINE_LEN); + if (ret == PIPE_CRC_LINE_LEN) + return -EFAULT; + + BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR); + tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); + atomic_set(&pipe_crc->tail, tail); + n++; + } while (--n_entries); + + return bytes_read; +} + +static const struct file_operations i915_pipe_crc_fops = { + .owner = THIS_MODULE, + .open = i915_pipe_crc_open, + .read = i915_pipe_crc_read, + .release = i915_pipe_crc_release, +}; + +static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = { + { + .name = "i915_pipe_A_crc", + .pipe = PIPE_A, + }, + { + .name = "i915_pipe_B_crc", + .pipe = PIPE_B, + }, + { + .name = "i915_pipe_C_crc", + .pipe = PIPE_C, + }, +}; + +static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor, + enum pipe pipe) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + struct pipe_crc_info *info = &i915_pipe_crc_data[pipe]; + + info->dev = dev; + ent = debugfs_create_file(info->name, S_IRUGO, root, info, + &i915_pipe_crc_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return drm_add_fake_info_node(minor, ent, info); +} + +static const char * const pipe_crc_sources[] = { + "none", + "plane1", + "plane2", + "pf", + "pipe", +}; + +static const char *pipe_crc_source_name(enum intel_pipe_crc_source source) +{ + BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX); + return pipe_crc_sources[source]; +} + +static int display_crc_ctl_show(struct seq_file *m, void *data) +{ + struct drm_device *dev = m->private; + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < I915_MAX_PIPES; i++) + seq_printf(m, "%c %s\n", pipe_name(i), + pipe_crc_source_name(dev_priv->pipe_crc[i].source)); + + return 0; +} + +static int display_crc_ctl_open(struct inode *inode, struct file *file) +{ + struct drm_device *dev = inode->i_private; + + return single_open(file, display_crc_ctl_show, dev); +} + +static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, + uint32_t *val) +{ + switch (source) { + case 
INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK; + break; + case INTEL_PIPE_CRC_SOURCE_PF: + return -EINVAL; + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK; + break; + default: + *val = 0; + break; + } + + return 0; +} + +static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source, + uint32_t *val) +{ + switch (source) { + case INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_PF: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; + break; + case INTEL_PIPE_CRC_SOURCE_PIPE: + return -EINVAL; + default: + *val = 0; + break; + } + + return 0; +} + +static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, + enum intel_pipe_crc_source source) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + u32 val; + int ret; + + if (!(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev))) + return -ENODEV; + + if (pipe_crc->source == source) + return 0; + + /* forbid changing the source without going back to 'none' */ + if (pipe_crc->source && source) + return -EINVAL; + + if (IS_GEN5(dev) || IS_GEN6(dev)) + ret = ilk_pipe_crc_ctl_reg(source, &val); + else + ret = ivb_pipe_crc_ctl_reg(source, &val); + + if (ret != 0) + return ret; + + /* none -> real source transition */ + if (source) { + DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n", + pipe_name(pipe), pipe_crc_source_name(source)); + + pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) * + INTEL_PIPE_CRC_ENTRIES_NR, + GFP_KERNEL); + if (!pipe_crc->entries) + return -ENOMEM; + + atomic_set(&pipe_crc->head, 0); + atomic_set(&pipe_crc->tail, 0); + } + + pipe_crc->source = source; + + I915_WRITE(PIPE_CRC_CTL(pipe), val); + POSTING_READ(PIPE_CRC_CTL(pipe)); + + /* real source -> none transition */ + if (source == INTEL_PIPE_CRC_SOURCE_NONE) { + DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n", + pipe_name(pipe)); + + intel_wait_for_vblank(dev, pipe); + + kfree(pipe_crc->entries); + pipe_crc->entries = NULL; + } + + return 0; +} + +/* + * Parse pipe CRC command strings: + * command: wsp* object wsp+ name wsp+ source wsp* + * object: 'pipe' + * name: (A | B | C) + * source: (none | plane1 | plane2 | pf) + * wsp: (#0x20 | #0x9 | #0xA)+ + * + * eg.: + * "pipe A plane1" -> Start CRC computations on plane1 of pipe A + * "pipe A none" -> Stop CRC + */ +static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words) +{ + int n_words = 0; + + while (*buf) { + char *end; + + /* skip leading white space */ + buf = skip_spaces(buf); + if (!*buf) + break; /* end of buffer */ + + /* find end of word */ + for (end = buf; *end && !isspace(*end); end++) + ; + + if (n_words == max_words) { + DRM_DEBUG_DRIVER("too many words, allowed <= %d\n", + max_words); + return -EINVAL; /* ran out of words[] before bytes */ + } + + if (*end) + *end++ = '\0'; + words[n_words++] = buf; + buf = end; + } + + return n_words; +} + +enum intel_pipe_crc_object { + PIPE_CRC_OBJECT_PIPE, +}; + +static const char * const pipe_crc_objects[] = { + "pipe", +}; + +static int +display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++) + if (!strcmp(buf, 
pipe_crc_objects[i])) { + *o = i; + return 0; + } + + return -EINVAL; +} + +static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe) +{ + const char name = buf[0]; + + if (name < 'A' || name >= pipe_name(I915_MAX_PIPES)) + return -EINVAL; + + *pipe = name - 'A'; + + return 0; +} + +static int +display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++) + if (!strcmp(buf, pipe_crc_sources[i])) { + *s = i; + return 0; + } + + return -EINVAL; +} + +static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len) +{ +#define N_WORDS 3 + int n_words; + char *words[N_WORDS]; + enum pipe pipe; + enum intel_pipe_crc_object object; + enum intel_pipe_crc_source source; + + n_words = display_crc_ctl_tokenize(buf, words, N_WORDS); + if (n_words != N_WORDS) { + DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n", + N_WORDS); + return -EINVAL; + } + + if (display_crc_ctl_parse_object(words[0], &object) < 0) { + DRM_DEBUG_DRIVER("unknown object %s\n", words[0]); + return -EINVAL; + } + + if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) { + DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]); + return -EINVAL; + } + + if (display_crc_ctl_parse_source(words[2], &source) < 0) { + DRM_DEBUG_DRIVER("unknown source %s\n", words[2]); + return -EINVAL; + } + + return pipe_crc_set_source(dev, pipe, source); +} + +static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_device *dev = m->private; + char *tmpbuf; + int ret; + + if (len == 0) + return 0; + + if (len > PAGE_SIZE - 1) { + DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n", + PAGE_SIZE); + return -E2BIG; + } + + tmpbuf = kmalloc(len + 1, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + if (copy_from_user(tmpbuf, ubuf, len)) { + ret = -EFAULT; + goto out; + } + tmpbuf[len] = '\0'; + + ret = display_crc_ctl_parse(dev, tmpbuf, len); + +out: + kfree(tmpbuf); + if (ret < 0) + return ret; + + *offp += len; + return len; +} + +static const struct file_operations i915_display_crc_ctl_fops = { + .owner = THIS_MODULE, + .open = display_crc_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = display_crc_ctl_write +}; + static int i915_wedged_get(void *data, u64 *val) { @@ -1943,6 +2434,8 @@ i915_max_freq_get(void *data, u64 *val) if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); if (ret) return ret; @@ -1967,6 +2460,8 @@ i915_max_freq_set(void *data, u64 val) if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); @@ -2005,6 +2500,8 @@ i915_min_freq_get(void *data, u64 *val) if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); if (ret) return ret; @@ -2029,6 +2526,8 @@ i915_min_freq_set(void *data, u64 val) if (!(IS_GEN6(dev) || IS_GEN7(dev))) return -ENODEV; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); @@ -2107,32 +2606,6 @@ 
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, i915_cache_sharing_get, i915_cache_sharing_set, "%llu\n"); -/* As the drm_debugfs_init() routines are called before dev->dev_private is - * allocated we need to hook into the minor for release. */ -static int -drm_add_fake_info_node(struct drm_minor *minor, - struct dentry *ent, - const void *key) -{ - struct drm_info_node *node; - - node = kmalloc(sizeof(*node), GFP_KERNEL); - if (node == NULL) { - debugfs_remove(ent); - return -ENOMEM; - } - - node->minor = minor; - node->dent = ent; - node->info_ent = (void *) key; - - mutex_lock(&minor->debugfs_lock); - list_add(&node->list, &minor->debugfs_list); - mutex_unlock(&minor->debugfs_lock); - - return 0; -} - static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; @@ -2254,8 +2727,22 @@ static struct i915_debugfs_files { {"i915_gem_drop_caches", &i915_drop_caches_fops}, {"i915_error_state", &i915_error_state_fops}, {"i915_next_seqno", &i915_next_seqno_fops}, + {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, }; +void intel_display_crc_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i]; + + atomic_set(&pipe_crc->available, 1); + init_waitqueue_head(&pipe_crc->wq); + } +} + int i915_debugfs_init(struct drm_minor *minor) { int ret, i; @@ -2264,6 +2751,12 @@ int i915_debugfs_init(struct drm_minor *minor) if (ret) return ret; + for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { + ret = i915_pipe_crc_create(minor->debugfs_root, minor, i); + if (ret) + return ret; + } + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { ret = i915_debugfs_create(minor->debugfs_root, minor, i915_debugfs_files[i].name, @@ -2283,8 +2776,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor) drm_debugfs_remove_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1, minor); + + for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) { + struct drm_info_list *info_list = + (struct drm_info_list *)&i915_pipe_crc_data[i]; + + drm_debugfs_remove_files(info_list, 1, minor); + } + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { struct drm_info_list *info_list = (struct drm_info_list *) i915_debugfs_files[i].fops; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b3873c945d1b..437886641d90 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1416,6 +1416,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master) master->driver_priv = NULL; } +#ifdef CONFIG_DRM_I915_FBDEV static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { struct apertures_struct *ap; @@ -1436,6 +1437,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) kfree(ap); } +#else +static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ +} +#endif static void i915_dump_device_info(struct drm_i915_private *dev_priv) { @@ -1477,8 +1483,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) info = (struct intel_device_info *) flags; /* Refuse to load on gen6+ without kms enabled. 
*/ - if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) + if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) { + DRM_INFO("Your hardware requires kernel modesetting (KMS)\n"); + DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n"); return -ENODEV; + } dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (dev_priv == NULL) @@ -1505,6 +1514,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); + intel_display_crc_init(dev); + i915_dump_device_info(dev_priv); /* Not all pre-production machines fall into this category, only the @@ -1542,15 +1553,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_uncore_early_sanitize(dev); - if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { - /* The docs do not explain exactly how the calculation can be - * made. It is somewhat guessable, but for now, it's always - * 128MB. - * NB: We can't write IDICR yet because we do not have gt funcs - * set up */ - dev_priv->ellc_size = 128; - DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); - } + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(dev); + + intel_uncore_init(dev); ret = i915_gem_gtt_init(dev); if (ret) @@ -1609,13 +1615,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_mtrrfree; } - /* This must be called before any calls to HAS_PCH_* */ - intel_detect_pch(dev); - intel_irq_init(dev); intel_pm_init(dev); intel_uncore_sanitize(dev); - intel_uncore_init(dev); /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); @@ -1699,6 +1701,7 @@ out_gtt: drm_mm_takedown(&dev_priv->gtt.base.mm); dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); out_regs: + intel_uncore_fini(dev); pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1729,15 +1732,9 @@ int i915_driver_unload(struct drm_device *dev) if (dev_priv->mm.inactive_shrinker.scan_objects) unregister_shrinker(&dev_priv->mm.inactive_shrinker); - mutex_lock(&dev->struct_mutex); - ret = i915_gpu_idle(dev); + ret = i915_gem_suspend(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); - i915_gem_retire_requests(dev); - mutex_unlock(&dev->struct_mutex); - - /* Cancel the retire work handler, which should be idle now. 
*/ - cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->gtt.mappable); arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1852,7 +1849,7 @@ void i915_driver_lastclose(struct drm_device * dev) return; if (drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_fb_restore_mode(dev); + intel_fbdev_restore_mode(dev); vga_switcheroo_process_delayed_switch(); return; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 96f230497cbe..1060a96d2184 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -160,49 +160,58 @@ extern int intel_agp_enabled; static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .num_pipes = 1, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .num_pipes = 1, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .num_pipes = 2, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .num_pipes = 2, .has_hotplug = 1, .has_overlay = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_i965gm_info = { @@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = { .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, + .ring_mask = RENDER_RING, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, .has_pipe_cxsr = 1, .has_hotplug = 1, - .has_bsd_ring = 1, + .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_gm45_info = { @@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = { .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, - .has_bsd_ring = 1, + .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_pineview_info = { @@ -241,42 +252,36 @@ static const struct intel_device_info 
intel_pineview_info = { static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, - .has_bsd_ring = 1, + .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, - .has_bsd_ring = 1, + .ring_mask = RENDER_RING | BSD_RING, }; static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, - .has_bsd_ring = 1, - .has_blt_ring = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .has_llc = 1, - .has_force_wake = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .num_pipes = 2, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, - .has_bsd_ring = 1, - .has_blt_ring = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .has_llc = 1, - .has_force_wake = 1, }; #define GEN7_FEATURES \ .gen = 7, .num_pipes = 3, \ .need_gfx_hws = 1, .has_hotplug = 1, \ - .has_bsd_ring = 1, \ - .has_blt_ring = 1, \ - .has_llc = 1, \ - .has_force_wake = 1 + .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .has_llc = 1 static const struct intel_device_info intel_ivybridge_d_info = { GEN7_FEATURES, @@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = { .is_haswell = 1, .has_ddi = 1, .has_fpga_dbg = 1, - .has_vebox_ring = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, }; static const struct intel_device_info intel_haswell_m_info = { @@ -328,7 +333,7 @@ static const struct intel_device_info intel_haswell_m_info = { .has_ddi = 1, .has_fpga_dbg = 1, .has_fbc = 1, - .has_vebox_ring = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, }; /* @@ -482,9 +487,7 @@ static int i915_drm_freeze(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) { int error; - mutex_lock(&dev->struct_mutex); - error = i915_gem_idle(dev); - mutex_unlock(&dev->struct_mutex); + error = i915_gem_suspend(dev); if (error) { dev_err(&dev->pdev->dev, "GEM idle failed, resume might fail\n"); @@ -747,30 +750,17 @@ int i915_reset(struct drm_device *dev) */ if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->ums.mm_suspended) { - struct intel_ring_buffer *ring; - int i; - + bool hw_contexts_disabled = dev_priv->hw_contexts_disabled; dev_priv->ums.mm_suspended = 0; - i915_gem_init_swizzling(dev); - - for_each_ring(ring, dev_priv, i) - ring->init(ring); - - i915_gem_context_init(dev); - if (dev_priv->mm.aliasing_ppgtt) { - ret = dev_priv->mm.aliasing_ppgtt->enable(dev); - if (ret) - i915_gem_cleanup_aliasing_ppgtt(dev); - } - - /* - * It would make sense to re-init all the other hw state, at - * least the rps/rc6/emon init done within modeset_init_hw. For - * some unknown reason, this blows up my ilk, so don't. 
- */ - + ret = i915_gem_init_hw(dev); + if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled) + DRM_ERROR("HW contexts didn't survive reset\n"); mutex_unlock(&dev->struct_mutex); + if (ret) { + DRM_ERROR("Failed hw init on reset %d\n", ret); + return ret; + } drm_irq_uninstall(dev); drm_irq_install(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a5b7ab0c3fa..2ea33eebf01c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -379,7 +379,8 @@ struct drm_i915_display_funcs { void (*crtc_disable)(struct drm_crtc *crtc); void (*off)(struct drm_crtc *crtc); void (*write_eld)(struct drm_connector *connector, - struct drm_crtc *crtc); + struct drm_crtc *crtc, + struct drm_display_mode *mode); void (*fdi_link_train)(struct drm_crtc *crtc); void (*init_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, @@ -399,6 +400,20 @@ struct drm_i915_display_funcs { struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv); void (*force_wake_put)(struct drm_i915_private *dev_priv); + + uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + + void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, + uint8_t val, bool trace); + void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, + uint16_t val, bool trace); + void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, + uint32_t val, bool trace); + void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, + uint64_t val, bool trace); }; struct intel_uncore { @@ -427,7 +442,6 @@ struct intel_uncore { func(is_valleyview) sep \ func(is_haswell) sep \ func(is_preliminary) sep \ - func(has_force_wake) sep \ func(has_fbc) sep \ func(has_pipe_cxsr) sep \ func(has_hotplug) sep \ @@ -435,9 +449,6 @@ struct intel_uncore { func(has_overlay) sep \ func(overlay_needs_physical) sep \ func(supports_tv) sep \ - func(has_bsd_ring) sep \ - func(has_blt_ring) sep \ - func(has_vebox_ring) sep \ func(has_llc) sep \ func(has_ddi) sep \ func(has_fpga_dbg) @@ -449,6 +460,7 @@ struct intel_device_info { u32 display_mmio_offset; u8 num_pipes:3; u8 gen; + u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); }; @@ -849,6 +861,7 @@ struct intel_gen6_power_mgmt { int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; + bool enabled; struct delayed_work delayed_resume_work; /* @@ -1127,6 +1140,15 @@ struct intel_wm_level { uint32_t fbc_val; }; +struct hsw_wm_values { + uint32_t wm_pipe[3]; + uint32_t wm_lp[3]; + uint32_t wm_lp_spr[3]; + uint32_t wm_linetime[3]; + bool enable_fbc_wm; + enum intel_ddb_partitioning partitioning; +}; + /* * This struct tracks the state needed for the Package C8+ feature. 
* @@ -1196,6 +1218,29 @@ struct i915_package_c8 { } regsave; }; +enum intel_pipe_crc_source { + INTEL_PIPE_CRC_SOURCE_NONE, + INTEL_PIPE_CRC_SOURCE_PLANE1, + INTEL_PIPE_CRC_SOURCE_PLANE2, + INTEL_PIPE_CRC_SOURCE_PF, + INTEL_PIPE_CRC_SOURCE_PIPE, + INTEL_PIPE_CRC_SOURCE_MAX, +}; + +struct intel_pipe_crc_entry { + uint32_t frame; + uint32_t crc[5]; +}; + +#define INTEL_PIPE_CRC_ENTRIES_NR 128 +struct intel_pipe_crc { + atomic_t available; /* exclusive access to the device */ + struct intel_pipe_crc_entry *entries; + enum intel_pipe_crc_source source; + atomic_t head, tail; + wait_queue_head_t wq; +}; + typedef struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1354,8 +1399,10 @@ typedef struct drm_i915_private { struct drm_i915_gem_object *vlv_pctx; +#ifdef CONFIG_DRM_I915_FBDEV /* list of fbdev register on this device */ struct intel_fbdev *fbdev; +#endif /* * The console may be contended at resume, but we don't @@ -1386,6 +1433,9 @@ typedef struct drm_i915_private { uint16_t spr_latency[5]; /* cursor */ uint16_t cur_latency[5]; + + /* current hardware state */ + struct hsw_wm_values hw; } wm; struct i915_package_c8 pc8; @@ -1395,6 +1445,10 @@ typedef struct drm_i915_private { struct i915_dri1_state dri1; /* Old ums support infrastructure, same warning applies. */ struct i915_ums_state ums; + +#ifdef CONFIG_DEBUG_FS + struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; +#endif } drm_i915_private_t; static inline struct drm_i915_private *to_i915(const struct drm_device *dev) @@ -1545,11 +1599,14 @@ struct drm_i915_gem_object { /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; + /** References from framebuffers, locks out tiling changes. */ + unsigned long framebuffer_references; + /** Record of address bit 17 of each page at last unbind. */ unsigned long *bit_17; /** User space pin count and filp owning the pin */ - uint32_t user_pin_count; + unsigned long user_pin_count; struct drm_file *pin_filp; /** for phy allocated objects */ @@ -1663,9 +1720,13 @@ struct drm_i915_file_private { #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) -#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) -#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) -#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) +#define RENDER_RING (1<<RCS) +#define BSD_RING (1<<VCS) +#define BLT_RING (1<<BCS) +#define VEBOX_RING (1<<VECS) +#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) +#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) +#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) @@ -1715,8 +1776,6 @@ struct drm_i915_file_private { #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) -#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) - /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) @@ -1983,7 +2042,7 @@ int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); -int __must_check i915_gem_idle(struct drm_device *dev); +int __must_check i915_gem_suspend(struct drm_device *dev); int __i915_add_request(struct intel_ring_buffer *ring, struct drm_file *file, struct drm_i915_gem_object *batch_obj, @@ -2181,6 +2240,11 @@ int i915_verify_lists(struct drm_device *dev); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); void i915_debugfs_cleanup(struct drm_minor *minor); +#ifdef CONFIG_DEBUG_FS +void intel_display_crc_init(struct drm_device *dev); +#else +static inline void intel_display_crc_init(struct drm_device *dev) {} +#endif /* i915_gpu_error.c */ __printf(2, 3) @@ -2337,37 +2401,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, int vlv_gpu_freq(int ddr_freq, int val); int vlv_freq_opcode(int ddr_freq, int val); -#define __i915_read(x) \ - u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); -__i915_read(8) -__i915_read(16) -__i915_read(32) -__i915_read(64) -#undef __i915_read - -#define __i915_write(x) \ - void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); -__i915_write(8) -__i915_write(16) -__i915_write(32) -__i915_write(64) -#undef __i915_write - -#define I915_READ8(reg) i915_read8(dev_priv, (reg), true) -#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) - -#define I915_READ16(reg) i915_read16(dev_priv, (reg), true) -#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) -#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) -#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) - -#define I915_READ(reg) i915_read32(dev_priv, (reg), true) -#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) -#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) -#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) - -#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) -#define I915_READ64(reg) i915_read64(dev_priv, (reg), true) +#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) +#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) + +#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) +#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) +#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) +#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) + +#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) +#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) +#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) +#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) + +#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) +#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) 
(void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 13c885d66383..34df59b660f8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -64,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc); static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc); -static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); -static long i915_gem_shrink_all(struct drm_i915_private *dev_priv); +static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); +static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); static bool cpu_cache_is_coherent(struct drm_device *dev, @@ -1082,7 +1082,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, mod_timer(&timer, expire); } - schedule(); + io_schedule(); if (timeout) timeout_jiffies = expire - jiffies; @@ -1728,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) return 0; } -static long +static unsigned long __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, bool purgeable_only) { struct list_head still_bound_list; struct drm_i915_gem_object *obj, *next; - long count = 0; + unsigned long count = 0; list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, @@ -1800,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, return count; } -static long +static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target) { return __i915_gem_shrink(dev_priv, target, true); } -static long +static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) { struct drm_i915_gem_object *obj, *next; @@ -1816,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv) list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, global_list) { - if (obj->pages_pin_count == 0) + if (i915_gem_object_put_pages(obj) == 0) freed += obj->base.size >> PAGE_SHIFT; - i915_gem_object_put_pages(obj); } return freed; } @@ -1903,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) sg->length += PAGE_SIZE; } last_pfn = page_to_pfn(page); + + /* Check that the i965g/gm workaround works. 
*/ + WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); } #ifdef CONFIG_SWIOTLB if (!swiotlb_nr_tbl()) @@ -2404,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev) for_each_ring(ring, dev_priv, i) i915_gem_reset_ring_lists(dev_priv, ring); + i915_gem_cleanup_ringbuffer(dev); + i915_gem_restore_fences(dev); } @@ -3927,6 +3931,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, goto out; } + if (obj->user_pin_count == ULONG_MAX) { + ret = -EBUSY; + goto out; + } + if (obj->user_pin_count == 0) { ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); if (ret) @@ -4261,17 +4270,18 @@ void i915_gem_vma_destroy(struct i915_vma *vma) } int -i915_gem_idle(struct drm_device *dev) +i915_gem_suspend(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret = 0; + mutex_lock(&dev->struct_mutex); if (dev_priv->ums.mm_suspended) - return 0; + goto err; ret = i915_gpu_idle(dev); if (ret) - return ret; + goto err; i915_gem_retire_requests(dev); @@ -4279,16 +4289,26 @@ i915_gem_idle(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_gem_evict_everything(dev); - del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); - i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); - /* Cancel the retire work handler, which should be idle now. */ + /* Hack! Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + * And not confound ums.mm_suspended! + */ + dev_priv->ums.mm_suspended = !drm_core_check_feature(dev, + DRIVER_MODESET); + mutex_unlock(&dev->struct_mutex); + + del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); cancel_delayed_work_sync(&dev_priv->mm.retire_work); cancel_delayed_work_sync(&dev_priv->mm.idle_work); return 0; + +err: + mutex_unlock(&dev->struct_mutex); + return ret; } int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) @@ -4541,26 +4561,12 @@ int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; drm_irq_uninstall(dev); - mutex_lock(&dev->struct_mutex); - ret = i915_gem_idle(dev); - - /* Hack! Don't let anybody do execbuf while we don't control the chip. - * We need to replace this with a semaphore, or something. - * And not confound ums.mm_suspended! 
- */ - if (ret != 0) - dev_priv->ums.mm_suspended = 1; - mutex_unlock(&dev->struct_mutex); - - return ret; + return i915_gem_suspend(dev); } void @@ -4571,11 +4577,9 @@ i915_gem_lastclose(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - mutex_lock(&dev->struct_mutex); - ret = i915_gem_idle(dev); + ret = i915_gem_suspend(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); - mutex_unlock(&dev->struct_mutex); } static void @@ -4947,6 +4951,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) if (unlock) mutex_unlock(&dev->struct_mutex); + return count; } @@ -5018,7 +5023,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; - int nr_to_scan = sc->nr_to_scan; unsigned long freed; bool unlock = true; @@ -5032,15 +5036,17 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) unlock = false; } - freed = i915_gem_purge(dev_priv, nr_to_scan); - if (freed < nr_to_scan) - freed += __i915_gem_shrink(dev_priv, nr_to_scan, - false); - if (freed < nr_to_scan) + freed = i915_gem_purge(dev_priv, sc->nr_to_scan); + if (freed < sc->nr_to_scan) + freed += __i915_gem_shrink(dev_priv, + sc->nr_to_scan - freed, + false); + if (freed < sc->nr_to_scan) freed += i915_gem_shrink_all(dev_priv); if (unlock) mutex_unlock(&dev->struct_mutex); + return freed; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1a877a547290..cc619c138777 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -220,7 +220,6 @@ static int create_default_context(struct drm_i915_private *dev_priv) * may not be available. To avoid this we always pin the * default context. */ - dev_priv->ring[RCS].default_context = ctx; ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); @@ -233,6 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv) goto err_unpin; } + dev_priv->ring[RCS].default_context = ctx; + DRM_DEBUG_DRIVER("Default HW context loaded\n"); return 0; @@ -288,16 +289,24 @@ void i915_gem_context_fini(struct drm_device *dev) * other code, leading to spurious errors. */ intel_gpu_reset(dev); - i915_gem_object_unpin(dctx->obj); - /* When default context is created and switched to, base object refcount * will be 2 (+1 from object creation and +1 from do_switch()). * i915_gem_context_fini() will be called after gpu_idle() has switched * to default context. So we need to unreference the base object once * to offset the do_switch part, so that i915_gem_context_unreference() * can then free the base object correctly. 
*/ - drm_gem_object_unreference(&dctx->obj->base); + WARN_ON(!dev_priv->ring[RCS].last_context); + if (dev_priv->ring[RCS].last_context == dctx) { + /* Fake switch to NULL context */ + WARN_ON(dctx->obj->active); + i915_gem_object_unpin(dctx->obj); + i915_gem_context_unreference(dctx); + } + + i915_gem_object_unpin(dctx->obj); i915_gem_context_unreference(dctx); + dev_priv->ring[RCS].default_context = NULL; + dev_priv->ring[RCS].last_context = NULL; } static int context_idr_cleanup(int id, void *p, void *data) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index ac9ebe98f8b0..b13905348048 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, return -EINVAL; } - if (obj->pin_count) { + if (obj->pin_count || obj->framebuffer_references) { drm_gem_object_unreference_unlocked(&obj->base); return -EBUSY; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 915c8ca08969..5dde81026471 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -910,8 +910,12 @@ void i915_capture_error_state(struct drm_device *dev) return; } - DRM_INFO("capturing error event; look for more information in " - "/sys/class/drm/card%d/error\n", dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", + dev->primary->index); + DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); + DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); + DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); + DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); kref_init(&error->ref); error->eir = I915_READ(EIR); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d1739d3bdae9..eb67bd98bc42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -30,6 +30,7 @@ #include <linux/sysrq.h> #include <linux/slab.h> +#include <linux/circ_buf.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" @@ -518,6 +519,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe) } } +static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) +{ + /* Gen2 doesn't have a hardware frame counter */ + return 0; +} + /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ @@ -526,7 +533,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long high_frame; unsigned long low_frame; - u32 high1, high2, low; + u32 high1, high2, low, pixel, vbl_start; if (!i915_pipe_enabled(dev, pipe)) { DRM_DEBUG_DRIVER("trying to get vblank count for disabled " @@ -534,6 +541,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) return 0; } + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + const struct drm_display_mode *mode = + &intel_crtc->config.adjusted_mode; + + vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; + } else { + enum transcoder cpu_transcoder = + intel_pipe_to_cpu_transcoder(dev_priv, pipe); + u32 htotal; + + htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; + 
vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; + + vbl_start *= htotal; + } + high_frame = PIPEFRAME(pipe); low_frame = PIPEFRAMEPIXEL(pipe); @@ -544,13 +569,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) */ do { high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; - low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; + low = I915_READ(low_frame); high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; } while (high1 != high2); high1 >>= PIPE_FRAME_HIGH_SHIFT; + pixel = low & PIPE_PIXEL_MASK; low >>= PIPE_FRAME_LOW_SHIFT; - return (high1 << 8) | low; + + /* + * The frame counter increments at beginning of active. + * Cook up a vblank counter by also checking the pixel + * counter against vblank start. + */ + return ((high1 << 8) | low) + (pixel >= vbl_start); } static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) @@ -567,37 +599,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) return I915_READ(reg); } +static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t status; + + if (IS_VALLEYVIEW(dev)) { + status = pipe == PIPE_A ? + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + return I915_READ(VLV_ISR) & status; + } else if (IS_GEN2(dev)) { + status = pipe == PIPE_A ? + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + return I915_READ16(ISR) & status; + } else if (INTEL_INFO(dev)->gen < 5) { + status = pipe == PIPE_A ? + I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : + I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; + + return I915_READ(ISR) & status; + } else if (INTEL_INFO(dev)->gen < 7) { + status = pipe == PIPE_A ? + DE_PIPEA_VBLANK : + DE_PIPEB_VBLANK; + + return I915_READ(DEISR) & status; + } else { + switch (pipe) { + default: + case PIPE_A: + status = DE_PIPEA_VBLANK_IVB; + break; + case PIPE_B: + status = DE_PIPEB_VBLANK_IVB; + break; + case PIPE_C: + status = DE_PIPEC_VBLANK_IVB; + break; + } + + return I915_READ(DEISR) & status; + } +} + static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, int *vpos, int *hpos) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 vbl = 0, position = 0; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; + int position; int vbl_start, vbl_end, htotal, vtotal; bool in_vbl = true; int ret = 0; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - if (!i915_pipe_enabled(dev, pipe)) { + if (!intel_crtc->active) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); return 0; } - /* Get vtotal. */ - vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); + htotal = mode->crtc_htotal; + vtotal = mode->crtc_vtotal; + vbl_start = mode->crtc_vblank_start; + vbl_end = mode->crtc_vblank_end; - if (INTEL_INFO(dev)->gen >= 4) { + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. 
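/*
 * Editor's sketch (not part of the patch): the "cooked" vblank count
 * returned above can be exercised standalone. The hardware frame
 * counter increments at the start of active video, so the pixel
 * counter is compared against vblank start to decide whether the
 * current frame's vblank has already begun. Timings are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cooked_vblank_count(uint32_t frame, uint32_t pixel,
				    uint32_t vbl_start_px)
{
	/* bump the frame count once the pixel counter passes vblank start */
	return frame + (pixel >= vbl_start_px);
}

int main(void)
{
	uint32_t vbl_start_px = 1084 * 2200;	/* made-up vblank start, in pixels */

	printf("%u\n", cooked_vblank_count(100, 0, vbl_start_px));		/* 100: still in active */
	printf("%u\n", cooked_vblank_count(100, vbl_start_px, vbl_start_px));	/* 101: past vblank start */
	return 0;
}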
*/ - position = I915_READ(PIPEDSL(pipe)); + if (IS_GEN2(dev)) + position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; + else + position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; - /* Decode into vertical scanout position. Don't have - * horizontal scanout position. + /* + * The scanline counter increments at the leading edge + * of hsync, ie. it completely misses the active portion + * of the line. Fix up the counter at both edges of vblank + * to get a more accurate picture whether we're in vblank + * or not. */ - *vpos = position & 0x1fff; - *hpos = 0; + in_vbl = intel_pipe_in_vblank(dev, pipe); + if ((in_vbl && position == vbl_start - 1) || + (!in_vbl && position == vbl_end - 1)) + position = (position + 1) % vtotal; } else { /* Have access to pixelcount since start of frame. * We can split this into vertical and horizontal @@ -605,28 +698,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, */ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; - htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); - *vpos = position / htotal; - *hpos = position - (*vpos * htotal); + /* convert to pixel counts */ + vbl_start *= htotal; + vbl_end *= htotal; + vtotal *= htotal; } - /* Query vblank area. */ - vbl = I915_READ(VBLANK(cpu_transcoder)); + in_vbl = position >= vbl_start && position < vbl_end; - /* Test position against vblank region. */ - vbl_start = vbl & 0x1fff; - vbl_end = (vbl >> 16) & 0x1fff; - - if ((*vpos < vbl_start) || (*vpos > vbl_end)) - in_vbl = false; - - /* Inside "upper part" of vblank area? Apply corrective offset: */ - if (in_vbl && (*vpos >= vbl_start)) - *vpos = *vpos - vtotal; + /* + * While in vblank, position will be negative + * counting up towards 0 at vbl_end. And outside + * vblank, position will be positive counting + * up since vbl_end. + */ + if (position >= vbl_start) + position -= vbl_end; + else + position += vtotal - vbl_end; - /* Readouts valid? */ - if (vbl > 0) - ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + *vpos = position; + *hpos = 0; + } else { + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } /* In vblank? 
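/*
 * Editor's sketch (not part of the patch): the scanout position
 * convention used above reports positions relative to vblank end, so
 * values are negative while inside vblank and count up from 0 through
 * the active region. The vertical timings below are hypothetical.
 */
#include <stdio.h>

static int relative_scanout_pos(int position, int vbl_start, int vbl_end,
				int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;		/* in vblank: negative */
	else
		return position + vtotal - vbl_end;	/* active: positive */
}

int main(void)
{
	/* made-up 1080p-like vertical timings */
	int vbl_start = 1080, vbl_end = 1125, vtotal = 1125;

	printf("%d\n", relative_scanout_pos(1080, vbl_start, vbl_end, vtotal));	/* -45 */
	printf("%d\n", relative_scanout_pos(1124, vbl_start, vbl_end, vtotal));	/* -1 */
	printf("%d\n", relative_scanout_pos(0, vbl_start, vbl_end, vtotal));	/* 0 */
	return 0;
}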
*/ if (in_vbl) @@ -1092,6 +1189,83 @@ static void dp_aux_irq_handler(struct drm_device *dev) wake_up_all(&dev_priv->gmbus_wait_queue); } +#if defined(CONFIG_DEBUG_FS) +static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe, + uint32_t crc0, uint32_t crc1, + uint32_t crc2, uint32_t crc3, + uint32_t crc4) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; + struct intel_pipe_crc_entry *entry; + int head, tail; + + if (!pipe_crc->entries) { + DRM_ERROR("spurious interrupt\n"); + return; + } + + head = atomic_read(&pipe_crc->head); + tail = atomic_read(&pipe_crc->tail); + + if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { + DRM_ERROR("CRC buffer overflowing\n"); + return; + } + + entry = &pipe_crc->entries[head]; + + entry->frame = dev->driver->get_vblank_counter(dev, pipe); + entry->crc[0] = crc0; + entry->crc[1] = crc1; + entry->crc[2] = crc2; + entry->crc[3] = crc3; + entry->crc[4] = crc4; + + head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); + atomic_set(&pipe_crc->head, head); + + wake_up_interruptible(&pipe_crc->wq); +} + +static void hsw_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + display_pipe_crc_update(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + 0, 0, 0, 0); +} + +static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + display_pipe_crc_update(dev, pipe, + I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + I915_READ(PIPE_CRC_RES_2_IVB(pipe)), + I915_READ(PIPE_CRC_RES_3_IVB(pipe)), + I915_READ(PIPE_CRC_RES_4_IVB(pipe)), + I915_READ(PIPE_CRC_RES_5_IVB(pipe))); +} + +static void ilk_pipe_crc_update(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + display_pipe_crc_update(dev, pipe, + I915_READ(PIPE_CRC_RES_RED_ILK(pipe)), + I915_READ(PIPE_CRC_RES_GREEN_ILK(pipe)), + I915_READ(PIPE_CRC_RES_BLUE_ILK(pipe)), + I915_READ(PIPE_CRC_RES_RES1_ILK(pipe)), + I915_READ(PIPE_CRC_RES_RES2_ILK(pipe))); +} +#else +static inline void hsw_pipe_crc_update(struct drm_device *dev, int pipe) {} +static inline void ivb_pipe_crc_update(struct drm_device *dev, int pipe) {} +static inline void ilk_pipe_crc_update(struct drm_device *dev, int pipe) {} +#endif + /* The RPS events need forcewake, so we add them to a work queue and mask their * IMR bits until the work is done. Other interrupts can be processed without * the work queue. 
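/*
 * Editor's sketch (not part of the patch): the CRC entries captured
 * above land in a single-producer ring whose size is a power of two,
 * so head/tail wrap with a simple mask. A reduced userspace model of
 * just the indexing; the 128-entry size, payload and locking are
 * assumptions for illustration only.
 */
#include <stdio.h>

#define NR_ENTRIES	128			/* must be a power of two */
#define RING_SPACE(head, tail)	(((tail) - (head) - 1) & (NR_ENTRIES - 1))

static unsigned int head, tail;

static int ring_push(void)
{
	if (RING_SPACE(head, tail) < 1)
		return -1;			/* full: drop, like the overflow case above */
	/* write the entry payload at index 'head' here */
	head = (head + 1) & (NR_ENTRIES - 1);
	return 0;
}

int main(void)
{
	int dropped = 0, i;

	for (i = 0; i < 200; i++)
		dropped += ring_push() < 0;
	printf("accepted %d, dropped %d\n", 200 - dropped, dropped);	/* 127, 73 */
	return 0;
}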
*/ @@ -1254,21 +1428,26 @@ static void ivb_err_int_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 err_int = I915_READ(GEN7_ERR_INT); + enum pipe pipe; if (err_int & ERR_INT_POISON) DRM_ERROR("Poison interrupt\n"); - if (err_int & ERR_INT_FIFO_UNDERRUN_A) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) - DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); - - if (err_int & ERR_INT_FIFO_UNDERRUN_B) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) - DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); + for_each_pipe(pipe) { + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { + if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, + false)) + DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", + pipe_name(pipe)); + } - if (err_int & ERR_INT_FIFO_UNDERRUN_C) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) - DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); + if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { + if (IS_IVYBRIDGE(dev)) + ivb_pipe_crc_update(dev, pipe); + else + hsw_pipe_crc_update(dev, pipe); + } + } I915_WRITE(GEN7_ERR_INT, err_int); } @@ -1363,6 +1542,12 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); + if (de_iir & DE_PIPEA_CRC_DONE) + ilk_pipe_crc_update(dev, PIPE_A); + + if (de_iir & DE_PIPEB_CRC_DONE) + ilk_pipe_crc_update(dev, PIPE_B); + if (de_iir & DE_PLANEA_FLIP_DONE) { intel_prepare_page_flip(dev, 0); intel_finish_page_flip_plane(dev, 0); @@ -1988,6 +2173,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) if (tmp & RING_WAIT) { DRM_ERROR("Kicking stuck wait on %s\n", ring->name); + i915_handle_error(dev, false); I915_WRITE_CTL(ring, tmp); return HANGCHECK_KICK; } @@ -1999,6 +2185,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) case 1: DRM_ERROR("Kicking stuck semaphore on %s\n", ring->name); + i915_handle_error(dev, false); I915_WRITE_CTL(ring, tmp); return HANGCHECK_KICK; case 0: @@ -2337,8 +2524,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev) } else { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | - DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | - DE_PIPEA_FIFO_UNDERRUN | DE_POISON); + DE_AUX_CHANNEL_A | + DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | + DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | + DE_POISON); extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; } @@ -3153,18 +3342,21 @@ void intel_irq_init(struct drm_device *dev) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - dev->driver->get_vblank_counter = i915_get_vblank_counter; - dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev)) { + dev->max_vblank_count = 0; + dev->driver->get_vblank_counter = i8xx_get_vblank_counter; + } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; + } else { + dev->driver->get_vblank_counter = i915_get_vblank_counter; + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ } - if (drm_core_check_feature(dev, DRIVER_MODESET)) + if (drm_core_check_feature(dev, DRIVER_MODESET)) { dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; - else - dev->driver->get_vblank_timestamp = NULL; - dev->driver->get_scanout_position = 
i915_get_crtc_scanoutpos; + dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; + } if (IS_VALLEYVIEW(dev)) { dev->driver->irq_handler = valleyview_irq_handler; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 95385023e0ba..0e7488b64965 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -26,6 +26,7 @@ #define _I915_REG_H_ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) +#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc)) #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) @@ -722,8 +723,12 @@ #define GEN7_ERR_INT 0x44040 #define ERR_INT_POISON (1<<31) #define ERR_INT_MMIO_UNCLAIMED (1<<13) +#define ERR_INT_PIPE_CRC_DONE_C (1<<8) #define ERR_INT_FIFO_UNDERRUN_C (1<<6) +#define ERR_INT_PIPE_CRC_DONE_B (1<<5) #define ERR_INT_FIFO_UNDERRUN_B (1<<3) +#define ERR_INT_PIPE_CRC_DONE_A (1<<2) +#define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3)) #define ERR_INT_FIFO_UNDERRUN_A (1<<0) #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) @@ -1835,6 +1840,61 @@ * Display engine regs */ +/* Pipe A CRC regs */ +#define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) +#define PIPE_CRC_ENABLE (1 << 31) +#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) +#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) +#define PIPE_CRC_SOURCE_PF_IVB (2 << 29) +#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) +#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) +#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) +/* embedded DP port on the north display block, reserved on ivb */ +#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) +#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ +#define _PIPE_CRC_RES_1_A_IVB 0x60064 +#define _PIPE_CRC_RES_2_A_IVB 0x60068 +#define _PIPE_CRC_RES_3_A_IVB 0x6006c +#define _PIPE_CRC_RES_4_A_IVB 0x60070 +#define _PIPE_CRC_RES_5_A_IVB 0x60074 + +#define _PIPE_CRC_RES_RED_A_ILK 0x60060 +#define _PIPE_CRC_RES_GREEN_A_ILK 0x60064 +#define _PIPE_CRC_RES_BLUE_A_ILK 0x60068 +#define _PIPE_CRC_RES_RES1_A_ILK 0x6006c +#define _PIPE_CRC_RES_RES2_A_ILK 0x60080 + +/* Pipe B CRC regs */ +#define _PIPE_CRC_CTL_B 0x61050 +#define _PIPE_CRC_RES_1_B_IVB 0x61064 +#define _PIPE_CRC_RES_2_B_IVB 0x61068 +#define _PIPE_CRC_RES_3_B_IVB 0x6106c +#define _PIPE_CRC_RES_4_B_IVB 0x61070 +#define _PIPE_CRC_RES_5_B_IVB 0x61074 + +#define PIPE_CRC_CTL(pipe) _PIPE(pipe, _PIPE_CRC_CTL_A, _PIPE_CRC_CTL_B) +#define PIPE_CRC_RES_1_IVB(pipe) \ + _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) +#define PIPE_CRC_RES_2_IVB(pipe) \ + _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) +#define PIPE_CRC_RES_3_IVB(pipe) \ + _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) +#define PIPE_CRC_RES_4_IVB(pipe) \ + _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) +#define PIPE_CRC_RES_5_IVB(pipe) \ + _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) + +#define PIPE_CRC_RES_RED_ILK(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A_ILK, 0x01000) +#define PIPE_CRC_RES_GREEN_ILK(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A_ILK, 0x01000) +#define PIPE_CRC_RES_BLUE_ILK(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A_ILK, 0x01000) +#define PIPE_CRC_RES_RES1_ILK(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_ILK, 0x01000) +#define PIPE_CRC_RES_RES2_ILK(pipe) \ + _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_ILK, 0x01000) + /* Pipe A timing regs */ #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 
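/*
 * Editor's sketch (not part of the patch): how the per-pipe register
 * macros introduced above expand. _PIPE() interpolates between the
 * pipe A and pipe B addresses, while the new _PIPE_INC() applies a
 * fixed per-pipe stride; the register pair below is just the CRC
 * control offsets reused as an example.
 */
#include <stdio.h>

#define _PIPE(pipe, a, b)		((a) + (pipe) * ((b) - (a)))
#define _PIPE_INC(pipe, base, inc)	((base) + (pipe) * (inc))

int main(void)
{
	unsigned int ctl_a = 0x60050, ctl_b = 0x61050;	/* pipe A/B regs 0x1000 apart */

	printf("pipe B via _PIPE:     0x%x\n", _PIPE(1, ctl_a, ctl_b));		/* 0x61050 */
	printf("pipe C via _PIPE_INC: 0x%x\n", _PIPE_INC(2, ctl_a, 0x1000));	/* 0x62050 */
	return 0;
}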
@@ -1857,7 +1917,6 @@ #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) - #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) @@ -3251,11 +3310,11 @@ /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK 0x45100 -#define WM0_PIPE_PLANE_MASK (0x7f<<16) +#define WM0_PIPE_PLANE_MASK (0xffff<<16) #define WM0_PIPE_PLANE_SHIFT 16 -#define WM0_PIPE_SPRITE_MASK (0x3f<<8) +#define WM0_PIPE_SPRITE_MASK (0xff<<8) #define WM0_PIPE_SPRITE_SHIFT 8 -#define WM0_PIPE_CURSOR_MASK (0x1f) +#define WM0_PIPE_CURSOR_MASK (0xff) #define WM0_PIPEB_ILK 0x45104 #define WM0_PIPEC_IVB 0x45200 @@ -3265,9 +3324,9 @@ #define WM1_LP_LATENCY_MASK (0x7f<<24) #define WM1_LP_FBC_MASK (0xf<<20) #define WM1_LP_FBC_SHIFT 20 -#define WM1_LP_SR_MASK (0x1ff<<8) +#define WM1_LP_SR_MASK (0x7ff<<8) #define WM1_LP_SR_SHIFT 8 -#define WM1_LP_CURSOR_MASK (0x3f) +#define WM1_LP_CURSOR_MASK (0xff) #define WM2_LP_ILK 0x4510c #define WM2_LP_EN (1<<31) #define WM3_LP_ILK 0x45110 @@ -3348,17 +3407,17 @@ * } while (high1 != high2); * frame = (high1 << 8) | low1; */ -#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) +#define _PIPEAFRAMEHIGH 0x70040 #define PIPE_FRAME_HIGH_MASK 0x0000ffff #define PIPE_FRAME_HIGH_SHIFT 0 -#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) +#define _PIPEAFRAMEPIXEL 0x70044 #define PIPE_FRAME_LOW_MASK 0xff000000 #define PIPE_FRAME_LOW_SHIFT 24 #define PIPE_PIXEL_MASK 0x00ffffff #define PIPE_PIXEL_SHIFT 0 /* GM45+ just has to be different */ -#define _PIPEA_FRMCOUNT_GM45 0x70040 -#define _PIPEA_FLIPCOUNT_GM45 0x70044 +#define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040) +#define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044) #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) /* Cursor A & B regs */ @@ -3489,10 +3548,10 @@ #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) #define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) -#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) -#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) -#define _PIPEB_FRMCOUNT_GM45 0x71040 -#define _PIPEB_FLIPCOUNT_GM45 0x71044 +#define _PIPEBFRAMEHIGH 0x71040 +#define _PIPEBFRAMEPIXEL 0x71044 +#define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040) +#define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044) /* Display B control */ @@ -3860,12 +3919,14 @@ #define DE_PIPEB_ODD_FIELD (1 << 13) #define DE_PIPEB_LINE_COMPARE (1 << 12) #define DE_PIPEB_VSYNC (1 << 11) +#define DE_PIPEB_CRC_DONE (1 << 10) #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) #define DE_PIPEA_VBLANK (1 << 7) #define DE_PIPEA_EVEN_FIELD (1 << 6) #define DE_PIPEA_ODD_FIELD (1 << 5) #define DE_PIPEA_LINE_COMPARE (1 << 4) #define DE_PIPEA_VSYNC (1 << 3) +#define DE_PIPEA_CRC_DONE (1 << 2) #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) /* More Ivybridge lolz */ @@ -4867,7 +4928,17 @@ #define AUD_CONFIG_LOWER_N_SHIFT 4 #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 -#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 
16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16) +#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16) #define AUD_CONFIG_DISABLE_NCTS (1 << 3) /* HSW Audio */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 3538370e3a47..a088f1f46bdb 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -369,7 +369,8 @@ int i915_save_state(struct drm_device *dev) intel_disable_gt_powersave(dev); /* Cache mode state */ - dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); + if (INTEL_INFO(dev)->gen < 7) + dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); /* Memory Arbitration state */ dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); @@ -418,7 +419,9 @@ int i915_restore_state(struct drm_device *dev) } /* Cache mode state */ - I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); + if (INTEL_INFO(dev)->gen < 7) + I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | + 0xffff0000); /* Memory arbitration state */ I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 1beec51b8e26..cef38fd320a7 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -253,6 +253,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct drm_i915_private *dev_priv = dev->dev_private; int ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) { u32 freq; @@ -285,6 +287,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); @@ -309,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) { @@ -357,6 +363,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute struct drm_i915_private *dev_priv = dev->dev_private; int ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev_priv->dev)) ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); @@ -381,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + mutex_lock(&dev_priv->rps.hw_lock); if (IS_VALLEYVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 942b9acb0d8e..2e01bd3a5d8c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -274,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder) struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; u32 adpa; - if (HAS_PCH_SPLIT(dev)) + 
if (INTEL_INFO(dev)->gen >= 5) adpa = ADPA_HOTPLUG_BITS; else adpa = 0; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 6d335f8ca343..31f4fe271388 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1345,6 +1345,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { .destroy = intel_ddi_destroy, }; +static struct intel_connector * +intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) +{ + struct intel_connector *connector; + enum port port = intel_dig_port->port; + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return NULL; + + intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); + if (!intel_dp_init_connector(intel_dig_port, connector)) { + kfree(connector); + return NULL; + } + + return connector; +} + +static struct intel_connector * +intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) +{ + struct intel_connector *connector; + enum port port = intel_dig_port->port; + + connector = kzalloc(sizeof(*connector), GFP_KERNEL); + if (!connector) + return NULL; + + intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); + intel_hdmi_init_connector(intel_dig_port, connector); + + return connector; +} + void intel_ddi_init(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1369,12 +1404,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port) if (!intel_dig_port) return; - dp_connector = kzalloc(sizeof(*dp_connector), GFP_KERNEL); - if (!dp_connector) { - kfree(intel_dig_port); - return; - } - intel_encoder = &intel_dig_port->base; encoder = &intel_encoder->base; @@ -1394,29 +1423,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); - intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = false; intel_encoder->hot_plug = intel_ddi_hot_plug; - if (init_dp && !intel_dp_init_connector(intel_dig_port, dp_connector)) { - drm_encoder_cleanup(encoder); - kfree(intel_dig_port); - kfree(dp_connector); - return; - } + if (init_dp) + dp_connector = intel_ddi_init_dp_connector(intel_dig_port); /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... 
*/ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { - hdmi_connector = kzalloc(sizeof(*hdmi_connector), - GFP_KERNEL); - if (!hdmi_connector) - return; + if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) + hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port); - intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); - intel_hdmi_init_connector(intel_dig_port, hdmi_connector); + if (!dp_connector && !hdmi_connector) { + drm_encoder_cleanup(encoder); + kfree(intel_dig_port); } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 617b963dfb67..8905f83166f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -309,32 +309,30 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p2_slow = 7, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_vlv_dac = { - .dot = { .min = 25000, .max = 270000 }, +static const intel_limit_t intel_limits_vlv = { + /* + * These are the data rate limits (measured in fast clocks) + * since those are the strictest limits we have. The fast + * clock and actual rate limits are more relaxed, so checking + * them would make no difference. + */ + .dot = { .min = 25000 * 5, .max = 270000 * 5 }, .vco = { .min = 4000000, .max = 6000000 }, .n = { .min = 1, .max = 7 }, - .m = { .min = 22, .max = 450 }, /* guess */ .m1 = { .min = 2, .max = 3 }, .m2 = { .min = 11, .max = 156 }, - .p = { .min = 10, .max = 30 }, - .p1 = { .min = 1, .max = 3 }, - .p2 = { .dot_limit = 270000, - .p2_slow = 2, .p2_fast = 20 }, -}; - -static const intel_limit_t intel_limits_vlv_hdmi = { - .dot = { .min = 25000, .max = 270000 }, - .vco = { .min = 4000000, .max = 6000000 }, - .n = { .min = 1, .max = 7 }, - .m = { .min = 60, .max = 300 }, /* guess */ - .m1 = { .min = 2, .max = 3 }, - .m2 = { .min = 11, .max = 156 }, - .p = { .min = 10, .max = 30 }, .p1 = { .min = 2, .max = 3 }, - .p2 = { .dot_limit = 270000, - .p2_slow = 2, .p2_fast = 20 }, + .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; +static void vlv_clock(int refclk, intel_clock_t *clock) +{ + clock->m = clock->m1 * clock->m2; + clock->p = clock->p1 * clock->p2; + clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); + clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); +} + /** * Returns whether any output on the specified pipe is of the specified type */ @@ -410,10 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) else limit = &intel_limits_pineview_sdvo; } else if (IS_VALLEYVIEW(dev)) { - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) - limit = &intel_limits_vlv_dac; - else - limit = &intel_limits_vlv_hdmi; + limit = &intel_limits_vlv; } else if (!IS_GEN2(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; @@ -435,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; - clock->vco = refclk * clock->m / clock->n; - clock->dot = clock->vco / clock->p; + clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); + clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); } static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) @@ -448,8 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; - clock->vco = refclk * clock->m / (clock->n + 2); - clock->dot = clock->vco / clock->p; + clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); + 
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); } #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) @@ -462,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev, const intel_limit_t *limit, const intel_clock_t *clock) { + if (clock->n < limit->n.min || limit->n.max < clock->n) + INTELPllInvalid("n out of range\n"); if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid("p1 out of range\n"); - if (clock->p < limit->p.min || limit->p.max < clock->p) - INTELPllInvalid("p out of range\n"); if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) INTELPllInvalid("m2 out of range\n"); if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); - if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) - INTELPllInvalid("m1 <= m2\n"); - if (clock->m < limit->m.min || limit->m.max < clock->m) - INTELPllInvalid("m out of range\n"); - if (clock->n < limit->n.min || limit->n.max < clock->n) - INTELPllInvalid("n out of range\n"); + + if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev)) + if (clock->m1 <= clock->m2) + INTELPllInvalid("m1 <= m2\n"); + + if (!IS_VALLEYVIEW(dev)) { + if (clock->p < limit->p.min || limit->p.max < clock->p) + INTELPllInvalid("p out of range\n"); + if (clock->m < limit->m.min || limit->m.max < clock->m) + INTELPllInvalid("m out of range\n"); + } + if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) INTELPllInvalid("vco out of range\n"); /* XXX: We may need to be checking "Dot clock" depending on the multiplier, @@ -669,68 +670,56 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { - u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; - u32 m, n, fastclk; - u32 updrate, minupdate, p; - unsigned long bestppm, ppm, absppm; - int dotclk, flag; - - flag = 0; - dotclk = target * 1000; - bestppm = 1000000; - ppm = absppm = 0; - fastclk = dotclk / (2*100); - updrate = 0; - minupdate = 19200; - n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; - bestm1 = bestm2 = bestp1 = bestp2 = 0; + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + unsigned int bestppm = 1000000; + /* min update 19.2 MHz */ + int max_n = min(limit->n.max, refclk / 19200); + bool found = false; + + target *= 5; /* fast clock */ + + memset(best_clock, 0, sizeof(*best_clock)); /* based on hardware requirement, prefer smaller n to precision */ - for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { - updrate = refclk / n; - for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { - for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { - if (p2 > 10) - p2 = p2 - 1; - p = p1 * p2; + for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { + for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { + for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; + clock.p2 -= clock.p2 > 10 ? 
2 : 1) { + clock.p = clock.p1 * clock.p2; /* based on hardware requirement, prefer bigger m1,m2 values */ - for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { - m2 = DIV_ROUND_CLOSEST(fastclk * p * n, refclk * m1); - m = m1 * m2; - vco = updrate * m; + for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { + unsigned int ppm, diff; + + clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, + refclk * clock.m1); - if (vco < limit->vco.min || vco >= limit->vco.max) + vlv_clock(refclk, &clock); + + if (!intel_PLL_is_valid(dev, limit, + &clock)) continue; - ppm = 1000000 * ((vco / p) - fastclk) / fastclk; - absppm = (ppm > 0) ? ppm : (-ppm); - if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { + diff = abs(clock.dot - target); + ppm = div_u64(1000000ULL * diff, target); + + if (ppm < 100 && clock.p > best_clock->p) { bestppm = 0; - flag = 1; - } - if (absppm < bestppm - 10) { - bestppm = absppm; - flag = 1; + *best_clock = clock; + found = true; } - if (flag) { - bestn = n; - bestm1 = m1; - bestm2 = m2; - bestp1 = p1; - bestp2 = p2; - flag = 0; + + if (bestppm >= 10 && ppm < bestppm - 10) { + bestppm = ppm; + *best_clock = clock; + found = true; } } } } } - best_clock->n = bestn; - best_clock->m1 = bestm1; - best_clock->m2 = bestm2; - best_clock->p1 = bestp1; - best_clock->p2 = bestp2; - return true; + return found; } bool intel_crtc_active(struct drm_crtc *crtc) @@ -811,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) DRM_DEBUG_KMS("vblank wait timed out\n"); } +static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg = PIPEDSL(pipe); + u32 line1, line2; + u32 line_mask; + + if (IS_GEN2(dev)) + line_mask = DSL_LINEMASK_GEN2; + else + line_mask = DSL_LINEMASK_GEN3; + + line1 = I915_READ(reg) & line_mask; + mdelay(5); + line2 = I915_READ(reg) & line_mask; + + return line1 == line2; +} + /* * intel_wait_for_pipe_off - wait for pipe to turn off * @dev: drm device @@ -842,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 100)) WARN(1, "pipe_off wait timed out\n"); } else { - u32 last_line, line_mask; - int reg = PIPEDSL(pipe); - unsigned long timeout = jiffies + msecs_to_jiffies(100); - - if (IS_GEN2(dev)) - line_mask = DSL_LINEMASK_GEN2; - else - line_mask = DSL_LINEMASK_GEN3; - /* Wait for the display line to settle */ - do { - last_line = I915_READ(reg) & line_mask; - mdelay(5); - } while (((I915_READ(reg) & line_mask) != last_line) && - time_after(timeout, jiffies)); - if (time_after(jiffies, timeout)) + if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) WARN(1, "pipe_off wait timed out\n"); } } @@ -1823,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, * Plane regs are double buffered, going from enabled->disabled needs a * trigger in order to latch. The display address reg provides this. */ -void intel_flush_display_plane(struct drm_i915_private *dev_priv, - enum plane plane) +void intel_flush_primary_plane(struct drm_i915_private *dev_priv, + enum plane plane) { - if (dev_priv->info->gen >= 4) - I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); - else - I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + u32 reg = dev_priv->info->gen >= 4 ? 
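/*
 * Editor's sketch (not part of the patch): a reduced model of the
 * divider-selection rule used in the rewritten vlv_find_best_dpll()
 * above. Candidates within 100 ppm of the target prefer a larger post
 * divider; otherwise a candidate must beat the best error by more than
 * 10 ppm. The candidate clocks below are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct cand { int dot, p; };

int main(void)
{
	const int target = 270000 * 5;		/* fast clock, kHz */
	struct cand cands[] = {
		{ 1350100, 10 }, { 1350020, 6 }, { 1350030, 14 },
	};
	struct cand best = { 0, 0 };
	unsigned int bestppm = 1000000, i;

	for (i = 0; i < sizeof(cands) / sizeof(cands[0]); i++) {
		unsigned int diff = abs(cands[i].dot - target);
		unsigned int ppm = (unsigned int)(1000000ULL * diff / target);

		if (ppm < 100 && cands[i].p > best.p) {
			bestppm = 0;		/* good enough, prefer bigger p from now on */
			best = cands[i];
		}
		if (bestppm >= 10 && ppm < bestppm - 10) {
			bestppm = ppm;		/* otherwise require a clear ppm improvement */
			best = cands[i];
		}
	}
	printf("chose dot=%d p=%d\n", best.dot, best.p);	/* dot=1350030 p=14 */
	return 0;
}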
DSPSURF(plane) : DSPADDR(plane); + + I915_WRITE(reg, I915_READ(reg)); + POSTING_READ(reg); } /** - * intel_enable_plane - enable a display plane on a given pipe + * intel_enable_primary_plane - enable the primary plane on a given pipe * @dev_priv: i915 private structure * @plane: plane to enable * @pipe: pipe being fed * * Enable @plane on @pipe, making sure that @pipe is running first. */ -static void intel_enable_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) { + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); int reg; u32 val; /* If the pipe isn't enabled, we can't pump pixels and may hang */ assert_pipe_enabled(dev_priv, pipe); + WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n"); + + intel_crtc->primary_enabled = true; + reg = DSPCNTR(plane); val = I915_READ(reg); if (val & DISPLAY_PLANE_ENABLE) return; I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev_priv, plane); + intel_flush_primary_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } /** - * intel_disable_plane - disable a display plane + * intel_disable_primary_plane - disable the primary plane * @dev_priv: i915 private structure * @plane: plane to disable * @pipe: pipe consuming the data * * Disable @plane; should be an independent operation. */ -static void intel_disable_plane(struct drm_i915_private *dev_priv, - enum plane plane, enum pipe pipe) +static void intel_disable_primary_plane(struct drm_i915_private *dev_priv, + enum plane plane, enum pipe pipe) { + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); int reg; u32 val; + WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n"); + + intel_crtc->primary_enabled = false; + reg = DSPCNTR(plane); val = I915_READ(reg); if ((val & DISPLAY_PLANE_ENABLE) == 0) return; I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev_priv, plane); + intel_flush_primary_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } @@ -1915,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, alignment = 0; break; case I915_TILING_Y: - /* Despite that we check this in framebuffer_init userspace can - * screw us over and change the tiling after the fact. Only - * pinned buffers can't change their tiling. */ - DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n"); + WARN(1, "Y tiled bo slipped through, driver bug!\n"); return -EINVAL; default: BUG(); @@ -3332,7 +3335,7 @@ static void intel_disable_planes(struct drm_crtc *crtc) intel_plane_disable(&intel_plane->base); } -static void hsw_enable_ips(struct intel_crtc *crtc) +void hsw_enable_ips(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; @@ -3345,9 +3348,17 @@ static void hsw_enable_ips(struct intel_crtc *crtc) * for a vblank, so all we need to do here is to enable the IPS bit. */ assert_plane_enabled(dev_priv, crtc->plane); I915_WRITE(IPS_CTL, IPS_ENABLE); + + /* The bit only becomes 1 in the next vblank, so this wait here is + * essentially intel_wait_for_vblank. If we don't have this and don't + * wait for vblanks until the end of crtc_enable, then the HW state + * readout code will complain that the expected IPS_CTL value is not the + * one we read. 
*/ + if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) + DRM_ERROR("Timed out waiting for IPS enable\n"); } -static void hsw_disable_ips(struct intel_crtc *crtc) +void hsw_disable_ips(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -3454,7 +3465,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); intel_enable_pipe(dev_priv, pipe, intel_crtc->config.has_pch_encoder, false); - intel_enable_plane(dev_priv, plane, pipe); + intel_enable_primary_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3496,7 +3507,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - intel_enable_plane(dev_priv, plane, pipe); + intel_enable_primary_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3526,7 +3537,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_plane(dev_priv, plane, pipe); + intel_disable_primary_plane(dev_priv, plane, pipe); } /* @@ -3665,7 +3676,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_plane(dev_priv, plane, pipe); + intel_disable_primary_plane(dev_priv, plane, pipe); if (intel_crtc->config.has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev, pipe, false); @@ -3873,7 +3884,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); intel_enable_pipe(dev_priv, pipe, false, is_dsi); - intel_enable_plane(dev_priv, plane, pipe); + intel_enable_primary_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3911,7 +3922,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_update_watermarks(crtc); intel_enable_pipe(dev_priv, pipe, false, false); - intel_enable_plane(dev_priv, plane, pipe); + intel_enable_primary_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); /* The fixup needs to happen before cursor is enabled */ if (IS_G4X(dev)) @@ -3967,7 +3978,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_crtc_dpms_overlay(intel_crtc, false); intel_crtc_update_cursor(crtc, false); intel_disable_planes(crtc); - intel_disable_plane(dev_priv, plane, pipe); + intel_disable_primary_plane(dev_priv, plane, pipe); intel_disable_pipe(dev_priv, pipe); @@ -5201,10 +5212,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; - clock.vco = refclk * clock.m1 * clock.m2 / clock.n; - clock.dot = 2 * clock.vco / (clock.p1 * clock.p2); + vlv_clock(refclk, &clock); - pipe_config->port_clock = clock.dot / 10; + /* clock.dot is the fast clock */ + pipe_config->port_clock = clock.dot / 5; } static bool i9xx_get_pipe_config(struct intel_crtc *crtc, @@ -6061,11 +6072,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, else intel_crtc->lowfreq_avail = false; - if (intel_crtc->config.has_pch_encoder) { - pll = intel_crtc_to_shared_dpll(intel_crtc); - - } - intel_set_pipe_timings(intel_crtc); if (intel_crtc->config.has_pch_encoder) { @@ -6712,6 +6718,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return 0; } +static struct { + int clock; + u32 config; +} hdmi_audio_clock[] = { + { DIV_ROUND_UP(25200 * 1000, 1001), 
AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, + { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ + { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, + { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, + { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, + { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, + { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, + { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, + { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, + { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, +}; + +/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ +static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { + if (mode->clock == hdmi_audio_clock[i].clock) + break; + } + + if (i == ARRAY_SIZE(hdmi_audio_clock)) { + DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); + i = 1; + } + + DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n", + hdmi_audio_clock[i].clock, + hdmi_audio_clock[i].config); + + return hdmi_audio_clock[i].config; +} + static bool intel_eld_uptodate(struct drm_connector *connector, int reg_eldv, uint32_t bits_eldv, int reg_elda, uint32_t bits_elda, @@ -6742,7 +6786,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector, } static void g4x_write_eld(struct drm_connector *connector, - struct drm_crtc *crtc) + struct drm_crtc *crtc, + struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; @@ -6782,7 +6827,8 @@ static void g4x_write_eld(struct drm_connector *connector, } static void haswell_write_eld(struct drm_connector *connector, - struct drm_crtc *crtc) + struct drm_crtc *crtc, + struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; @@ -6835,8 +6881,9 @@ static void haswell_write_eld(struct drm_connector *connector, DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ - } else - I915_WRITE(aud_config, 0); + } else { + I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); + } if (intel_eld_uptodate(connector, aud_cntrl_st2, eldv, @@ -6869,7 +6916,8 @@ static void haswell_write_eld(struct drm_connector *connector, } static void ironlake_write_eld(struct drm_connector *connector, - struct drm_crtc *crtc) + struct drm_crtc *crtc, + struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = connector->dev->dev_private; uint8_t *eld = connector->eld; @@ -6913,8 +6961,9 @@ static void ironlake_write_eld(struct drm_connector *connector, DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ - } else - I915_WRITE(aud_config, 0); + } else { + I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); + } if (intel_eld_uptodate(connector, aud_cntrl_st2, eldv, @@ -6964,7 +7013,7 @@ void intel_write_eld(struct drm_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; if (dev_priv->display.write_eld) - dev_priv->display.write_eld(connector, crtc); + dev_priv->display.write_eld(connector, crtc, mode); } static void i845_update_cursor(struct drm_crtc *crtc, u32 base) @@ -7271,14 +7320,21 @@ intel_framebuffer_create(struct 
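/*
 * Editor's sketch (not part of the patch): the fractional "NTSC-rate"
 * entries in the hdmi_audio_clock table above come from the nominal
 * rates with the usual 1000/1001 adjustment. A quick check of that
 * arithmetic, in kHz:
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d\n", DIV_ROUND_UP(25200 * 1000, 1001));	/* 25175 */
	printf("%d\n", 27000 * 1001 / 1000);			/* 27027 */
	printf("%d\n", DIV_ROUND_UP(74250 * 1000, 1001));	/* 74176 */
	printf("%d\n", DIV_ROUND_UP(148500 * 1000, 1001));	/* 148352 */
	return 0;
}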
drm_device *dev, return ERR_PTR(-ENOMEM); } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto err; + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); - if (ret) { - drm_gem_object_unreference_unlocked(&obj->base); - kfree(intel_fb); - return ERR_PTR(ret); - } + mutex_unlock(&dev->struct_mutex); + if (ret) + goto err; return &intel_fb->base; +err: + drm_gem_object_unreference_unlocked(&obj->base); + kfree(intel_fb); + + return ERR_PTR(ret); } static u32 @@ -7321,6 +7377,7 @@ static struct drm_framebuffer * mode_fits_in_fbdev(struct drm_device *dev, struct drm_display_mode *mode) { +#ifdef CONFIG_DRM_I915_FBDEV struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_framebuffer *fb; @@ -7341,6 +7398,9 @@ mode_fits_in_fbdev(struct drm_device *dev, return NULL; return fb; +#else + return NULL; +#endif } bool intel_get_load_detect_pipe(struct drm_connector *connector, @@ -9866,7 +9926,13 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { - /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { + intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, + PORT_B); + if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); + } + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, PORT_C); @@ -9875,13 +9941,6 @@ static void intel_setup_outputs(struct drm_device *dev) PORT_C); } - if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { - intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, - PORT_B); - if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); - } - intel_dsi_init(dev); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -9938,6 +9997,7 @@ static void intel_setup_outputs(struct drm_device *dev) void intel_framebuffer_fini(struct intel_framebuffer *fb) { drm_framebuffer_cleanup(&fb->base); + WARN_ON(!fb->obj->framebuffer_references--); drm_gem_object_unreference_unlocked(&fb->obj->base); } @@ -9969,9 +10029,12 @@ int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { + int aligned_height, tile_height; int pitch_limit; int ret; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + if (obj->tiling_mode == I915_TILING_Y) { DRM_DEBUG("hardware does not support tiling Y\n"); return -EINVAL; @@ -10060,8 +10123,16 @@ int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; + tile_height = IS_GEN2(dev) ? 16 : 8; + aligned_height = ALIGN(mode_cmd->height, + obj->tiling_mode ? tile_height : 1); + /* FIXME drm helper for size checks (especially planar formats)? 
*/ + if (obj->base.size < aligned_height * mode_cmd->pitches[0]) + return -EINVAL; + drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; + intel_fb->obj->framebuffer_references++; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -10087,9 +10158,15 @@ intel_user_framebuffer_create(struct drm_device *dev, return intel_framebuffer_create(dev, mode_cmd, obj); } +#ifndef CONFIG_DRM_I915_FBDEV +static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) +{ +} +#endif + static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, - .output_poll_changed = intel_fb_output_poll_changed, + .output_poll_changed = intel_fbdev_output_poll_changed, }; /* Set up chip specific display functions */ @@ -10304,8 +10381,7 @@ static struct intel_quirk intel_quirks[] = { /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, - /* 830/845 need to leave pipe A & dpll A up */ - { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, + /* 830 needs to leave pipe A & dpll A up */ { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, /* Lenovo U160 cannot use SSC on LVDS */ @@ -10674,7 +10750,7 @@ void i915_redisable_vga(struct drm_device *dev) (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) return; - if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { + if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); i915_disable_vga(dev); i915_disable_vga_mem(dev); @@ -10698,6 +10774,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) &crtc->config); crtc->base.enabled = crtc->active; + crtc->primary_enabled = crtc->active; DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", crtc->base.base.id, @@ -10738,11 +10815,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) } encoder->connectors_active = false; - DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", + DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", encoder->base.base.id, drm_get_encoder_name(&encoder->base), encoder->base.crtc ? "enabled" : "disabled", - pipe); + pipe_name(pipe)); } list_for_each_entry(connector, &dev->mode_config.connector_list, @@ -10815,6 +10892,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, pll->on = false; } + if (IS_HASWELL(dev)) + ilk_wm_get_hw_state(dev); + if (force_restore) { i915_redisable_vga(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d5bd349105e5..1e3d2720d811 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -822,10 +822,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. 
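/*
 * Editor's sketch (not part of the patch): the framebuffer size check
 * added above, modelled standalone. The requested height is rounded up
 * to the tile height (8 lines assumed here for a tiled buffer, 1 for
 * linear) before comparing against the backing object's size.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

static bool fb_fits(size_t obj_size, unsigned int height,
		    unsigned int pitch, bool tiled)
{
	unsigned int tile_height = tiled ? 8 : 1;
	unsigned int aligned_height = ALIGN(height, tile_height);

	return obj_size >= (size_t)aligned_height * pitch;
}

int main(void)
{
	/* hypothetical 1366x768 XRGB8888 fb with a 5504-byte pitch:
	 * 768 lines * 5504 bytes = 4227072 bytes required */
	printf("%d\n", fb_fits(4227072, 768, 5504, true));	/* 1 */
	printf("%d\n", fb_fits(4194304, 768, 5504, true));	/* 0 */
	return 0;
}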
*/ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && + dev_priv->vbt.edp_bpp < bpp) { DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", dev_priv->vbt.edp_bpp); - bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); + bpp = dev_priv->vbt.edp_bpp; } for (; bpp >= 6*3; bpp -= 2*3) { @@ -2095,7 +2096,8 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) } static void -intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) +intel_get_adjust_train(struct intel_dp *intel_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE]) { uint8_t v = 0; uint8_t p = 0; @@ -2297,7 +2299,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; - int ret; + uint8_t buf[sizeof(intel_dp->train_set) + 1]; + int ret, len; if (HAS_DDI(dev)) { uint32_t temp = I915_READ(DP_TP_CTL(port)); @@ -2367,36 +2370,35 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, I915_WRITE(intel_dp->output_reg, *DP); POSTING_READ(intel_dp->output_reg); - ret = intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, - dp_train_pat); - if (ret != 1) - return false; - - if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != + buf[0] = dp_train_pat; + if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_DISABLE) { - ret = intel_dp_aux_native_write(intel_dp, - DP_TRAINING_LANE0_SET, - intel_dp->train_set, - intel_dp->lane_count); - if (ret != intel_dp->lane_count) - return false; + /* don't write DP_TRAINING_LANEx_SET on disable */ + len = 1; + } else { + /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ + memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); + len = intel_dp->lane_count + 1; } - return true; + ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, + buf, len); + + return ret == len; } static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, uint8_t dp_train_pat) { - memset(intel_dp->train_set, 0, 4); + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); intel_dp_set_signal_levels(intel_dp, DP); return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); } static bool intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, - uint8_t link_status[DP_LINK_STATUS_SIZE]) + const uint8_t link_status[DP_LINK_STATUS_SIZE]) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -2507,7 +2509,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (i == intel_dp->lane_count) { ++loop_tries; if (loop_tries == 5) { - DRM_DEBUG_KMS("too many full retries, give up\n"); + DRM_ERROR("too many full retries, give up\n"); break; } intel_dp_reset_link_train(intel_dp, &DP, @@ -2521,7 +2523,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { ++voltage_tries; if (voltage_tries == 5) { - DRM_DEBUG_KMS("too many voltage retries, give up\n"); + DRM_ERROR("too many voltage retries, give up\n"); break; } } else diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index eaf0003ddfd9..e33f387d4185 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -309,6 +309,12 @@ struct intel_crtc_config { bool double_wide; }; +struct intel_pipe_wm { + struct intel_wm_level 
wm[5]; + uint32_t linetime; + bool fbc_wm_enabled; +}; + struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -321,7 +327,7 @@ struct intel_crtc { */ bool active; bool eld_vld; - bool primary_disabled; /* is the crtc obscured by a plane? */ + bool primary_enabled; /* is the primary plane (partially) visible? */ bool lowfreq_avail; struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; @@ -349,6 +355,12 @@ struct intel_crtc { /* Access to these should be protected by dev_priv->irq_lock. */ bool cpu_fifo_underrun_disabled; bool pch_fifo_underrun_disabled; + + /* per-pipe watermark state */ + struct { + /* watermarks currently being used */ + struct intel_pipe_wm active; + } wm; }; struct intel_plane_wm_parameters { @@ -677,6 +689,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, int dotclock); bool intel_crtc_active(struct drm_crtc *crtc); void i915_disable_vga_mem(struct drm_device *dev); +void hsw_enable_ips(struct intel_crtc *crtc); +void hsw_disable_ips(struct intel_crtc *crtc); /* intel_dp.c */ @@ -711,14 +725,36 @@ bool intel_dsi_init(struct drm_device *dev); void intel_dvo_init(struct drm_device *dev); -/* intel_fb.c */ -int intel_fbdev_init(struct drm_device *dev); -void intel_fbdev_initial_config(struct drm_device *dev); -void intel_fbdev_fini(struct drm_device *dev); -void intel_fbdev_set_suspend(struct drm_device *dev, int state); -void intel_fb_output_poll_changed(struct drm_device *dev); -void intel_fb_restore_mode(struct drm_device *dev); +/* legacy fbdev emulation in intel_fbdev.c */ +#ifdef CONFIG_DRM_I915_FBDEV +extern int intel_fbdev_init(struct drm_device *dev); +extern void intel_fbdev_initial_config(struct drm_device *dev); +extern void intel_fbdev_fini(struct drm_device *dev); +extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); +extern void intel_fbdev_output_poll_changed(struct drm_device *dev); +extern void intel_fbdev_restore_mode(struct drm_device *dev); +#else +static inline int intel_fbdev_init(struct drm_device *dev) +{ + return 0; +} + +static inline void intel_fbdev_initial_config(struct drm_device *dev) +{ +} + +static inline void intel_fbdev_fini(struct drm_device *dev) +{ +} + +static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) +{ +} +static inline void intel_fbdev_restore_mode(struct drm_device *dev) +{ +} +#endif /* intel_hdmi.c */ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); @@ -799,8 +835,11 @@ void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); void ironlake_teardown_rc6(struct drm_device *dev); void gen6_update_ring_freq(struct drm_device *dev); +void gen6_rps_idle(struct drm_i915_private *dev_priv); +void gen6_rps_boost(struct drm_i915_private *dev_priv); void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); +void ilk_wm_get_hw_state(struct drm_device *dev); /* intel_sdvo.c */ @@ -809,7 +848,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); /* intel_sprite.c */ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); -void intel_flush_display_plane(struct drm_i915_private *dev_priv, +void intel_flush_primary_plane(struct drm_i915_private *dev_priv, enum plane plane); void intel_plane_restore(struct drm_plane *plane); void intel_plane_disable(struct drm_plane *plane); @@ -822,7 +861,4 @@ int intel_sprite_get_colorkey(struct 
drm_device *dev, void *data, /* intel_tv.c */ void intel_tv_init(struct drm_device *dev); -void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct drm_i915_private *dev_priv); - #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 9a2fdd2a7e34..d257b093ca68 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -350,7 +350,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) unsigned int bpp = intel_crtc->config.pipe_bpp; u32 val, tmp; - DRM_DEBUG_KMS("pipe %d\n", pipe); + DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); /* Update the DSI PLL */ vlv_enable_dsi_pll(intel_encoder); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c index d883b77b1b78..acc839569c3f 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -299,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) MODULE_LICENSE("GPL and additional rights"); -void intel_fb_output_poll_changed(struct drm_device *dev) +void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } -void intel_fb_restore_mode(struct drm_device *dev) +void intel_fbdev_restore_mode(struct drm_device *dev) { int ret; struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4f4d346db8f0..51a8336dec2e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1074,7 +1074,7 @@ done: return 0; } -static void intel_hdmi_pre_enable(struct intel_encoder *encoder) +static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -1127,7 +1127,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, port); } -static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) +static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -1163,7 +1163,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) mutex_unlock(&dev_priv->dpio_lock); } -static void intel_hdmi_post_disable(struct intel_encoder *encoder) +static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; @@ -1313,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_VALLEYVIEW(dev)) { - intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; - intel_encoder->pre_enable = intel_hdmi_pre_enable; + intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; + intel_encoder->pre_enable = vlv_hdmi_pre_enable; intel_encoder->enable = vlv_enable_hdmi; - intel_encoder->post_disable = intel_hdmi_post_disable; + intel_encoder->post_disable = vlv_hdmi_post_disable; } else { intel_encoder->enable = intel_enable_hdmi; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 2acf5cae20e4..b82050c96f3e 100644 --- 
a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -258,7 +258,9 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) /* Driver sleep timeout in ms. */ dslp = ioread32(&swsci->dslp); if (!dslp) { - dslp = 2; + /* The spec says 2ms should be the default, but it's too small + * for some machines. */ + dslp = 50; } else if (dslp > 500) { /* Hey bios, trust must be earned. */ WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); @@ -405,6 +407,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) if (bclp > 255) return ASLC_BACKLIGHT_FAILED; + DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); intel_panel_set_backlight(dev, bclp, 255); iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c81020923ee4..09b2994c9b37 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -574,6 +574,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, intel_pipe_to_cpu_transcoder(dev_priv, pipe); unsigned long flags; + DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); + spin_lock_irqsave(&dev_priv->backlight.lock, flags); if (dev_priv->backlight.level == 0) { @@ -680,6 +682,8 @@ intel_panel_detect(struct drm_device *dev) static int intel_panel_update_status(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); + DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); intel_panel_set_backlight(dev, bd->props.brightness, bd->props.max_brightness); return 0; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 008ec0bb017f..8064ff927bcc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -475,7 +475,7 @@ void intel_update_fbc(struct drm_device *dev) */ list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { if (intel_crtc_active(tmp_crtc) && - !to_intel_crtc(tmp_crtc)->primary_disabled) { + to_intel_crtc(tmp_crtc)->primary_enabled) { if (crtc) { if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); @@ -2200,20 +2200,11 @@ struct hsw_wm_maximums { uint16_t fbc; }; -struct hsw_wm_values { - uint32_t wm_pipe[3]; - uint32_t wm_lp[3]; - uint32_t wm_lp_spr[3]; - uint32_t wm_linetime[3]; - bool enable_fbc_wm; -}; - /* used in computing the new watermarks state */ struct intel_wm_config { unsigned int num_pipes_active; bool sprites_enabled; bool sprites_scaled; - bool fbc_wm_enabled; }; /* @@ -2380,11 +2371,11 @@ static unsigned int ilk_fbc_wm_max(void) return 15; } -static void ilk_wm_max(struct drm_device *dev, - int level, - const struct intel_wm_config *config, - enum intel_ddb_partitioning ddb_partitioning, - struct hsw_wm_maximums *max) +static void ilk_compute_wm_maximums(struct drm_device *dev, + int level, + const struct intel_wm_config *config, + enum intel_ddb_partitioning ddb_partitioning, + struct hsw_wm_maximums *max) { max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); @@ -2392,9 +2383,9 @@ static void ilk_wm_max(struct drm_device *dev, max->fbc = ilk_fbc_wm_max(); } -static bool ilk_check_wm(int level, - const struct hsw_wm_maximums *max, - struct intel_wm_level *result) +static bool ilk_validate_wm_level(int level, + const struct hsw_wm_maximums 
*max, + struct intel_wm_level *result) { bool ret; @@ -2430,8 +2421,6 @@ static bool ilk_check_wm(int level, result->enable = true; } - DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis"); - return ret; } @@ -2458,53 +2447,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, result->enable = true; } -static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, - int level, const struct hsw_wm_maximums *max, - const struct hsw_pipe_wm_parameters *params, - struct intel_wm_level *result) -{ - enum pipe pipe; - struct intel_wm_level res[3]; - - for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) - ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]); - - result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val); - result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val); - result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val); - result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val); - result->enable = true; - - return ilk_check_wm(level, max, result); -} - - -static uint32_t hsw_compute_wm_pipe(struct drm_device *dev, - const struct hsw_pipe_wm_parameters *params) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_wm_config config = { - .num_pipes_active = 1, - .sprites_enabled = params->spr.enabled, - .sprites_scaled = params->spr.scaled, - }; - struct hsw_wm_maximums max; - struct intel_wm_level res; - - if (!params->active) - return 0; - - ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max); - - ilk_compute_wm_level(dev_priv, 0, params, &res); - - ilk_check_wm(0, &max, &res); - - return (res.pri_val << WM0_PIPE_PLANE_SHIFT) | - (res.spr_val << WM0_PIPE_SPRITE_SHIFT) | - res.cur_val; -} - static uint32_t hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) { @@ -2631,29 +2573,17 @@ static void intel_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); } -static void hsw_compute_wm_parameters(struct drm_device *dev, - struct hsw_pipe_wm_parameters *params, - struct hsw_wm_maximums *lp_max_1_2, - struct hsw_wm_maximums *lp_max_5_6) +static void hsw_compute_wm_parameters(struct drm_crtc *crtc, + struct hsw_pipe_wm_parameters *p, + struct intel_wm_config *config) { - struct drm_crtc *crtc; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; struct drm_plane *plane; - enum pipe pipe; - struct intel_wm_config config = {}; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct hsw_pipe_wm_parameters *p; - - pipe = intel_crtc->pipe; - p = &params[pipe]; - - p->active = intel_crtc_active(crtc); - if (!p->active) - continue; - - config.num_pipes_active++; + p->active = intel_crtc_active(crtc); + if (p->active) { p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; @@ -2665,66 +2595,132 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, p->cur.enabled = true; } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + config->num_pipes_active += intel_crtc_active(crtc); + list_for_each_entry(plane, &dev->mode_config.plane_list, head) { struct intel_plane *intel_plane = to_intel_plane(plane); - struct hsw_pipe_wm_parameters *p; - - pipe = intel_plane->pipe; - p = &params[pipe]; - p->spr = intel_plane->wm; + if (intel_plane->pipe ==
pipe) + p->spr = intel_plane->wm; - config.sprites_enabled |= p->spr.enabled; - config.sprites_scaled |= p->spr.scaled; + config->sprites_enabled |= intel_plane->wm.enabled; + config->sprites_scaled |= intel_plane->wm.scaled; } +} - ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); +/* Compute new watermarks for the pipe */ +static bool intel_compute_pipe_wm(struct drm_crtc *crtc, + const struct hsw_pipe_wm_parameters *params, + struct intel_pipe_wm *pipe_wm) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int level, max_level = ilk_wm_max_level(dev); + /* LP0 watermark maximums depend on this pipe alone */ + struct intel_wm_config config = { + .num_pipes_active = 1, + .sprites_enabled = params->spr.enabled, + .sprites_scaled = params->spr.scaled, + }; + struct hsw_wm_maximums max; - /* 5/6 split only in single pipe config on IVB+ */ - if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) - ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6); - else - *lp_max_5_6 = *lp_max_1_2; + /* LP0 watermarks always use 1/2 DDB partitioning */ + ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); + + for (level = 0; level <= max_level; level++) + ilk_compute_wm_level(dev_priv, level, params, + &pipe_wm->wm[level]); + + pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); + + /* At least LP0 must be valid */ + return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); } -static void hsw_compute_wm_results(struct drm_device *dev, - const struct hsw_pipe_wm_parameters *params, - const struct hsw_wm_maximums *lp_maximums, - struct hsw_wm_values *results) +/* + * Merge the watermarks from all active pipes for a specific level. + */ +static void ilk_merge_wm_level(struct drm_device *dev, + int level, + struct intel_wm_level *ret_wm) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - struct intel_wm_level lp_results[4] = {}; - enum pipe pipe; - int level, max_level, wm_lp; + const struct intel_crtc *intel_crtc; - for (level = 1; level <= 4; level++) - if (!hsw_compute_lp_wm(dev_priv, level, - lp_maximums, params, - &lp_results[level - 1])) - break; - max_level = level - 1; + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { + const struct intel_wm_level *wm = + &intel_crtc->wm.active.wm[level]; + + if (!wm->enable) + return; - memset(results, 0, sizeof(*results)); + ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); + ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); + ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); + ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); + } + + ret_wm->enable = true; +} - /* The spec says it is preferred to disable FBC WMs instead of disabling - * a WM level. */ - results->enable_fbc_wm = true; +/* + * Merge all low power watermarks for all active pipes. + */ +static void ilk_wm_merge(struct drm_device *dev, + const struct hsw_wm_maximums *max, + struct intel_pipe_wm *merged) +{ + int level, max_level = ilk_wm_max_level(dev); + + merged->fbc_wm_enabled = true; + + /* merge each WM1+ level */ for (level = 1; level <= max_level; level++) { - if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { - results->enable_fbc_wm = false; - lp_results[level - 1].fbc_val = 0; + struct intel_wm_level *wm = &merged->wm[level]; + + ilk_merge_wm_level(dev, level, wm); + + if (!ilk_validate_wm_level(level, max, wm)) + break; + + /* + * The spec says it is preferred to disable + * FBC WMs instead of disabling a WM level. 
+ */ + if (wm->fbc_val > max->fbc) { + merged->fbc_wm_enabled = false; + wm->fbc_val = 0; } } +} +static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) +{ + /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ + return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); +} + +static void hsw_compute_wm_results(struct drm_device *dev, + const struct intel_pipe_wm *merged, + enum intel_ddb_partitioning partitioning, + struct hsw_wm_values *results) +{ + struct intel_crtc *intel_crtc; + int level, wm_lp; + + results->enable_fbc_wm = merged->fbc_wm_enabled; + results->partitioning = partitioning; + + /* LP1+ register values */ for (wm_lp = 1; wm_lp <= 3; wm_lp++) { const struct intel_wm_level *r; - level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; - if (level > max_level) + level = ilk_wm_lp_to_level(wm_lp, merged); + + r = &merged->wm[level]; + if (!r->enable) break; - r = &lp_results[level - 1]; results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, r->fbc_val, r->pri_val, @@ -2732,116 +2728,158 @@ static void hsw_compute_wm_results(struct drm_device *dev, results->wm_lp_spr[wm_lp - 1] = r->spr_val; } - for_each_pipe(pipe) - results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev, - &params[pipe]); + /* LP0 register values */ + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { + enum pipe pipe = intel_crtc->pipe; + const struct intel_wm_level *r = + &intel_crtc->wm.active.wm[0]; - for_each_pipe(pipe) { - crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc); + if (WARN_ON(!r->enable)) + continue; + + results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; + + results->wm_pipe[pipe] = + (r->pri_val << WM0_PIPE_PLANE_SHIFT) | + (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | + r->cur_val; } } /* Find the result with the highest level enabled. Check for enable_fbc_wm in * case both are at the same level. Prefer r1 in case they're the same.
*/ -static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, - struct hsw_wm_values *r2) +static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, + struct intel_pipe_wm *r1, + struct intel_pipe_wm *r2) { - int i, val_r1 = 0, val_r2 = 0; + int level, max_level = ilk_wm_max_level(dev); + int level1 = 0, level2 = 0; - for (i = 0; i < 3; i++) { - if (r1->wm_lp[i] & WM3_LP_EN) - val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK; - if (r2->wm_lp[i] & WM3_LP_EN) - val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK; + for (level = 1; level <= max_level; level++) { + if (r1->wm[level].enable) + level1 = level; + if (r2->wm[level].enable) + level2 = level; } - if (val_r1 == val_r2) { - if (r2->enable_fbc_wm && !r1->enable_fbc_wm) + if (level1 == level2) { + if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) return r2; else return r1; - } else if (val_r1 > val_r2) { + } else if (level1 > level2) { return r1; } else { return r2; } } +/* dirty bits used to track which watermarks need changes */ +#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) +#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) +#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) +#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) +#define WM_DIRTY_FBC (1 << 24) +#define WM_DIRTY_DDB (1 << 25) + +static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, + const struct hsw_wm_values *old, + const struct hsw_wm_values *new) +{ + unsigned int dirty = 0; + enum pipe pipe; + int wm_lp; + + for_each_pipe(pipe) { + if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { + dirty |= WM_DIRTY_LINETIME(pipe); + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { + dirty |= WM_DIRTY_PIPE(pipe); + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + } + + if (old->enable_fbc_wm != new->enable_fbc_wm) { + dirty |= WM_DIRTY_FBC; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + if (old->partitioning != new->partitioning) { + dirty |= WM_DIRTY_DDB; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + /* LP1+ watermarks already deemed dirty, no need to continue */ + if (dirty & WM_DIRTY_LP_ALL) + return dirty; + + /* Find the lowest numbered LP1+ watermark in need of an update... */ + for (wm_lp = 1; wm_lp <= 3; wm_lp++) { + if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || + old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) + break; + } + + /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ + for (; wm_lp <= 3; wm_lp++) + dirty |= WM_DIRTY_LP(wm_lp); + + return dirty; +} + /* * The spec says we shouldn't write when we don't need, because every write * causes WMs to be re-evaluated, expending some power. 
*/ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, - struct hsw_wm_values *results, - enum intel_ddb_partitioning partitioning) + struct hsw_wm_values *results) { - struct hsw_wm_values previous; + struct hsw_wm_values *previous = &dev_priv->wm.hw; + unsigned int dirty; uint32_t val; - enum intel_ddb_partitioning prev_partitioning; - bool prev_enable_fbc_wm; - - previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK); - previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK); - previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB); - previous.wm_lp[0] = I915_READ(WM1_LP_ILK); - previous.wm_lp[1] = I915_READ(WM2_LP_ILK); - previous.wm_lp[2] = I915_READ(WM3_LP_ILK); - previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); - previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); - previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); - previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A)); - previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B)); - previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C)); - - prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; - - prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); - - if (memcmp(results->wm_pipe, previous.wm_pipe, - sizeof(results->wm_pipe)) == 0 && - memcmp(results->wm_lp, previous.wm_lp, - sizeof(results->wm_lp)) == 0 && - memcmp(results->wm_lp_spr, previous.wm_lp_spr, - sizeof(results->wm_lp_spr)) == 0 && - memcmp(results->wm_linetime, previous.wm_linetime, - sizeof(results->wm_linetime)) == 0 && - partitioning == prev_partitioning && - results->enable_fbc_wm == prev_enable_fbc_wm) + + dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results); + if (!dirty) return; - if (previous.wm_lp[2] != 0) + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0) I915_WRITE(WM3_LP_ILK, 0); - if (previous.wm_lp[1] != 0) + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0) I915_WRITE(WM2_LP_ILK, 0); - if (previous.wm_lp[0] != 0) + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0) I915_WRITE(WM1_LP_ILK, 0); - if (previous.wm_pipe[0] != results->wm_pipe[0]) + if (dirty & WM_DIRTY_PIPE(PIPE_A)) I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); - if (previous.wm_pipe[1] != results->wm_pipe[1]) + if (dirty & WM_DIRTY_PIPE(PIPE_B)) I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); - if (previous.wm_pipe[2] != results->wm_pipe[2]) + if (dirty & WM_DIRTY_PIPE(PIPE_C)) I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); - if (previous.wm_linetime[0] != results->wm_linetime[0]) + if (dirty & WM_DIRTY_LINETIME(PIPE_A)) I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); - if (previous.wm_linetime[1] != results->wm_linetime[1]) + if (dirty & WM_DIRTY_LINETIME(PIPE_B)) I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); - if (previous.wm_linetime[2] != results->wm_linetime[2]) + if (dirty & WM_DIRTY_LINETIME(PIPE_C)) I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); - if (prev_partitioning != partitioning) { + if (dirty & WM_DIRTY_DDB) { val = I915_READ(WM_MISC); - if (partitioning == INTEL_DDB_PART_1_2) + if (results->partitioning == INTEL_DDB_PART_1_2) val &= ~WM_MISC_DATA_PARTITION_5_6; else val |= WM_MISC_DATA_PARTITION_5_6; I915_WRITE(WM_MISC, val); } - if (prev_enable_fbc_wm != results->enable_fbc_wm) { + if (dirty & WM_DIRTY_FBC) { val = I915_READ(DISP_ARB_CTL); if (results->enable_fbc_wm) val &= ~DISP_FBC_WM_DIS; @@ -2850,46 +2888,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, I915_WRITE(DISP_ARB_CTL, val); } - if 
(previous.wm_lp_spr[0] != results->wm_lp_spr[0]) + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); - if (previous.wm_lp_spr[1] != results->wm_lp_spr[1]) + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); - if (previous.wm_lp_spr[2] != results->wm_lp_spr[2]) + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); - if (results->wm_lp[0] != 0) + if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0) I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); - if (results->wm_lp[1] != 0) + if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); - if (results->wm_lp[2] != 0) + if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); + + dev_priv->wm.hw = *results; } static void haswell_update_wm(struct drm_crtc *crtc) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; - struct hsw_pipe_wm_parameters params[3]; - struct hsw_wm_values results_1_2, results_5_6, *best_results; + struct hsw_wm_maximums max; + struct hsw_pipe_wm_parameters params = {}; + struct hsw_wm_values results = {}; enum intel_ddb_partitioning partitioning; + struct intel_pipe_wm pipe_wm = {}; + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; + struct intel_wm_config config = {}; + + hsw_compute_wm_parameters(crtc, &params, &config); + + intel_compute_pipe_wm(crtc, &params, &pipe_wm); + + if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) + return; + + intel_crtc->wm.active = pipe_wm; + + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev, &max, &lp_wm_1_2); - hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); + /* 5/6 split only in single pipe config on IVB+ */ + if (INTEL_INFO(dev)->gen >= 7 && + config.num_pipes_active == 1 && config.sprites_enabled) { + ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev, &max, &lp_wm_5_6); - hsw_compute_wm_results(dev, params, - &lp_max_1_2, &results_1_2); - if (lp_max_1_2.pri != lp_max_5_6.pri) { - hsw_compute_wm_results(dev, params, - &lp_max_5_6, &results_5_6); - best_results = hsw_find_best_result(&results_1_2, &results_5_6); + best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); } else { - best_results = &results_1_2; + best_lp_wm = &lp_wm_1_2; } - partitioning = (best_results == &results_1_2) ? + partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; - hsw_write_wm_values(dev_priv, best_results, partitioning); + hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); + + hsw_write_wm_values(dev_priv, &results); } static void haswell_update_sprite_wm(struct drm_plane *plane, @@ -3069,6 +3126,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane, I915_WRITE(WM3S_LP_IVB, sprite_wm); } +static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct hsw_wm_values *hw = &dev_priv->wm.hw; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_pipe_wm *active = &intel_crtc->wm.active; + enum pipe pipe = intel_crtc->pipe; + static const unsigned int wm0_pipe_reg[] = { + [PIPE_A] = WM0_PIPEA_ILK, + [PIPE_B] = WM0_PIPEB_ILK, + [PIPE_C] = WM0_PIPEC_IVB, + }; + + hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); + hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); + + if (intel_crtc_active(crtc)) { + u32 tmp = hw->wm_pipe[pipe]; + + /* + * For active pipes LP0 watermark is marked as + * enabled, and LP1+ watermaks as disabled since + * we can't really reverse compute them in case + * multiple pipes are active. + */ + active->wm[0].enable = true; + active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; + active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; + active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; + active->linetime = hw->wm_linetime[pipe]; + } else { + int level, max_level = ilk_wm_max_level(dev); + + /* + * For inactive pipes, all watermark levels + * should be marked as enabled but zeroed, + * which is what we'd compute them to. + */ + for (level = 0; level <= max_level; level++) + active->wm[level].enable = true; + } +} + +void ilk_wm_get_hw_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct hsw_wm_values *hw = &dev_priv->wm.hw; + struct drm_crtc *crtc; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + ilk_pipe_wm_get_hw_state(crtc); + + hw->wm_lp[0] = I915_READ(WM1_LP_ILK); + hw->wm_lp[1] = I915_READ(WM2_LP_ILK); + hw->wm_lp[2] = I915_READ(WM3_LP_ILK); + + hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); + hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); + hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); + + hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; + + hw->enable_fbc_wm = + !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); +} + /** * intel_update_watermarks - update FIFO watermark values based on current modes * @@ -3442,22 +3567,26 @@ void gen6_set_rps(struct drm_device *dev, u8 val) void gen6_rps_idle(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->rps.hw_lock); - if (dev_priv->info->is_valleyview) - valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); - else - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); - dev_priv->rps.last_adj = 0; + if (dev_priv->rps.enabled) { + if (dev_priv->info->is_valleyview) + valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); + else + gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); + dev_priv->rps.last_adj = 0; + } mutex_unlock(&dev_priv->rps.hw_lock); } void gen6_rps_boost(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->rps.hw_lock); - if (dev_priv->info->is_valleyview) - valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); - else - gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); - dev_priv->rps.last_adj = 0; + if (dev_priv->rps.enabled) { + if (dev_priv->info->is_valleyview) + valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); + else + gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); + dev_priv->rps.last_adj = 0; + } mutex_unlock(&dev_priv->rps.hw_lock); } @@ -3740,16 +3869,21 @@ void gen6_update_ring_freq(struct drm_device *dev) unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; int scaling_factor = 180; + struct cpufreq_policy *policy; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - max_ia_freq = cpufreq_quick_get_max(0); - /* - * Default to measured freq if none found, PCU will ensure we don't go - * over - */ - if (!max_ia_freq) + policy = cpufreq_cpu_get(0); + if (policy) { + max_ia_freq = policy->cpuinfo.max_freq; + cpufreq_cpu_put(policy); + } else { + /* + * Default to measured freq if none found, PCU will ensure we + * don't go over + */ max_ia_freq = tsc_khz; + } /* Convert from kHz to MHz */ max_ia_freq /= 1000; @@ -4711,6 +4845,7 @@ void intel_disable_gt_powersave(struct drm_device *dev) valleyview_disable_rps(dev); else gen6_disable_rps(dev); + dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); } } @@ -4730,6 +4865,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) gen6_enable_rps(dev); gen6_update_ring_freq(dev); } + dev_priv->rps.enabled = true; mutex_unlock(&dev_priv->rps.hw_lock); } @@ -4773,7 +4909,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); + intel_flush_primary_plane(dev_priv, pipe); } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b67104aaade5..2dec134f75eb 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -395,8 +395,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) int ret = 0; u32 head; - if (HAS_FORCE_WAKE(dev)) - gen6_gt_force_wake_get(dev_priv); + gen6_gt_force_wake_get(dev_priv); if (I915_NEED_GFX_HWS(dev)) intel_ring_setup_status_page(ring); @@ -469,8 +468,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); out: - if (HAS_FORCE_WAKE(dev)) - gen6_gt_force_wake_put(dev_priv); + gen6_gt_force_wake_put(dev_priv); return ret; } @@ -1326,7 +1324,7 @@ void 
intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) /* Disable the ring buffer. The ring must be idle at this point */ dev_priv = ring->dev->dev_private; ret = intel_ring_idle(ring); - if (ret) + if (ret && !i915_reset_in_progress(&dev_priv->gpu_error)) DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", ring->name, ret); @@ -1337,6 +1335,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) i915_gem_object_unpin(ring->obj); drm_gem_object_unreference(&ring->obj->base); ring->obj = NULL; + ring->preallocated_lazy_request = NULL; + ring->outstanding_lazy_seqno = 0; if (ring->cleanup) ring->cleanup(ring); diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index acd1cfe8b7dd..9944d8135e87 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -25,7 +25,10 @@ #include "i915_drv.h" #include "intel_drv.h" -/* IOSF sideband */ +/* + * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and + * VLV_VLV2_PUNIT_HAS_0.8.docx + */ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, u32 port, u32 opcode, u32 addr, u32 *val) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index cae10bc746d0..8afaad6bcc48 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); - if (!intel_crtc->primary_disabled) + if (intel_crtc->primary_enabled) return; - intel_crtc->primary_disabled = false; - intel_update_fbc(dev); + intel_crtc->primary_enabled = true; I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); + intel_flush_primary_plane(dev_priv, intel_crtc->plane); + + /* + * FIXME IPS should be fine as long as one plane is + * enabled, but in practice it seems to have problems + * when going from primary only to sprite only and vice + * versa. + */ + if (intel_crtc->config.ips_enabled) { + intel_wait_for_vblank(dev, intel_crtc->pipe); + hsw_enable_ips(intel_crtc); + } + + mutex_lock(&dev->struct_mutex); + intel_update_fbc(dev); + mutex_unlock(&dev->struct_mutex); } static void @@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int reg = DSPCNTR(intel_crtc->plane); - if (intel_crtc->primary_disabled) + if (!intel_crtc->primary_enabled) return; - I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); + intel_crtc->primary_enabled = false; - intel_crtc->primary_disabled = true; - intel_update_fbc(dev); + mutex_lock(&dev->struct_mutex); + if (dev_priv->fbc.plane == intel_crtc->plane) + intel_disable_fbc(dev); + mutex_unlock(&dev->struct_mutex); + + /* + * FIXME IPS should be fine as long as one plane is + * enabled, but in practice it seems to have problems + * when going from primary only to sprite only and vice + * versa. 
+ */ + hsw_disable_ips(intel_crtc); + + I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); + intel_flush_primary_plane(dev_priv, intel_crtc->plane); } static int @@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_w, uint32_t src_h) { struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane = to_intel_plane(plane); - struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj, *old_obj; - int pipe = intel_plane->pipe; - enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, - pipe); - int ret = 0; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_i915_gem_object *old_obj = intel_plane->obj; + int ret; bool disable_primary = false; bool visible; int hscale, vscale; @@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, .y2 = crtc_y + crtc_h, }; const struct drm_rect clip = { - .x2 = intel_crtc->config.pipe_src_w, - .y2 = intel_crtc->config.pipe_src_h, + .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, + }; + const struct { + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y, src_w, src_h; + } orig = { + .crtc_x = crtc_x, + .crtc_y = crtc_y, + .crtc_w = crtc_w, + .crtc_h = crtc_h, + .src_x = src_x, + .src_y = src_y, + .src_w = src_w, + .src_h = src_h, }; - - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - - old_obj = intel_plane->obj; - - intel_plane->crtc_x = crtc_x; - intel_plane->crtc_y = crtc_y; - intel_plane->crtc_w = crtc_w; - intel_plane->crtc_h = crtc_h; - intel_plane->src_x = src_x; - intel_plane->src_y = src_y; - intel_plane->src_w = src_w; - intel_plane->src_h = src_h; - - /* Pipe must be running... */ - if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) { - DRM_DEBUG_KMS("Pipe disabled\n"); - return -EINVAL; - } /* Don't modify another pipe's plane */ if (intel_plane->pipe != intel_crtc->pipe) { @@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, * we can disable the primary and save power. */ disable_primary = drm_rect_equals(&dst, &clip); - WARN_ON(disable_primary && !visible); + WARN_ON(disable_primary && !visible && intel_crtc->active); mutex_lock(&dev->struct_mutex); @@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, * the sprite planes only require 128KiB alignment and 32 PTE padding. */ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); - if (ret) - goto out_unlock; - intel_plane->obj = obj; - - /* - * Be sure to re-enable the primary before the sprite is no longer - * covering it fully. 
- */ - if (!disable_primary) - intel_enable_primary(crtc); + mutex_unlock(&dev->struct_mutex); - if (visible) - intel_plane->update_plane(plane, crtc, fb, obj, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x, src_y, src_w, src_h); - else - intel_plane->disable_plane(plane, crtc); + if (ret) + return ret; + + intel_plane->crtc_x = orig.crtc_x; + intel_plane->crtc_y = orig.crtc_y; + intel_plane->crtc_w = orig.crtc_w; + intel_plane->crtc_h = orig.crtc_h; + intel_plane->src_x = orig.src_x; + intel_plane->src_y = orig.src_y; + intel_plane->src_w = orig.src_w; + intel_plane->src_h = orig.src_h; + intel_plane->obj = obj; - if (disable_primary) - intel_disable_primary(crtc); + if (intel_crtc->active) { + /* + * Be sure to re-enable the primary before the sprite is no longer + * covering it fully. + */ + if (!disable_primary) + intel_enable_primary(crtc); + + if (visible) + intel_plane->update_plane(plane, crtc, fb, obj, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); + else + intel_plane->disable_plane(plane, crtc); + + if (disable_primary) + intel_disable_primary(crtc); + } /* Unpin old obj after new one is active to avoid ugliness */ if (old_obj) { @@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, * wait for vblank to avoid ugliness, we only need to * do the pin & ref bookkeeping. */ - if (old_obj != obj) { - mutex_unlock(&dev->struct_mutex); - intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); - mutex_lock(&dev->struct_mutex); - } + if (old_obj != obj && intel_crtc->active) + intel_wait_for_vblank(dev, intel_crtc->pipe); + + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(old_obj); + mutex_unlock(&dev->struct_mutex); } -out_unlock: - mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } static int @@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane) { struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); - int ret = 0; + struct intel_crtc *intel_crtc; if (!plane->fb) return 0; @@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane) if (WARN_ON(!plane->crtc)) return -EINVAL; - intel_enable_primary(plane->crtc); - intel_plane->disable_plane(plane, plane->crtc); + intel_crtc = to_intel_crtc(plane->crtc); - if (!intel_plane->obj) - goto out; + if (intel_crtc->active) { + intel_enable_primary(plane->crtc); + intel_plane->disable_plane(plane, plane->crtc); + } - intel_wait_for_vblank(dev, intel_plane->pipe); + if (intel_plane->obj) { + if (intel_crtc->active) + intel_wait_for_vblank(dev, intel_plane->pipe); - mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(intel_plane->obj); - intel_plane->obj = NULL; - mutex_unlock(&dev->struct_mutex); -out: + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(intel_plane->obj); + mutex_unlock(&dev->struct_mutex); - return ret; + intel_plane->obj = NULL; + } + + return 0; } static void intel_destroy_plane(struct drm_plane *plane) diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d61aec23a523..18c406246a2d 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder) unsigned int xsize, ysize; /* Pipe must be off here */ I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + intel_flush_primary_plane(dev_priv, intel_crtc->plane); /* Wait for vblank for the disable to take effect */ if (IS_GEN2(dev)) @@ -1123,7 +1123,7 @@ static void 
intel_tv_mode_set(struct intel_encoder *encoder) I915_WRITE(pipeconf_reg, pipeconf); I915_WRITE(dspcntr_reg, dspcntr); - intel_flush_display_plane(dev_priv, intel_crtc->plane); + intel_flush_primary_plane(dev_priv, intel_crtc->plane); } j = 0; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 288a3a654f06..f6fae35c568e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -222,70 +222,19 @@ void intel_uncore_early_sanitize(struct drm_device *dev) if (HAS_FPGA_DBG_UNCLAIMED(dev)) __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); -} - -void intel_uncore_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work, - gen6_force_wake_work); - if (IS_VALLEYVIEW(dev)) { - dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; - dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; - } else if (IS_HASWELL(dev)) { - dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; - } else if (IS_IVYBRIDGE(dev)) { - u32 ecobus; - - /* IVB configs may use multi-threaded forcewake */ - - /* A small trick here - if the bios hasn't configured - * MT forcewake, and if the device is in RC6, then - * force_wake_mt_get will not wake the device and the - * ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT - * forcewake being disabled. - */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = __raw_i915_read32(dev_priv, ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - dev_priv->uncore.funcs.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->uncore.funcs.force_wake_put = - __gen6_gt_force_wake_mt_put; - } else { - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); - DRM_INFO("when using vblank-synced partial screen updates.\n"); - dev_priv->uncore.funcs.force_wake_get = - __gen6_gt_force_wake_get; - dev_priv->uncore.funcs.force_wake_put = - __gen6_gt_force_wake_put; - } - } else if (IS_GEN6(dev)) { - dev_priv->uncore.funcs.force_wake_get = - __gen6_gt_force_wake_get; - dev_priv->uncore.funcs.force_wake_put = - __gen6_gt_force_wake_put; + if (IS_HASWELL(dev) && + (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { + /* The docs do not explain exactly how the calculation can be + * made. It is somewhat guessable, but for now, it's always + * 128MB. + * NB: We can't write IDICR yet because we do not have gt funcs + * set up */ + dev_priv->ellc_size = 128; + DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); } } -void intel_uncore_fini(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - flush_delayed_work(&dev_priv->uncore.force_wake_work); - - /* Paranoia: make sure we have disabled everything before we exit. 
*/ - intel_uncore_sanitize(dev); -} - static void intel_uncore_forcewake_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -333,6 +282,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { unsigned long irqflags; + if (!dev_priv->uncore.funcs.force_wake_get) + return; + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (dev_priv->uncore.forcewake_count++ == 0) dev_priv->uncore.funcs.force_wake_get(dev_priv); @@ -346,6 +298,9 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { unsigned long irqflags; + if (!dev_priv->uncore.funcs.force_wake_put) + return; + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (--dev_priv->uncore.forcewake_count == 0) { dev_priv->uncore.forcewake_count++; @@ -358,9 +313,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(dev_priv, reg) \ - ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) + ((reg) < 0x40000 && (reg) != FORCEWAKE) static void ilk_dummy_write(struct drm_i915_private *dev_priv) @@ -374,8 +327,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) static void hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) { - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); @@ -385,20 +337,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) static void hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) { - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { + if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { DRM_ERROR("Unclaimed write to %x\n", reg); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); } } -#define __i915_read(x) \ -u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ +#define REG_READ_HEADER(x) \ unsigned long irqflags; \ u##x val = 0; \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - if (dev_priv->info->gen == 5) \ - ilk_dummy_write(dev_priv); \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + +#define REG_READ_FOOTER \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ + trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ + return val + +#define __gen4_read(x) \ +static u##x \ +gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + REG_READ_HEADER(x); \ + val = __raw_i915_read##x(dev_priv, reg); \ + REG_READ_FOOTER; \ +} + +#define __gen5_read(x) \ +static u##x \ +gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + REG_READ_HEADER(x); \ + ilk_dummy_write(dev_priv); \ + val = __raw_i915_read##x(dev_priv, reg); \ + REG_READ_FOOTER; \ +} + +#define __gen6_read(x) \ +static u##x \ +gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ + REG_READ_HEADER(x); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ if (dev_priv->uncore.forcewake_count == 0) \ dev_priv->uncore.funcs.force_wake_get(dev_priv); \ @@ -408,28 +383,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ } else { \ val = __raw_i915_read##x(dev_priv, reg); \ } \ + REG_READ_FOOTER; \ +} + +__gen6_read(8) +__gen6_read(16) 
+__gen6_read(32) +__gen6_read(64) +__gen5_read(8) +__gen5_read(16) +__gen5_read(32) +__gen5_read(64) +__gen4_read(8) +__gen4_read(16) +__gen4_read(32) +__gen4_read(64) + +#undef __gen6_read +#undef __gen5_read +#undef __gen4_read +#undef REG_READ_FOOTER +#undef REG_READ_HEADER + +#define REG_WRITE_HEADER \ + unsigned long irqflags; \ + trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) + +#define __gen4_write(x) \ +static void \ +gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + REG_WRITE_HEADER; \ + __raw_i915_write##x(dev_priv, reg, val); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ - trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ - return val; \ } -__i915_read(8) -__i915_read(16) -__i915_read(32) -__i915_read(64) -#undef __i915_read +#define __gen5_write(x) \ +static void \ +gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + REG_WRITE_HEADER; \ + ilk_dummy_write(dev_priv); \ + __raw_i915_write##x(dev_priv, reg, val); \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ +} -#define __i915_write(x) \ -void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ - unsigned long irqflags; \ +#define __gen6_write(x) \ +static void \ +gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ u32 __fifo_ret = 0; \ - trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ + REG_WRITE_HEADER; \ + if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ + __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ + } \ + __raw_i915_write##x(dev_priv, reg, val); \ + if (unlikely(__fifo_ret)) { \ + gen6_gt_check_fifodbg(dev_priv); \ + } \ + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ +} + +#define __hsw_write(x) \ +static void \ +hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ + u32 __fifo_ret = 0; \ + REG_WRITE_HEADER; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ - if (dev_priv->info->gen == 5) \ - ilk_dummy_write(dev_priv); \ hsw_unclaimed_reg_clear(dev_priv, reg); \ __raw_i915_write##x(dev_priv, reg, val); \ if (unlikely(__fifo_ret)) { \ @@ -438,11 +458,134 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr hsw_unclaimed_reg_check(dev_priv, reg); \ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ } -__i915_write(8) -__i915_write(16) -__i915_write(32) -__i915_write(64) -#undef __i915_write + +__hsw_write(8) +__hsw_write(16) +__hsw_write(32) +__hsw_write(64) +__gen6_write(8) +__gen6_write(16) +__gen6_write(32) +__gen6_write(64) +__gen5_write(8) +__gen5_write(16) +__gen5_write(32) +__gen5_write(64) +__gen4_write(8) +__gen4_write(16) +__gen4_write(32) +__gen4_write(64) + +#undef __hsw_write +#undef __gen6_write +#undef __gen5_write +#undef __gen4_write +#undef REG_WRITE_HEADER + +void intel_uncore_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work, + gen6_force_wake_work); + + if (IS_VALLEYVIEW(dev)) { + dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; + } else if (IS_HASWELL(dev)) { + dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; + 
} else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. + */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = __raw_i915_read32(dev_priv, ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } + } else if (IS_GEN6(dev)) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } + + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + if (IS_HASWELL(dev)) { + dev_priv->uncore.funcs.mmio_writeb = hsw_write8; + dev_priv->uncore.funcs.mmio_writew = hsw_write16; + dev_priv->uncore.funcs.mmio_writel = hsw_write32; + dev_priv->uncore.funcs.mmio_writeq = hsw_write64; + } else { + dev_priv->uncore.funcs.mmio_writeb = gen6_write8; + dev_priv->uncore.funcs.mmio_writew = gen6_write16; + dev_priv->uncore.funcs.mmio_writel = gen6_write32; + dev_priv->uncore.funcs.mmio_writeq = gen6_write64; + } + dev_priv->uncore.funcs.mmio_readb = gen6_read8; + dev_priv->uncore.funcs.mmio_readw = gen6_read16; + dev_priv->uncore.funcs.mmio_readl = gen6_read32; + dev_priv->uncore.funcs.mmio_readq = gen6_read64; + break; + case 5: + dev_priv->uncore.funcs.mmio_writeb = gen5_write8; + dev_priv->uncore.funcs.mmio_writew = gen5_write16; + dev_priv->uncore.funcs.mmio_writel = gen5_write32; + dev_priv->uncore.funcs.mmio_writeq = gen5_write64; + dev_priv->uncore.funcs.mmio_readb = gen5_read8; + dev_priv->uncore.funcs.mmio_readw = gen5_read16; + dev_priv->uncore.funcs.mmio_readl = gen5_read32; + dev_priv->uncore.funcs.mmio_readq = gen5_read64; + break; + case 4: + case 3: + case 2: + dev_priv->uncore.funcs.mmio_writeb = gen4_write8; + dev_priv->uncore.funcs.mmio_writew = gen4_write16; + dev_priv->uncore.funcs.mmio_writel = gen4_write32; + dev_priv->uncore.funcs.mmio_writeq = gen4_write64; + dev_priv->uncore.funcs.mmio_readb = gen4_read8; + dev_priv->uncore.funcs.mmio_readw = gen4_read16; + dev_priv->uncore.funcs.mmio_readl = gen4_read32; + dev_priv->uncore.funcs.mmio_readq = gen4_read64; + break; + } +} + +void intel_uncore_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + flush_delayed_work(&dev_priv->uncore.force_wake_work); + + /* Paranoia: make sure we have disabled everything before we exit. 
+	intel_uncore_sanitize(dev);
+}
 
 static const struct register_whitelist {
 	uint64_t offset;
@@ -490,36 +633,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 	return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (IS_I85X(dev))
-		return -ENODEV;
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	if (IS_I830(dev) || IS_845G(dev)) {
-		I915_WRITE(DEBUG_RESET_I830,
-			   DEBUG_RESET_DISPLAY |
-			   DEBUG_RESET_RENDER |
-			   DEBUG_RESET_FULL);
-		POSTING_READ(DEBUG_RESET_I830);
-		msleep(1);
-
-		I915_WRITE(DEBUG_RESET_I830, 0);
-		POSTING_READ(DEBUG_RESET_I830);
-	}
-
-	msleep(1);
-
-	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-	POSTING_READ(D_STATE);
-
-	return 0;
-}
-
 static int i965_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
@@ -621,7 +734,6 @@ int intel_gpu_reset(struct drm_device *dev)
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);
 	case 4: return i965_do_reset(dev);
-	case 2: return i8xx_do_reset(dev);
 	default: return -ENODEV;
 	}
 }
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index b487cdec5ee7..3a1c5fbae54a 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -5,6 +5,7 @@ config DRM_MGAG200
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index a06c19cc56f8..f39ab7554fc9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -14,6 +14,7 @@ config DRM_MSM
 config DRM_MSM_FBDEV
 	bool "Enable legacy fbdev support for MSM modesetting driver"
 	depends on DRM_MSM
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ff80f12480ea..7cf787d697b1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
 	depends on DRM && PCI
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 20c41e73d448..6c220cd3497a 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,7 @@ config DRM_OMAP
 	depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
 	depends on OMAP2_DSS
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index d6c12796023c..037d324bf58f 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -6,6 +6,7 @@ config DRM_QXL
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
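
The intel_uncore.c hunks above replace the single __i915_read/__i915_write macro pair with per-generation accessors that intel_uncore_init() installs as function pointers in dev_priv->uncore.funcs, so register accesses no longer branch on the GPU generation at the call site. A minimal sketch of that dispatch pattern, written for this note with invented names (example_uncore_funcs, example_private, example_read32) and not taken from the patch, looks roughly like this:

/*
 * Illustrative sketch only; not part of the patch.  The example_* names
 * are assumptions made for this note.  The driver itself stores its
 * hooks in dev_priv->uncore.funcs and dispatches through the
 * I915_READ()/I915_WRITE() macros.
 */
#include <stdint.h>
#include <stdbool.h>

struct example_uncore_funcs {
	/* one hook per access width, selected once at init time */
	uint32_t (*mmio_readl)(void *priv, uint32_t reg, bool trace);
	void (*mmio_writel)(void *priv, uint32_t reg, uint32_t val, bool trace);
};

struct example_private {
	struct example_uncore_funcs funcs;
};

/* Generic wrappers: the fast path has no per-generation conditionals. */
static inline uint32_t example_read32(struct example_private *priv, uint32_t reg)
{
	return priv->funcs.mmio_readl(priv, reg, true);
}

static inline void example_write32(struct example_private *priv, uint32_t reg,
				   uint32_t val)
{
	priv->funcs.mmio_writel(priv, reg, val, true);
}

The benefit of picking the hooks once in intel_uncore_init() is that generation-specific quirks (forcewake, the ILK dummy write, Haswell unclaimed-register checks) live only in the corresponding genN_/hsw_ accessors.
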
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c590cd9dca0b..d8e835ac2c5e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
 	select DRM_KMS_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  Choose this option if you have an R-Car chipset.
 	  If M is selected the module will be called rcar-du-drm.
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index ca498d151a76..d1372862d871 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -2,6 +2,7 @@ config DRM_SHMOBILE
 	tristate "DRM Support for SH Mobile"
 	depends on DRM && (ARM || SUPERH)
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	help
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 7a4d10106906..7c3ef79fcb37 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,6 +2,7 @@ config DRM_TILCDC
 	tristate "DRM Support for TI LCDC Display Controller"
 	depends on DRM && OF && ARM
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	select DRM_GEM_CMA_HELPER
 	select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 6222af19f456..f02528686cd5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -8,6 +8,7 @@ config DRM_UDL
 	select FB_SYS_IMAGEBLIT
 	select FB_DEFERRED_IO
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	help
 	  This is a KMS driver for the USB displaylink video adapters.
 	  Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/host1x/drm/Kconfig
index 69853a4de40a..0f36ddd74e87 100644
--- a/drivers/gpu/host1x/drm/Kconfig
+++ b/drivers/gpu/host1x/drm/Kconfig
@@ -2,6 +2,7 @@ config DRM_TEGRA
 	bool "NVIDIA Tegra DRM"
 	depends on DRM
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 394254f7d6b5..5032ff7c2259 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
 config DRM_IMX
 	tristate "DRM Support for Freescale i.MX"
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select VIDEOMODE_HELPERS
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index fcabc42d66ab..5ad9a4e2bc59 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -93,8 +93,16 @@ struct cpufreq_policy {
 #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
 #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
 
+#ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+	return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
 
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
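
The cpufreq.h hunk adds inline stubs for CONFIG_CPU_FREQ=n builds so that callers can use cpufreq_cpu_get()/cpufreq_cpu_put() without wrapping every call site in #ifdef; with the stubs, cpufreq_cpu_get() simply returns NULL. A small caller sketch under that assumption follows; read_max_freq_khz() is a hypothetical helper written for this note, not a kernel API:

/*
 * Sketch of a caller that builds and behaves sensibly whether or not
 * CONFIG_CPU_FREQ is enabled.  read_max_freq_khz() is a hypothetical
 * helper for illustration only.
 */
#include <linux/cpufreq.h>

static unsigned int read_max_freq_khz(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int max_khz = 0;

	/* NULL when CONFIG_CPU_FREQ=n (stub) or no policy exists for this CPU. */
	if (policy) {
		max_khz = policy->max;
		/* Drop the reference taken by cpufreq_cpu_get(). */
		cpufreq_cpu_put(policy);
	}

	return max_khz;
}
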