Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/i915_dma.c            |   3
 drivers/gpu/drm/i915/i915_drv.c            |   3
 drivers/gpu/drm/i915/i915_drv.h            |   8
 drivers/gpu/drm/i915/i915_gem.c            | 102
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     |   7
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2
 drivers/gpu/drm/i915/i915_irq.c            |  12
 drivers/gpu/drm/i915/i915_reg.h            |   4
 drivers/gpu/drm/i915/intel_display.c       |  56
 drivers/gpu/drm/i915/intel_lvds.c          |   8
 drivers/gpu/drm/i915/intel_pm.c            | 171
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  76
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   1
 drivers/gpu/drm/i915/intel_sprite.c        |  10
14 files changed, 358 insertions, 105 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8f63cd5de4b4..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -989,6 +989,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
     case I915_PARAM_HAS_SECURE_BATCHES:
         value = capable(CAP_SYS_ADMIN);
         break;
+    case I915_PARAM_HAS_PINNED_BATCHES:
+        value = 1;
+        break;
     default:
         DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                  param->param);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 530db83ef320..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -877,8 +877,7 @@ int i915_reset(struct drm_device *dev)
     return 0;
 }
 
-static int __devinit
-i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
     struct intel_device_info *intel_info =
         (struct intel_device_info *) ent->driver_data;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 557843dd4b2e..ed3059575576 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -780,6 +780,7 @@ typedef struct drm_i915_private {
     struct i915_hw_ppgtt *aliasing_ppgtt;
 
     struct shrinker inactive_shrinker;
+    bool shrinker_no_lock_stealing;
 
     /**
      * List of objects currently involved in rendering.
@@ -1100,6 +1101,7 @@ struct drm_i915_gem_object {
      */
     atomic_t pending_flip;
 };
+#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
@@ -1166,6 +1168,9 @@ struct drm_i915_file_private {
 #define IS_IVB_GT1(dev)     ((dev)->pci_device == 0x0156 || \
                  (dev)->pci_device == 0x0152 || \
                  (dev)->pci_device == 0x015a)
+#define IS_SNB_GT1(dev)     ((dev)->pci_device == 0x0102 || \
+                 (dev)->pci_device == 0x0106 || \
+                 (dev)->pci_device == 0x010A)
 #define IS_VALLEYVIEW(dev)  (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)      (INTEL_INFO(dev)->is_mobile)
@@ -1196,6 +1201,9 @@ struct drm_i915_file_private {
 #define HAS_OVERLAY(dev)        (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
 
+/* Early gen2 have a totally busted CS tlb and require pinned batches. */
+#define HAS_BROKEN_CS_TLB(dev)      (IS_I830(dev) || IS_845G(dev))
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
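For userspace, the new parameter is an ordinary getparam probe. A minimal sketch, assuming libdrm's drmIoctl() and an i915_drm.h that defines I915_PARAM_HAS_PINNED_BATCHES (the device node path and build flags, e.g. -I/usr/include/libdrm -ldrm, are illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    int main(void)
    {
        struct drm_i915_getparam gp;
        int fd, value = 0;

        fd = open("/dev/dri/card0", O_RDWR);    /* adjust node as needed */
        if (fd < 0)
            return 1;

        gp.param = I915_PARAM_HAS_PINNED_BATCHES;
        gp.value = &value;

        /* Kernels that predate the parameter reject the ioctl with EINVAL;
         * callers should treat that the same as value == 0. */
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
            value = 0;

        printf("pinned batches: %d\n", value);
        return 0;
    }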
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 742206e45103..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
     if (obj->base.map_list.map)
         return 0;
 
+    dev_priv->mm.shrinker_no_lock_stealing = true;
+
     ret = drm_gem_create_mmap_offset(&obj->base);
     if (ret != -ENOSPC)
-        return ret;
+        goto out;
 
     /* Badly fragmented mmap space? The only way we can recover
      * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
     i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
     ret = drm_gem_create_mmap_offset(&obj->base);
     if (ret != -ENOSPC)
-        return ret;
+        goto out;
 
     i915_gem_shrink_all(dev_priv);
-    return drm_gem_create_mmap_offset(&obj->base);
+    ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+    dev_priv->mm.shrinker_no_lock_stealing = false;
+
+    return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1711,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 }
 
 static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+          bool purgeable_only)
 {
     struct drm_i915_gem_object *obj, *next;
     long count = 0;
@@ -1719,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
     list_for_each_entry_safe(obj, next,
                  &dev_priv->mm.unbound_list,
                  gtt_list) {
-        if (i915_gem_object_is_purgeable(obj) &&
+        if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
             i915_gem_object_put_pages(obj) == 0) {
             count += obj->base.size >> PAGE_SHIFT;
             if (count >= target)
@@ -1730,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
     list_for_each_entry_safe(obj, next,
                  &dev_priv->mm.inactive_list,
                  mm_list) {
-        if (i915_gem_object_is_purgeable(obj) &&
+        if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
             i915_gem_object_unbind(obj) == 0 &&
             i915_gem_object_put_pages(obj) == 0) {
             count += obj->base.size >> PAGE_SHIFT;
@@ -1742,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
     return count;
 }
 
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+    return __i915_gem_shrink(dev_priv, target, true);
+}
+
 static void
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
@@ -2890,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
     struct drm_device *dev = obj->base.dev;
     drm_i915_private_t *dev_priv = dev->dev_private;
-    struct drm_mm_node *free_space;
+    struct drm_mm_node *node;
     u32 size, fence_size, fence_alignment, unfenced_alignment;
     bool mappable, fenceable;
     int ret;
@@ -2936,66 +2949,54 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
     i915_gem_object_pin_pages(obj);
 
+    node = kzalloc(sizeof(*node), GFP_KERNEL);
+    if (node == NULL) {
+        i915_gem_object_unpin_pages(obj);
+        return -ENOMEM;
+    }
+
 search_free:
     if (map_and_fenceable)
-        free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-                                   size, alignment, obj->cache_level,
-                                   0, dev_priv->mm.gtt_mappable_end,
-                                   false);
+        ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                              size, alignment, obj->cache_level,
+                              0, dev_priv->mm.gtt_mappable_end);
     else
-        free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
-                              size, alignment, obj->cache_level,
-                              false);
-
-    if (free_space != NULL) {
-        if (map_and_fenceable)
-            free_space =
-                drm_mm_get_block_range_generic(free_space,
-                                   size, alignment, obj->cache_level,
-                                   0, dev_priv->mm.gtt_mappable_end,
-                                   false);
-        else
-            free_space =
-                drm_mm_get_block_generic(free_space,
-                             size, alignment, obj->cache_level,
-                             false);
-    }
-    if (free_space == NULL) {
+        ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
+                         size, alignment, obj->cache_level);
+    if (ret) {
         ret = i915_gem_evict_something(dev, size, alignment,
                            obj->cache_level,
                            map_and_fenceable,
                            nonblocking);
-        if (ret) {
-            i915_gem_object_unpin_pages(obj);
-            return ret;
-        }
+        if (ret == 0)
+            goto search_free;
 
-        goto search_free;
+        i915_gem_object_unpin_pages(obj);
+        kfree(node);
+        return ret;
     }
-    if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-                          free_space,
-                          obj->cache_level))) {
+    if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
         i915_gem_object_unpin_pages(obj);
-        drm_mm_put_block(free_space);
+        drm_mm_put_block(node);
         return -EINVAL;
     }
 
     ret = i915_gem_gtt_prepare_object(obj);
     if (ret) {
         i915_gem_object_unpin_pages(obj);
-        drm_mm_put_block(free_space);
+        drm_mm_put_block(node);
         return ret;
     }
 
     list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
     list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-    obj->gtt_space = free_space;
-    obj->gtt_offset = free_space->start;
+    obj->gtt_space = node;
+    obj->gtt_offset = node->start;
 
     fenceable =
-        free_space->size == fence_size &&
-        (free_space->start & (fence_alignment - 1)) == 0;
+        node->size == fence_size &&
+        (node->start & (fence_alignment - 1)) == 0;
 
     mappable =
         obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -3528,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         goto out;
     }
 
-    obj->user_pin_count++;
-    obj->pin_filp = file;
-    if (obj->user_pin_count == 1) {
+    if (obj->user_pin_count == 0) {
         ret = i915_gem_object_pin(obj, args->alignment, true, false);
         if (ret)
             goto out;
     }
 
+    obj->user_pin_count++;
+    obj->pin_filp = file;
+
     /* XXX - flush the CPU caches for pinned objects
      * as the X server doesn't manage domains yet
      */
@@ -4392,12 +4394,18 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
         if (!mutex_is_locked_by(&dev->struct_mutex, current))
             return 0;
 
+        if (dev_priv->mm.shrinker_no_lock_stealing)
+            return 0;
+
         unlock = false;
     }
 
     if (nr_to_scan) {
         nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
         if (nr_to_scan > 0)
+            nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
+                            false);
+        if (nr_to_scan > 0)
             i915_gem_shrink_all(dev_priv);
     }
 
@@ -4405,7 +4413,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
     list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
         if (obj->pages_pin_count == 0)
             cnt += obj->base.size >> PAGE_SHIFT;
-    list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+    list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
         if (obj->pin_count == 0 && obj->pages_pin_count == 0)
             cnt += obj->base.size >> PAGE_SHIFT;
 
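The bind_to_gtt() rework above follows the preallocate-then-insert drm_mm idiom: the node is allocated before the allocator is touched, so the insert step can only fail for lack of GTT space (which eviction can fix), never for lack of memory mid-operation. A condensed sketch of the pattern, where evict_something() stands in for the driver's eviction logic and everything else comes from <drm/drm_mm.h> of this era:

    /* Sketch of the idiom, not i915 code. */
    static int alloc_gtt_node(struct drm_mm *mm, struct drm_mm_node **out,
                              unsigned long size, unsigned alignment,
                              unsigned long color)
    {
        struct drm_mm_node *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);  /* before touching mm */
        if (node == NULL)
            return -ENOMEM;

    search_free:
        ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);
        if (ret) {
            ret = evict_something(size, alignment); /* driver-specific */
            if (ret == 0)
                goto search_free;   /* eviction made room, try again */
            kfree(node);            /* nothing freed up, give up */
            return ret;
        }

        *out = node;    /* node->start now holds the GTT offset */
        return 0;
    }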
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..abeaafef6d7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 {
     struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-    return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+    return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
         obj = dma_buf->priv;
         /* is it from our device? */
         if (obj->base.dev == dev) {
+            /*
+             * Importing dmabuf exported from our own gem increases
+             * refcount on gem itself instead of f_count of dmabuf.
+             */
             drm_gem_object_reference(&obj->base);
+            dma_buf_put(dma_buf);
             return &obj->base;
         }
     }
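The import fix is about balancing two different reference counts. When a dma_buf that this driver exported comes back into the same device, the importer keeps the underlying GEM object alive with a GEM reference, so the dma_buf file reference the caller handed in must be dropped, or the dma_buf's f_count never returns to zero and the buffer leaks. A minimal sketch of the rule (my_import() is a stand-in, not the i915 entry point):

    /* Illustrative only; error handling and the foreign-buffer attach
     * path are elided. The caller holds a dma_buf reference for us. */
    struct drm_gem_object *my_import(struct drm_device *dev,
                                     struct dma_buf *buf)
    {
        struct drm_i915_gem_object *obj = buf->priv;

        if (obj->base.dev == dev) {
            /* Hand out a GEM reference instead of keeping the
             * dma_buf file reference the caller passed in ... */
            drm_gem_object_reference(&obj->base);
            /* ... and release that file reference so f_count
             * can drop back to zero eventually. */
            dma_buf_put(buf);
            return &obj->base;
        }

        return ERR_PTR(-EINVAL);    /* foreign buffer: attach path instead */
    }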
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee8f97f0539e..d6a994a07393 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -808,6 +808,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
         flags |= I915_DISPATCH_SECURE;
     }
+    if (args->flags & I915_EXEC_IS_PINNED)
+        flags |= I915_DISPATCH_PINNED;
 
     switch (args->flags & I915_EXEC_RING_MASK) {
     case I915_EXEC_DEFAULT:
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a4dc97f8b9f0..2220dec3e5d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
     if (!ring->get_seqno)
         return NULL;
 
+    if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+        u32 acthd = I915_READ(ACTHD);
+
+        if (WARN_ON(ring->id != RCS))
+            return NULL;
+
+        obj = ring->private;
+        if (acthd >= obj->gtt_offset &&
+            acthd < obj->gtt_offset + obj->base.size)
+            return i915_error_object_create(dev_priv, obj);
+    }
+
     seqno = ring->get_seqno(ring, false);
     list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
         if (obj->ring != ring)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3f75cfaf1c3f..186ee5c85b51 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -517,6 +517,7 @@
  *   the enables for writing to the corresponding low bit.
  */
 #define _3D_CHICKEN 0x02084
+#define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB  (1 << 10)
 #define _3D_CHICKEN2    0x0208c
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
@@ -532,7 +533,8 @@
 # define MI_FLUSH_ENABLE                (1 << 12)
 
 #define GEN6_GT_MODE    0x20d0
-#define GEN6_GT_MODE_HI (1 << 9)
+#define   GEN6_GT_MODE_HI               (1 << 9)
+#define   GEN6_TD_FOUR_ROW_DISPATCH_DISABLE     (1 << 5)
 
 #define GFX_MODE    0x02520
 #define GFX_MODE_GEN7   0x0229c
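Both new bits live in masked registers, which is why the gen6 clock-gating code further down writes them through _MASKED_BIT_ENABLE(): the upper 16 bits of the written value select which of the lower 16 bits actually change, so a single write can set or clear one bit without a read-modify-write. An illustrative macro pair (the driver's real helpers are in i915_reg.h):

    /* Sketch of the masked-register convention. */
    #define EXAMPLE_MASKED_ENABLE(bit)  (((bit) << 16) | (bit))
    #define EXAMPLE_MASKED_DISABLE(bit) ((bit) << 16)

    /* Sets bit 10 of _3D_CHICKEN, leaving every other bit untouched: */
    I915_WRITE(_3D_CHICKEN,
           EXAMPLE_MASKED_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));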
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5d127e068950..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8144,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
             DRM_DEBUG_KMS("encoder changed, full mode switch\n");
             config->mode_changed = true;
         }
-
-        /* Disable all disconnected encoders. */
-        if (connector->base.status == connector_status_disconnected)
-            connector->new_encoder = NULL;
     }
     /* connector->new_encoder is now updated for all connectors. */
 
@@ -8602,19 +8598,30 @@ int intel_framebuffer_init(struct drm_device *dev,
 {
     int ret;
 
-    if (obj->tiling_mode == I915_TILING_Y)
+    if (obj->tiling_mode == I915_TILING_Y) {
+        DRM_DEBUG("hardware does not support tiling Y\n");
         return -EINVAL;
+    }
 
-    if (mode_cmd->pitches[0] & 63)
+    if (mode_cmd->pitches[0] & 63) {
+        DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
+              mode_cmd->pitches[0]);
         return -EINVAL;
+    }
 
     /* FIXME <= Gen4 stride limits are bit unclear */
-    if (mode_cmd->pitches[0] > 32768)
+    if (mode_cmd->pitches[0] > 32768) {
+        DRM_DEBUG("pitch (%d) must be less than 32768\n",
+              mode_cmd->pitches[0]);
         return -EINVAL;
+    }
 
     if (obj->tiling_mode != I915_TILING_NONE &&
-        mode_cmd->pitches[0] != obj->stride)
+        mode_cmd->pitches[0] != obj->stride) {
+        DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
+              mode_cmd->pitches[0], obj->stride);
         return -EINVAL;
+    }
 
     /* Reject formats not supported by any plane early. */
     switch (mode_cmd->pixel_format) {
@@ -8625,8 +8632,10 @@ int intel_framebuffer_init(struct drm_device *dev,
         break;
     case DRM_FORMAT_XRGB1555:
     case DRM_FORMAT_ARGB1555:
-        if (INTEL_INFO(dev)->gen > 3)
+        if (INTEL_INFO(dev)->gen > 3) {
+            DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
             return -EINVAL;
+        }
         break;
     case DRM_FORMAT_XBGR8888:
     case DRM_FORMAT_ABGR8888:
@@ -8634,18 +8643,22 @@ int intel_framebuffer_init(struct drm_device *dev,
     case DRM_FORMAT_XRGB2101010:
     case DRM_FORMAT_ARGB2101010:
     case DRM_FORMAT_XBGR2101010:
     case DRM_FORMAT_ABGR2101010:
-        if (INTEL_INFO(dev)->gen < 4)
+        if (INTEL_INFO(dev)->gen < 4) {
+            DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
             return -EINVAL;
+        }
         break;
     case DRM_FORMAT_YUYV:
     case DRM_FORMAT_UYVY:
     case DRM_FORMAT_YVYU:
     case DRM_FORMAT_VYUY:
-        if (INTEL_INFO(dev)->gen < 6)
+        if (INTEL_INFO(dev)->gen < 5) {
+            DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
             return -EINVAL;
+        }
         break;
     default:
-        DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
+        DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
         return -EINVAL;
     }
@@ -9167,6 +9180,23 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
      * the crtc fixup. */
 }
 
+static void i915_redisable_vga(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    u32 vga_reg;
+
+    if (HAS_PCH_SPLIT(dev))
+        vga_reg = CPU_VGACNTRL;
+    else
+        vga_reg = VGACNTRL;
+
+    if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+        DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+        I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+        POSTING_READ(vga_reg);
+    }
+}
+
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
 void intel_modeset_setup_hw_state(struct drm_device *dev,
@@ -9275,6 +9305,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
             intel_set_mode(&crtc->base, &crtc->base.mode,
                        crtc->base.x, crtc->base.y, crtc->base.fb);
         }
+
+        i915_redisable_vga(dev);
     } else {
         intel_modeset_update_staged_output_state(dev);
     }
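The added checks double as documentation of what a userspace buffer allocator must produce before drmModeAddFB() can succeed. A userspace-side mirror of the pitch rules, as an illustrative sketch:

    #include <stdint.h>

    /* Returns a pitch satisfying the kernel checks above, or 0 if no
     * pitch can (mirrors the checks; not a kernel interface). */
    static uint32_t i915_legal_pitch(uint32_t width, uint32_t cpp)
    {
        uint32_t pitch = (width * cpp + 63) & ~(uint32_t)63; /* 64-byte align */

        return pitch > 32768 ? 0 : pitch;   /* kernel rejects pitch > 32768 */
    }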
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b9a660a53677..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -776,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
         },
     },
     {
         .callback = intel_no_lvds_dmi_callback,
-        .ident = "ZOTAC ZBOXSD-ID12/ID13",
-        .matches = {
-            DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
-            DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
-        },
-    },
-    {
-        .callback = intel_no_lvds_dmi_callback,
         .ident = "Gigabyte GA-D525TUD",
         .matches = {
             DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 496caa73eb70..e83a11794172 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
  * i915.i915_enable_fbc parameter
  */
 
+static bool intel_crtc_active(struct drm_crtc *crtc)
+{
+    /* Be paranoid as we can arrive here with only partial
+     * state retrieved from the hardware during setup.
+     */
+    return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
+}
+
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
      *   - going to an unsupported config (interlace, pixel multiply, etc.)
      */
     list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
-        if (tmp_crtc->enabled &&
-            !to_intel_crtc(tmp_crtc)->primary_disabled &&
-            tmp_crtc->fb) {
+        if (intel_crtc_active(tmp_crtc) &&
+            !to_intel_crtc(tmp_crtc)->primary_disabled) {
             if (crtc) {
                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
     struct drm_crtc *crtc, *enabled = NULL;
 
     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-        if (crtc->enabled && crtc->fb) {
+        if (intel_crtc_active(crtc)) {
             if (enabled)
                 return NULL;
             enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
     int entries, tlb_miss;
 
     crtc = intel_get_crtc_for_plane(dev, plane);
-    if (crtc->fb == NULL || !crtc->enabled) {
+    if (!intel_crtc_active(crtc)) {
         *cursor_wm = cursor->guard_size;
         *plane_wm = display->guard_size;
         return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
     int entries;
 
     crtc = intel_get_crtc_for_plane(dev, plane);
-    if (crtc->fb == NULL || !crtc->enabled)
+    if (!intel_crtc_active(crtc))
         return false;
 
     clock = crtc->mode.clock;   /* VESA DOT Clock */
@@ -1286,6 +1293,7 @@ static void valleyview_update_wm(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
     int plane_sr, cursor_sr;
+    int ignore_plane_sr, ignore_cursor_sr;
     unsigned int enabled = 0;
 
     vlv_update_drain_latency(dev);
@@ -1302,17 +1310,23 @@ static void valleyview_update_wm(struct drm_device *dev)
                 &planeb_wm, &cursorb_wm))
         enabled |= 2;
 
-    plane_sr = cursor_sr = 0;
     if (single_plane_enabled(enabled) &&
         g4x_compute_srwm(dev, ffs(enabled) - 1,
                  sr_latency_ns,
                  &valleyview_wm_info,
                  &valleyview_cursor_wm_info,
-                 &plane_sr, &cursor_sr))
+                 &plane_sr, &ignore_cursor_sr) &&
+        g4x_compute_srwm(dev, ffs(enabled) - 1,
+                 2*sr_latency_ns,
+                 &valleyview_wm_info,
+                 &valleyview_cursor_wm_info,
+                 &ignore_plane_sr, &cursor_sr)) {
         I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
-    else
+    } else {
         I915_WRITE(FW_BLC_SELF_VLV,
                I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+        plane_sr = cursor_sr = 0;
+    }
 
     DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
               planea_wm, cursora_wm,
@@ -1352,17 +1366,18 @@ static void g4x_update_wm(struct drm_device *dev)
                 &planeb_wm, &cursorb_wm))
         enabled |= 2;
 
-    plane_sr = cursor_sr = 0;
     if (single_plane_enabled(enabled) &&
         g4x_compute_srwm(dev, ffs(enabled) - 1,
                  sr_latency_ns,
                  &g4x_wm_info,
                  &g4x_cursor_wm_info,
-                 &plane_sr, &cursor_sr))
+                 &plane_sr, &cursor_sr)) {
         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
-    else
+    } else {
         I915_WRITE(FW_BLC_SELF,
                I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+        plane_sr = cursor_sr = 0;
+    }
 
     DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
               planea_wm, cursora_wm,
@@ -1468,7 +1483,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
     fifo_size = dev_priv->display.get_fifo_size(dev, 0);
     crtc = intel_get_crtc_for_plane(dev, 0);
-    if (crtc->enabled && crtc->fb) {
+    if (intel_crtc_active(crtc)) {
         int cpp = crtc->fb->bits_per_pixel / 8;
         if (IS_GEN2(dev))
             cpp = 4;
@@ -1482,7 +1497,7 @@ static void i9xx_update_wm(struct drm_device *dev)
 
     fifo_size = dev_priv->display.get_fifo_size(dev, 1);
     crtc = intel_get_crtc_for_plane(dev, 1);
-    if (crtc->enabled && crtc->fb) {
+    if (intel_crtc_active(crtc)) {
         int cpp = crtc->fb->bits_per_pixel / 8;
         if (IS_GEN2(dev))
             cpp = 4;
@@ -1811,8 +1826,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
         enabled |= 2;
     }
 
-    if ((dev_priv->num_pipe == 3) &&
-        g4x_compute_wm0(dev, 2,
+    /*
+     * Calculate and update the self-refresh watermark only when one
+     * display plane is used.
+     *
+     * SNB supports 3 levels of watermark.
+     *
+     * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
+     * and disabled in the descending order
+     *
+     */
+    I915_WRITE(WM3_LP_ILK, 0);
+    I915_WRITE(WM2_LP_ILK, 0);
+    I915_WRITE(WM1_LP_ILK, 0);
+
+    if (!single_plane_enabled(enabled) ||
+        dev_priv->sprite_scaling_enabled)
+        return;
+
+    enabled = ffs(enabled) - 1;
+
+    /* WM1 */
+    if (!ironlake_compute_srwm(dev, 1, enabled,
+                   SNB_READ_WM1_LATENCY() * 500,
+                   &sandybridge_display_srwm_info,
+                   &sandybridge_cursor_srwm_info,
+                   &fbc_wm, &plane_wm, &cursor_wm))
+        return;
+
+    I915_WRITE(WM1_LP_ILK,
+           WM1_LP_SR_EN |
+           (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+           (fbc_wm << WM1_LP_FBC_SHIFT) |
+           (plane_wm << WM1_LP_SR_SHIFT) |
+           cursor_wm);
+
+    /* WM2 */
+    if (!ironlake_compute_srwm(dev, 2, enabled,
+                   SNB_READ_WM2_LATENCY() * 500,
+                   &sandybridge_display_srwm_info,
+                   &sandybridge_cursor_srwm_info,
+                   &fbc_wm, &plane_wm, &cursor_wm))
+        return;
+
+    I915_WRITE(WM2_LP_ILK,
+           WM2_LP_EN |
+           (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+           (fbc_wm << WM1_LP_FBC_SHIFT) |
+           (plane_wm << WM1_LP_SR_SHIFT) |
+           cursor_wm);
+
+    /* WM3 */
+    if (!ironlake_compute_srwm(dev, 3, enabled,
+                   SNB_READ_WM3_LATENCY() * 500,
+                   &sandybridge_display_srwm_info,
+                   &sandybridge_cursor_srwm_info,
+                   &fbc_wm, &plane_wm, &cursor_wm))
+        return;
+
+    I915_WRITE(WM3_LP_ILK,
+           WM3_LP_EN |
+           (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+           (fbc_wm << WM1_LP_FBC_SHIFT) |
+           (plane_wm << WM1_LP_SR_SHIFT) |
+           cursor_wm);
+}
+
+static void ivybridge_update_wm(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+    u32 val;
+    int fbc_wm, plane_wm, cursor_wm;
+    int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
+    unsigned int enabled;
+
+    enabled = 0;
+    if (g4x_compute_wm0(dev, 0,
+                &sandybridge_display_wm_info, latency,
+                &sandybridge_cursor_wm_info, latency,
+                &plane_wm, &cursor_wm)) {
+        val = I915_READ(WM0_PIPEA_ILK);
+        val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+        I915_WRITE(WM0_PIPEA_ILK, val |
+               ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+        DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+                  " plane %d, cursor: %d\n",
+                  plane_wm, cursor_wm);
+        enabled |= 1;
+    }
+
+    if (g4x_compute_wm0(dev, 1,
+                &sandybridge_display_wm_info, latency,
+                &sandybridge_cursor_wm_info, latency,
+                &plane_wm, &cursor_wm)) {
+        val = I915_READ(WM0_PIPEB_ILK);
+        val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
+        I915_WRITE(WM0_PIPEB_ILK, val |
+               ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+        DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+                  " plane %d, cursor: %d\n",
+                  plane_wm, cursor_wm);
+        enabled |= 2;
+    }
+
+    if (g4x_compute_wm0(dev, 2,
                 &sandybridge_display_wm_info, latency,
                 &sandybridge_cursor_wm_info, latency,
                 &plane_wm, &cursor_wm)) {
@@ -1875,12 +1992,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
            (plane_wm << WM1_LP_SR_SHIFT) |
            cursor_wm);
 
-    /* WM3 */
+    /* WM3, note we have to correct the cursor latency */
     if (!ironlake_compute_srwm(dev, 3, enabled,
                    SNB_READ_WM3_LATENCY() * 500,
                    &sandybridge_display_srwm_info,
                    &sandybridge_cursor_srwm_info,
-                   &fbc_wm, &plane_wm, &cursor_wm))
+                   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
+        !ironlake_compute_srwm(dev, 3, enabled,
+                   2 * SNB_READ_WM3_LATENCY() * 500,
+                   &sandybridge_display_srwm_info,
+                   &sandybridge_cursor_srwm_info,
+                   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
         return;
 
     I915_WRITE(WM3_LP_ILK,
@@ -1929,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
     int entries, tlb_miss;
 
     crtc = intel_get_crtc_for_plane(dev, plane);
-    if (crtc->fb == NULL || !crtc->enabled) {
+    if (!intel_crtc_active(crtc)) {
         *sprite_wm = display->guard_size;
         return false;
     }
@@ -3471,6 +3593,15 @@ static void gen6_init_clock_gating(struct drm_device *dev)
            I915_READ(ILK_DISPLAY_CHICKEN2) |
            ILK_ELPIN_409_SELECT);
 
+    /* WaDisableHiZPlanesWhenMSAAEnabled */
+    I915_WRITE(_3D_CHICKEN,
+           _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
+
+    /* WaSetupGtModeTdRowDispatch */
+    if (IS_SNB_GT1(dev))
+        I915_WRITE(GEN6_GT_MODE,
+               _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
+
     I915_WRITE(WM3_LP_ILK, 0);
     I915_WRITE(WM2_LP_ILK, 0);
     I915_WRITE(WM1_LP_ILK, 0);
@@ -3999,7 +4130,7 @@ void intel_init_pm(struct drm_device *dev)
     } else if (IS_IVYBRIDGE(dev)) {
         /* FIXME: detect B0+ stepping and use auto training */
         if (SNB_READ_WM0_LATENCY()) {
-            dev_priv->display.update_wm = sandybridge_update_wm;
+            dev_priv->display.update_wm = ivybridge_update_wm;
             dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
         } else {
             DRM_DEBUG_KMS("Failed to read display plane latency. "
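Both the Valleyview change and the WM3 fix above use the same trick: the self-refresh computation runs twice, once with the nominal latency to get the plane value and once with doubled latency to get the cursor value, and each pass throws away the half it does not need (the ignore_* locals). Schematically, with compute_srwm() standing in for g4x_compute_srwm()/ironlake_compute_srwm():

    /* Sketch only; enable/disable_self_refresh() are stand-ins. */
    int plane_sr, cursor_sr, ignore;

    if (compute_srwm(latency_ns, &plane_sr, &ignore) &&
        compute_srwm(2 * latency_ns, &ignore, &cursor_sr)) {
        enable_self_refresh(plane_sr, cursor_sr);
    } else {
        plane_sr = cursor_sr = 0;   /* report disabled watermarks */
        disable_self_refresh();
    }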
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2346b920bd86..ae253e04c391 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+    struct drm_device *dev = ring->dev;
+
     if (!ring->private)
         return;
 
+    if (HAS_BROKEN_CS_TLB(dev))
+        drm_gem_object_unreference(to_gem_object(ring->private));
+
     cleanup_pipe_control(ring);
 }
 
@@ -969,6 +974,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
     return 0;
 }
 
+/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
               u32 offset, u32 len,
@@ -976,15 +983,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
     int ret;
 
-    ret = intel_ring_begin(ring, 4);
-    if (ret)
-        return ret;
+    if (flags & I915_DISPATCH_PINNED) {
+        ret = intel_ring_begin(ring, 4);
+        if (ret)
+            return ret;
 
-    intel_ring_emit(ring, MI_BATCH_BUFFER);
-    intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-    intel_ring_emit(ring, offset + len - 8);
-    intel_ring_emit(ring, 0);
-    intel_ring_advance(ring);
+        intel_ring_emit(ring, MI_BATCH_BUFFER);
+        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, offset + len - 8);
+        intel_ring_emit(ring, MI_NOOP);
+        intel_ring_advance(ring);
+    } else {
+        struct drm_i915_gem_object *obj = ring->private;
+        u32 cs_offset = obj->gtt_offset;
+
+        if (len > I830_BATCH_LIMIT)
+            return -ENOSPC;
+
+        ret = intel_ring_begin(ring, 9+3);
+        if (ret)
+            return ret;
+        /* Blit the batch (which has now all relocs applied) to the stable batch
+         * scratch bo area (so that the CS never stumbles over its tlb
+         * invalidation bug) ... */
+        intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+                XY_SRC_COPY_BLT_WRITE_ALPHA |
+                XY_SRC_COPY_BLT_WRITE_RGB);
+        intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+        intel_ring_emit(ring, cs_offset);
+        intel_ring_emit(ring, 0);
+        intel_ring_emit(ring, 4096);
+        intel_ring_emit(ring, offset);
+        intel_ring_emit(ring, MI_FLUSH);
+
+        /* ... and execute it. */
+        intel_ring_emit(ring, MI_BATCH_BUFFER);
+        intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+        intel_ring_emit(ring, cs_offset + len - 8);
+        intel_ring_advance(ring);
+    }
 
     return 0;
 }
@@ -1596,6 +1635,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
     ring->init = init_render_ring;
     ring->cleanup = render_ring_cleanup;
 
+    /* Workaround batchbuffer to combat CS tlb bug. */
+    if (HAS_BROKEN_CS_TLB(dev)) {
+        struct drm_i915_gem_object *obj;
+        int ret;
+
+        obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+        if (obj == NULL) {
+            DRM_ERROR("Failed to allocate batch bo\n");
+            return -ENOMEM;
+        }
+
+        ret = i915_gem_object_pin(obj, 0, true, false);
+        if (ret != 0) {
+            drm_gem_object_unreference(&obj->base);
+            DRM_ERROR("Failed to pin batch bo\n");
+            return ret;
+        }
+
+        ring->private = obj;
+    }
+
     return intel_init_ring_buffer(dev, ring);
 }
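The dispatch change gives 830/845 two paths: batches flagged I915_DISPATCH_PINNED run in place, anything else is first blitted into the pinned scratch bo so the CS never fetches through a stale TLB entry. Userspace that manages its own pinned batches (detectable via the I915_PARAM_HAS_PINNED_BATCHES getparam added earlier in this series) can skip the copy by setting I915_EXEC_IS_PINNED. An illustrative userspace sketch, with the exec_objects setup assumed done by the caller:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Sketch: dispatch a batch that userspace keeps pinned. */
    int exec_pinned_batch(int fd, struct drm_i915_gem_exec_object2 *exec_objects,
                          uint32_t count, uint32_t batch_len)
    {
        struct drm_i915_gem_execbuffer2 eb;

        memset(&eb, 0, sizeof(eb));
        eb.buffers_ptr = (uintptr_t)exec_objects;
        eb.buffer_count = count;
        eb.batch_len = batch_len;   /* the kernel copy path caps this at 256 KiB */
        eb.flags = I915_EXEC_RENDER | I915_EXEC_IS_PINNED;

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
    }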
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 526182ed0c6d..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -94,6 +94,7 @@ struct  intel_ring_buffer {
                        u32 offset, u32 length,
                        unsigned flags);
 #define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
     void        (*cleanup)(struct intel_ring_buffer *ring);
     int     (*sync_to)(struct intel_ring_buffer *ring,
                struct intel_ring_buffer *to,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 827dcd4edf1c..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -120,11 +120,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
     I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
     I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
 
-    linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+    linear_offset = y * fb->pitches[0] + x * pixel_size;
     sprsurf_offset =
         intel_gen4_compute_offset_xtiled(&x, &y,
-                         fb->bits_per_pixel / 8,
-                         fb->pitches[0]);
+                         pixel_size, fb->pitches[0]);
     linear_offset -= sprsurf_offset;
 
     /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -286,11 +285,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
     I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
     I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
 
-    linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+    linear_offset = y * fb->pitches[0] + x * pixel_size;
     dvssurf_offset =
         intel_gen4_compute_offset_xtiled(&x, &y,
-                         fb->bits_per_pixel / 8,
-                         fb->pitches[0]);
+                         pixel_size, fb->pitches[0]);
     linear_offset -= dvssurf_offset;
 
     if (obj->tiling_mode != I915_TILING_NONE)
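pixel_size here is the bytes-per-pixel derived from the framebuffer's pixel format; the legacy bits_per_pixel field is only really meaningful for RGB formats, so deriving the byte stride from it breaks YUV sprite framebuffers. Getting it wrong corrupts both outputs of the tiling helper. For reference, intel_gen4_compute_offset_xtiled() works roughly like this (reconstructed sketch, not copied from the tree): X-tiles are 512 bytes wide by 8 rows tall, the returned surface base lands on a tile corner, and the residual x/y left behind are programmed through the LINOFF/TILEOFF registers.

    /* Sketch of the X-tiled offset split; cpp is bytes per pixel. */
    static unsigned long compute_offset_xtiled(int *x, int *y,
                                               unsigned int cpp,
                                               unsigned int pitch)
    {
        int tile_rows = *y / 8;             /* whole tile rows above us */
        int tiles = *x * cpp / 512;         /* whole tiles to our left */

        *y %= 8;                            /* row inside the tile */
        *x = (*x * cpp % 512) / cpp;        /* pixel inside the tile */

        /* each tile is 512 * 8 = 4096 bytes */
        return tile_rows * pitch * 8 + tiles * 4096;
    }

With a wrong cpp both the tile count and the residual x are computed against the wrong byte positions, which is exactly the class of bug the two hunks above fix.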