author     Dave Airlie <airlied@redhat.com>   2013-07-17 02:40:49 +0400
committer  Dave Airlie <airlied@redhat.com>   2013-07-17 02:40:49 +0400
commit     6bd2cab2c1fd0616b28bb567543f53b624bf885a (patch)
tree       52f8616f78609ab433f68b52ec62d8fdb6506409 /drivers
parent     d1ce3d5496f2a7c90dd00a9133572f931d2acdcc (diff)
parent     46a0b638f35b45fc13d3dc0deb6a7e17988170b2 (diff)
download   linux-6bd2cab2c1fd0616b28bb567543f53b624bf885a.tar.xz
Merge tag 'drm-intel-fixes-2013-07-11' of git://people.freedesktop.org/~danvet/drm-intel
One feature latecomer: I've forgotten to merge the patch to reenable the
Haswell power well feature now that the audio interaction is fixed up.
Since that was the only unfixed issue with it, I figured I could throw
it in a bit late, and it's trivial to revert in case I'm wrong.
Otherwise all bug/regression fixes:
- Fix status page reinit after gpu hangs, spotted by more paranoid igt
checks.
- Fix an object-list-walking regression in the shrinker (only the
counting part; the actual shrinking code was correct, so no Oops
potential), from Xiong Zhang.
- Fix DP 1.2 bw limits (Imre).
- Restore legacy forcewake on Ivybridge; there are too many broken BIOSes
out there. We do log a warning, though, that recent userspace might fall
over with that config (Guenter Roeck).
- Patch up the gen2 cs tlb w/a.
- Improve the fence coherency w/a now that we have a better understanding
of what's going on; the underlying hazard is modeled in the sketch after
this list. The removed wbinvd+IPI should make -rt folks happy. Big
thanks to Jon Bloomfield for figuring this out; patches from Chris.
- Fix a write-read race when switching rings (Chris). Spotted by code
inspection, but now we also have an igt for it.
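
The fence coherency item deserves a quick illustration. The hazard class is
a torn 64-bit register update: when a 64-bit MMIO write reaches the hardware
as two 32-bit halves, anything sampling the register between the halves sees
a mix of old and new bits. The following is a purely illustrative user-space
C model of that tearing, not i915 code; every name in it is made up.

    #include <stdint.h>
    #include <stdio.h>

    /* Model a 64-bit register that is updated as two 32-bit halves;
     * an observer that reads between the halves sees a torn value. */
    int main(void)
    {
        uint32_t reg[2] = { 0x11111111, 0x22222222 }; /* old fence value */
        uint64_t newval = 0xaaaaaaaabbbbbbbbull;      /* new fence value */

        reg[0] = (uint32_t)newval;  /* low half lands first... */

        /* ...an access that slips in here sees old-high/new-low: */
        uint64_t torn = ((uint64_t)reg[1] << 32) | reg[0];
        printf("torn view:  %016llx\n", (unsigned long long)torn);

        reg[1] = (uint32_t)(newval >> 32);
        uint64_t final = ((uint64_t)reg[1] << 32) | reg[0];
        printf("final view: %016llx\n", (unsigned long long)final);
        return 0;
    }

How the fix avoids ever exposing a torn-but-valid fence is visible in the
i915_gem.c hunks of the diff below, with a companion sketch after them.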
There's an ugly regression, introduced between 3.10-rc7 and 3.10.0, that
we're still working on. Unfortunately we can't just revert the offender,
since that one fixes another regression :( I've asked Steven to include my
-fixes branch in linux-next to hopefully prevent such fallout in the
future.
* tag 'drm-intel-fixes-2013-07-11' of git://people.freedesktop.org/~danvet/drm-intel:
Revert "drm/i915: Workaround incoherence between fences and LLC across multiple CPUs"
drm/i915: Fix incoherence with fence updates on Sandybridge+
drm/i915: Fix write-read race with multiple rings
Partially revert "drm/i915: unconditionally use mt forcewake on hsw/ivb"
drm/i915: fix lane bandwidth capping for DP 1.2 sinks
drm/i915: fix up ring cleanup for the i830/i845 CS tlb w/a
drm/i915: Correct obj->mm_list link to dev_priv->dev_priv->mm.inactive_list
drm/i915: switch disable_power_well default value to 1
drm/i915: reinit status page registers after gpu reset
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         | 83
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c         |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c         | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 38
5 files changed, 93 insertions(+), 68 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 062cbda1bf4a..f4af1ca0fb62 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -123,10 +123,10 @@ module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 060
 MODULE_PARM_DESC(preliminary_hw_support,
 		"Enable preliminary hardware support. (default: false)");
 
-int i915_disable_power_well __read_mostly = 0;
+int i915_disable_power_well __read_mostly = 1;
 module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
 MODULE_PARM_DESC(disable_power_well,
-		 "Disable the power well when possible (default: false)");
+		 "Disable the power well when possible (default: true)");
 
 int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32407ec..97afd2639fb6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1880,6 +1880,10 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
+	if (obj->ring != ring && obj->last_write_seqno) {
+		/* Keep the seqno relative to the current ring */
+		obj->last_write_seqno = seqno;
+	}
 	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
@@ -2653,7 +2657,6 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int fence_reg;
 	int fence_pitch_shift;
-	uint64_t val;
 
 	if (INTEL_INFO(dev)->gen >= 6) {
 		fence_reg = FENCE_REG_SANDYBRIDGE_0;
@@ -2663,8 +2666,23 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
 	}
 
+	fence_reg += reg * 8;
+
+	/* To w/a incoherency with non-atomic 64-bit register updates,
+	 * we split the 64-bit update into two 32-bit writes. In order
+	 * for a partial fence not to be evaluated between writes, we
+	 * precede the update with write to turn off the fence register,
+	 * and only enable the fence as the last step.
+	 *
+	 * For extra levels of paranoia, we make sure each step lands
+	 * before applying the next step.
+	 */
+	I915_WRITE(fence_reg, 0);
+	POSTING_READ(fence_reg);
+
 	if (obj) {
 		u32 size = obj->gtt_space->size;
+		uint64_t val;
 
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
@@ -2673,12 +2691,16 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
 
-	fence_reg += reg * 8;
-	I915_WRITE64(fence_reg, val);
-	POSTING_READ(fence_reg);
+		I915_WRITE(fence_reg + 4, val >> 32);
+		POSTING_READ(fence_reg + 4);
+
+		I915_WRITE(fence_reg + 0, val);
+		POSTING_READ(fence_reg);
+	} else {
+		I915_WRITE(fence_reg + 4, 0);
+		POSTING_READ(fence_reg + 4);
+	}
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2796,56 +2818,17 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 	return fence - dev_priv->fence_regs;
 }
 
-struct write_fence {
-	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
-	int fence;
-};
-
-static void i915_gem_write_fence__ipi(void *data)
-{
-	struct write_fence *args = data;
-
-	/* Required for SNB+ with LLC */
-	wbinvd();
-
-	/* Required for VLV */
-	i915_gem_write_fence(args->dev, args->fence, args->obj);
-}
-
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-	struct write_fence args = {
-		.dev = obj->base.dev,
-		.fence = fence_number(dev_priv, fence),
-		.obj = enable ? obj : NULL,
-	};
-
-	/* In order to fully serialize access to the fenced region and
-	 * the update to the fence register we need to take extreme
-	 * measures on SNB+. In theory, the write to the fence register
-	 * flushes all memory transactions before, and coupled with the
-	 * mb() placed around the register write we serialise all memory
-	 * operations with respect to the changes in the tiler. Yet, on
-	 * SNB+ we need to take a step further and emit an explicit wbinvd()
-	 * on each processor in order to manually flush all memory
-	 * transactions before updating the fence register.
-	 *
-	 * However, Valleyview complicates matter. There the wbinvd is
-	 * insufficient and unlike SNB/IVB requires the serialising
-	 * register write. (Note that that register write by itself is
-	 * conversely not sufficient for SNB+.) To compromise, we do both.
-	 */
-	if (INTEL_INFO(args.dev)->gen >= 6)
-		on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
-	else
-		i915_gem_write_fence(args.dev, args.fence, args.obj);
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
 	if (enable) {
-		obj->fence_reg = args.fence;
+		obj->fence_reg = reg;
 		fence->obj = obj;
 		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 	} else {
@@ -4611,7 +4594,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
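The i915_gem.c hunks above replace the heavyweight wbinvd-plus-IPI
workaround with a carefully ordered pair of 32-bit writes: the fence is
disabled first, each half is posted before the next step, and the valid bit
only returns with the final low-dword write, so the hardware can never
sample the torn-but-valid state modeled in the earlier sketch. A minimal
user-space model of that ordering follows; the fence_mmio struct, the
FENCE_VALID bit, and the fence_write_split helper are invented for
illustration and are not the driver's API.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 64-bit fence register mapped as two 32-bit halves;
     * this models the hardware view, not real i915 MMIO. */
    struct fence_mmio {
        volatile uint32_t lo; /* low dword, bit 0 = FENCE_VALID */
        volatile uint32_t hi; /* high dword */
    };

    #define FENCE_VALID 0x1u

    /* Safe update: turn the fence off, land each half before the next
     * step, and re-enable only with the final low-dword write. */
    static void fence_write_split(struct fence_mmio *f, uint64_t val)
    {
        f->lo = 0;                     /* disable the fence first */
        (void)f->lo;                   /* stand-in for a posting read */
        f->hi = (uint32_t)(val >> 32); /* upper half while fence is off */
        (void)f->hi;
        f->lo = (uint32_t)val;         /* lower half carries FENCE_VALID */
        (void)f->lo;
    }

    int main(void)
    {
        struct fence_mmio reg = { 0, 0 };
        fence_write_split(&reg, 0xfffff000deadb001ull); /* bit 0 = valid */
        printf("hi=%08x lo=%08x\n", (unsigned)reg.hi, (unsigned)reg.lo);
        return 0;
    }

The design point is that at no instant between the writes can the register
hold a half-updated value that is still marked valid.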
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index b73971234013..26e162bb3a51 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -75,7 +75,12 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
 	case DP_LINK_BW_1_62:
 	case DP_LINK_BW_2_7:
 		break;
+	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
+		max_link_bw = DP_LINK_BW_2_7;
+		break;
 	default:
+		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
+		     max_link_bw);
 		max_link_bw = DP_LINK_BW_1_62;
 		break;
 	}
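The intel_dp.c change clamps a DP 1.2 sink's advertised 5.4 Gbps link rate
down to the 2.7 Gbps rate this hardware can drive, and warns on anything
unrecognised instead of silently using it. A hedged user-space sketch of
the same clamping logic follows; the constants mirror the standard DPCD
MAX_LINK_RATE codes (in units of 0.27 Gbps), but the clamp_max_link_bw
function and its warning path are invented for illustration.

    #include <stdio.h>

    /* DPCD MAX_LINK_RATE codes, in units of 0.27 Gbps */
    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a
    #define DP_LINK_BW_5_4  0x14

    /* Hypothetical helper: clamp the sink's advertised max link
     * bandwidth to what a pre-HBR2 source can actually drive. */
    static int clamp_max_link_bw(int max_link_bw)
    {
        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
            break;
        case DP_LINK_BW_5_4: /* DP 1.2 sinks may advertise HBR2 */
            max_link_bw = DP_LINK_BW_2_7;
            break;
        default: /* unknown code: warn and fall back to the floor */
            fprintf(stderr, "invalid max DP link bw val %x, using 1.62Gbps\n",
                    (unsigned)max_link_bw);
            max_link_bw = DP_LINK_BW_1_62;
            break;
        }
        return max_link_bw;
    }

    int main(void)
    {
        printf("0x14 -> 0x%02x\n", clamp_max_link_bw(DP_LINK_BW_5_4)); /* 0x0a */
        printf("0xff -> 0x%02x\n", clamp_max_link_bw(0xff));           /* 0x06 */
        return 0;
    }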
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ccbdd83f5220..d10e6735771f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5500,9 +5500,38 @@ void intel_gt_init(struct drm_device *dev)
 	if (IS_VALLEYVIEW(dev)) {
 		dev_priv->gt.force_wake_get = vlv_force_wake_get;
 		dev_priv->gt.force_wake_put = vlv_force_wake_put;
-	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+	} else if (IS_HASWELL(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
+	} else if (IS_IVYBRIDGE(dev)) {
+		u32 ecobus;
+
+		/* IVB configs may use multi-threaded forcewake */
+
+		/* A small trick here - if the bios hasn't configured
+		 * MT forcewake, and if the device is in RC6, then
+		 * force_wake_mt_get will not wake the device and the
+		 * ECOBUS read will return zero. Which will be
+		 * (correctly) interpreted by the test below as MT
+		 * forcewake being disabled.
+		 */
+		mutex_lock(&dev->struct_mutex);
+		__gen6_gt_force_wake_mt_get(dev_priv);
+		ecobus = I915_READ_NOTRACE(ECOBUS);
+		__gen6_gt_force_wake_mt_put(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (ecobus & FORCEWAKE_MT_ENABLE) {
+			dev_priv->gt.force_wake_get =
+				__gen6_gt_force_wake_mt_get;
+			dev_priv->gt.force_wake_put =
+				__gen6_gt_force_wake_mt_put;
+		} else {
+			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+			DRM_INFO("when using vblank-synced partial screen updates.\n");
+			dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+			dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+		}
 	} else if (IS_GEN6(dev)) {
 		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
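The intel_pm.c hunk probes whether the BIOS actually enabled multi-threaded
forcewake before trusting it: it grabs MT forcewake, reads ECOBUS, and falls
back to the legacy single-threaded callbacks if the enable bit never stuck.
Below is a small model of that probe-then-select pattern using function
pointers; the gt_ops struct, the read_ecobus stand-in, the callback names,
and the placeholder enable bit are all hypothetical.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical device with selectable forcewake callbacks. */
    struct gt_ops {
        void (*force_wake_get)(void);
        void (*force_wake_put)(void);
    };

    static void legacy_get(void) { puts("legacy forcewake get"); }
    static void legacy_put(void) { puts("legacy forcewake put"); }
    static void mt_get(void)     { puts("MT forcewake get"); }
    static void mt_put(void)     { puts("MT forcewake put"); }

    #define FORCEWAKE_MT_ENABLE (1u << 5) /* placeholder bit */

    /* Stand-in for the ECOBUS read: if the BIOS never set up MT
     * forcewake, mt_get() would not wake the device and this read
     * would return 0, which the caller treats as "MT disabled". */
    static unsigned int read_ecobus(bool bios_enabled_mt)
    {
        return bios_enabled_mt ? FORCEWAKE_MT_ENABLE : 0;
    }

    static void pick_forcewake(struct gt_ops *gt, bool bios_enabled_mt)
    {
        mt_get();                    /* try the MT path... */
        unsigned int ecobus = read_ecobus(bios_enabled_mt);
        mt_put();

        if (ecobus & FORCEWAKE_MT_ENABLE) {
            gt->force_wake_get = mt_get;
            gt->force_wake_put = mt_put;
        } else {                     /* ...fall back if it didn't stick */
            gt->force_wake_get = legacy_get;
            gt->force_wake_put = legacy_put;
        }
    }

    int main(void)
    {
        struct gt_ops gt;
        pick_forcewake(&gt, false);
        gt.force_wake_get();         /* prints the legacy path */
        gt.force_wake_put();
        return 0;
    }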
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab552046c..664118d8c1d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,6 +379,17 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 	return I915_READ(acthd_reg);
 }
 
+static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 addr;
+
+	addr = dev_priv->status_page_dmah->busaddr;
+	if (INTEL_INFO(ring->dev)->gen >= 4)
+		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+	I915_WRITE(HWS_PGA, addr);
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -390,6 +401,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (HAS_FORCE_WAKE(dev))
 		gen6_gt_force_wake_get(dev_priv);
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
@@ -518,9 +534,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	struct pipe_control *pc = ring->private;
 	struct drm_i915_gem_object *obj;
 
-	if (!ring->private)
-		return;
-
 	obj = pc->obj;
 
 	kunmap(sg_page(obj->pages->sgl));
@@ -528,7 +541,6 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 	drm_gem_object_unreference(&obj->base);
 
 	kfree(pc);
-	ring->private = NULL;
 }
 
 static int init_render_ring(struct intel_ring_buffer *ring)
@@ -601,7 +613,10 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 	if (HAS_BROKEN_CS_TLB(dev))
 		drm_gem_object_unreference(to_gem_object(ring->private));
 
-	cleanup_pipe_control(ring);
+	if (INTEL_INFO(dev)->gen >= 5)
+		cleanup_pipe_control(ring);
+
+	ring->private = NULL;
 }
 
 static void
@@ -1223,7 +1238,6 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			 ring->name, ring->status_page.gfx_addr);
@@ -1237,10 +1251,9 @@ err:
 	return ret;
 }
 
-static int init_phys_hws_pga(struct intel_ring_buffer *ring)
+static int init_phys_status_page(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	u32 addr;
 
 	if (!dev_priv->status_page_dmah) {
 		dev_priv->status_page_dmah =
@@ -1249,11 +1262,6 @@ static int init_phys_hws_pga(struct intel_ring_buffer *ring)
 			return -ENOMEM;
 	}
 
-	addr = dev_priv->status_page_dmah->busaddr;
-	if (INTEL_INFO(ring->dev)->gen >= 4)
-		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
-	I915_WRITE(HWS_PGA, addr);
-
 	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
@@ -1281,7 +1289,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		return ret;
 	} else {
 		BUG_ON(ring->id != RCS);
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -1893,7 +1901,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
-		ret = init_phys_hws_pga(ring);
+		ret = init_phys_status_page(ring);
 		if (ret)
 			return ret;
 	}
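Finally, the intel_ringbuffer.c changes move the status-page register setup
out of the one-time init paths and into init_ring_common(), so it is
reprogrammed on every ring (re)initialisation, including after a GPU reset.
The shape of the fix is "program hardware state in the routine that runs on
reset, not only at load time"; a hedged model of that pattern follows, with
every name invented for illustration.

    #include <stdio.h>

    /* Hypothetical ring with software state (allocated once) and
     * hardware state (must be reprogrammed after every reset). */
    struct ring {
        unsigned int hws_addr; /* software-side record of the page */
    };

    static void write_hws_pga(unsigned int addr)
    {
        printf("HWS_PGA <- %#x\n", addr); /* stand-in for an MMIO write */
    }

    /* One-time allocation: software state only, no register writes. */
    static void init_status_page(struct ring *ring)
    {
        ring->hws_addr = 0x1000; /* pretend allocation result */
    }

    /* Runs at load *and* after GPU reset: redo the register setup. */
    static void init_ring_common(struct ring *ring)
    {
        write_hws_pga(ring->hws_addr);
        /* ...then stop the ring, program head/tail/ctl, etc. */
    }

    int main(void)
    {
        struct ring r;
        init_status_page(&r); /* once */
        init_ring_common(&r); /* at load */
        init_ring_common(&r); /* again after a simulated GPU reset */
        return 0;
    }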