author     Chris Wilson <chris@chris-wilson.co.uk>    2012-11-27 20:22:52 +0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-11-29 14:43:52 +0400
commit     9d7730914f4cd496e356acfab95b41075aa8eae8 (patch)
tree       509e4ad7d5b7f32e3118fca228ce2f034403d634 /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent     45e2b5f640b3766da3eda48f6c35f088155c06f3 (diff)
download   linux-9d7730914f4cd496e356acfab95b41075aa8eae8.tar.xz
drm/i915: Preallocate next seqno before touching the ring
Based on the work by Mika Kuoppala, we realised that we need to handle seqno wraparound prior to committing our changes to the ring. The most obvious point, then, is to grab the seqno inside intel_ring_begin() and to reuse that seqno for all ring operations until the next request. As intel_ring_begin() can fail, the callers must already be prepared to handle such failure, so we can safely add further checks.

This patch looks like it should be split into the interface changes and the tweaks that move seqno wrapping from the execbuffer into the core seqno increment. However, I found no easy way to break it into incremental steps without introducing further broken behaviour.

v2: Mika found a silly mistake and a subtle error in the existing code: inside i915_gem_retire_requests() we were resetting the sync_seqno of the target ring based on the seqno from this ring, yet the two are only related by the order of their allocation, not retirement. Hence we were applying the optimisation that the rings were synchronised too early; fortunately, the only real casualty there is the handling of seqno wrapping.

v3: Do not forget to reset the sync_seqno upon module reinitialisation, e.g. on resume.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=863861
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com> [v2]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
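To make the calling pattern described above concrete, here is a small standalone user-space C model, not the driver code; fake_ring, ring_begin() and ring_get_seqno() are invented names. The seqno is reserved once when ring space is acquired, and every later step of the same request reads the cached value.

/* Standalone model of the pattern: reserve the seqno when ring space is
 * acquired, then let every later operation of the same request reuse it.
 * fake_ring, ring_begin() and ring_get_seqno() are illustrative names,
 * not the driver's API.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_ring {
	uint32_t next_seqno;	/* global allocator; 0 means "no seqno" */
	uint32_t outstanding;	/* seqno preallocated for the open request */
};

/* The single point that can fail, so callers already handle errors here. */
static int ring_begin(struct fake_ring *ring)
{
	if (ring->next_seqno == 0) {
		/* Wraparound: the real driver would idle the GPU and reset
		 * its per-ring semaphore bookkeeping at this point. */
		ring->next_seqno = 1;
	}
	ring->outstanding = ring->next_seqno++;
	return 0;
}

/* Everything between ring_begin() and the request emission reuses this. */
static uint32_t ring_get_seqno(const struct fake_ring *ring)
{
	return ring->outstanding;
}

int main(void)
{
	struct fake_ring ring = { .next_seqno = 1 };

	if (ring_begin(&ring))
		return 1;
	printf("dispatch batch with seqno %u\n", (unsigned)ring_get_seqno(&ring));
	printf("move objects to active with seqno %u\n", (unsigned)ring_get_seqno(&ring));
	return 0;
}

Because the reservation happens at the only point that can already fail, the callers gain no new error paths.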
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_execbuffer.c   30
1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 48e4317e72dc..ee8f97f0539e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -713,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *objects,
-				   struct intel_ring_buffer *ring,
-				   u32 seqno)
+				   struct intel_ring_buffer *ring)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -726,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		obj->base.write_domain = obj->base.pending_write_domain;
 		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-		i915_gem_object_move_to_active(obj, ring, seqno);
+		i915_gem_object_move_to_active(obj, ring);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->last_write_seqno = seqno;
+			obj->last_write_seqno = intel_ring_get_seqno(ring);
 			if (obj->pin_count) /* check for potential scanout */
 				intel_mark_fb_busy(obj);
 		}
@@ -789,7 +788,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
-	u32 seqno;
 	u32 mask;
 	u32 flags;
 	int ret, mode, i;
@@ -994,22 +992,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	seqno = i915_gem_next_request_seqno(ring);
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
-		if (seqno < ring->sync_seqno[i]) {
-			/* The GPU can not handle its semaphore value wrapping,
-			 * so every billion or so execbuffers, we need to stall
-			 * the GPU in order to reset the counters.
-			 */
-			ret = i915_gpu_idle(dev);
-			if (ret)
-				goto err;
-			i915_gem_retire_requests(dev);
-
-			BUG_ON(ring->sync_seqno[i]);
-		}
-	}
-
 	ret = i915_switch_context(ring, file, ctx_id);
 	if (ret)
 		goto err;
@@ -1035,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 	}
 
-	trace_i915_gem_ring_dispatch(ring, seqno, flags);
-
 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
 	exec_len = args->batch_len;
 	if (cliprects) {
@@ -1060,7 +1040,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 	}
 
-	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+	i915_gem_execbuffer_move_to_active(&objects, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring);
 
 err:
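As the commit message notes, the stall removed above is not dropped but moved from the execbuffer into the core seqno increment; that part of the patch lives outside this file and so is not shown by this diffstat. Below is a rough standalone C sketch of the idea only, with invented names (model_dev, model_get_seqno), not the driver code.

/* Standalone sketch: wraparound is detected once, in the allocator,
 * instead of being checked against ring->sync_seqno[] on every execbuffer.
 * All names here are made up for illustration.
 */
#include <stdint.h>
#include <string.h>

#define NUM_RINGS 3

struct model_ring {
	/* Last seqno seen from each other ring via semaphores; hardware
	 * cannot cope with these wrapping, so they are cleared whenever
	 * the 32-bit seqno space starts over (and on reinit/resume). */
	uint32_t sync_seqno[NUM_RINGS - 1];
};

struct model_dev {
	uint32_t next_seqno;
	struct model_ring ring[NUM_RINGS];
};

/* Stand-in for idling the GPU and retiring outstanding requests. */
static int model_gpu_idle(struct model_dev *dev)
{
	(void)dev;
	return 0;
}

static int model_get_seqno(struct model_dev *dev, uint32_t *seqno)
{
	/* 0 is reserved as "no seqno", so reaching it means we wrapped. */
	if (dev->next_seqno == 0) {
		int ret = model_gpu_idle(dev);
		if (ret)
			return ret;

		/* With nothing in flight, the semaphore bookkeeping can be
		 * reset safely before reusing the low seqno values. */
		for (int i = 0; i < NUM_RINGS; i++)
			memset(dev->ring[i].sync_seqno, 0,
			       sizeof(dev->ring[i].sync_seqno));
		dev->next_seqno = 1;
	}

	*seqno = dev->next_seqno++;
	return 0;
}

int main(void)
{
	struct model_dev dev = { .next_seqno = UINT32_MAX };
	uint32_t seqno;

	model_get_seqno(&dev, &seqno);	/* hands out UINT32_MAX */
	model_get_seqno(&dev, &seqno);	/* wraps: idle, clear, return 1 */
	return seqno == 1 ? 0 : 1;
}

The point of idling before clearing is the same one the removed BUG_ON was asserting: once the GPU is idle and requests are retired, no cross-ring waits are outstanding, so the sync_seqno[] counters can be reset safely.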