author     Chris Wilson <chris@chris-wilson.co.uk>      2014-01-28 02:43:07 +0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2014-02-06 20:43:13 +0400
commit     1f70999f9052f5a1b0ce1a55aff3808f2ec9fe42 (patch)
tree       180e47976121d75fb9a919b39cea60bd444cf377 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     011cf577b2531dfbd2254bd9ec147ad71471abaf (diff)
download   linux-1f70999f9052f5a1b0ce1a55aff3808f2ec9fe42.tar.xz
drm/i915: Prevent recursion by retiring requests when the ring is full
As the VMs do not track the activity of their objects and instead use a large
hammer to forcibly idle and evict all of their associated objects when one is
released, it is possible for this to recurse when we need to wait for free
space on a ring and therefore call retire-requests.
(intel_ring_begin -> intel_ring_wait_request ->
i915_gem_retire_requests_ring -> i915_gem_context_free ->
i915_gem_evict_vm -> i915_gpu_idle -> intel_ring_begin etc)
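For illustration only, here is a toy C model of that cycle; the function names
mirror the driver's but the bodies, arguments and the recursion cap are
invented for the sketch and are not the actual i915 code:

	/* Toy model of the recursion described above -- not driver code.
	 * Each function stands in for its i915 namesake and simply forwards
	 * to the next step of the cycle; a depth cap keeps the demo finite.
	 */
	#include <stdio.h>

	static void ring_begin(int depth);

	static void gpu_idle(int depth)        { ring_begin(depth); }   /* i915_gpu_idle */
	static void evict_vm(int depth)        { gpu_idle(depth); }     /* i915_gem_evict_vm */
	static void context_free(int depth)    { evict_vm(depth); }     /* i915_gem_context_free */
	static void retire_requests(int depth) { context_free(depth); } /* i915_gem_retire_requests_ring */

	static void ring_wait_request(int depth)
	{
		/* Pre-patch behaviour: the wait path retires requests itself,
		 * which can free a context and restart the whole cycle. */
		retire_requests(depth);
	}

	static void ring_begin(int depth)
	{
		printf("intel_ring_begin, depth %d\n", depth);
		if (depth < 5)	/* the real driver has no such cap */
			ring_wait_request(depth + 1);
	}

	int main(void)
	{
		ring_begin(0);
		return 0;
	}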
In order to remove the requirement for calling retire-requests from
intel_ring_wait_request, we have to inline a couple of steps from retiring
requests; notably, we record the position (tail) of the request we wait for
and use that to update the available ring space afterwards.
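As a rough self-contained sketch of that idea (the struct layout, the
8-byte slack and the helper names ring_space/wait_for_seqno are placeholders
invented for this example, not the driver's API; only the head/tail
bookkeeping follows the patch below):

	/* Sketch of the inlined retirement step: find the oldest request whose
	 * completion frees enough space, remember the ring position (tail) it
	 * was emitted at, wait for its seqno, then advance head to that tail
	 * ourselves instead of calling retire-requests.
	 */
	#include <stdint.h>
	#include <stddef.h>

	struct request {
		uint32_t seqno;	/* breadcrumb the GPU writes when the request completes */
		uint32_t tail;	/* ring offset just after the request's commands */
	};

	struct ring {
		uint32_t head, tail, size;	/* size in bytes, power of two */
		struct request reqs[16];	/* oldest first */
		size_t nreqs;
	};

	static int ring_space(const struct ring *ring)
	{
		int space = (int)(ring->head - ring->tail) - 8;	/* 8: reserved slack */

		if (space < 0)
			space += ring->size;
		return space;
	}

	/* Placeholder for the real GPU wait; assume seqno has passed on return. */
	static int wait_for_seqno(uint32_t seqno) { (void)seqno; return 0; }

	int ring_wait_for_space(struct ring *ring, int n)
	{
		uint32_t seqno = 0, tail = 0;

		for (size_t i = 0; i < ring->nreqs; i++) {
			const struct request *rq = &ring->reqs[i];
			int space = (int)(rq->tail - ring->tail) - 8;

			if (space < 0)
				space += ring->size;
			if (space >= n) {
				seqno = rq->seqno;
				tail = rq->tail;	/* record the position we need */
				break;
			}
		}
		if (seqno == 0)
			return -1;		/* no single request frees enough space */

		if (wait_for_seqno(seqno))
			return -1;

		ring->head = tail;		/* inlined: consume the space directly */
		return ring_space(ring) >= n ? 0 : -1;
	}

The key step is recording tail alongside seqno: with it, the wait path no
longer depends on retire-requests publishing last_retired_head before the
freed space can be accounted for.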
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--   drivers/gpu/drm/i915/intel_ringbuffer.c   25
1 file changed, 5 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d897a19f887f..ba686d75ff32 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1430,28 +1430,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	cleanup_status_page(ring);
 }
 
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
-{
-	int ret;
-
-	ret = i915_wait_seqno(ring, seqno);
-	if (!ret)
-		i915_gem_retire_requests_ring(ring);
-
-	return ret;
-}
-
 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0;
+	u32 seqno = 0, tail;
 	int ret;
 
-	i915_gem_retire_requests_ring(ring);
-
 	if (ring->last_retired_head != -1) {
 		ring->head = ring->last_retired_head;
 		ring->last_retired_head = -1;
+
 		ring->space = ring_space(ring);
 		if (ring->space >= n)
 			return 0;
@@ -1468,6 +1456,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 			space += ring->size;
 		if (space >= n) {
 			seqno = request->seqno;
+			tail = request->tail;
 			break;
 		}
 
@@ -1482,15 +1471,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	if (seqno == 0)
 		return -ENOSPC;
 
-	ret = intel_ring_wait_seqno(ring, seqno);
+	ret = i915_wait_seqno(ring, seqno);
 	if (ret)
 		return ret;
 
-	if (WARN_ON(ring->last_retired_head == -1))
-		return -ENOSPC;
-
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->head = tail;
 	ring->space = ring_space(ring);
 	if (WARN_ON(ring->space < n))
 		return -ENOSPC;