Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_request.c           | 14
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c       | 16
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h       | 10
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_engine.c  |  2
4 files changed, 7 insertions, 35 deletions
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 2cd1f51963f3..c8603a26606e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -521,10 +521,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
 	reserve_gt(i915);
 
-	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-	if (ret)
-		goto err_unreserve;
-
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
 	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
@@ -616,9 +612,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * i915_request_add() call can't fail. Note that the reserve may need
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
+	 *
+	 * Note that due to how we add reserved_space to intel_ring_begin()
+	 * we need to double our request to ensure that if we need to wrap
+	 * around inside i915_request_add() there is sufficient space at
+	 * the beginning of the ring as well.
 	 */
-	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+	rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/*
 	 * Record the position of the start of the request so that
@@ -860,8 +860,8 @@ void i915_request_add(struct i915_request *request)
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
+	GEM_BUG_ON(request->reserved_space > request->ring->space);
 	request->reserved_space = 0;
-	engine->emit_flush(request, EMIT_FLUSH);
 
 	/*
 	 * Record the position of the start of the breadcrumb so that
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc1e29305951..d773f7dd32a9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1904,22 +1904,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 	return 0;
 }
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
-	GEM_BUG_ON(bytes > ring->effective_size);
-	if (unlikely(bytes > ring->effective_size - ring->emit))
-		bytes += ring->size - ring->emit;
-
-	if (unlikely(bytes > ring->space)) {
-		int ret = wait_for_space(ring, bytes);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	GEM_BUG_ON(ring->space < bytes);
-	return 0;
-}
-
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 {
 	struct intel_ring *ring = rq->ring;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 32606d795af3..99e2cb75d29a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -754,7 +754,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
 
 static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -895,15 +894,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
 	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d0c44c18db42..50e1a0b1af7e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -148,8 +148,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	const unsigned long sz = PAGE_SIZE / 2;
 	struct mock_ring *ring;
 
-	BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
-
 	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
 	if (!ring)
 		return NULL;
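For illustration, the wrap accounting behind the new doubled reservation can be sketched outside the driver. The stand-alone C model below only mirrors the arithmetic of the removed intel_ring_wait_for_space() (a packet that does not fit before the end of the usable ring pays the tail padding and restarts at offset 0); the ring dimensions and the 36-dword breadcrumb are made-up example values, not taken from any real engine.

/* Stand-alone sketch, not driver code: model of the ring wrap accounting. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	unsigned int size;		/* total backing store in bytes */
	unsigned int effective_size;	/* size minus the always-free tail gap */
	unsigned int emit;		/* current write offset */
};

/*
 * Bytes actually consumed by an emission of 'bytes' at the current offset:
 * if it does not fit before the end of the usable ring, the tail is padded
 * out and the packet restarts at offset 0 (same arithmetic as the removed
 * intel_ring_wait_for_space()).
 */
static unsigned int bytes_consumed(const struct ring *ring, unsigned int bytes)
{
	assert(bytes <= ring->effective_size);

	if (bytes > ring->effective_size - ring->emit)
		bytes += ring->size - ring->emit;

	return bytes;
}

int main(void)
{
	/* Hypothetical 4 KiB ring with a 32-byte reserved tail, nearly full. */
	struct ring ring = {
		.size = 4096,
		.effective_size = 4096 - 32,
		.emit = 4000,
	};
	/* Hypothetical breadcrumb of 36 dwords (144 bytes). */
	unsigned int breadcrumb = 36 * sizeof(uint32_t);

	printf("single reservation:  %u bytes\n", breadcrumb);
	printf("doubled reservation: %u bytes\n", 2 * breadcrumb);
	printf("wrapped breadcrumb actually consumes: %u bytes\n",
	       bytes_consumed(&ring, breadcrumb));
	return 0;
}

With these example numbers the wrapped breadcrumb costs 96 bytes of tail padding plus 144 bytes of payload (240 bytes total), more than a single breadcrumb-sized reservation but within the doubled one — the wraparound case the new rq->reserved_space in i915_request_alloc() is guarding against.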