Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 102
1 file changed, 65 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0a2070112b66..17c5097721e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 	struct i915_vma *vma;
 	pgoff_t page_offset;
-	unsigned int flags;
 	int ret;
 
 	/* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
 		goto err_unlock;
 	}
 
-	/* If the object is smaller than a couple of partial vma, it is
-	 * not worth only creating a single partial vma - we may as well
-	 * clear enough space for the full object.
-	 */
-	flags = PIN_MAPPABLE;
-	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
-		flags |= PIN_NONBLOCK | PIN_NONFAULT;
 
 	/* Now pin it into the GTT as needed */
-	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE |
+				       PIN_NONBLOCK |
+				       PIN_NONFAULT);
 	if (IS_ERR(vma)) {
 		/* Use a partial view if it is bigger than available space */
 		struct i915_ggtt_view view =
 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+		unsigned int flags;
 
-		/* Userspace is now writing through an untracked VMA, abandon
+		flags = PIN_MAPPABLE;
+		if (view.type == I915_GGTT_VIEW_NORMAL)
+			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+		/*
+		 * Userspace is now writing through an untracked VMA, abandon
 		 * all hope that the hardware is able to track future writes.
 		 */
 		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
-		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+		if (IS_ERR(vma) && !view.type) {
+			flags = PIN_MAPPABLE;
+			view.type = I915_GGTT_VIEW_PARTIAL;
+			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+		}
 	}
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
@@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+					const struct i915_gem_context *ctx)
+{
+	unsigned int score;
+	unsigned long prev_hang;
+
+	if (i915_gem_context_is_banned(ctx))
+		score = I915_CLIENT_SCORE_CONTEXT_BAN;
+	else
+		score = 0;
+
+	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+		score += I915_CLIENT_SCORE_HANG_FAST;
+
+	if (score) {
+		atomic_add(score, &file_priv->ban_score);
+
+		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+				 ctx->name, score,
+				 atomic_read(&file_priv->ban_score));
+	}
+}
+
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-	bool banned;
+	unsigned int score;
+	bool banned, bannable;
 
 	atomic_inc(&ctx->guilty_count);
 
-	banned = false;
-	if (i915_gem_context_is_bannable(ctx)) {
-		unsigned int score;
+	bannable = i915_gem_context_is_bannable(ctx);
+	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-		score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-					  &ctx->ban_score);
-		banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+	DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+			 ctx->name, atomic_read(&ctx->guilty_count),
+			 score, yesno(banned && bannable));
 
-		DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-				 ctx->name, score, yesno(banned));
-	}
-	if (!banned)
+	/* Cool contexts don't accumulate client ban score */
+	if (!bannable)
 		return;
 
-	i915_gem_context_set_banned(ctx);
-	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-		atomic_inc(&ctx->file_priv->context_bans);
-		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
-	}
+	if (banned)
+		i915_gem_context_set_banned(ctx);
+
+	if (!IS_ERR_OR_NULL(ctx->file_priv))
+		i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -2972,23 +3000,22 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	struct i915_request *request, *active = NULL;
 	unsigned long flags;
 
-	/* We are called by the error capture and reset at a random
-	 * point in time. In particular, note that neither is crucially
-	 * ordered with an interrupt. After a hang, the GPU is dead and we
-	 * assume that no more writes can happen (we waited long enough for
-	 * all writes that were in transaction to be flushed) - adding an
+	/*
+	 * We are called by the error capture, reset and to dump engine
+	 * state at random points in time. In particular, note that neither is
+	 * crucially ordered with an interrupt. After a hang, the GPU is dead
+	 * and we assume that no more writes can happen (we waited long enough
+	 * for all writes that were in transaction to be flushed) - adding an
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
+	 * At all other times, we must assume the GPU is still running, but
+	 * we only care about the snapshot of this moment.
 	 */
 	spin_lock_irqsave(&engine->timeline.lock, flags);
 	list_for_each_entry(request, &engine->timeline.requests, link) {
 		if (__i915_request_completed(request, request->global_seqno))
 			continue;
 
-		GEM_BUG_ON(request->engine != engine);
-		GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-				    &request->fence.flags));
-
 		active = request;
 		break;
 	}
@@ -5737,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
 	file_priv->bsd_engine = -1;
+	file_priv->hang_timestamp = jiffies;
 
 	ret = i915_gem_context_open(i915, file);
 	if (ret)