Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_context.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_context.c | 1097
1 file changed, 771 insertions(+), 326 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82..dd728b26b5aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,12 +88,32 @@
 #include <linux/log2.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_globals.h"
 #include "i915_trace.h"
+#include "i915_user_extensions.h"
 #include "intel_lrc_reg.h"
 #include "intel_workarounds.h"
 
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
+#define I915_CONTEXT_PARAM_VM 0x9
+
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
+static struct i915_global_gem_context {
+	struct i915_global base;
+	struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+	return kmem_cache_free(global.slab_luts, lut);
+}
+
 static void lut_close(struct i915_gem_context *ctx)
 {
 	struct i915_lut_handle *lut, *ln;
@@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx)
 	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
 		list_del(&lut->obj_link);
-		kmem_cache_free(ctx->i915->luts, lut);
+		i915_lut_handle_free(lut);
 	}
+	INIT_LIST_HEAD(&ctx->handles_list);
 
 	rcu_read_lock();
 	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
 		struct i915_vma *vma = rcu_dereference_raw(*slot);
 
 		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+
+		vma->open_count--;
 		__i915_gem_object_release_unless_active(vma->obj);
 	}
 	rcu_read_unlock();
@@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	unsigned int n;
+	struct intel_context *it, *n;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+	GEM_BUG_ON(!list_empty(&ctx->active_engines));
 
 	release_hw_id(ctx);
 	i915_ppgtt_put(ctx->ppgtt);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
-		struct intel_context *ce = &ctx->__engine[n];
+	rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+		intel_context_put(it);
 
-		if (ce->ops)
-			ce->ops->destroy(ce);
-	}
+	if (ctx->timeline)
+		i915_timeline_put(ctx->timeline);
 
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
 	list_del(&ctx->link);
+	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
 }
@@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx)
 	 * the ppgtt).
 	 */
 	lut_close(ctx);
-	if (ctx->ppgtt)
-		i915_ppgtt_close(&ctx->ppgtt->vm);
 
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_put(ctx);
@@ -307,7 +329,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
 
 	address_mode = INTEL_LEGACY_32B_CONTEXT;
-	if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+	if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
 		address_mode = INTEL_LEGACY_64B_CONTEXT;
 	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
@@ -322,134 +344,115 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
 	return desc;
 }
 
-static void intel_context_retire(struct i915_active_request *active,
-				 struct i915_request *rq)
-{
-	struct intel_context *ce =
-		container_of(active, typeof(*ce), active_tracker);
-
-	intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
-		   struct intel_engine_cs *engine)
-{
-	ce->gem_context = ctx;
-
-	INIT_LIST_HEAD(&ce->signal_link);
-	INIT_LIST_HEAD(&ce->signals);
-
-	/* Use the whole device by default */
-	ce->sseu = intel_device_default_sseu(ctx->i915);
-
-	i915_active_request_init(&ce->active_tracker,
-				 NULL, intel_context_retire);
-}
-
 static struct i915_gem_context *
-__create_hw_context(struct drm_i915_private *dev_priv,
-		    struct drm_i915_file_private *file_priv)
+__create_context(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
-	unsigned int n;
-	int ret;
+	int i;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (ctx == NULL)
+	if (!ctx)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+	INIT_LIST_HEAD(&ctx->active_engines);
+	mutex_init(&ctx->mutex);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
-		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+	ctx->hw_contexts = RB_ROOT;
+	spin_lock_init(&ctx->hw_contexts_lock);
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 	INIT_LIST_HEAD(&ctx->hw_id_link);
 
-	/* Default context will never have a file_priv */
-	ret = DEFAULT_CONTEXT_HANDLE;
-	if (file_priv) {
-		ret = idr_alloc(&file_priv->context_idr, ctx,
-				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
-		if (ret < 0)
-			goto err_lut;
-	}
-	ctx->user_handle = ret;
-
-	ctx->file_priv = file_priv;
-	if (file_priv) {
-		ctx->pid = get_task_pid(current, PIDTYPE_PID);
-		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
-				      current->comm,
-				      pid_nr(ctx->pid),
-				      ctx->user_handle);
-		if (!ctx->name) {
-			ret = -ENOMEM;
-			goto err_pid;
-		}
-	}
-
 	/* NB: Mark all slices as needing a remap so that when the context first
 	 * loads it will restore whatever remap state already exists. If there
 	 * is no remap info, it will be a NOP. */
 	ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
 	i915_gem_context_set_bannable(ctx);
+	i915_gem_context_set_recoverable(ctx);
+
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
 
+	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
 	return ctx;
+}
 
-err_pid:
-	put_pid(ctx->pid);
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-err_lut:
-	context_close(ctx);
-	return ERR_PTR(ret);
+static struct i915_hw_ppgtt *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+{
+	struct i915_hw_ppgtt *old = ctx->ppgtt;
+
+	ctx->ppgtt = i915_ppgtt_get(ppgtt);
+	ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+
+	return old;
 }
 
-static void __destroy_hw_context(struct i915_gem_context *ctx,
-				 struct drm_i915_file_private *file_priv)
+static void __assign_ppgtt(struct i915_gem_context *ctx,
+			   struct i915_hw_ppgtt *ppgtt)
 {
-	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	context_close(ctx);
+	if (ppgtt == ctx->ppgtt)
+		return;
+
+	ppgtt = __set_ppgtt(ctx, ppgtt);
+	if (ppgtt)
+		i915_ppgtt_put(ppgtt);
 }
 
 static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv,
-			struct drm_i915_file_private *file_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 {
 	struct i915_gem_context *ctx;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+	BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
+		     ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
+	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
+	    !HAS_EXECLISTS(dev_priv))
+		return ERR_PTR(-EINVAL);
+
 	/* Reap the most stale context */
 	contexts_free_first(dev_priv);
 
-	ctx = __create_hw_context(dev_priv, file_priv);
+	ctx = __create_context(dev_priv);
 	if (IS_ERR(ctx))
 		return ctx;
 
 	if (HAS_FULL_PPGTT(dev_priv)) {
 		struct i915_hw_ppgtt *ppgtt;
 
-		ppgtt = i915_ppgtt_create(dev_priv, file_priv);
+		ppgtt = i915_ppgtt_create(dev_priv);
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
-			__destroy_hw_context(ctx, file_priv);
+			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
 
-		ctx->ppgtt = ppgtt;
-		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
+		__assign_ppgtt(ctx, ppgtt);
+		i915_ppgtt_put(ppgtt);
+	}
+
+	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+		struct i915_timeline *timeline;
+
+		timeline = i915_timeline_create(dev_priv, NULL);
+		if (IS_ERR(timeline)) {
+			context_close(ctx);
+			return ERR_CAST(timeline);
+		}
+
+		ctx->timeline = timeline;
 	}
 
 	trace_i915_context_create(ctx);
@@ -480,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (ret)
 		return ERR_PTR(ret);
 
-	ctx = i915_gem_create_context(to_i915(dev), NULL);
+	ctx = i915_gem_create_context(to_i915(dev), 0);
 	if (IS_ERR(ctx))
 		goto out;
 
+	ret = i915_gem_context_pin_hw_id(ctx);
+	if (ret) {
+		context_close(ctx);
+		ctx = ERR_PTR(ret);
+		goto out;
+	}
+
 	ctx->file_priv = ERR_PTR(-EBADF);
 	i915_gem_context_set_closed(ctx); /* not user accessible */
 	i915_gem_context_clear_bannable(ctx);
@@ -516,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	struct i915_gem_context *ctx;
 	int err;
 
-	ctx = i915_gem_create_context(i915, NULL);
+	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -552,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
 
 static bool needs_preempt_context(struct drm_i915_private *i915)
 {
-	return HAS_LOGICAL_RING_PREEMPTION(i915);
+	return HAS_EXECLISTS(i915);
 }
 
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
@@ -563,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(dev_priv->kernel_context);
 	GEM_BUG_ON(dev_priv->preempt_context);
 
-	intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
+	intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
 	init_contexts(dev_priv);
 
 	/* lowest priority; idle task */
@@ -624,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-	struct i915_gem_context *ctx = p;
+	context_close(p);
+	return 0;
+}
 
-	context_close(ctx);
+static int vm_idr_cleanup(int id, void *p, void *data)
+{
+	i915_ppgtt_put(p);
 	return 0;
 }
 
+static int gem_context_register(struct i915_gem_context *ctx,
+				struct drm_i915_file_private *fpriv)
+{
+	int ret;
+
+	ctx->file_priv = fpriv;
+	if (ctx->ppgtt)
+		ctx->ppgtt->vm.file = fpriv;
+
+	ctx->pid = get_task_pid(current, PIDTYPE_PID);
+	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
+			      current->comm, pid_nr(ctx->pid));
+	if (!ctx->name) {
+		ret = -ENOMEM;
+		goto err_pid;
+	}
+
+	/* And finally expose ourselves to userspace via the idr */
+	mutex_lock(&fpriv->context_idr_lock);
+	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
+	mutex_unlock(&fpriv->context_idr_lock);
+	if (ret >= 0)
+		goto out;
+
+	kfree(fetch_and_zero(&ctx->name));
+err_pid:
+	put_pid(fetch_and_zero(&ctx->pid));
+out:
+	return ret;
+}
+
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct i915_gem_context *ctx;
+	int err;
+
+	mutex_init(&file_priv->context_idr_lock);
+	mutex_init(&file_priv->vm_idr_lock);
 
 	idr_init(&file_priv->context_idr);
+	idr_init_base(&file_priv->vm_idr, 1);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ctx = i915_gem_create_context(i915, file_priv);
+	ctx = i915_gem_create_context(i915, 0);
 	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
-		idr_destroy(&file_priv->context_idr);
-		return PTR_ERR(ctx);
+		err = PTR_ERR(ctx);
+		goto err;
 	}
 
+	err = gem_context_register(ctx, file_priv);
+	if (err < 0)
+		goto err_ctx;
+
 	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+	GEM_BUG_ON(err > 0);
 
 	return 0;
+
+err_ctx:
+	mutex_lock(&i915->drm.struct_mutex);
+	context_close(ctx);
+	mutex_unlock(&i915->drm.struct_mutex);
+err:
+	idr_destroy(&file_priv->vm_idr);
+	idr_destroy(&file_priv->context_idr);
+	mutex_destroy(&file_priv->vm_idr_lock);
+	mutex_destroy(&file_priv->context_idr_lock);
	return err;
 }
 
 void i915_gem_context_close(struct drm_file *file)
@@ -659,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file)
 
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
+	mutex_destroy(&file_priv->context_idr_lock);
+
+	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
+	idr_destroy(&file_priv->vm_idr);
+	mutex_destroy(&file_priv->vm_idr_lock);
+}
+
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_vm_control *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_hw_ppgtt *ppgtt;
+	int err;
+
+	if (!HAS_FULL_PPGTT(i915))
+		return -ENODEV;
+
+	if (args->flags)
+		return -EINVAL;
+
+	ppgtt = i915_ppgtt_create(i915);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
+
+	ppgtt->vm.file = file_priv;
+
+	if (args->extensions) {
+		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+					   NULL, 0,
+					   ppgtt);
+		if (err)
+			goto err_put;
+	}
+
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		goto err_put;
+
+	err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+	if (err < 0)
+		goto err_unlock;
+
+	GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
+	ppgtt->user_handle = err;
+
+	mutex_unlock(&file_priv->vm_idr_lock);
+
+	args->vm_id = err;
+	return 0;
+
+err_unlock:
+	mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+	i915_ppgtt_put(ppgtt);
+	return err;
+}
+
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_vm_control *args = data;
+	struct i915_hw_ppgtt *ppgtt;
+	int err;
+	u32 id;
+
+	if (args->flags)
+		return -EINVAL;
+
+	if (args->extensions)
+		return -EINVAL;
+
+	id = args->vm_id;
+	if (!id)
+		return -ENOENT;
+
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		return err;
+
+	ppgtt = idr_remove(&file_priv->vm_idr, id);
+	if (ppgtt) {
+		GEM_BUG_ON(ppgtt->user_handle != id);
+		ppgtt->user_handle = 0;
+	}
+
+	mutex_unlock(&file_priv->vm_idr_lock);
+	if (!ppgtt)
+		return -ENOENT;
+
+	i915_ppgtt_put(ppgtt);
+	return 0;
 }
 
 static struct i915_request *
@@ -671,10 +831,9 @@ last_request_on_engine(struct i915_timeline *timeline,
 
 	rq = i915_active_request_raw(&timeline->last_request,
 				     &engine->i915->drm.struct_mutex);
-	if (rq && rq->engine == engine) {
-		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
-			  timeline->name, engine->name,
-			  rq->fence.context, rq->fence.seqno);
+	if (rq && rq->engine->mask & engine->mask) {
+		GEM_TRACE("last request on engine %s: %llx:%llu\n",
+			  engine->name, rq->fence.context, rq->fence.seqno);
 		GEM_BUG_ON(rq->timeline != timeline);
 		return rq;
 	}
@@ -682,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline,
 	return NULL;
 }
 
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
+struct context_barrier_task {
+	struct i915_active base;
+	void (*task)(void *data);
+	void *data;
+};
+
+static void cb_retire(struct i915_active *base)
+{
+	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
+
+	if (cb->task)
+		cb->task(cb->data);
+
+	i915_active_fini(&cb->base);
+	kfree(cb);
+}
+
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
+static int context_barrier_task(struct i915_gem_context *ctx,
+				intel_engine_mask_t engines,
+				int (*emit)(struct i915_request *rq, void *data),
+				void (*task)(void *data),
+				void *data)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	const struct intel_context * const ce =
-		to_intel_context(i915->kernel_context, engine);
-	struct i915_timeline *barrier = ce->ring->timeline;
-	struct intel_ring *ring;
-	bool any_active = false;
+	struct drm_i915_private *i915 = ctx->i915;
+	struct context_barrier_task *cb;
+	struct intel_context *ce, *next;
+	intel_wakeref_t wakeref;
+	int err = 0;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
-	list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
-		struct i915_request *rq;
+	GEM_BUG_ON(!task);
 
-		rq = last_request_on_engine(ring->timeline, engine);
-		if (!rq)
-			continue;
+	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
 
-		any_active = true;
+	i915_active_init(i915, &cb->base, cb_retire);
+	i915_active_acquire(&cb->base);
 
-		if (rq->hw_context == ce)
+	wakeref = intel_runtime_pm_get(i915);
+	rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
+		struct intel_engine_cs *engine = ce->engine;
+		struct i915_request *rq;
+
+		if (!(engine->mask & engines))
 			continue;
 
-		/*
-		 * Was this request submitted after the previous
-		 * switch-to-kernel-context?
-		 */
-		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-			GEM_TRACE("%s needs barrier for %llx:%lld\n",
-				  ring->timeline->name,
-				  rq->fence.context,
-				  rq->fence.seqno);
-			return false;
+		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
+				       engine->mask)) {
+			err = -ENXIO;
+			break;
+		}
+
+		rq = i915_request_alloc(engine, ctx);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
 		}
 
-		GEM_TRACE("%s has barrier after %llx:%lld\n",
-			  ring->timeline->name,
-			  rq->fence.context,
-			  rq->fence.seqno);
+		err = 0;
+		if (emit)
+			err = emit(rq, data);
+		if (err == 0)
+			err = i915_active_ref(&cb->base, rq->fence.context, rq);
+
+		i915_request_add(rq);
+		if (err)
+			break;
 	}
+	intel_runtime_pm_put(i915, wakeref);
 
-	/*
-	 * If any other timeline was still active and behind the last barrier,
-	 * then our last switch-to-kernel-context must still be queued and
-	 * will run last (leaving the engine in the kernel context when it
-	 * eventually idles).
-	 */
-	if (any_active)
-		return true;
+	cb->task = err ? NULL : task; /* caller needs to unwind instead */
+	cb->data = data;
 
-	/* The engine is idle; check that it is idling in the kernel context. */
-	return engine->last_retired_context == ce;
+	i915_active_release(&cb->base);
+
+	return err;
 }
 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+				      intel_engine_mask_t mask)
 {
 	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 
 	GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915->kernel_context);
 
-	i915_retire_requests(i915);
+	/* Inoperable, so presume the GPU is safely pointing into the void! */
+	if (i915_terminally_wedged(i915))
+		return 0;
 
-	for_each_engine(engine, i915, id) {
+	for_each_engine_masked(engine, i915, mask, mask) {
 		struct intel_ring *ring;
 		struct i915_request *rq;
 
-		GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
-		if (engine_has_kernel_context_barrier(engine))
-			continue;
-
-		GEM_TRACE("emit barrier on %s\n", engine->name);
-
 		rq = i915_request_alloc(engine, i915->kernel_context);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
@@ -779,7 +961,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 			i915_sw_fence_await_sw_fence_gfp(&rq->submit,
 							 &prev->submit,
 							 I915_FENCE_GFP);
-			i915_timeline_sync_set(rq->timeline, &prev->fence);
 		}
 
 		i915_request_add(rq);
@@ -788,183 +969,173 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 	return 0;
 }
 
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
+static int get_ppgtt(struct drm_i915_file_private *file_priv,
+		     struct i915_gem_context *ctx,
+		     struct drm_i915_gem_context_param *args)
 {
-	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
-				  struct drm_file *file)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_context_create *args = data;
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_gem_context *ctx;
+	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
+	return -EINVAL; /* nothing to see here; please move along */
+
+	if (!ctx->ppgtt)
 		return -ENODEV;
 
-	if (args->pad != 0)
-		return -EINVAL;
+	/* XXX rcu acquire? */
+	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	if (ret)
+		return ret;
 
-	if (client_is_banned(file_priv)) {
-		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-			  current->comm,
-			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
+	ppgtt = i915_ppgtt_get(ctx->ppgtt);
+	mutex_unlock(&ctx->i915->drm.struct_mutex);
 
-		return -EIO;
+	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (ret)
+		goto err_put;
+
+	if (!ppgtt->user_handle) {
+		ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+		GEM_BUG_ON(!ret);
+		if (ret < 0)
+			goto err_unlock;
+
+		ppgtt->user_handle = ret;
+		i915_ppgtt_get(ppgtt);
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
+	args->size = 0;
+	args->value = ppgtt->user_handle;
 
-	ctx = i915_gem_create_context(dev_priv, file_priv);
-	mutex_unlock(&dev->struct_mutex);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ret = 0;
+err_unlock:
+	mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+	i915_ppgtt_put(ppgtt);
+	return ret;
+}
 
-	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+static void set_ppgtt_barrier(void *data)
+{
+	struct i915_hw_ppgtt *old = data;
 
-	args->ctx_id = ctx->user_handle;
-	DRM_DEBUG("HW context %d created\n", args->ctx_id);
+	if (INTEL_GEN(old->vm.i915) < 8)
+		gen6_ppgtt_unpin_all(old);
 
-	return 0;
+	i915_ppgtt_put(old);
 }
 
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
-				   struct drm_file *file)
+static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-	struct drm_i915_gem_context_destroy *args = data;
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct i915_gem_context *ctx;
-	int ret;
+	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
	struct intel_engine_cs *engine = rq->engine;
+	u32 base = engine->mmio_base;
+	u32 *cs;
+	int i;
 
-	if (args->pad != 0)
-		return -EINVAL;
+	if (i915_vm_is_4lvl(&ppgtt->vm)) {
+		const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
 
-	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
-		return -ENOENT;
+		cs = intel_ring_begin(rq, 6);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
 
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (!ctx)
-		return -ENOENT;
+		*cs++ = MI_LOAD_REGISTER_IMM(2);
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out;
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+		*cs++ = lower_32_bits(pd_daddr);
 
-	__destroy_hw_context(ctx, file_priv);
-	mutex_unlock(&dev->struct_mutex);
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+		for (i = GEN8_3LVL_PDPES; i--; ) {
+			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+			*cs++ = upper_32_bits(pd_daddr);
+			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+			*cs++ = lower_32_bits(pd_daddr);
+		}
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+	} else {
+		/* ppGTT is not part of the legacy context image */
+		gen6_ppgtt_pin(ppgtt);
+	}
 
-out:
-	i915_gem_context_put(ctx);
 	return 0;
 }
 
-static int get_sseu(struct i915_gem_context *ctx,
-		    struct drm_i915_gem_context_param *args)
+static int set_ppgtt(struct drm_i915_file_private *file_priv,
+		     struct i915_gem_context *ctx,
+		     struct drm_i915_gem_context_param *args)
 {
-	struct drm_i915_gem_context_param_sseu user_sseu;
-	struct intel_engine_cs *engine;
-	struct intel_context *ce;
-	int ret;
-
-	if (args->size == 0)
-		goto out;
-	else if (args->size < sizeof(user_sseu))
-		return -EINVAL;
-
-	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
-			   sizeof(user_sseu)))
-		return -EFAULT;
+	struct i915_hw_ppgtt *ppgtt, *old;
+	int err;
 
-	if (user_sseu.flags || user_sseu.rsvd)
-		return -EINVAL;
+	return -EINVAL; /* nothing to see here; please move along */
 
-	engine = intel_engine_lookup_user(ctx->i915,
-					  user_sseu.engine_class,
-					  user_sseu.engine_instance);
-	if (!engine)
+	if (args->size)
 		return -EINVAL;
 
-	/* Only use for mutex here is to serialize get_param and set_param. */
-	ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
-	if (ret)
-		return ret;
-
-	ce = to_intel_context(ctx, engine);
-
-	user_sseu.slice_mask = ce->sseu.slice_mask;
-	user_sseu.subslice_mask = ce->sseu.subslice_mask;
-	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
-	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+	if (!ctx->ppgtt)
+		return -ENODEV;
 
-	mutex_unlock(&ctx->i915->drm.struct_mutex);
+	if (upper_32_bits(args->value))
+		return -ENOENT;
 
-	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
-			 sizeof(user_sseu)))
-		return -EFAULT;
+	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	if (err)
+		return err;
 
-out:
-	args->size = sizeof(user_sseu);
+	ppgtt = idr_find(&file_priv->vm_idr, args->value);
+	if (ppgtt) {
+		GEM_BUG_ON(ppgtt->user_handle != args->value);
+		i915_ppgtt_get(ppgtt);
+	}
+	mutex_unlock(&file_priv->vm_idr_lock);
+	if (!ppgtt)
+		return -ENOENT;
 
-	return 0;
-}
+	err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+	if (err)
+		goto out;
 
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file)
-{
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_gem_context_param *args = data;
-	struct i915_gem_context *ctx;
-	int ret = 0;
+	if (ppgtt == ctx->ppgtt)
+		goto unlock;
 
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (!ctx)
-		return -ENOENT;
+	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+	lut_close(ctx);
 
-	switch (args->param) {
-	case I915_CONTEXT_PARAM_BAN_PERIOD:
-		ret = -EINVAL;
-		break;
-	case I915_CONTEXT_PARAM_NO_ZEROMAP:
-		args->size = 0;
-		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
-		break;
-	case I915_CONTEXT_PARAM_GTT_SIZE:
-		args->size = 0;
+	old = __set_ppgtt(ctx, ppgtt);
 
-		if (ctx->ppgtt)
-			args->value = ctx->ppgtt->vm.total;
-		else if (to_i915(dev)->mm.aliasing_ppgtt)
-			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
-		else
-			args->value = to_i915(dev)->ggtt.vm.total;
-		break;
-	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
-		args->size = 0;
-		args->value = i915_gem_context_no_error_capture(ctx);
-		break;
-	case I915_CONTEXT_PARAM_BANNABLE:
-		args->size = 0;
-		args->value = i915_gem_context_is_bannable(ctx);
-		break;
-	case I915_CONTEXT_PARAM_PRIORITY:
-		args->size = 0;
-		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
-		break;
-	case I915_CONTEXT_PARAM_SSEU:
-		ret = get_sseu(ctx, args);
-		break;
-	default:
-		ret = -EINVAL;
-		break;
+	/*
+	 * We need to flush any requests using the current ppgtt before
+	 * we release it as the requests do not hold a reference themselves,
+	 * only indirectly through the context.
+	 */
+	err = context_barrier_task(ctx, ALL_ENGINES,
+				   emit_ppgtt_update,
+				   set_ppgtt_barrier,
+				   old);
+	if (err) {
+		ctx->ppgtt = old;
+		ctx->desc_template = default_desc_template(ctx->i915, old);
+		i915_ppgtt_put(ppgtt);
 	}
 
-	i915_gem_context_put(ctx);
-	return ret;
+unlock:
+	mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+out:
+	i915_ppgtt_put(ppgtt);
+	return err;
 }
 
 static int gen8_emit_rpcs_config(struct i915_request *rq,
@@ -993,39 +1164,35 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
 }
 
 static int
-gen8_modify_rpcs_gpu(struct intel_context *ce,
-		     struct intel_engine_cs *engine,
-		     struct intel_sseu sseu)
+gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	struct i915_request *rq, *prev;
+	struct drm_i915_private *i915 = ce->engine->i915;
+	struct i915_request *rq;
 	intel_wakeref_t wakeref;
 	int ret;
 
-	GEM_BUG_ON(!ce->pin_count);
+	lockdep_assert_held(&ce->pin_mutex);
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
+	/*
+	 * If the context is not idle, we have to submit an ordered request to
+	 * modify its context image via the kernel context (writing to our own
+	 * image, or into the registers directory, does not stick). Pristine
+	 * and idle contexts will be configured on pinning.
+	 */
+	if (!intel_context_is_pinned(ce))
+		return 0;
 
 	/* Submitting requests etc needs the hw awake. */
 	wakeref = intel_runtime_pm_get(i915);
 
-	rq = i915_request_alloc(engine, i915->kernel_context);
+	rq = i915_request_alloc(ce->engine, i915->kernel_context);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
 		goto out_put;
 	}
 
 	/* Queue this switch after all other activity by this context. */
-	prev = i915_active_request_raw(&ce->ring->timeline->last_request,
-				       &i915->drm.struct_mutex);
-	if (prev && !i915_request_completed(prev)) {
-		ret = i915_request_await_dma_fence(rq, &prev->fence);
-		if (ret < 0)
-			goto out_add;
-	}
-
-	/* Order all following requests to be after. */
-	ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+	ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
 	if (ret)
 		goto out_add;
 
@@ -1057,27 +1224,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
 				    struct intel_engine_cs *engine,
 				    struct intel_sseu sseu)
 {
-	struct intel_context *ce = to_intel_context(ctx, engine);
+	struct intel_context *ce;
 	int ret = 0;
 
 	GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
-	GEM_BUG_ON(engine->id != RCS);
+	GEM_BUG_ON(engine->id != RCS0);
+
+	ce = intel_context_pin_lock(ctx, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	/* Nothing to do if unmodified. */
 	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
-		return 0;
-
-	/*
-	 * If context is not idle we have to submit an ordered request to modify
-	 * its context image via the kernel context. Pristine and idle contexts
-	 * will be configured on pinning.
-	 */
-	if (ce->pin_count)
-		ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+		goto unlock;
 
+	ret = gen8_modify_rpcs(ce, sseu);
 	if (!ret)
 		ce->sseu = sseu;
 
+unlock:
+	intel_context_pin_unlock(ce);
 	return ret;
 }
 
@@ -1220,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
 		return -EINVAL;
 
 	engine = intel_engine_lookup_user(i915,
-					  user_sseu.engine_class,
-					  user_sseu.engine_instance);
+					  user_sseu.engine.engine_class,
+					  user_sseu.engine.engine_instance);
 	if (!engine)
 		return -EINVAL;
 
@@ -1242,22 +1408,13 @@ static int set_sseu(struct i915_gem_context *ctx,
 	return 0;
 }
 
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
-				    struct drm_file *file)
+static int ctx_setparam(struct drm_i915_file_private *fpriv,
+			struct i915_gem_context *ctx,
+			struct drm_i915_gem_context_param *args)
 {
-	struct drm_i915_file_private *file_priv = file->driver_priv;
-	struct drm_i915_gem_context_param *args = data;
-	struct i915_gem_context *ctx;
 	int ret = 0;
 
-	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-	if (!ctx)
-		return -ENOENT;
-
 	switch (args->param) {
-	case I915_CONTEXT_PARAM_BAN_PERIOD:
-		ret = -EINVAL;
-		break;
 	case I915_CONTEXT_PARAM_NO_ZEROMAP:
 		if (args->size)
 			ret = -EINVAL;
@@ -1266,6 +1423,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
 		break;
+
 	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
 		if (args->size)
 			ret = -EINVAL;
@@ -1274,6 +1432,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 		else
 			i915_gem_context_clear_no_error_capture(ctx);
 		break;
+
 	case I915_CONTEXT_PARAM_BANNABLE:
 		if (args->size)
 			ret = -EINVAL;
@@ -1285,13 +1444,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 			i915_gem_context_clear_bannable(ctx);
 		break;
 
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		if (args->size)
+			ret = -EINVAL;
+		else if (args->value)
+			i915_gem_context_set_recoverable(ctx);
+		else
+			i915_gem_context_clear_recoverable(ctx);
+		break;
+
 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
 			s64 priority = args->value;
 
 			if (args->size)
 				ret = -EINVAL;
-			else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+			else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
 				ret = -ENODEV;
 			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
 				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
@@ -1304,14 +1472,266 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				I915_USER_PRIORITY(priority);
 		}
 		break;
+
 	case I915_CONTEXT_PARAM_SSEU:
 		ret = set_sseu(ctx, args);
 		break;
+
+	case I915_CONTEXT_PARAM_VM:
+		ret = set_ppgtt(fpriv, ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
+	return ret;
+}
+
+struct create_ext {
+	struct i915_gem_context *ctx;
+	struct drm_i915_file_private *fpriv;
+};
+
+static int create_setparam(struct i915_user_extension __user *ext, void *data)
+{
+	struct drm_i915_gem_context_create_ext_setparam local;
+	const struct create_ext *arg = data;
+
+	if (copy_from_user(&local, ext, sizeof(local)))
+		return -EFAULT;
+
+	if (local.param.ctx_id)
+		return -EINVAL;
+
+	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_context_create_ext *args = data;
+	struct create_ext ext_data;
+	int ret;
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return -ENODEV;
+
+	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
+		return -EINVAL;
+
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
+	ext_data.fpriv = file->driver_priv;
+	if (client_is_banned(ext_data.fpriv)) {
+		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+			  current->comm,
+			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
+		return -EIO;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ext_data.ctx = i915_gem_create_context(i915, args->flags);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR(ext_data.ctx))
+		return PTR_ERR(ext_data.ctx);
+
+	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
+		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+					   create_extensions,
+					   ARRAY_SIZE(create_extensions),
+					   &ext_data);
+		if (ret)
+			goto err_ctx;
+	}
+
+	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+	if (ret < 0)
+		goto err_ctx;
+
+	args->ctx_id = ret;
+	DRM_DEBUG("HW context %d created\n", args->ctx_id);
+
+	return 0;
+
+err_ctx:
+	mutex_lock(&dev->struct_mutex);
+	context_close(ext_data.ctx);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+				   struct drm_file *file)
+{
+	struct drm_i915_gem_context_destroy *args = data;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct i915_gem_context *ctx;
+
+	if (args->pad != 0)
+		return -EINVAL;
+
+	if (!args->ctx_id)
+		return -ENOENT;
+
+	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+		return -EINTR;
+
+	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+	mutex_unlock(&file_priv->context_idr_lock);
+	if (!ctx)
+		return -ENOENT;
+
+	mutex_lock(&dev->struct_mutex);
+	context_close(ctx);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int get_sseu(struct i915_gem_context *ctx,
+		    struct drm_i915_gem_context_param *args)
+{
+	struct drm_i915_gem_context_param_sseu user_sseu;
+	struct intel_engine_cs *engine;
+	struct intel_context *ce;
+
+	if (args->size == 0)
+		goto out;
+	else if (args->size < sizeof(user_sseu))
+		return -EINVAL;
+
+	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+			   sizeof(user_sseu)))
+		return -EFAULT;
+
+	if (user_sseu.flags || user_sseu.rsvd)
+		return -EINVAL;
+
+	engine = intel_engine_lookup_user(ctx->i915,
+					  user_sseu.engine.engine_class,
+					  user_sseu.engine.engine_instance);
+	if (!engine)
+		return -EINVAL;
+
+	ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	user_sseu.slice_mask = ce->sseu.slice_mask;
+	user_sseu.subslice_mask = ce->sseu.subslice_mask;
+	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+	intel_context_pin_unlock(ce);
+
+	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+			 sizeof(user_sseu)))
+		return -EFAULT;
+
+out:
+	args->size = sizeof(user_sseu);
+
+	return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct i915_gem_context *ctx;
+	int ret = 0;
+
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+	if (!ctx)
+		return -ENOENT;
+
+	switch (args->param) {
+	case I915_CONTEXT_PARAM_NO_ZEROMAP:
+		args->size = 0;
+		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+		break;
+
+	case I915_CONTEXT_PARAM_GTT_SIZE:
+		args->size = 0;
+		if (ctx->ppgtt)
+			args->value = ctx->ppgtt->vm.total;
+		else if (to_i915(dev)->mm.aliasing_ppgtt)
+			args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+		else
+			args->value = to_i915(dev)->ggtt.vm.total;
+		break;
+
+	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+		args->size = 0;
+		args->value = i915_gem_context_no_error_capture(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_BANNABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_bannable(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_RECOVERABLE:
+		args->size = 0;
+		args->value = i915_gem_context_is_recoverable(ctx);
+		break;
+
+	case I915_CONTEXT_PARAM_PRIORITY:
+		args->size = 0;
+		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+		break;
+
+	case I915_CONTEXT_PARAM_SSEU:
+		ret = get_sseu(ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_VM:
+		ret = get_ppgtt(file_priv, ctx, args);
+		break;
+
+	case I915_CONTEXT_PARAM_BAN_PERIOD:
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	i915_gem_context_put(ctx);
+	return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_context_param *args = data;
+	struct i915_gem_context *ctx;
+	int ret;
+
+	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+	if (!ctx)
+		return -ENOENT;
+
+	ret = ctx_setparam(file_priv, ctx, args);
+
 	i915_gem_context_put(ctx);
 	return ret;
 }
@@ -1385,3 +1805,28 @@ out_unlock:
 #include "selftests/mock_context.c"
 #include "selftests/i915_gem_context.c"
 #endif
+
+static void i915_global_gem_context_shrink(void)
+{
+	kmem_cache_shrink(global.slab_luts);
+}
+
+static void i915_global_gem_context_exit(void)
+{
+	kmem_cache_destroy(global.slab_luts);
+}
+
+static struct i915_global_gem_context global = { {
+	.shrink = i915_global_gem_context_shrink,
+	.exit = i915_global_gem_context_exit,
+} };
+
+int __init i915_global_gem_context_init(void)
+{
+	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+	if (!global.slab_luts)
+		return -ENOMEM;
+
+	i915_global_register(&global.base);
+	return 0;
+}
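
For context, the sketch below shows how userspace is expected to exercise the VM uAPI that this diff introduces (i915_gem_vm_create_ioctl/i915_gem_vm_destroy_ioctl plus the I915_CONTEXT_PARAM_VM context parameter). It is not part of the patch: it assumes the DRM_IOCTL_I915_GEM_VM_CREATE, DRM_IOCTL_I915_GEM_VM_DESTROY and DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT plumbing added elsewhere in this series, and note that in this snapshot set_ppgtt()/get_ppgtt() still short-circuit with -EINVAL ("nothing to see here"), so the I915_CONTEXT_PARAM_VM hook only becomes functional once a later patch removes that guard. Error handling is omitted for brevity.

/*
 * Userspace sketch (hypothetical helper, not from this patch):
 * create a ppGTT with VM_CREATE, bind it to a new context via the
 * SETPARAM create-extension, then drop our handle. The context keeps
 * its own reference to the VM, so VM_DESTROY here only releases the
 * file's idr handle, mirroring i915_gem_vm_destroy_ioctl() above.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static uint32_t context_with_own_vm(int fd)
{
	struct drm_i915_gem_vm_control vm;
	struct drm_i915_gem_context_create_ext_setparam setparam;
	struct drm_i915_gem_context_create_ext create;

	memset(&vm, 0, sizeof(vm));
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm); /* vm.vm_id > 0 on success */

	memset(&setparam, 0, sizeof(setparam));
	setparam.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM;
	setparam.param.param = I915_CONTEXT_PARAM_VM; /* param.ctx_id must stay 0 */
	setparam.param.value = vm.vm_id;

	memset(&create, 0, sizeof(create));
	create.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS;
	create.extensions = (uintptr_t)&setparam;
	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);

	/* drop our idr handle; the context still holds the ppGTT */
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);

	return create.ctx_id;
}

The design point this illustrates: the vm_id handle lives in a per-file idr (vm_idr) separate from the context idr, so several contexts can share one ppGTT by passing the same vm_id, and teardown of the handle is decoupled from the lifetime of the ppGTT itself, which is reference-counted via i915_ppgtt_get()/i915_ppgtt_put().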