Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 92
1 file changed, 78 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 01c822256b39..17d7c525ea5c 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -28,7 +28,9 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -112,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
+ kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
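The new kref on the vma follows the usual kernel refcounting idiom: kref_init() at creation time, kref_put() with a release callback (i915_vma_release() further down in this patch) that recovers the containing object via container_of(). A minimal, self-contained sketch of that idiom, with a hypothetical struct foo standing in for the vma:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
        struct kref ref;
        /* ... payload ... */
};

static void foo_release(struct kref *ref)
{
        struct foo *foo = container_of(ref, typeof(*foo), ref);

        kfree(foo);
}

static struct foo *foo_create(gfp_t gfp)
{
        struct foo *foo = kmalloc(sizeof(*foo), gfp);

        if (!foo)
                return NULL;

        kref_init(&foo->ref); /* refcount starts at 1 */
        return foo;
}

static void foo_put(struct foo *foo)
{
        kref_put(&foo->ref, foo_release); /* foo_release() runs on the last put */
}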
@@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work)
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
- if (vma->obj)
- __i915_gem_object_unpin_pages(vma->obj);
-
return err;
}
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
+ .release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
@@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma,
i915_active_set_exclusive(&vma->active, &work->base.dma);
work->base.dma.error = 0; /* enable the queue_work() */
- if (vma->obj)
+ if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
} else {
GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
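Together with the new ->release hook above, the page pin now spans the entire lifetime of the asynchronous bind rather than just the work callback: the pages are pinned when the work is queued, the object is recorded in work->pinned, and the pin is dropped only when the dma_fence_work is released. Restated as a minimal sketch, with hypothetical helper names standing in for the queue-time path in i915_vma_bind() and the __vma_release() hook:

/* hypothetical helpers; only the pin/unpin pairing is the point here */
static void vma_work_pin(struct i915_vma_work *work, struct i915_vma *vma)
{
        if (vma->obj) {
                __i915_gem_object_pin_pages(vma->obj); /* keep the backing store resident */
                work->pinned = vma->obj; /* remembered for the release hook */
        }
}

static void vma_work_unpin(struct i915_vma_work *work)
{
        if (work->pinned)
                __i915_gem_object_unpin_pages(work->pinned); /* balances vma_work_pin() */
}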
@@ -411,8 +423,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
int err;
- /* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
err = -ENODEV;
goto err;
@@ -444,6 +454,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
goto err_unpin;
i915_vma_set_ggtt_write(vma);
+
+ /* NB Access through the GTT requires the device to be awake. */
return ptr;
err_unpin:
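With the rpm assert dropped, keeping the device awake across the actual aperture access becomes the caller's responsibility, as the relocated NB comment notes. A hypothetical caller sketch, assuming the usual i915 headers, a vma already bound into the GGTT, and a runtime-PM wakeref held by the caller:

#include <linux/io.h>

/* hypothetical helper: write one dword through the GGTT aperture */
static int write_through_ggtt(struct i915_vma *vma, u32 value)
{
        void __iomem *map;

        map = i915_vma_pin_iomap(vma);
        if (IS_ERR(map))
                return PTR_ERR(map);

        writel(value, map); /* GTT access; the device must be awake here */

        i915_vma_unpin_iomap(vma);
        return 0;
}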
@@ -846,6 +858,7 @@ static void vma_unbind_pages(struct i915_vma *vma)
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
struct i915_vma_work *work = NULL;
+ intel_wakeref_t wakeref = 0;
unsigned int bound;
int err;
@@ -871,6 +884,9 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
}
}
+ if (flags & PIN_GLOBAL)
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+
/* No more allocations allowed once we hold vm->mutex */
err = mutex_lock_interruptible(&vma->vm->mutex);
if (err)
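PIN_GLOBAL means the vma will be bound into the GGTT, which touches hardware, so the wakeref is taken up front (before vm->mutex, since resuming the device may sleep) and dropped again on the common exit path below. A minimal sketch of the pairing, assuming a struct drm_i915_private *i915:

intel_wakeref_t wakeref;

wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* device is awake from here */
/* ... bind into / touch the GGTT ... */
intel_runtime_pm_put(&i915->runtime_pm, wakeref); /* balances the get */

Where the whole scope is a single block, the with_intel_runtime_pm() helper expresses the same get/put pairing.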
@@ -934,11 +950,45 @@ err_unlock:
err_fence:
if (work)
dma_fence_work_commit(&work->base);
+ if (wakeref)
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
vma_put_pages(vma);
return err;
}
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_barriers(engine);
+
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ do {
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
+ flush_idle_contexts(vm->gt);
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
+ i915_gem_evict_vm(vm);
+ mutex_unlock(&vm->mutex);
+ }
+ } while (1);
+}
+
void i915_vma_close(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
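The new i915_ggtt_pin() above turns a GGTT pin into a retry loop that refuses to fail with -ENOSPC: it flushes each engine's pending barrier work, waits for the GT to idle, and if space is still short evicts the whole address space before trying again; only other errors propagate to the caller. A hypothetical caller sketch (alignment 0 means the object's default; PIN_HIGH is just an example placement flag):

/* hypothetical caller: this state must be resident in the global GTT */
static int pin_state_in_ggtt(struct i915_vma *vma)
{
        int err;

        err = i915_ggtt_pin(vma, 0, PIN_HIGH); /* PIN_GLOBAL is implied */
        if (err) /* -ENOSPC is retried internally and never escapes */
                return err;

        return 0;
}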
@@ -978,8 +1028,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1019,7 +1071,9 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (i915_vm_tryopen(vm)) {
+ list_del_init(&vma->closed_link);
+ } else {
i915_gem_object_put(obj);
obj = NULL;
}
@@ -1027,7 +1081,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock);
if (obj) {
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
i915_gem_object_put(obj);
}
@@ -1054,17 +1108,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->mutex);
-
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1152,7 +1205,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
- return -EBUSY;
+ return -EAGAIN;
}
GEM_BUG_ON(i915_vma_is_active(vma));
@@ -1192,15 +1245,23 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
- drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
int err;
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ /* XXX not always required: nop_clear_range */
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
return err;
@@ -1208,6 +1269,9 @@ int i915_vma_unbind(struct i915_vma *vma)
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+
return err;
}