author    Maarten Lankhorst <dev@lankhorst.se>  2025-07-08 17:49:07 +0300
committer Maarten Lankhorst <dev@lankhorst.se>  2025-07-08 17:49:07 +0300
commit    e21354aea4b4420b53c44e36828607a7c94a994c (patch)
tree      003636d3a15eaebe9b948f9f8db6ad9e52a7e12c /drivers/gpu/drm/drm_gpuvm.c
parent    482c7e296edc0f594e8869a789a40be53c49bd6a (diff)
parent    203dcde881561f1a4ee1084e2ee438fb4522c94a (diff)
Merge remote-tracking branch 'drm/drm-next' into drm-misc-next
Pull in drm-intel-next for the updates to drm panic handling.

Signed-off-by: Maarten Lankhorst <dev@lankhorst.se>
Diffstat (limited to 'drivers/gpu/drm/drm_gpuvm.c')
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c  132
1 file changed, 129 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index e89b932e987c..bbc7fecb6f4a 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2300,13 +2300,13 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
}
/**
- * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @priv: pointer to a driver private data structure
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
* @req_offset: the offset within the &drm_gem_object
- * @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2350,7 +2350,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
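The kernel-doc change above reflects that drm_gpuvm_sm_map() calls back into the driver for each split/merge step through &drm_gpuvm_ops, rather than building an ops list. As a minimal sketch of what such driver callbacks might look like — struct my_bind_ctx and the my_apply_*() helpers are hypothetical stand-ins for real driver logic, not part of this patch:

static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
	struct my_bind_ctx *ctx = priv;	/* hypothetical driver context */

	/* install PTEs for op->map.va.addr / op->map.va.range */
	return my_apply_map(ctx, &op->map);
}

static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
{
	struct my_bind_ctx *ctx = priv;

	/* shrink the old mapping into op->remap.prev/next, unmap the rest */
	return my_apply_remap(ctx, &op->remap);
}

static int my_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
{
	struct my_bind_ctx *ctx = priv;

	/* tear down PTEs for op->unmap.va */
	return my_apply_unmap(ctx, &op->unmap);
}

static const struct drm_gpuvm_ops my_sm_ops = {
	.sm_step_map = my_sm_step_map,
	.sm_step_remap = my_sm_step_remap,
	.sm_step_unmap = my_sm_step_unmap,
};

With the VM's ops wired up this way at init time, drm_gpuvm_sm_map(gpuvm, ctx, addr, range, obj, offset) walks the affected range and invokes one callback per generated step.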
/**
- * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * drm_gpuvm_sm_unmap() - calls the &drm_gpuva_ops to split on unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
@@ -2391,6 +2391,132 @@ drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
+static int
+drm_gpuva_sm_step_lock(struct drm_gpuva_op *op, void *priv)
+{
+ struct drm_exec *exec = priv;
+
+ switch (op->op) {
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.unmap->va->gem.obj)
+ return drm_exec_lock_obj(exec, op->remap.unmap->va->gem.obj);
+ return 0;
+ case DRM_GPUVA_OP_UNMAP:
+ if (op->unmap.va->gem.obj)
+ return drm_exec_lock_obj(exec, op->unmap.va->gem.obj);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static const struct drm_gpuvm_ops lock_ops = {
+ .sm_step_map = drm_gpuva_sm_step_lock,
+ .sm_step_remap = drm_gpuva_sm_step_lock,
+ .sm_step_unmap = drm_gpuva_sm_step_lock,
+};
+
+/**
+ * drm_gpuvm_sm_map_exec_lock() - locks the objects touched by drm_gpuvm_sm_map()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @num_fences: for newly mapped objects, the # of fences to reserve
+ * @req_addr: the start address of the new mapping
+ * @req_range: the range of the new mapping
+ * @req_obj: the &drm_gem_object to map
+ * @req_offset: the offset within the &drm_gem_object
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped, and locks+prepares (drm_exec_prepare_obj()) objects that
+ * will be newly mapped.
+ *
+ * The expected usage is:
+ *
+ * vm_bind {
+ * struct drm_exec exec;
+ *
+ * // DRM_EXEC_IGNORE_DUPLICATES is required, DRM_EXEC_INTERRUPTIBLE_WAIT is recommended:
+ * drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ *
+ * drm_exec_until_all_locked (&exec) {
+ * for_each_vm_bind_operation {
+ * switch (op->op) {
+ * case DRIVER_OP_UNMAP:
+ * ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
+ * break;
+ * case DRIVER_OP_MAP:
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
+ * op->addr, op->range,
+ * obj, op->obj_offset);
+ * break;
+ * }
+ *
+ * drm_exec_retry_on_contention(&exec);
+ * if (ret)
+ * return ret;
+ * }
+ * }
+ * }
+ *
+ * This enables all locking to be performed before the driver begins modifying
+ * the VM. This is safe to do in the case of overlapping DRIVER_VM_BIND_OPs,
+ * where an earlier op can alter the sequence of steps generated for a later
+ * op, because the later altered step will involve the same GEM object(s)
+ * already seen in the earlier locking step. For example:
+ *
+ * 1) An earlier driver DRIVER_OP_UNMAP op removes the need for a
+ * DRM_GPUVA_OP_REMAP/UNMAP step. This is safe because we've already
+ * locked the GEM object in the earlier DRIVER_OP_UNMAP op.
+ *
+ * 2) An earlier DRIVER_OP_MAP op overlaps with a later DRIVER_OP_MAP/UNMAP
+ * op, introducing a DRM_GPUVA_OP_REMAP/UNMAP that wouldn't have been
+ * required without the earlier DRIVER_OP_MAP. This is safe because we've
+ * already locked the GEM object in the earlier DRIVER_OP_MAP step.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *req_obj, u64 req_offset)
+{
+ if (req_obj) {
+ int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
+ if (ret)
+ return ret;
+ }
+
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
+ req_addr, req_range,
+ req_obj, req_offset);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
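Putting the pieces together, here is a more concrete version of the vm_bind sketch from the kernel-doc above, reduced to a single map operation. It is a hedged illustration, not driver code from this patch: my_vm_bind_map() and ctx are placeholders, and the second phase assumes the VM's &drm_gpuvm_ops are wired as in the earlier callback sketch.

static int my_vm_bind_map(struct drm_gpuvm *gpuvm, void *ctx,
			  struct drm_gem_object *obj,
			  u64 addr, u64 range, u64 offset)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES |
		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* phase 1: lock every touched object, reserving one fence slot */
		ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, 1,
						 addr, range, obj, offset);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
	}

	/* phase 2: everything is locked, apply the split/merge steps */
	ret = drm_gpuvm_sm_map(gpuvm, ctx, addr, range, obj, offset);
out:
	drm_exec_fini(&exec);
	return ret;
}

Taking all the locks before phase 2 is what makes a failure mid-bind recoverable: nothing in the VM has been modified yet when a lock attempt fails or is interrupted.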
+
+/**
+ * drm_gpuvm_sm_unmap_exec_lock() - locks the objects touched by drm_gpuvm_sm_unmap()
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @exec: the &drm_exec locking context
+ * @req_addr: the start address of the range to unmap
+ * @req_range: the range of the mappings to unmap
+ *
+ * This function locks (drm_exec_lock_obj()) objects that will be unmapped/
+ * remapped by drm_gpuvm_sm_unmap().
+ *
+ * See drm_gpuvm_sm_map_exec_lock() for expected usage.
+ *
+ * Returns: 0 on success or a negative error code
+ */
+int
+drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range)
+{
+ return __drm_gpuvm_sm_unmap(gpuvm, &lock_ops, exec,
+ req_addr, req_range);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_exec_lock);
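The unmap-only path follows the same two-phase pattern. Again a hypothetical sketch, with my_vm_bind_unmap() and ctx as placeholders for driver state:

static int my_vm_bind_unmap(struct drm_gpuvm *gpuvm, void *ctx,
			    u64 addr, u64 range)
{
	struct drm_exec exec;
	int ret = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES |
		      DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, addr, range);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;
	}
	if (!ret)
		/* objects locked; drive the driver's remap/unmap steps */
		ret = drm_gpuvm_sm_unmap(gpuvm, ctx, addr, range);
	drm_exec_fini(&exec);
	return ret;
}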
+
static struct drm_gpuva_op *
gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{