Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig                |   1
-rw-r--r--  drivers/dma-buf/dma-buf.c              | 265
-rw-r--r--  drivers/dma-buf/dma-fence-unwrap.c     |  62
-rw-r--r--  drivers/dma-buf/dma-resv.c             |  17
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c    |   3
-rw-r--r--  drivers/dma-buf/st-dma-fence-unwrap.c  | 268
-rw-r--r--  drivers/dma-buf/st-dma-fence.c         |   6
-rw-r--r--  drivers/dma-buf/sw_sync.c              |  35
-rw-r--r--  drivers/dma-buf/sync_debug.c           |  70
-rw-r--r--  drivers/dma-buf/sync_debug.h           |   2
-rw-r--r--  drivers/dma-buf/udmabuf.c              |  31
11 files changed, 480 insertions(+), 280 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
- select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b85515..2bcf9ceca997 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -19,7 +19,9 @@
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
@@ -35,35 +37,91 @@
static inline int is_dma_buf_file(struct file *);
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static DEFINE_MUTEX(debugfs_list_mutex);
-static LIST_HEAD(debugfs_list);
+static DEFINE_MUTEX(dmabuf_list_mutex);
+static LIST_HEAD(dmabuf_list);
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
- mutex_lock(&debugfs_list_mutex);
- list_add(&dmabuf->list_node, &debugfs_list);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
+ list_add(&dmabuf->list_node, &dmabuf_list);
+ mutex_unlock(&dmabuf_list_mutex);
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
if (!dmabuf)
return;
- mutex_lock(&debugfs_list_mutex);
+ mutex_lock(&dmabuf_list_mutex);
list_del(&dmabuf->list_node);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
}
-#else
-static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
+
+/**
+ * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
+ *
+ * Returns the first buffer in the global list of DMA-bufs that's not in the
+ * process of being destroyed. Increments that buffer's reference count to
+ * prevent buffer destruction. Callers must release the reference, either by
+ * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * First buffer from global list, with refcount elevated
+ * * NULL if no active buffers are present
+ */
+struct dma_buf *dma_buf_iter_begin(void)
{
+ struct dma_buf *ret = NULL, *dmabuf;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
+/**
+ * dma_buf_iter_next - continue iteration through global list of all DMA buffers
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Decrements the reference count on the provided buffer. Returns the next
+ * buffer from the remainder of the global list of DMA-bufs with its reference
+ * count incremented. Callers must release the reference, either by continuing
+ * iteration with dma_buf_iter_next(), or with dma_buf_put().
+ *
+ * Return:
+ * * Next buffer from global list, with refcount elevated
+ * * NULL if no additional active buffers are present
+ */
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
+ struct dma_buf *ret = NULL;
+
+ /*
+ * The list mutex does not protect a dmabuf's refcount, so it can be
+ * zeroed while we are iterating. We cannot call get_dma_buf() since the
+ * caller may not already own a reference to the buffer.
+ */
+ mutex_lock(&dmabuf_list_mutex);
+ dma_buf_put(dmabuf);
+ list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
+ if (file_ref_get(&dmabuf->file->f_ref)) {
+ ret = dmabuf;
+ break;
+ }
+ }
+ mutex_unlock(&dmabuf_list_mutex);
+ return ret;
}
-#endif
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
if (!is_dma_buf_file(file))
return -EINVAL;
- __dma_buf_debugfs_list_del(file->private_data);
+ __dma_buf_list_del(file->private_data);
return 0;
}
@@ -636,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
- if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- (exp_info->ops->pin || exp_info->ops->unpin)))
- return ERR_PTR(-EINVAL);
-
if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
@@ -689,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
- __dma_buf_debugfs_list_add(dmabuf);
+ __dma_buf_list_add(dmabuf);
return dmabuf;
@@ -782,7 +836,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
/* To catch abuse of the underlying struct page by importers mix
* up the bits, but take care to preserve the low SG_ bits to
- * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * not corrupt the sgt. The mixing is undone on unmap
* before passing the sgt back to the exporter.
*/
for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +844,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
#endif
}
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction direction)
-{
- struct sg_table *sg_table;
- signed long ret;
- sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
- if (IS_ERR_OR_NULL(sg_table))
- return sg_table;
-
- if (!dma_buf_attachment_is_dynamic(attach)) {
- ret = dma_resv_wait_timeout(attach->dmabuf->resv,
- DMA_RESV_USAGE_KERNEL, true,
- MAX_SCHEDULE_TIMEOUT);
- if (ret < 0) {
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
- direction);
- return ERR_PTR(ret);
- }
- }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return !!attach->importer_ops;
+}
- mangle_sg_table(sg_table);
- return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+ return attach->dmabuf->ops->pin &&
+ (!dma_buf_attachment_is_dynamic(attach) ||
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}
/**
@@ -935,48 +979,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
list_add(&attach->node, &dmabuf->attachments);
dma_resv_unlock(dmabuf->resv);
- /* When either the importer or the exporter can't handle dynamic
- * mappings we cache the mapping here to avoid issues with the
- * reservation object lock.
- */
- if (dma_buf_attachment_is_dynamic(attach) !=
- dma_buf_is_dynamic(dmabuf)) {
- struct sg_table *sgt;
-
- dma_resv_lock(attach->dmabuf->resv, NULL);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- ret = dmabuf->ops->pin(attach);
- if (ret)
- goto err_unlock;
- }
-
- sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
- if (!sgt)
- sgt = ERR_PTR(-ENOMEM);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto err_unpin;
- }
- dma_resv_unlock(attach->dmabuf->resv);
- attach->sgt = sgt;
- attach->dir = DMA_BIDIRECTIONAL;
- }
-
return attach;
err_attach:
kfree(attach);
return ERR_PTR(ret);
-
-err_unpin:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
-
-err_unlock:
- dma_resv_unlock(attach->dmabuf->resv);
-
- dma_buf_detach(dmabuf, attach);
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
@@ -995,16 +1002,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sg_table,
- enum dma_data_direction direction)
-{
- /* uses XOR, hence this unmangles */
- mangle_sg_table(sg_table);
-
- attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
/**
* dma_buf_detach - Remove the given attachment from dmabuf's attachments list
* @dmabuf: [in] buffer to detach from.
@@ -1020,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
return;
dma_resv_lock(dmabuf->resv, NULL);
-
- if (attach->sgt) {
-
- __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
- if (dma_buf_is_dynamic(attach->dmabuf))
- dmabuf->ops->unpin(attach);
- }
list_del(&attach->node);
-
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
@@ -1058,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
struct dma_buf *dmabuf = attach->dmabuf;
int ret = 0;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1081,7 +1069,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
{
struct dma_buf *dmabuf = attach->dmabuf;
- WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+ WARN_ON(!attach->importer_ops);
dma_resv_assert_held(dmabuf->resv);
@@ -1115,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
- int r;
+ signed long ret;
might_sleep();
@@ -1124,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt) {
+ if (dma_buf_pin_on_map(attach)) {
+ ret = attach->dmabuf->ops->pin(attach);
/*
- * Two mappings with different directions for the same
- * attachment are not allowed.
+ * Catch exporters making buffers inaccessible even when
+ * attachments preventing that exist.
*/
- if (attach->dir != direction &&
- attach->dir != DMA_BIDIRECTIONAL)
- return ERR_PTR(-EBUSY);
-
- return attach->sgt;
- }
-
- if (dma_buf_is_dynamic(attach->dmabuf)) {
- if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = attach->dmabuf->ops->pin(attach);
- if (r)
- return ERR_PTR(r);
- }
+ WARN_ON_ONCE(ret == -EBUSY);
+ if (ret)
+ return ERR_PTR(ret);
}
- sg_table = __map_dma_buf(attach, direction);
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table))
+ goto error_unpin;
- if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- attach->dmabuf->ops->unpin(attach);
-
- if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
- attach->sgt = sg_table;
- attach->dir = direction;
+ /*
+ * Importers with static attachments don't wait for fences.
+ */
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ goto error_unmap;
}
+ mangle_sg_table(sg_table);
#ifdef CONFIG_DMA_API_DEBUG
- if (!IS_ERR(sg_table)) {
+ {
struct scatterlist *sg;
u64 addr;
int len;
@@ -1175,6 +1159,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
+
+error_unmap:
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+ sg_table = ERR_PTR(ret);
+
+error_unpin:
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
+
+ return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
@@ -1227,14 +1221,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
- if (attach->sgt == sg_table)
- return;
-
- __unmap_dma_buf(attach, sg_table, direction);
+ mangle_sg_table(sg_table);
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
- if (dma_buf_is_dynamic(attach->dmabuf) &&
- !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ if (dma_buf_pin_on_map(attach))
+ attach->dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
@@ -1630,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
size_t size = 0;
int ret;
- ret = mutex_lock_interruptible(&debugfs_list_mutex);
+ ret = mutex_lock_interruptible(&dmabuf_list_mutex);
if (ret)
return ret;
@@ -1639,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
"size", "flags", "mode", "count", "ino");
- list_for_each_entry(buf_obj, &debugfs_list, list_node) {
+ list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
if (ret)
@@ -1676,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return 0;
error_unlock:
- mutex_unlock(&debugfs_list_mutex);
+ mutex_unlock(&dmabuf_list_mutex);
return ret;
}
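
The dma_buf_iter_begin()/dma_buf_iter_next() pair added above walks the now always-built global buffer list while holding a file reference on the buffer it hands back. A minimal caller-side sketch, assuming a hypothetical helper that only totals buffer sizes (not part of this patch):

/*
 * Illustrative only: sum the sizes of all currently exported DMA-bufs.
 * dma_buf_iter_next() drops the reference on the buffer it was given, so
 * no explicit dma_buf_put() is needed unless the loop is exited early.
 */
static size_t total_exported_dmabuf_bytes(void)
{
	struct dma_buf *dmabuf;
	size_t total = 0;

	for (dmabuf = dma_buf_iter_begin(); dmabuf;
	     dmabuf = dma_buf_iter_next(dmabuf))
		total += dmabuf->size;

	return total;
}
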
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
index 6345062731f1..a495d8a6c2e3 100644
--- a/drivers/dma-buf/dma-fence-unwrap.c
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -79,21 +79,58 @@ static int fence_cmp(const void *_a, const void *_b)
return 0;
}
+/**
+ * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers
+ * @fences: Array of dma_fence pointers to be deduplicated
+ * @num_fences: Number of entries in the @fences array
+ *
+ * Sorts the input array by context, then removes duplicate
+ * fences with the same context, keeping only the most recent one.
+ *
+ * The array is modified in-place and unreferenced duplicate fences are released
+ * via dma_fence_put(). The function returns the new number of fences after
+ * deduplication.
+ *
+ * Return: Number of unique fences remaining in the array.
+ */
+int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)
+{
+ int i, j;
+
+ sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);
+
+ /*
+ * Only keep the most recent fence for each context.
+ */
+ j = 0;
+ for (i = 1; i < num_fences; i++) {
+ if (fences[i]->context == fences[j]->context)
+ dma_fence_put(fences[i]);
+ else
+ fences[++j] = fences[i];
+ }
+
+ return ++j;
+}
+EXPORT_SYMBOL_GPL(dma_fence_dedup_array);
+
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
struct dma_fence_unwrap *iter)
{
+ struct dma_fence *tmp, *unsignaled = NULL, **array;
struct dma_fence_array *result;
- struct dma_fence *tmp, **array;
ktime_t timestamp;
- int i, j, count;
+ int i, count;
count = 0;
timestamp = ns_to_ktime(0);
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
+ dma_fence_put(unsignaled);
+ unsignaled = dma_fence_get(tmp);
++count;
} else {
ktime_t t = dma_fence_timestamp(tmp);
@@ -107,9 +144,16 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
/*
* If we couldn't find a pending fence just return a private signaled
* fence with the timestamp of the last signaled one.
+ *
+ * Or if there was a single unsignaled fence left we can return it
+ * directly and early since that is a major path on many workloads.
*/
if (count == 0)
return dma_fence_allocate_private_stub(timestamp);
+ else if (count == 1)
+ return unsignaled;
+
+ dma_fence_put(unsignaled);
array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
@@ -132,19 +176,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (count == 0 || count == 1)
goto return_fastpath;
- sort(array, count, sizeof(*array), fence_cmp, NULL);
-
- /*
- * Only keep the most recent fence for each context.
- */
- j = 0;
- for (i = 1; i < count; i++) {
- if (array[i]->context == array[j]->context)
- dma_fence_put(array[i]);
- else
- array[++j] = array[i];
- }
- count = ++j;
+ count = dma_fence_dedup_array(array, count);
if (count > 1) {
result = dma_fence_array_create(count, array,
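
dma_fence_dedup_array() is now exported so other users that gather fences into a flat array can reuse the sort-and-dedup step. A hedged usage sketch; the fence sources named here are hypothetical:

/*
 * Illustrative only: collapse a locally built array so at most one fence
 * per context remains. References of the dropped duplicates are released
 * by dma_fence_dedup_array() itself; the new count is returned.
 */
struct dma_fence *fences[3];
int count = 0;

fences[count++] = dma_fence_get(job_fence);       /* hypothetical */
fences[count++] = dma_fence_get(vm_fence);        /* hypothetical */
fences[count++] = dma_fence_get(job_fence_newer); /* hypothetical */

count = dma_fence_dedup_array(fences, count);
/* fences[0..count - 1] now hold one fence per context */
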
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 5f8d010516f0..bea3e9858aca 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
count++;
dma_resv_list_set(fobj, i, fence, usage);
- /* pointer update must be visible before we extend the num_fences */
- smp_store_mb(fobj->num_fences, count);
+ /* fence update must be visible before we extend the num_fences */
+ smp_wmb();
+ fobj->num_fences = count;
}
EXPORT_SYMBOL(dma_resv_add_fence);
@@ -684,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) {
- ret = dma_fence_wait_timeout(fence, intr, ret);
- if (ret <= 0) {
- dma_resv_iter_end(&cursor);
- return ret;
- }
+ ret = dma_fence_wait_timeout(fence, intr, timeout);
+ if (ret <= 0)
+ break;
+
+ /* Even for zero timeout the return value is 1 */
+ if (timeout)
+ timeout = ret;
}
dma_resv_iter_end(&cursor);
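
The fix above only changes the internal timeout bookkeeping of dma_resv_wait_timeout(); the caller-visible contract is unchanged. A hedged usage sketch:

/*
 * Illustrative only: wait up to 100 ms for all kernel fences on obj.
 * Returns a positive value on success, 0 on timeout, or a negative error
 * such as -ERESTARTSYS when an interruptible wait is interrupted.
 */
long ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_KERNEL,
				 true /* interruptible */,
				 msecs_to_jiffies(100));
if (ret < 0)
	return ret;
if (ret == 0)
	return -ETIMEDOUT;
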
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 26d5dc89ea16..82b1b714300d 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-static struct dma_heap *sys_heap;
-
struct system_heap_buffer {
struct dma_heap *heap;
struct list_head attachments;
@@ -424,6 +422,7 @@ static const struct dma_heap_ops system_heap_ops = {
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_heap;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
index f0cee984b6c7..a3be888ae2e8 100644
--- a/drivers/dma-buf/st-dma-fence-unwrap.c
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -28,7 +28,7 @@ static const struct dma_fence_ops mock_ops = {
.get_timeline_name = mock_name,
};
-static struct dma_fence *mock_fence(void)
+static struct dma_fence *__mock_fence(u64 context, u64 seqno)
{
struct mock_fence *f;
@@ -37,12 +37,16 @@ static struct dma_fence *mock_fence(void)
return NULL;
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &mock_ops, &f->lock,
- dma_fence_context_alloc(1), 1);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, context, seqno);
return &f->base;
}
+static struct dma_fence *mock_fence(void)
+{
+ return __mock_fence(dma_fence_context_alloc(1), 1);
+}
+
static struct dma_fence *mock_array(unsigned int num_fences, ...)
{
struct dma_fence_array *array;
@@ -304,6 +308,177 @@ error_put_f1:
return err;
}
+static int unwrap_merge_duplicate(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = dma_fence_unwrap_merge(f1, f1);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f2) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[1], 1);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 2);
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = dma_fence_unwrap_merge(f1, f2, f3);
+ if (!f4) {
+ err = -ENOMEM;
+ goto error_put_f3;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f4) {
+ if (fence == f3 && f2) {
+ dma_fence_put(f3);
+ f3 = NULL;
+ } else if (fence == f2 && !f3) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f2 || f3) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_order(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *a1, *a2, *c1, *c2;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ a1 = mock_array(2, f1, f2);
+ if (!a1)
+ return -ENOMEM;
+
+ c1 = mock_chain(NULL, dma_fence_get(f1));
+ if (!c1)
+ goto error_put_a1;
+
+ c2 = mock_chain(c1, dma_fence_get(f2));
+ if (!c2)
+ goto error_put_a1;
+
+ /*
+ * The fences in the chain are the same as in a1 but in opposite order;
+ * the dma_fence_merge() function should be able to handle that.
+ */
+ a2 = dma_fence_unwrap_merge(a1, c2);
+
+ dma_fence_unwrap_for_each(fence, &iter, a2) {
+ if (fence == f1) {
+ f1 = NULL;
+ if (!f2)
+ pr_err("Unexpected order!\n");
+ } else if (fence == f2) {
+ f2 = NULL;
+ if (f1)
+ pr_err("Unexpected order!\n");
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(a2);
+ return err;
+
+error_put_a1:
+ dma_fence_put(a1);
+ return -ENOMEM;
+}
+
static int unwrap_merge_complex(void *arg)
{
struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
@@ -327,7 +502,7 @@ static int unwrap_merge_complex(void *arg)
goto error_put_f2;
/* The resulting array has the fences in reverse */
- f4 = dma_fence_unwrap_merge(f2, f1);
+ f4 = mock_array(2, dma_fence_get(f2), dma_fence_get(f1));
if (!f4)
goto error_put_f3;
@@ -367,6 +542,87 @@ error_put_f1:
return err;
}
+static int unwrap_merge_complex_seqno(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5, *f6, *f7;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+ u64 ctx[2];
+
+ ctx[0] = dma_fence_context_alloc(1);
+ ctx[1] = dma_fence_context_alloc(1);
+
+ f1 = __mock_fence(ctx[0], 2);
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = __mock_fence(ctx[1], 1);
+ if (!f2)
+ goto error_put_f1;
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = __mock_fence(ctx[0], 1);
+ if (!f3)
+ goto error_put_f2;
+
+ dma_fence_enable_sw_signaling(f3);
+
+ f4 = __mock_fence(ctx[1], 2);
+ if (!f4)
+ goto error_put_f3;
+
+ dma_fence_enable_sw_signaling(f4);
+
+ f5 = mock_array(2, dma_fence_get(f1), dma_fence_get(f2));
+ if (!f5)
+ goto error_put_f4;
+
+ f6 = mock_array(2, dma_fence_get(f3), dma_fence_get(f4));
+ if (!f6)
+ goto error_put_f5;
+
+ f7 = dma_fence_unwrap_merge(f5, f6);
+ if (!f7)
+ goto error_put_f6;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f7) {
+ if (fence == f1 && f4) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f4 && !f1) {
+ dma_fence_put(f4);
+ f4 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f4) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f7);
+error_put_f6:
+ dma_fence_put(f6);
+error_put_f5:
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
int dma_fence_unwrap(void)
{
static const struct subtest tests[] = {
@@ -375,7 +631,11 @@ int dma_fence_unwrap(void)
SUBTEST(unwrap_chain),
SUBTEST(unwrap_chain_array),
SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_duplicate),
+ SUBTEST(unwrap_merge_seqno),
+ SUBTEST(unwrap_merge_order),
SUBTEST(unwrap_merge_complex),
+ SUBTEST(unwrap_merge_complex_seqno),
};
return subtests(tests, NULL);
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index cf2ce3744ce6..27a36045410b 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -375,7 +375,7 @@ struct wait_timer {
static void wait_timer(struct timer_list *timer)
{
- struct wait_timer *wt = from_timer(wt, timer, timer);
+ struct wait_timer *wt = timer_container_of(wt, timer, timer);
dma_fence_signal(wt->f);
}
@@ -412,8 +412,8 @@ static int test_wait_timeout(void *arg)
err = 0;
err_free:
- del_timer_sync(&wt.timer);
- destroy_timer_on_stack(&wt.timer);
+ timer_delete_sync(&wt.timer);
+ timer_destroy_on_stack(&wt.timer);
dma_fence_signal(wt.f);
dma_fence_put(wt.f);
return err;
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index f5905d67dedb..4f27ee93a00c 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
-static void timeline_fence_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- snprintf(str, size, "%lld", fence->seqno);
-}
-
-static void timeline_fence_timeline_value_str(struct dma_fence *fence,
- char *str, int size)
-{
- struct sync_timeline *parent = dma_fence_parent(fence);
-
- snprintf(str, size, "%d", parent->value);
-}
-
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
@@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = {
.get_timeline_name = timeline_fence_get_timeline_name,
.signaled = timeline_fence_signaled,
.release = timeline_fence_release,
- .fence_value_str = timeline_fence_value_str,
- .timeline_value_str = timeline_fence_timeline_value_str,
.set_deadline = timeline_fence_set_deadline,
};
@@ -438,15 +422,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EINVAL;
pt = dma_fence_to_sync_pt(fence);
- if (!pt)
- return -EINVAL;
+ if (!pt) {
+ ret = -EINVAL;
+ goto put_fence;
+ }
spin_lock_irqsave(fence->lock, flags);
- if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
- data.deadline_ns = ktime_to_ns(pt->deadline);
- } else {
+ if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) {
ret = -ENOENT;
+ goto unlock;
}
+ data.deadline_ns = ktime_to_ns(pt->deadline);
spin_unlock_irqrestore(fence->lock, flags);
dma_fence_put(fence);
@@ -458,6 +444,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a
return -EFAULT;
return 0;
+
+unlock:
+ spin_unlock_irqrestore(fence->lock, flags);
+put_fence:
+ dma_fence_put(fence);
+
+ return ret;
}
static long sw_sync_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 237bce21d1e7..67cd69551e42 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -12,8 +12,6 @@ static struct dentry *dbgfs;
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
-static LIST_HEAD(sync_file_list_head);
-static DEFINE_SPINLOCK(sync_file_list_lock);
void sync_timeline_debug_add(struct sync_timeline *obj)
{
@@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj)
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}
-void sync_file_debug_add(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
-void sync_file_debug_remove(struct sync_file *sync_file)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sync_file_list_lock, flags);
- list_del(&sync_file->sync_file_list);
- spin_unlock_irqrestore(&sync_file_list_lock, flags);
-}
-
static const char *sync_status_str(int status)
{
if (status < 0)
@@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s,
seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
}
- if (fence->ops->timeline_value_str &&
- fence->ops->fence_value_str) {
- char value[64];
- bool success;
-
- fence->ops->fence_value_str(fence, value, sizeof(value));
- success = strlen(value);
-
- if (success) {
- seq_printf(s, ": %s", value);
-
- fence->ops->timeline_value_str(fence, value,
- sizeof(value));
-
- if (strlen(value))
- seq_printf(s, " / %s", value);
- }
- }
-
+ seq_printf(s, ": %lld", fence->seqno);
+ seq_printf(s, " / %d", parent->value);
seq_putc(s, '\n');
}
@@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
spin_unlock(&obj->lock);
}
-static void sync_print_sync_file(struct seq_file *s,
- struct sync_file *sync_file)
-{
- char buf[128];
- int i;
-
- seq_printf(s, "[%p] %s: %s\n", sync_file,
- sync_file_get_name(sync_file, buf, sizeof(buf)),
- sync_status_str(dma_fence_get_status(sync_file->fence)));
-
- if (dma_fence_is_array(sync_file->fence)) {
- struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
- for (i = 0; i < array->num_fences; ++i)
- sync_print_fence(s, array->fences[i], true);
- } else {
- sync_print_fence(s, sync_file->fence, true);
- }
-}
-
static int sync_info_debugfs_show(struct seq_file *s, void *unused)
{
struct list_head *pos;
@@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused)
seq_puts(s, "fences:\n--------------\n");
- spin_lock_irq(&sync_file_list_lock);
- list_for_each(pos, &sync_file_list_head) {
- struct sync_file *sync_file =
- container_of(pos, struct sync_file, sync_file_list);
-
- sync_print_sync_file(s, sync_file);
- seq_putc(s, '\n');
- }
- spin_unlock_irq(&sync_file_list_lock);
return 0;
}
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
index a1bdd62efccd..02af347293d0 100644
--- a/drivers/dma-buf/sync_debug.h
+++ b/drivers/dma-buf/sync_debug.h
@@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops;
void sync_timeline_debug_add(struct sync_timeline *obj);
void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cc7398cc17d6..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- unsigned long *pfns;
+ struct page **pages;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- /**
- * HVO may free tail pages, so just use pfn to map each folio
- * into vmalloc area.
- */
- pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
- if (!pfns)
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++) {
- unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
- pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
- pfns[pg] = pfn;
- }
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ pages[pg] = folio_page(ubuf->folios[pg],
+ ubuf->offsets[pg] >> PAGE_SHIFT);
- vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
- kvfree(pfns);
+ vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
@@ -264,8 +257,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -280,12 +272,11 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
static const struct dma_buf_ops udmabuf_ops = {
- .cache_sgt_mapping = true,
.map_dma_buf = map_udmabuf,
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
@@ -393,7 +384,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
- pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
pgoff_t subpgcnt;
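
For reference, the vmap path above now builds on vm_map_ram()/vm_unmap_ram() over an array of struct page pointers instead of vmap_pfn(). A hedged sketch of that pattern in isolation (variable names are illustrative):

/*
 * Illustrative only: map nr_pages pages contiguously into kernel virtual
 * address space, use the mapping, then tear it down again.
 */
void *vaddr = vm_map_ram(pages, nr_pages, -1 /* any NUMA node */);
if (!vaddr)
	return -ENOMEM;
/* ... access the buffer through vaddr ... */
vm_unmap_ram(vaddr, nr_pages);
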