Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig                |   1
-rw-r--r--  drivers/dma-buf/dma-buf.c              | 307
-rw-r--r--  drivers/dma-buf/dma-fence-array.c      |  28
-rw-r--r--  drivers/dma-buf/dma-fence-chain.c      |   7
-rw-r--r--  drivers/dma-buf/dma-fence-unwrap.c     | 160
-rw-r--r--  drivers/dma-buf/dma-fence.c            | 167
-rw-r--r--  drivers/dma-buf/dma-resv.c             |  17
-rw-r--r--  drivers/dma-buf/heaps/Kconfig          |  10
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c       |  36
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c    |  46
-rw-r--r--  drivers/dma-buf/st-dma-fence-unwrap.c  | 268
-rw-r--r--  drivers/dma-buf/st-dma-fence.c         |   6
-rw-r--r--  drivers/dma-buf/sw_sync.c              |  37
-rw-r--r--  drivers/dma-buf/sync_debug.c           |  70
-rw-r--r--  drivers/dma-buf/sync_debug.h           |   2
-rw-r--r--  drivers/dma-buf/sync_file.c            |  24
-rw-r--r--  drivers/dma-buf/udmabuf.c              |  74
17 files changed, 827 insertions, 433 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index fee04fdb0822..b46eb8a552d7 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -36,7 +36,6 @@ config UDMABUF depends on DMA_SHARED_BUFFER depends on MEMFD_CREATE || COMPILE_TEST depends on MMU - select VMAP_PFN help A driver to let userspace turn memfd regions into dma-bufs. Qemu can use this to create host dmabufs for guest framebuffers. diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 5ad0e9e2e1b9..2bcf9ceca997 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -19,7 +19,9 @@ #include <linux/anon_inodes.h> #include <linux/export.h> #include <linux/debugfs.h> +#include <linux/list.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/sync_file.h> #include <linux/poll.h> @@ -35,35 +37,91 @@ static inline int is_dma_buf_file(struct file *); -#if IS_ENABLED(CONFIG_DEBUG_FS) -static DEFINE_MUTEX(debugfs_list_mutex); -static LIST_HEAD(debugfs_list); +static DEFINE_MUTEX(dmabuf_list_mutex); +static LIST_HEAD(dmabuf_list); -static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +static void __dma_buf_list_add(struct dma_buf *dmabuf) { - mutex_lock(&debugfs_list_mutex); - list_add(&dmabuf->list_node, &debugfs_list); - mutex_unlock(&debugfs_list_mutex); + mutex_lock(&dmabuf_list_mutex); + list_add(&dmabuf->list_node, &dmabuf_list); + mutex_unlock(&dmabuf_list_mutex); } -static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) +static void __dma_buf_list_del(struct dma_buf *dmabuf) { if (!dmabuf) return; - mutex_lock(&debugfs_list_mutex); + mutex_lock(&dmabuf_list_mutex); list_del(&dmabuf->list_node); - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); } -#else -static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) + +/** + * dma_buf_iter_begin - begin iteration through global list of all DMA buffers + * + * Returns the first buffer in the global list of DMA-bufs that's not in the + * process of being destroyed. Increments that buffer's reference count to + * prevent buffer destruction. Callers must release the reference, either by + * continuing iteration with dma_buf_iter_next(), or with dma_buf_put(). + * + * Return: + * * First buffer from global list, with refcount elevated + * * NULL if no active buffers are present + */ +struct dma_buf *dma_buf_iter_begin(void) { + struct dma_buf *ret = NULL, *dmabuf; + + /* + * The list mutex does not protect a dmabuf's refcount, so it can be + * zeroed while we are iterating. We cannot call get_dma_buf() since the + * caller may not already own a reference to the buffer. + */ + mutex_lock(&dmabuf_list_mutex); + list_for_each_entry(dmabuf, &dmabuf_list, list_node) { + if (file_ref_get(&dmabuf->file->f_ref)) { + ret = dmabuf; + break; + } + } + mutex_unlock(&dmabuf_list_mutex); + return ret; } -static void __dma_buf_debugfs_list_del(struct file *file) +/** + * dma_buf_iter_next - continue iteration through global list of all DMA buffers + * @dmabuf: [in] pointer to dma_buf + * + * Decrements the reference count on the provided buffer. Returns the next + * buffer from the remainder of the global list of DMA-bufs with its reference + * count incremented. Callers must release the reference, either by continuing + * iteration with dma_buf_iter_next(), or with dma_buf_put(). 
+ * + * Return: + * * Next buffer from global list, with refcount elevated + * * NULL if no additional active buffers are present + */ +struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf) { + struct dma_buf *ret = NULL; + + /* + * The list mutex does not protect a dmabuf's refcount, so it can be + * zeroed while we are iterating. We cannot call get_dma_buf() since the + * caller may not already own a reference to the buffer. + */ + mutex_lock(&dmabuf_list_mutex); + dma_buf_put(dmabuf); + list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) { + if (file_ref_get(&dmabuf->file->f_ref)) { + ret = dmabuf; + break; + } + } + mutex_unlock(&dmabuf_list_mutex); + return ret; } -#endif static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { @@ -115,7 +173,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file) if (!is_dma_buf_file(file)) return -EINVAL; - __dma_buf_debugfs_list_del(file->private_data); + __dma_buf_list_del(file->private_data); return 0; } @@ -636,10 +694,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) || !exp_info->ops->release)) return ERR_PTR(-EINVAL); - if (WARN_ON(exp_info->ops->cache_sgt_mapping && - (exp_info->ops->pin || exp_info->ops->unpin))) - return ERR_PTR(-EINVAL); - if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) return ERR_PTR(-EINVAL); @@ -689,7 +743,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; - __dma_buf_debugfs_list_add(dmabuf); + __dma_buf_list_add(dmabuf); return dmabuf; @@ -703,7 +757,7 @@ err_module: module_put(exp_info->owner); return ERR_PTR(ret); } -EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF"); /** * dma_buf_fd - returns a file descriptor for the given struct dma_buf @@ -727,7 +781,7 @@ int dma_buf_fd(struct dma_buf *dmabuf, int flags) return fd; } -EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF"); /** * dma_buf_get - returns the struct dma_buf related to an fd @@ -753,7 +807,7 @@ struct dma_buf *dma_buf_get(int fd) return file->private_data; } -EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF"); /** * dma_buf_put - decreases refcount of the buffer @@ -772,7 +826,7 @@ void dma_buf_put(struct dma_buf *dmabuf) fput(dmabuf->file); } -EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF"); static void mangle_sg_table(struct sg_table *sg_table) { @@ -782,7 +836,7 @@ static void mangle_sg_table(struct sg_table *sg_table) /* To catch abuse of the underlying struct page by importers mix * up the bits, but take care to preserve the low SG_ bits to - * not corrupt the sgt. The mixing is undone in __unmap_dma_buf + * not corrupt the sgt. The mixing is undone on unmap * before passing the sgt back to the exporter. 
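As an aside, the new dma_buf_iter_begin()/dma_buf_iter_next() pair documented above lends itself to a simple walk of all live buffers. The sketch below is illustrative only and not part of this patch; the helper name print_all_dmabufs() is made up, and it assumes the iterator declarations live in <linux/dma-buf.h>.

	#include <linux/dma-buf.h>
	#include <linux/fs.h>

	/*
	 * Illustrative only: walk every live DMA-buf. Each loop step holds
	 * exactly one elevated reference; bailing out early would have to
	 * drop it with dma_buf_put().
	 */
	static void print_all_dmabufs(void)
	{
		struct dma_buf *buf;

		for (buf = dma_buf_iter_begin(); buf; buf = dma_buf_iter_next(buf))
			pr_info("dma-buf inode %lu, size %zu\n",
				file_inode(buf->file)->i_ino, buf->size);
	}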
*/ for_each_sgtable_sg(sg_table, sg, i) @@ -790,29 +844,19 @@ static void mangle_sg_table(struct sg_table *sg_table) #endif } -static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction direction) -{ - struct sg_table *sg_table; - signed long ret; - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); - if (IS_ERR_OR_NULL(sg_table)) - return sg_table; - - if (!dma_buf_attachment_is_dynamic(attach)) { - ret = dma_resv_wait_timeout(attach->dmabuf->resv, - DMA_RESV_USAGE_KERNEL, true, - MAX_SCHEDULE_TIMEOUT); - if (ret < 0) { - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, - direction); - return ERR_PTR(ret); - } - } +static inline bool +dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) +{ + return !!attach->importer_ops; +} - mangle_sg_table(sg_table); - return sg_table; +static bool +dma_buf_pin_on_map(struct dma_buf_attachment *attach) +{ + return attach->dmabuf->ops->pin && + (!dma_buf_attachment_is_dynamic(attach) || + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); } /** @@ -935,50 +979,13 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, list_add(&attach->node, &dmabuf->attachments); dma_resv_unlock(dmabuf->resv); - /* When either the importer or the exporter can't handle dynamic - * mappings we cache the mapping here to avoid issues with the - * reservation object lock. - */ - if (dma_buf_attachment_is_dynamic(attach) != - dma_buf_is_dynamic(dmabuf)) { - struct sg_table *sgt; - - dma_resv_lock(attach->dmabuf->resv, NULL); - if (dma_buf_is_dynamic(attach->dmabuf)) { - ret = dmabuf->ops->pin(attach); - if (ret) - goto err_unlock; - } - - sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); - if (!sgt) - sgt = ERR_PTR(-ENOMEM); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto err_unpin; - } - dma_resv_unlock(attach->dmabuf->resv); - attach->sgt = sgt; - attach->dir = DMA_BIDIRECTIONAL; - } - return attach; err_attach: kfree(attach); return ERR_PTR(ret); - -err_unpin: - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); - -err_unlock: - dma_resv_unlock(attach->dmabuf->resv); - - dma_buf_detach(dmabuf, attach); - return ERR_PTR(ret); } -EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF"); /** * dma_buf_attach - Wrapper for dma_buf_dynamic_attach @@ -993,17 +1000,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, { return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL); } -EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF); - -static void __unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sg_table, - enum dma_data_direction direction) -{ - /* uses XOR, hence this unmangles */ - mangle_sg_table(sg_table); - - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); -} +EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF"); /** * dma_buf_detach - Remove the given attachment from dmabuf's attachments list @@ -1020,16 +1017,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) return; dma_resv_lock(dmabuf->resv, NULL); - - if (attach->sgt) { - - __unmap_dma_buf(attach, attach->sgt, attach->dir); - - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); - } list_del(&attach->node); - dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) @@ -1037,7 +1025,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) kfree(attach); } -EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF"); /** * 
dma_buf_pin - Lock down the DMA-buf @@ -1058,7 +1046,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach) struct dma_buf *dmabuf = attach->dmabuf; int ret = 0; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); @@ -1067,7 +1055,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach) return ret; } -EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF"); /** * dma_buf_unpin - Unpin a DMA-buf @@ -1081,14 +1069,14 @@ void dma_buf_unpin(struct dma_buf_attachment *attach) { struct dma_buf *dmabuf = attach->dmabuf; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); if (dmabuf->ops->unpin) dmabuf->ops->unpin(attach); } -EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF"); /** * dma_buf_map_attachment - Returns the scatterlist table of the attachment; @@ -1115,7 +1103,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; - int r; + signed long ret; might_sleep(); @@ -1124,41 +1112,37 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt) { + if (dma_buf_pin_on_map(attach)) { + ret = attach->dmabuf->ops->pin(attach); /* - * Two mappings with different directions for the same - * attachment are not allowed. + * Catch exporters making buffers inaccessible even when + * attachments preventing that exist. */ - if (attach->dir != direction && - attach->dir != DMA_BIDIRECTIONAL) - return ERR_PTR(-EBUSY); - - return attach->sgt; - } - - if (dma_buf_is_dynamic(attach->dmabuf)) { - if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { - r = attach->dmabuf->ops->pin(attach); - if (r) - return ERR_PTR(r); - } + WARN_ON_ONCE(ret == -EBUSY); + if (ret) + return ERR_PTR(ret); } - sg_table = __map_dma_buf(attach, direction); + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); + if (IS_ERR(sg_table)) + goto error_unpin; - if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - attach->dmabuf->ops->unpin(attach); - - if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { - attach->sgt = sg_table; - attach->dir = direction; + /* + * Importers with static attachments don't wait for fences. 
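For context, here is a hedged sketch of the importer-side call sequence served by the reworked dma_buf_map_attachment() path above; it is not taken from the patch, and use_buffer() is a hypothetical helper. With this rework both the optional pin (dma_buf_pin_on_map()) and the DMA_RESV_USAGE_KERNEL wait for static importers happen inside the map call itself, so the caller only handles the mapping.

	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>
	#include <linux/err.h>

	/* Hedged sketch: 'attach' was obtained earlier from dma_buf_attach(). */
	static int use_buffer(struct dma_buf_attachment *attach)
	{
		struct sg_table *sgt;

		/* Takes the reservation lock, pins and waits as needed for this attachment. */
		sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		/* ... program the device with sgt ... */

		dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
		return 0;
	}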
+ */ + if (!dma_buf_attachment_is_dynamic(attach)) { + ret = dma_resv_wait_timeout(attach->dmabuf->resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) + goto error_unmap; } + mangle_sg_table(sg_table); #ifdef CONFIG_DMA_API_DEBUG - if (!IS_ERR(sg_table)) { + { struct scatterlist *sg; u64 addr; int len; @@ -1175,8 +1159,18 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } #endif /* CONFIG_DMA_API_DEBUG */ return sg_table; + +error_unmap: + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); + sg_table = ERR_PTR(ret); + +error_unpin: + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); + + return sg_table; } -EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF"); /** * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment; @@ -1204,7 +1198,7 @@ dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach, return sg_table; } -EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF"); /** * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might @@ -1227,16 +1221,13 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt == sg_table) - return; - - __unmap_dma_buf(attach, sg_table, direction); + mangle_sg_table(sg_table); + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); - if (dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - dma_buf_unpin(attach); + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); } -EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF"); /** * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;might @@ -1261,7 +1252,7 @@ void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach, dma_buf_unmap_attachment(attach, sg_table, direction); dma_resv_unlock(attach->dmabuf->resv); } -EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF"); /** * dma_buf_move_notify - notify attachments that DMA-buf is moving @@ -1281,7 +1272,7 @@ void dma_buf_move_notify(struct dma_buf *dmabuf) if (attach->importer_ops) attach->importer_ops->move_notify(attach); } -EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF"); /** * DOC: cpu access @@ -1429,7 +1420,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, return ret; } -EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF"); /** * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the @@ -1457,7 +1448,7 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, return ret; } -EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF"); /** @@ -1499,7 +1490,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return dmabuf->ops->mmap(dmabuf, vma); } -EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF"); /** * dma_buf_vmap - Create virtual mapping for the buffer object into kernel @@ -1552,7 +1543,7 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map) return 0; } -EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF); 
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF"); /** * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel @@ -1579,7 +1570,7 @@ int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) return ret; } -EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF"); /** * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap. @@ -1603,7 +1594,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map) iosys_map_clear(&dmabuf->vmap_ptr); } } -EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF"); /** * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap. @@ -1619,7 +1610,7 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map) dma_buf_vunmap(dmabuf, map); dma_resv_unlock(dmabuf->resv); } -EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF); +EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF"); #ifdef CONFIG_DEBUG_FS static int dma_buf_debug_show(struct seq_file *s, void *unused) @@ -1630,7 +1621,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) size_t size = 0; int ret; - ret = mutex_lock_interruptible(&debugfs_list_mutex); + ret = mutex_lock_interruptible(&dmabuf_list_mutex); if (ret) return ret; @@ -1639,7 +1630,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); - list_for_each_entry(buf_obj, &debugfs_list, list_node) { + list_for_each_entry(buf_obj, &dmabuf_list, list_node) { ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); if (ret) @@ -1676,11 +1667,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); return 0; error_unlock: - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); return ret; } diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 8a08ffde31e7..6657d4b30af9 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence) static bool dma_fence_array_signaled(struct dma_fence *fence) { struct dma_fence_array *array = to_dma_fence_array(fence); + int num_pending; + unsigned int i; - if (atomic_read(&array->num_pending) > 0) + /* + * We need to read num_pending before checking the enable_signal bit + * to avoid racing with the enable_signaling() implementation, which + * might decrement the counter, and cause a partial check. + * atomic_read_acquire() pairs with atomic_dec_and_test() in + * dma_fence_array_enable_signaling() + * + * The !--num_pending check is here to account for the any_signaled case + * if we race with enable_signaling(), that means the !num_pending check + * in the is_signalling_enabled branch might be outdated (num_pending + * might have been decremented), but that's fine. The user will get the + * right value when testing again later. 
+ */ + num_pending = atomic_read_acquire(&array->num_pending); + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) { + if (num_pending <= 0) + goto signal; return false; + } + + for (i = 0; i < array->num_fences; ++i) { + if (dma_fence_is_signaled(array->fences[i]) && !--num_pending) + goto signal; + } + return false; +signal: dma_fence_array_clear_pending_error(array); return true; } diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 9663ba1bb6ac..a8a90acf4f34 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence, } const struct dma_fence_ops dma_fence_chain_ops = { - .use_64bit_seqno = true, .get_driver_name = dma_fence_chain_get_driver_name, .get_timeline_name = dma_fence_chain_get_timeline_name, .enable_signaling = dma_fence_chain_enable_signaling, @@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, chain->prev_seqno = 0; /* Try to reuse the context of the previous chain node. */ - if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) { + if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) { context = prev->context; chain->prev_seqno = prev->seqno; } else { @@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, seqno = max(prev->seqno, seqno); } - dma_fence_init(&chain->base, &dma_fence_chain_ops, - &chain->lock, context, seqno); + dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock, + context, seqno); /* * Chaining dma_fence_chain container together is only allowed through diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c index 628af51c81af..a495d8a6c2e3 100644 --- a/drivers/dma-buf/dma-fence-unwrap.c +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -12,6 +12,7 @@ #include <linux/dma-fence-chain.h> #include <linux/dma-fence-unwrap.h> #include <linux/slab.h> +#include <linux/sort.h> /* Internal helper to start new array iteration, don't use directly */ static struct dma_fence * @@ -59,22 +60,77 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) } EXPORT_SYMBOL_GPL(dma_fence_unwrap_next); + +static int fence_cmp(const void *_a, const void *_b) +{ + struct dma_fence *a = *(struct dma_fence **)_a; + struct dma_fence *b = *(struct dma_fence **)_b; + + if (a->context < b->context) + return -1; + else if (a->context > b->context) + return 1; + + if (dma_fence_is_later(b, a)) + return 1; + else if (dma_fence_is_later(a, b)) + return -1; + + return 0; +} + +/** + * dma_fence_dedup_array - Sort and deduplicate an array of dma_fence pointers + * @fences: Array of dma_fence pointers to be deduplicated + * @num_fences: Number of entries in the @fences array + * + * Sorts the input array by context, then removes duplicate + * fences with the same context, keeping only the most recent one. + * + * The array is modified in-place and unreferenced duplicate fences are released + * via dma_fence_put(). The function returns the new number of fences after + * deduplication. + * + * Return: Number of unique fences remaining in the array. + */ +int dma_fence_dedup_array(struct dma_fence **fences, int num_fences) +{ + int i, j; + + sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL); + + /* + * Only keep the most recent fence for each context. 
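A brief hedged illustration of the helper being added here (a hypothetical caller, not from this patch; header locations are assumed): after dma_fence_dedup_array() the array holds at most one fence per context, the latest one, so it can be handed straight to dma_fence_array_create().

	#include <linux/dma-fence-array.h>
	#include <linux/dma-fence-unwrap.h>

	/*
	 * Hedged sketch: collapse 'count' referenced fences (count >= 1),
	 * possibly several per context, into one fence to wait on. On success
	 * the array and its references are owned by the returned container.
	 */
	static struct dma_fence *merge_collected(struct dma_fence **fences, int count)
	{
		struct dma_fence_array *array;

		count = dma_fence_dedup_array(fences, count);
		if (count == 1)
			return fences[0];

		array = dma_fence_array_create(count, fences,
					       dma_fence_context_alloc(1), 1, false);
		return array ? &array->base : NULL;	/* NULL on allocation failure */
	}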
+ */ + j = 0; + for (i = 1; i < num_fences; i++) { + if (fences[i]->context == fences[j]->context) + dma_fence_put(fences[i]); + else + fences[++j] = fences[i]; + } + + return ++j; +} +EXPORT_SYMBOL_GPL(dma_fence_dedup_array); + /* Implementation for the dma_fence_merge() marco, don't use directly */ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, struct dma_fence **fences, struct dma_fence_unwrap *iter) { + struct dma_fence *tmp, *unsignaled = NULL, **array; struct dma_fence_array *result; - struct dma_fence *tmp, **array; ktime_t timestamp; - unsigned int i; - size_t count; + int i, count; count = 0; timestamp = ns_to_ktime(0); for (i = 0; i < num_fences; ++i) { dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { if (!dma_fence_is_signaled(tmp)) { + dma_fence_put(unsignaled); + unsignaled = dma_fence_get(tmp); ++count; } else { ktime_t t = dma_fence_timestamp(tmp); @@ -88,86 +144,58 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, /* * If we couldn't find a pending fence just return a private signaled * fence with the timestamp of the last signaled one. + * + * Or if there was a single unsignaled fence left we can return it + * directly and early since that is a major path on many workloads. */ if (count == 0) return dma_fence_allocate_private_stub(timestamp); + else if (count == 1) + return unsignaled; + + dma_fence_put(unsignaled); array = kmalloc_array(count, sizeof(*array), GFP_KERNEL); if (!array) return NULL; - /* - * This trashes the input fence array and uses it as position for the - * following merge loop. This works because the dma_fence_merge() - * wrapper macro is creating this temporary array on the stack together - * with the iterators. - */ - for (i = 0; i < num_fences; ++i) - fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]); - count = 0; - do { - unsigned int sel; - -restart: - tmp = NULL; - for (i = 0; i < num_fences; ++i) { - struct dma_fence *next; - - while (fences[i] && dma_fence_is_signaled(fences[i])) - fences[i] = dma_fence_unwrap_next(&iter[i]); - - next = fences[i]; - if (!next) - continue; - - /* - * We can't guarantee that inpute fences are ordered by - * context, but it is still quite likely when this - * function is used multiple times. So attempt to order - * the fences by context as we pass over them and merge - * fences with the same context. 
- */ - if (!tmp || tmp->context > next->context) { - tmp = next; - sel = i; - - } else if (tmp->context < next->context) { - continue; - - } else if (dma_fence_is_later(tmp, next)) { - fences[i] = dma_fence_unwrap_next(&iter[i]); - goto restart; + for (i = 0; i < num_fences; ++i) { + dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { + if (!dma_fence_is_signaled(tmp)) { + array[count++] = dma_fence_get(tmp); } else { - fences[sel] = dma_fence_unwrap_next(&iter[sel]); - goto restart; + ktime_t t = dma_fence_timestamp(tmp); + + if (ktime_after(t, timestamp)) + timestamp = t; } } + } - if (tmp) { - array[count++] = dma_fence_get(tmp); - fences[sel] = dma_fence_unwrap_next(&iter[sel]); - } - } while (tmp); + if (count == 0 || count == 1) + goto return_fastpath; - if (count == 0) { - tmp = dma_fence_allocate_private_stub(ktime_get()); - goto return_tmp; - } + count = dma_fence_dedup_array(array, count); - if (count == 1) { - tmp = array[0]; - goto return_tmp; + if (count > 1) { + result = dma_fence_array_create(count, array, + dma_fence_context_alloc(1), + 1, false); + if (!result) { + for (i = 0; i < count; i++) + dma_fence_put(array[i]); + tmp = NULL; + goto return_tmp; + } + return &result->base; } - result = dma_fence_array_create(count, array, - dma_fence_context_alloc(1), - 1, false); - if (!result) { - tmp = NULL; - goto return_tmp; - } - return &result->base; +return_fastpath: + if (count == 0) + tmp = dma_fence_allocate_private_stub(timestamp); + else + tmp = array[0]; return_tmp: kfree(array); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index f0cdd3e99d36..3f78c56b58dc 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -511,12 +511,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout) dma_fence_enable_sw_signaling(fence); - trace_dma_fence_wait_start(fence); + if (trace_dma_fence_wait_start_enabled()) { + rcu_read_lock(); + trace_dma_fence_wait_start(fence); + rcu_read_unlock(); + } if (fence->ops->wait) ret = fence->ops->wait(fence, intr, timeout); else ret = dma_fence_default_wait(fence, intr, timeout); - trace_dma_fence_wait_end(fence); + if (trace_dma_fence_wait_end_enabled()) { + rcu_read_lock(); + trace_dma_fence_wait_end(fence); + rcu_read_unlock(); + } return ret; } EXPORT_SYMBOL(dma_fence_wait_timeout); @@ -533,16 +541,23 @@ void dma_fence_release(struct kref *kref) struct dma_fence *fence = container_of(kref, struct dma_fence, refcount); + rcu_read_lock(); trace_dma_fence_destroy(fence); - if (WARN(!list_empty(&fence->cb_list) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags), - "Fence %s:%s:%llx:%llx released with pending signals!\n", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), - fence->context, fence->seqno)) { + if (!list_empty(&fence->cb_list) && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { + const char __rcu *timeline; + const char __rcu *driver; unsigned long flags; + driver = dma_fence_driver_name(fence); + timeline = dma_fence_timeline_name(fence); + + WARN(1, + "Fence %s:%s:%llx:%llx released with pending signals!\n", + rcu_dereference(driver), rcu_dereference(timeline), + fence->context, fence->seqno); + /* * Failed to signal before release, likely a refcounting issue. 
* @@ -556,6 +571,8 @@ void dma_fence_release(struct kref *kref) spin_unlock_irqrestore(fence->lock, flags); } + rcu_read_unlock(); + if (fence->ops->release) fence->ops->release(fence); else @@ -982,13 +999,43 @@ EXPORT_SYMBOL(dma_fence_set_deadline); */ void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq) { + const char __rcu *timeline; + const char __rcu *driver; + + rcu_read_lock(); + + timeline = dma_fence_timeline_name(fence); + driver = dma_fence_driver_name(fence); + seq_printf(seq, "%s %s seq %llu %ssignalled\n", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), fence->seqno, + rcu_dereference(driver), + rcu_dereference(timeline), + fence->seqno, dma_fence_is_signaled(fence) ? "" : "un"); + + rcu_read_unlock(); } EXPORT_SYMBOL(dma_fence_describe); +static void +__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, u64 seqno, unsigned long flags) +{ + BUG_ON(!lock); + BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); + + kref_init(&fence->refcount); + fence->ops = ops; + INIT_LIST_HEAD(&fence->cb_list); + fence->lock = lock; + fence->context = context; + fence->seqno = seqno; + fence->flags = flags; + fence->error = 0; + + trace_dma_fence_init(fence); +} + /** * dma_fence_init - Initialize a custom fence. * @fence: the fence to initialize @@ -1008,18 +1055,94 @@ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno) { - BUG_ON(!lock); - BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); + __dma_fence_init(fence, ops, lock, context, seqno, 0UL); +} +EXPORT_SYMBOL(dma_fence_init); - kref_init(&fence->refcount); - fence->ops = ops; - INIT_LIST_HEAD(&fence->cb_list); - fence->lock = lock; - fence->context = context; - fence->seqno = seqno; - fence->flags = 0UL; - fence->error = 0; +/** + * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support. + * @fence: the fence to initialize + * @ops: the dma_fence_ops for operations on this fence + * @lock: the irqsafe spinlock to use for locking this fence + * @context: the execution context this fence is run on + * @seqno: a linear increasing sequence number for this context + * + * Initializes an allocated fence, the caller doesn't have to keep its + * refcount after committing with this fence, but it will need to hold a + * refcount again if &dma_fence_ops.enable_signaling gets called. + * + * Context and seqno are used for easy comparison between fences, allowing + * to check which fence is later by simply using dma_fence_later(). + */ +void +dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops, + spinlock_t *lock, u64 context, u64 seqno) +{ + __dma_fence_init(fence, ops, lock, context, seqno, + BIT(DMA_FENCE_FLAG_SEQNO64_BIT)); +} +EXPORT_SYMBOL(dma_fence_init64); - trace_dma_fence_init(fence); +/** + * dma_fence_driver_name - Access the driver name + * @fence: the fence to query + * + * Returns a driver name backing the dma-fence implementation. + * + * IMPORTANT CONSIDERATION: + * Dma-fence contract stipulates that access to driver provided data (data not + * directly embedded into the object itself), such as the &dma_fence.lock and + * memory potentially accessed by the &dma_fence.ops functions, is forbidden + * after the fence has been signalled. Drivers are allowed to free that data, + * and some do. 
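As a hedged usage example for the name helpers documented here and defined just below (log_fence() is a hypothetical caller, not part of the patch): both strings must be obtained and consumed within a single RCU read-side section.

	#include <linux/dma-fence.h>
	#include <linux/rcupdate.h>

	/* Hedged sketch: the names are only valid inside one RCU read section. */
	static void log_fence(struct dma_fence *fence)
	{
		const char __rcu *driver;
		const char __rcu *timeline;

		rcu_read_lock();
		driver = dma_fence_driver_name(fence);
		timeline = dma_fence_timeline_name(fence);
		pr_info("fence %s:%s seq %llu\n",
			rcu_dereference(driver), rcu_dereference(timeline),
			fence->seqno);
		rcu_read_unlock();
	}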
+ * + * To allow safe access drivers are mandated to guarantee a RCU grace period + * between signalling the fence and freeing said data. + * + * As such access to the driver name is only valid inside a RCU locked section. + * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded + * by the &rcu_read_lock and &rcu_read_unlock pair. + */ +const char __rcu *dma_fence_driver_name(struct dma_fence *fence) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "RCU protection is required for safe access to returned string"); + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return fence->ops->get_driver_name(fence); + else + return "detached-driver"; } -EXPORT_SYMBOL(dma_fence_init); +EXPORT_SYMBOL(dma_fence_driver_name); + +/** + * dma_fence_timeline_name - Access the timeline name + * @fence: the fence to query + * + * Returns a timeline name provided by the dma-fence implementation. + * + * IMPORTANT CONSIDERATION: + * Dma-fence contract stipulates that access to driver provided data (data not + * directly embedded into the object itself), such as the &dma_fence.lock and + * memory potentially accessed by the &dma_fence.ops functions, is forbidden + * after the fence has been signalled. Drivers are allowed to free that data, + * and some do. + * + * To allow safe access drivers are mandated to guarantee a RCU grace period + * between signalling the fence and freeing said data. + * + * As such access to the driver name is only valid inside a RCU locked section. + * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded + * by the &rcu_read_lock and &rcu_read_unlock pair. + */ +const char __rcu *dma_fence_timeline_name(struct dma_fence *fence) +{ + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "RCU protection is required for safe access to returned string"); + + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return fence->ops->get_driver_name(fence); + else + return "signaled-timeline"; +} +EXPORT_SYMBOL(dma_fence_timeline_name); diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 5f8d010516f0..bea3e9858aca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -320,8 +320,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, count++; dma_resv_list_set(fobj, i, fence, usage); - /* pointer update must be visible before we extend the num_fences */ - smp_store_mb(fobj->num_fences, count); + /* fence update must be visible before we extend the num_fences */ + smp_wmb(); + fobj->num_fences = count; } EXPORT_SYMBOL(dma_resv_add_fence); @@ -684,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { - ret = dma_fence_wait_timeout(fence, intr, ret); - if (ret <= 0) { - dma_resv_iter_end(&cursor); - return ret; - } + ret = dma_fence_wait_timeout(fence, intr, timeout); + if (ret <= 0) + break; + + /* Even for zero timeout the return value is 1 */ + if (timeout) + timeout = ret; } dma_resv_iter_end(&cursor); diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig index a5eef06c4226..bb369b38b001 100644 --- a/drivers/dma-buf/heaps/Kconfig +++ b/drivers/dma-buf/heaps/Kconfig @@ -12,3 +12,13 @@ config DMABUF_HEAPS_CMA Choose this option to enable dma-buf CMA heap. This heap is backed by the Contiguous Memory Allocator (CMA). If your system has these regions, you should say Y here. 
+ +config DMABUF_HEAPS_CMA_LEGACY + bool "Legacy DMA-BUF CMA Heap" + default y + depends on DMABUF_HEAPS_CMA + help + Add a duplicate CMA-backed dma-buf heap with legacy naming derived + from the CMA area's devicetree node, or "reserved" if the area is not + defined in the devicetree. This uses the same underlying allocator as + CONFIG_DMABUF_HEAPS_CMA. diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index 9512d050563a..0df007111975 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -9,6 +9,9 @@ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ * Andrew F. Davis <afd@ti.com> */ + +#define pr_fmt(fmt) "cma_heap: " fmt + #include <linux/cma.h> #include <linux/dma-buf.h> #include <linux/dma-heap.h> @@ -22,6 +25,7 @@ #include <linux/slab.h> #include <linux/vmalloc.h> +#define DEFAULT_CMA_NAME "default_cma_region" struct cma_heap { struct dma_heap *heap; @@ -366,17 +370,17 @@ static const struct dma_heap_ops cma_heap_ops = { .allocate = cma_heap_allocate, }; -static int __init __add_cma_heap(struct cma *cma, void *data) +static int __init __add_cma_heap(struct cma *cma, const char *name) { - struct cma_heap *cma_heap; struct dma_heap_export_info exp_info; + struct cma_heap *cma_heap; cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); if (!cma_heap) return -ENOMEM; cma_heap->cma = cma; - exp_info.name = cma_get_name(cma); + exp_info.name = name; exp_info.ops = &cma_heap_ops; exp_info.priv = cma_heap; @@ -394,12 +398,30 @@ static int __init __add_cma_heap(struct cma *cma, void *data) static int __init add_default_cma_heap(void) { struct cma *default_cma = dev_get_cma_area(NULL); - int ret = 0; + const char *legacy_cma_name; + int ret; - if (default_cma) - ret = __add_cma_heap(default_cma, NULL); + if (!default_cma) + return 0; - return ret; + ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); + if (ret) + return ret; + + if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) { + legacy_cma_name = cma_get_name(default_cma); + if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) { + pr_warn("legacy name and default name are the same, skipping legacy heap\n"); + return 0; + } + + ret = __add_cma_heap(default_cma, legacy_cma_name); + if (ret) + pr_warn("failed to add legacy heap: %pe\n", + ERR_PTR(ret)); + } + + return 0; } module_init(add_default_cma_heap); MODULE_DESCRIPTION("DMA-BUF CMA Heap"); diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index 26d5dc89ea16..bbe7881f1360 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -21,8 +21,6 @@ #include <linux/slab.h> #include <linux/vmalloc.h> -static struct dma_heap *sys_heap; - struct system_heap_buffer { struct dma_heap *heap; struct list_head attachments; @@ -35,7 +33,7 @@ struct system_heap_buffer { struct dma_heap_attachment { struct device *dev; - struct sg_table *table; + struct sg_table table; struct list_head list; bool mapped; }; @@ -54,29 +52,22 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP}; static const unsigned int orders[] = {8, 4, 0}; #define NUM_ORDERS ARRAY_SIZE(orders) -static struct sg_table *dup_sg_table(struct sg_table *table) +static int dup_sg_table(struct sg_table *from, struct sg_table *to) { - struct sg_table *new_table; - int ret, i; struct scatterlist *sg, *new_sg; + int ret, i; - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); - if (!new_table) - return ERR_PTR(-ENOMEM); - - ret = sg_alloc_table(new_table, 
table->orig_nents, GFP_KERNEL); - if (ret) { - kfree(new_table); - return ERR_PTR(-ENOMEM); - } + ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL); + if (ret) + return ret; - new_sg = new_table->sgl; - for_each_sgtable_sg(table, sg, i) { + new_sg = to->sgl; + for_each_sgtable_sg(from, sg, i) { sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); new_sg = sg_next(new_sg); } - return new_table; + return 0; } static int system_heap_attach(struct dma_buf *dmabuf, @@ -84,19 +75,18 @@ static int system_heap_attach(struct dma_buf *dmabuf, { struct system_heap_buffer *buffer = dmabuf->priv; struct dma_heap_attachment *a; - struct sg_table *table; + int ret; a = kzalloc(sizeof(*a), GFP_KERNEL); if (!a) return -ENOMEM; - table = dup_sg_table(&buffer->sg_table); - if (IS_ERR(table)) { + ret = dup_sg_table(&buffer->sg_table, &a->table); + if (ret) { kfree(a); - return -ENOMEM; + return ret; } - a->table = table; a->dev = attachment->dev; INIT_LIST_HEAD(&a->list); a->mapped = false; @@ -120,8 +110,7 @@ static void system_heap_detach(struct dma_buf *dmabuf, list_del(&a->list); mutex_unlock(&buffer->lock); - sg_free_table(a->table); - kfree(a->table); + sg_free_table(&a->table); kfree(a); } @@ -129,7 +118,7 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac enum dma_data_direction direction) { struct dma_heap_attachment *a = attachment->priv; - struct sg_table *table = a->table; + struct sg_table *table = &a->table; int ret; ret = dma_map_sgtable(attachment->dev, table, direction, 0); @@ -164,7 +153,7 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; - dma_sync_sgtable_for_cpu(a->dev, a->table, direction); + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); } mutex_unlock(&buffer->lock); @@ -185,7 +174,7 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, list_for_each_entry(a, &buffer->attachments, list) { if (!a->mapped) continue; - dma_sync_sgtable_for_device(a->dev, a->table, direction); + dma_sync_sgtable_for_device(a->dev, &a->table, direction); } mutex_unlock(&buffer->lock); @@ -424,6 +413,7 @@ static const struct dma_heap_ops system_heap_ops = { static int __init system_heap_create(void) { struct dma_heap_export_info exp_info; + struct dma_heap *sys_heap; exp_info.name = "system"; exp_info.ops = &system_heap_ops; diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c index f0cee984b6c7..a3be888ae2e8 100644 --- a/drivers/dma-buf/st-dma-fence-unwrap.c +++ b/drivers/dma-buf/st-dma-fence-unwrap.c @@ -28,7 +28,7 @@ static const struct dma_fence_ops mock_ops = { .get_timeline_name = mock_name, }; -static struct dma_fence *mock_fence(void) +static struct dma_fence *__mock_fence(u64 context, u64 seqno) { struct mock_fence *f; @@ -37,12 +37,16 @@ static struct dma_fence *mock_fence(void) return NULL; spin_lock_init(&f->lock); - dma_fence_init(&f->base, &mock_ops, &f->lock, - dma_fence_context_alloc(1), 1); + dma_fence_init(&f->base, &mock_ops, &f->lock, context, seqno); return &f->base; } +static struct dma_fence *mock_fence(void) +{ + return __mock_fence(dma_fence_context_alloc(1), 1); +} + static struct dma_fence *mock_array(unsigned int num_fences, ...) 
{ struct dma_fence_array *array; @@ -304,6 +308,177 @@ error_put_f1: return err; } +static int unwrap_merge_duplicate(void *arg) +{ + struct dma_fence *fence, *f1, *f2; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = dma_fence_unwrap_merge(f1, f1); + if (!f2) { + err = -ENOMEM; + goto error_put_f1; + } + + dma_fence_unwrap_for_each(fence, &iter, f2) { + if (fence == f1) { + dma_fence_put(f1); + f1 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(f2); +error_put_f1: + dma_fence_put(f1); + return err; +} + +static int unwrap_merge_seqno(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *f3, *f4; + struct dma_fence_unwrap iter; + int err = 0; + u64 ctx[2]; + + ctx[0] = dma_fence_context_alloc(1); + ctx[1] = dma_fence_context_alloc(1); + + f1 = __mock_fence(ctx[1], 1); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = __mock_fence(ctx[1], 2); + if (!f2) { + err = -ENOMEM; + goto error_put_f1; + } + + dma_fence_enable_sw_signaling(f2); + + f3 = __mock_fence(ctx[0], 1); + if (!f3) { + err = -ENOMEM; + goto error_put_f2; + } + + dma_fence_enable_sw_signaling(f3); + + f4 = dma_fence_unwrap_merge(f1, f2, f3); + if (!f4) { + err = -ENOMEM; + goto error_put_f3; + } + + dma_fence_unwrap_for_each(fence, &iter, f4) { + if (fence == f3 && f2) { + dma_fence_put(f3); + f3 = NULL; + } else if (fence == f2 && !f3) { + dma_fence_put(f2); + f2 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f2 || f3) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(f4); +error_put_f3: + dma_fence_put(f3); +error_put_f2: + dma_fence_put(f2); +error_put_f1: + dma_fence_put(f1); + return err; +} + +static int unwrap_merge_order(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *a1, *a2, *c1, *c2; + struct dma_fence_unwrap iter; + int err = 0; + + f1 = mock_fence(); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = mock_fence(); + if (!f2) { + dma_fence_put(f1); + return -ENOMEM; + } + + dma_fence_enable_sw_signaling(f2); + + a1 = mock_array(2, f1, f2); + if (!a1) + return -ENOMEM; + + c1 = mock_chain(NULL, dma_fence_get(f1)); + if (!c1) + goto error_put_a1; + + c2 = mock_chain(c1, dma_fence_get(f2)); + if (!c2) + goto error_put_a1; + + /* + * The fences in the chain are the same as in a1 but in oposite order, + * the dma_fence_merge() function should be able to handle that. 
+ */ + a2 = dma_fence_unwrap_merge(a1, c2); + + dma_fence_unwrap_for_each(fence, &iter, a2) { + if (fence == f1) { + f1 = NULL; + if (!f2) + pr_err("Unexpected order!\n"); + } else if (fence == f2) { + f2 = NULL; + if (f1) + pr_err("Unexpected order!\n"); + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f2) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(a2); + return err; + +error_put_a1: + dma_fence_put(a1); + return -ENOMEM; +} + static int unwrap_merge_complex(void *arg) { struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5; @@ -327,7 +502,7 @@ static int unwrap_merge_complex(void *arg) goto error_put_f2; /* The resulting array has the fences in reverse */ - f4 = dma_fence_unwrap_merge(f2, f1); + f4 = mock_array(2, dma_fence_get(f2), dma_fence_get(f1)); if (!f4) goto error_put_f3; @@ -367,6 +542,87 @@ error_put_f1: return err; } +static int unwrap_merge_complex_seqno(void *arg) +{ + struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5, *f6, *f7; + struct dma_fence_unwrap iter; + int err = -ENOMEM; + u64 ctx[2]; + + ctx[0] = dma_fence_context_alloc(1); + ctx[1] = dma_fence_context_alloc(1); + + f1 = __mock_fence(ctx[0], 2); + if (!f1) + return -ENOMEM; + + dma_fence_enable_sw_signaling(f1); + + f2 = __mock_fence(ctx[1], 1); + if (!f2) + goto error_put_f1; + + dma_fence_enable_sw_signaling(f2); + + f3 = __mock_fence(ctx[0], 1); + if (!f3) + goto error_put_f2; + + dma_fence_enable_sw_signaling(f3); + + f4 = __mock_fence(ctx[1], 2); + if (!f4) + goto error_put_f3; + + dma_fence_enable_sw_signaling(f4); + + f5 = mock_array(2, dma_fence_get(f1), dma_fence_get(f2)); + if (!f5) + goto error_put_f4; + + f6 = mock_array(2, dma_fence_get(f3), dma_fence_get(f4)); + if (!f6) + goto error_put_f5; + + f7 = dma_fence_unwrap_merge(f5, f6); + if (!f7) + goto error_put_f6; + + err = 0; + dma_fence_unwrap_for_each(fence, &iter, f7) { + if (fence == f1 && f4) { + dma_fence_put(f1); + f1 = NULL; + } else if (fence == f4 && !f1) { + dma_fence_put(f4); + f4 = NULL; + } else { + pr_err("Unexpected fence!\n"); + err = -EINVAL; + } + } + + if (f1 || f4) { + pr_err("Not all fences seen!\n"); + err = -EINVAL; + } + + dma_fence_put(f7); +error_put_f6: + dma_fence_put(f6); +error_put_f5: + dma_fence_put(f5); +error_put_f4: + dma_fence_put(f4); +error_put_f3: + dma_fence_put(f3); +error_put_f2: + dma_fence_put(f2); +error_put_f1: + dma_fence_put(f1); + return err; +} + int dma_fence_unwrap(void) { static const struct subtest tests[] = { @@ -375,7 +631,11 @@ int dma_fence_unwrap(void) SUBTEST(unwrap_chain), SUBTEST(unwrap_chain_array), SUBTEST(unwrap_merge), + SUBTEST(unwrap_merge_duplicate), + SUBTEST(unwrap_merge_seqno), + SUBTEST(unwrap_merge_order), SUBTEST(unwrap_merge_complex), + SUBTEST(unwrap_merge_complex_seqno), }; return subtests(tests, NULL); diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c index cf2ce3744ce6..27a36045410b 100644 --- a/drivers/dma-buf/st-dma-fence.c +++ b/drivers/dma-buf/st-dma-fence.c @@ -375,7 +375,7 @@ struct wait_timer { static void wait_timer(struct timer_list *timer) { - struct wait_timer *wt = from_timer(wt, timer, timer); + struct wait_timer *wt = timer_container_of(wt, timer, timer); dma_fence_signal(wt->f); } @@ -412,8 +412,8 @@ static int test_wait_timeout(void *arg) err = 0; err_free: - del_timer_sync(&wt.timer); - destroy_timer_on_stack(&wt.timer); + timer_delete_sync(&wt.timer); + timer_destroy_on_stack(&wt.timer); dma_fence_signal(wt.f); dma_fence_put(wt.f); return err; diff --git 
a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index f5905d67dedb..3c20f1d31cf5 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -170,21 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence) { struct sync_timeline *parent = dma_fence_parent(fence); - return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops); -} - -static void timeline_fence_value_str(struct dma_fence *fence, - char *str, int size) -{ - snprintf(str, size, "%lld", fence->seqno); -} - -static void timeline_fence_timeline_value_str(struct dma_fence *fence, - char *str, int size) -{ - struct sync_timeline *parent = dma_fence_parent(fence); - - snprintf(str, size, "%d", parent->value); + return !__dma_fence_is_later(fence, fence->seqno, parent->value); } static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) @@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = { .get_timeline_name = timeline_fence_get_timeline_name, .signaled = timeline_fence_signaled, .release = timeline_fence_release, - .fence_value_str = timeline_fence_value_str, - .timeline_value_str = timeline_fence_timeline_value_str, .set_deadline = timeline_fence_set_deadline, }; @@ -438,15 +422,17 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a return -EINVAL; pt = dma_fence_to_sync_pt(fence); - if (!pt) - return -EINVAL; + if (!pt) { + ret = -EINVAL; + goto put_fence; + } spin_lock_irqsave(fence->lock, flags); - if (test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { - data.deadline_ns = ktime_to_ns(pt->deadline); - } else { + if (!test_bit(SW_SYNC_HAS_DEADLINE_BIT, &fence->flags)) { ret = -ENOENT; + goto unlock; } + data.deadline_ns = ktime_to_ns(pt->deadline); spin_unlock_irqrestore(fence->lock, flags); dma_fence_put(fence); @@ -458,6 +444,13 @@ static int sw_sync_ioctl_get_deadline(struct sync_timeline *obj, unsigned long a return -EFAULT; return 0; + +unlock: + spin_unlock_irqrestore(fence->lock, flags); +put_fence: + dma_fence_put(fence); + + return ret; } static long sw_sync_ioctl(struct file *file, unsigned int cmd, diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 237bce21d1e7..67cd69551e42 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -12,8 +12,6 @@ static struct dentry *dbgfs; static LIST_HEAD(sync_timeline_list_head); static DEFINE_SPINLOCK(sync_timeline_list_lock); -static LIST_HEAD(sync_file_list_head); -static DEFINE_SPINLOCK(sync_file_list_lock); void sync_timeline_debug_add(struct sync_timeline *obj) { @@ -33,24 +31,6 @@ void sync_timeline_debug_remove(struct sync_timeline *obj) spin_unlock_irqrestore(&sync_timeline_list_lock, flags); } -void sync_file_debug_add(struct sync_file *sync_file) -{ - unsigned long flags; - - spin_lock_irqsave(&sync_file_list_lock, flags); - list_add_tail(&sync_file->sync_file_list, &sync_file_list_head); - spin_unlock_irqrestore(&sync_file_list_lock, flags); -} - -void sync_file_debug_remove(struct sync_file *sync_file) -{ - unsigned long flags; - - spin_lock_irqsave(&sync_file_list_lock, flags); - list_del(&sync_file->sync_file_list); - spin_unlock_irqrestore(&sync_file_list_lock, flags); -} - static const char *sync_status_str(int status) { if (status < 0) @@ -82,25 +62,8 @@ static void sync_print_fence(struct seq_file *s, seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); } - if (fence->ops->timeline_value_str && - fence->ops->fence_value_str) { - char value[64]; - bool success; - - 
fence->ops->fence_value_str(fence, value, sizeof(value)); - success = strlen(value); - - if (success) { - seq_printf(s, ": %s", value); - - fence->ops->timeline_value_str(fence, value, - sizeof(value)); - - if (strlen(value)) - seq_printf(s, " / %s", value); - } - } - + seq_printf(s, ": %lld", fence->seqno); + seq_printf(s, " / %d", parent->value); seq_putc(s, '\n'); } @@ -118,26 +81,6 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) spin_unlock(&obj->lock); } -static void sync_print_sync_file(struct seq_file *s, - struct sync_file *sync_file) -{ - char buf[128]; - int i; - - seq_printf(s, "[%p] %s: %s\n", sync_file, - sync_file_get_name(sync_file, buf, sizeof(buf)), - sync_status_str(dma_fence_get_status(sync_file->fence))); - - if (dma_fence_is_array(sync_file->fence)) { - struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); - - for (i = 0; i < array->num_fences; ++i) - sync_print_fence(s, array->fences[i], true); - } else { - sync_print_fence(s, sync_file->fence, true); - } -} - static int sync_info_debugfs_show(struct seq_file *s, void *unused) { struct list_head *pos; @@ -157,15 +100,6 @@ static int sync_info_debugfs_show(struct seq_file *s, void *unused) seq_puts(s, "fences:\n--------------\n"); - spin_lock_irq(&sync_file_list_lock); - list_for_each(pos, &sync_file_list_head) { - struct sync_file *sync_file = - container_of(pos, struct sync_file, sync_file_list); - - sync_print_sync_file(s, sync_file); - seq_putc(s, '\n'); - } - spin_unlock_irq(&sync_file_list_lock); return 0; } diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h index a1bdd62efccd..02af347293d0 100644 --- a/drivers/dma-buf/sync_debug.h +++ b/drivers/dma-buf/sync_debug.h @@ -68,7 +68,5 @@ extern const struct file_operations sw_sync_debugfs_fops; void sync_timeline_debug_add(struct sync_timeline *obj); void sync_timeline_debug_remove(struct sync_timeline *obj); -void sync_file_debug_add(struct sync_file *fence); -void sync_file_debug_remove(struct sync_file *fence); #endif /* _LINUX_SYNC_H */ diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index d9b1c1b2a72b..747e377fb954 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) strscpy(buf, sync_file->user_name, len); } else { struct dma_fence *fence = sync_file->fence; + const char __rcu *timeline; + const char __rcu *driver; + rcu_read_lock(); + driver = dma_fence_driver_name(fence); + timeline = dma_fence_timeline_name(fence); snprintf(buf, len, "%s-%s%llu-%lld", - fence->ops->get_driver_name(fence), - fence->ops->get_timeline_name(fence), + rcu_dereference(driver), + rcu_dereference(timeline), fence->context, fence->seqno); + rcu_read_unlock(); } return buf; @@ -262,9 +268,17 @@ err_put_fd: static int sync_fill_fence_info(struct dma_fence *fence, struct sync_fence_info *info) { - strscpy(info->obj_name, fence->ops->get_timeline_name(fence), + const char __rcu *timeline; + const char __rcu *driver; + + rcu_read_lock(); + + driver = dma_fence_driver_name(fence); + timeline = dma_fence_timeline_name(fence); + + strscpy(info->obj_name, rcu_dereference(timeline), sizeof(info->obj_name)); - strscpy(info->driver_name, fence->ops->get_driver_name(fence), + strscpy(info->driver_name, rcu_dereference(driver), sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); @@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence, 
ktime_to_ns(dma_fence_timestamp(fence)) : ktime_set(0, 0); + rcu_read_unlock(); + return info->status; } diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 8ce1f074c2d3..40399c26e6be 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma) static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map) { struct udmabuf *ubuf = buf->priv; - unsigned long *pfns; + struct page **pages; void *vaddr; pgoff_t pg; dma_resv_assert_held(buf->resv); - /** - * HVO may free tail pages, so just use pfn to map each folio - * into vmalloc area. - */ - pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL); - if (!pfns) + pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL); + if (!pages) return -ENOMEM; - for (pg = 0; pg < ubuf->pagecount; pg++) { - unsigned long pfn = folio_pfn(ubuf->folios[pg]); - - pfn += ubuf->offsets[pg] >> PAGE_SHIFT; - pfns[pg] = pfn; - } + for (pg = 0; pg < ubuf->pagecount; pg++) + pages[pg] = folio_page(ubuf->folios[pg], + ubuf->offsets[pg] >> PAGE_SHIFT); - vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL); - kvfree(pfns); + vaddr = vm_map_ram(pages, ubuf->pagecount, -1); + kvfree(pages); if (!vaddr) return -EINVAL; @@ -264,8 +257,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf, ubuf->sg = NULL; } } else { - dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents, - direction); + dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction); } return ret; @@ -280,12 +272,11 @@ static int end_cpu_udmabuf(struct dma_buf *buf, if (!ubuf->sg) return -EINVAL; - dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction); + dma_sync_sgtable_for_device(dev, ubuf->sg, direction); return 0; } static const struct dma_buf_ops udmabuf_ops = { - .cache_sgt_mapping = true, .map_dma_buf = map_udmabuf, .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, @@ -297,7 +288,7 @@ static const struct dma_buf_ops udmabuf_ops = { }; #define SEALS_WANTED (F_SEAL_SHRINK) -#define SEALS_DENIED (F_SEAL_WRITE) +#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE) static int check_memfd_seals(struct file *memfd) { @@ -317,12 +308,10 @@ static int check_memfd_seals(struct file *memfd) return 0; } -static int export_udmabuf(struct udmabuf *ubuf, - struct miscdevice *device, - u32 flags) +static struct dma_buf *export_udmabuf(struct udmabuf *ubuf, + struct miscdevice *device) { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); - struct dma_buf *buf; ubuf->device = device; exp_info.ops = &udmabuf_ops; @@ -330,11 +319,7 @@ static int export_udmabuf(struct udmabuf *ubuf, exp_info.priv = ubuf; exp_info.flags = O_RDWR; - buf = dma_buf_export(&exp_info); - if (IS_ERR(buf)) - return PTR_ERR(buf); - - return dma_buf_fd(buf, flags); + return dma_buf_export(&exp_info); } static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd, @@ -391,6 +376,7 @@ static long udmabuf_create(struct miscdevice *device, struct folio **folios = NULL; pgoff_t pgcnt = 0, pglimit; struct udmabuf *ubuf; + struct dma_buf *dmabuf; long ret = -EINVAL; u32 i, flags; @@ -398,7 +384,7 @@ static long udmabuf_create(struct miscdevice *device, if (!ubuf) return -ENOMEM; - pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { pgoff_t subpgcnt; @@ -436,23 +422,39 @@ static long udmabuf_create(struct miscdevice *device, goto err; } + /* + * Take the inode lock to protect against 
concurrent + * memfd_add_seals(), which takes this lock in write mode. + */ + inode_lock_shared(file_inode(memfd)); ret = check_memfd_seals(memfd); - if (ret < 0) { - fput(memfd); - goto err; - } + if (ret) + goto out_unlock; ret = udmabuf_pin_folios(ubuf, memfd, list[i].offset, list[i].size, folios); +out_unlock: + inode_unlock_shared(file_inode(memfd)); fput(memfd); if (ret) goto err; } flags = head->flags & UDMABUF_FLAGS_CLOEXEC ? O_CLOEXEC : 0; - ret = export_udmabuf(ubuf, device, flags); - if (ret < 0) + dmabuf = export_udmabuf(ubuf, device); + if (IS_ERR(dmabuf)) { + ret = PTR_ERR(dmabuf); goto err; + } + /* + * Ownership of ubuf is held by the dmabuf from here. + * If the following dma_buf_fd() fails, dma_buf_put() cleans up both the + * dmabuf and the ubuf (through udmabuf_ops.release). + */ + + ret = dma_buf_fd(dmabuf, flags); + if (ret < 0) + dma_buf_put(dmabuf); kvfree(folios); return ret; |
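Finally, a hedged sketch of the exporter-side pattern the udmabuf change above adopts (export_and_install() is a made-up helper; only dma_buf_export(), dma_buf_fd() and dma_buf_put() are existing API): once dma_buf_export() succeeds, the private data belongs to the dmabuf, so a failing dma_buf_fd() is unwound with dma_buf_put() instead of freeing the private data by hand.

	#include <linux/dma-buf.h>
	#include <linux/err.h>
	#include <linux/fcntl.h>

	/* Hedged sketch: 'exp_info' was filled in by the exporter as usual. */
	static int export_and_install(const struct dma_buf_export_info *exp_info)
	{
		struct dma_buf *dmabuf;
		int fd;

		dmabuf = dma_buf_export(exp_info);
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);	/* exp_info->priv still owned by the caller */

		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
		if (fd < 0)
			dma_buf_put(dmabuf);	/* tears down priv via dma_buf_ops.release */

		return fd;
	}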