author    Maxime Ripard <maxime@cerno.tech>    2023-01-03 10:32:12 +0300
committer Maxime Ripard <maxime@cerno.tech>    2023-01-03 10:32:12 +0300
commit    2c55d703391acf7e9101da596d0c15ee03b318a3 (patch)
tree      422ef671343624b56d333b47d20a538eeedbfe10 /drivers/dma-buf
parent    92d43bd3bc9728c1fb114d7011d46f5ea9489e28 (diff)
parent    88603b6dc419445847923fcb7fe5080067a30f98 (diff)
Merge drm/drm-fixes into drm-misc-fixes
Let's start the fixes cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/dma-buf-sysfs-stats.c  |   2
-rw-r--r--  drivers/dma-buf/dma-buf.c              | 225
-rw-r--r--  drivers/dma-buf/dma-heap.c             |   2
-rw-r--r--  drivers/dma-buf/dma-resv.c             |   9
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c       |   3
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c    |   3
-rw-r--r--  drivers/dma-buf/st-dma-fence-chain.c   |   6
-rw-r--r--  drivers/dma-buf/udmabuf.c              |   3
8 files changed, 213 insertions(+), 40 deletions(-)
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index 4b680e10c15a..fbf725fae7c1 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -132,7 +132,7 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
/* Statistics files do not need to send uevents. */
-static int dmabuf_sysfs_uevent_filter(struct kobject *kobj)
+static int dmabuf_sysfs_uevent_filter(const struct kobject *kobj)
{
return 0;
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index eb6b59363c4f..e6528767efc7 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -131,6 +131,7 @@ static struct file_system_type dma_buf_fs_type = {
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
+ int ret;
if (!is_dma_buf_file(file))
return -EINVAL;
@@ -146,7 +147,11 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
- return dmabuf->ops->mmap(dmabuf, vma);
+ dma_resv_lock(dmabuf->resv, NULL);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ dma_resv_unlock(dmabuf->resv);
+
+ return ret;
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
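
Note for exporters: with this hunk, &dma_buf_ops.mmap is now invoked with the reservation lock already held, so the callback may assert the lock but must not take it itself (that would deadlock). A minimal sketch of a conforming callback follows; struct example_buffer and its pfn field are purely hypothetical, while the VM_SHARED check mirrors the heap exporters touched later in this patch:

	/* Hypothetical exporter mmap callback under the new rule: the
	 * core takes dmabuf->resv before calling in, so only assert it.
	 */
	static int example_exporter_mmap(struct dma_buf *dmabuf,
					 struct vm_area_struct *vma)
	{
		struct example_buffer *buf = dmabuf->priv;	/* made up */

		dma_resv_assert_held(dmabuf->resv);

		if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
			return -EINVAL;

		return remap_pfn_range(vma, vma->vm_start, buf->pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}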
@@ -655,7 +660,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
- mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
if (!resv) {
@@ -801,6 +805,70 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
}
/**
+ * DOC: locking convention
+ *
+ * In order to avoid deadlock situations between dma-buf exports and importers,
+ * all dma-buf API users must follow the common dma-buf locking convention.
+ *
+ * Convention for importers
+ *
+ * 1. Importers must hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_pin()
+ * - dma_buf_unpin()
+ * - dma_buf_map_attachment()
+ * - dma_buf_unmap_attachment()
+ * - dma_buf_vmap()
+ * - dma_buf_vunmap()
+ *
+ * 2. Importers must not hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_attach()
+ * - dma_buf_dynamic_attach()
+ * - dma_buf_detach()
+ * - dma_buf_export()
+ * - dma_buf_fd()
+ * - dma_buf_get()
+ * - dma_buf_put()
+ * - dma_buf_mmap()
+ * - dma_buf_begin_cpu_access()
+ * - dma_buf_end_cpu_access()
+ * - dma_buf_map_attachment_unlocked()
+ * - dma_buf_unmap_attachment_unlocked()
+ * - dma_buf_vmap_unlocked()
+ * - dma_buf_vunmap_unlocked()
+ *
+ * Convention for exporters
+ *
+ * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
+ * reservation and the exporter can take the lock:
+ *
+ * - &dma_buf_ops.attach()
+ * - &dma_buf_ops.detach()
+ * - &dma_buf_ops.release()
+ * - &dma_buf_ops.begin_cpu_access()
+ * - &dma_buf_ops.end_cpu_access()
+ *
+ * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
+ * reservation and the exporter can't take the lock:
+ *
+ * - &dma_buf_ops.pin()
+ * - &dma_buf_ops.unpin()
+ * - &dma_buf_ops.map_dma_buf()
+ * - &dma_buf_ops.unmap_dma_buf()
+ * - &dma_buf_ops.mmap()
+ * - &dma_buf_ops.vmap()
+ * - &dma_buf_ops.vunmap()
+ *
+ * 3. Exporters must hold the dma-buf reservation lock when calling these
+ * functions:
+ *
+ * - dma_buf_move_notify()
+ */
+
+/**
* dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
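
To make the importer side of the convention documented above concrete, here is a minimal sketch. example_import() is hypothetical and its error handling is trimmed; a dynamic importer would use dma_buf_dynamic_attach() with importer ops instead. The point is the split: attach/detach run unlocked, map/unmap run under the reservation lock the importer takes itself.

	/* Hypothetical static importer following the locking convention. */
	static int example_import(struct device *dev, struct dma_buf *dmabuf)
	{
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;
		int ret = 0;

		attach = dma_buf_attach(dmabuf, dev);	/* called unlocked */
		if (IS_ERR(attach))
			return PTR_ERR(attach);

		dma_resv_lock(dmabuf->resv, NULL);	/* importer takes the lock */
		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto out_unlock;
		}

		/* ... program the device using sgt ... */

		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	out_unlock:
		dma_resv_unlock(dmabuf->resv);
		dma_buf_detach(dmabuf, attach);		/* called unlocked */
		return ret;
	}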
@@ -864,8 +932,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
+ dma_resv_lock(attach->dmabuf->resv, NULL);
if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_resv_lock(attach->dmabuf->resv, NULL);
ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
@@ -878,8 +946,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
ret = PTR_ERR(sgt);
goto err_unpin;
}
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_unlock(attach->dmabuf->resv);
+ dma_resv_unlock(attach->dmabuf->resv);
attach->sgt = sgt;
attach->dir = DMA_BIDIRECTIONAL;
}
@@ -895,8 +962,7 @@ err_unpin:
dmabuf->ops->unpin(attach);
err_unlock:
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_unlock(attach->dmabuf->resv);
+ dma_resv_unlock(attach->dmabuf->resv);
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
@@ -939,24 +1005,22 @@ static void __unmap_dma_buf(struct dma_buf_attachment *attach,
*/
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
- if (WARN_ON(!dmabuf || !attach))
+ if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
return;
+ dma_resv_lock(dmabuf->resv, NULL);
+
if (attach->sgt) {
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_lock(attach->dmabuf->resv, NULL);
__unmap_dma_buf(attach, attach->sgt, attach->dir);
- if (dma_buf_is_dynamic(attach->dmabuf)) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
dmabuf->ops->unpin(attach);
- dma_resv_unlock(attach->dmabuf->resv);
- }
}
-
- dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
+
dma_resv_unlock(dmabuf->resv);
+
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
@@ -1047,8 +1111,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
- if (dma_buf_attachment_is_dynamic(attach))
- dma_resv_assert_held(attach->dmabuf->resv);
+ dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt) {
/*
@@ -1063,7 +1126,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
r = attach->dmabuf->ops->pin(attach);
if (r)
@@ -1106,6 +1168,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
/**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment,
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf))
+ return ERR_PTR(-EINVAL);
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ sg_table = dma_buf_map_attachment(attach, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+
+/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
* deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
* dma_buf_ops.
@@ -1124,15 +1214,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
- if (dma_buf_attachment_is_dynamic(attach))
- dma_resv_assert_held(attach->dmabuf->resv);
+ dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt == sg_table)
return;
- if (dma_buf_is_dynamic(attach->dmabuf))
- dma_resv_assert_held(attach->dmabuf->resv);
-
__unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
@@ -1142,6 +1228,31 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
/**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
+ * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+ return;
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, sg_table, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+
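
For callers that do not already hold the reservation lock, these new wrappers take and drop it internally, as their bodies above show. A hedged sketch of a one-shot transfer using the pair (example_dma_once() is illustrative only):

	/* Caller must NOT hold dmabuf->resv; the wrappers lock it. */
	static int example_dma_once(struct dma_buf_attachment *attach)
	{
		struct sg_table *sgt;

		sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
		if (IS_ERR(sgt))
			return PTR_ERR(sgt);

		/* ... program the device with sgt and run the transfer ... */

		dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
		return 0;
	}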
+/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
*
* @dmabuf: [in] buffer which is moving
@@ -1352,6 +1463,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
+ int ret;
+
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1372,7 +1485,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
- return dmabuf->ops->mmap(dmabuf, vma);
+ dma_resv_lock(dmabuf->resv, NULL);
+ ret = dmabuf->ops->mmap(dmabuf, vma);
+ dma_resv_unlock(dmabuf->resv);
+
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
@@ -1395,42 +1512,68 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct iosys_map ptr;
- int ret = 0;
+ int ret;
iosys_map_clear(map);
if (WARN_ON(!dmabuf))
return -EINVAL;
+ dma_resv_assert_held(dmabuf->resv);
+
if (!dmabuf->ops->vmap)
return -EINVAL;
- mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
*map = dmabuf->vmap_ptr;
- goto out_unlock;
+ return 0;
}
BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
ret = dmabuf->ops->vmap(dmabuf, &ptr);
if (WARN_ON_ONCE(ret))
- goto out_unlock;
+ return ret;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
*map = dmabuf->vmap_ptr;
-out_unlock:
- mutex_unlock(&dmabuf->lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
/**
+ * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
+ *
+ * Unlocked version of dma_buf_vmap()
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ int ret;
+
+ iosys_map_clear(map);
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ ret = dma_buf_vmap(dmabuf, map);
+ dma_resv_unlock(dmabuf->resv);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
+
+/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
* @map: [in] vmap pointer to vunmap
@@ -1440,20 +1583,36 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
if (WARN_ON(!dmabuf))
return;
+ dma_resv_assert_held(dmabuf->resv);
+
BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
- mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
dmabuf->ops->vunmap(dmabuf, map);
iosys_map_clear(&dmabuf->vmap_ptr);
}
- mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+/**
+ * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ * @map: [in] vmap pointer to vunmap
+ */
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_vunmap(dmabuf, map);
+ dma_resv_unlock(dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
+
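
A matching sketch for CPU access through the new vmap wrappers; example_clear() is hypothetical, and iosys_map_memset() is used because the iosys_map may point at either system or I/O memory:

	/* Hypothetical CPU clear of a whole buffer, caller holds no locks. */
	static int example_clear(struct dma_buf *dmabuf)
	{
		struct iosys_map map;
		int ret;

		ret = dma_buf_vmap_unlocked(dmabuf, &map);
		if (ret)
			return ret;

		iosys_map_memset(&map, 0, 0, dmabuf->size);

		dma_buf_vunmap_unlocked(dmabuf, &map);
		return 0;
	}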
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 59d158873f4c..c9e41e8a9e27 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -301,7 +301,7 @@ err0:
return err_ret;
}
-static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
}
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index e3885c90a3ac..1c76aed8e262 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -98,12 +98,17 @@ static void dma_resv_list_set(struct dma_resv_list *list,
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
struct dma_resv_list *list;
+ size_t size;
- list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
+ /* Round up to the next kmalloc bucket size. */
+ size = kmalloc_size_roundup(struct_size(list, table, max_fences));
+
+ list = kmalloc(size, GFP_KERNEL);
if (!list)
return NULL;
- list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
+ /* Given the resulting bucket size, recalculate max_fences. */
+ list->max_fences = (size - offsetof(typeof(*list), table)) /
sizeof(*list->table);
return list;
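
The idea behind kmalloc_size_roundup() here: kmalloc() rounds every request up to a bucket size anyway, so querying the bucket first makes the slack usable as extra fence slots deterministically, instead of probing it afterwards with ksize(). An illustrative sketch with a generic flexible-array struct (bucket sizes are config dependent; the numbers in the comments are typical, not guaranteed):

	struct example_list {
		unsigned int max;
		u64 items[];	/* flexible array, like dma_resv_list's table */
	};

	static struct example_list *example_alloc(void)
	{
		struct example_list *l;
		size_t size;

		/* 8 + 5 * 8 = 48 bytes requested ... */
		size = kmalloc_size_roundup(struct_size(l, items, 5));
		/* ... but a typical config rounds this up to the 64-byte bucket */
		l = kmalloc(size, GFP_KERNEL);
		if (!l)
			return NULL;

		/* 7 usable slots instead of the 5 asked for */
		l->max = (size - offsetof(struct example_list, items)) /
			 sizeof(l->items[0]);
		return l;
	}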
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 28fb04eccdd0..1131fb943992 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -13,6 +13,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
+#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
@@ -182,6 +183,8 @@ static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct cma_heap_buffer *buffer = dmabuf->priv;
+ dma_resv_assert_held(dmabuf->resv);
+
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index fcf836ba9c1f..e8bd10e60998 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -13,6 +13,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
+#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
@@ -201,6 +202,8 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct sg_page_iter piter;
int ret;
+ dma_resv_assert_held(dmabuf->resv);
+
for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
struct page *page = sg_page_iter_page(&piter);
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
index 0a9b099d0518..c0979c8049b5 100644
--- a/drivers/dma-buf/st-dma-fence-chain.c
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -400,7 +400,7 @@ static int __find_race(void *arg)
struct dma_fence *fence = dma_fence_get(data->fc.tail);
int seqno;
- seqno = prandom_u32_max(data->fc.chain_length) + 1;
+ seqno = get_random_u32_inclusive(1, data->fc.chain_length);
err = dma_fence_chain_find_seqno(&fence, seqno);
if (err) {
@@ -429,7 +429,7 @@ static int __find_race(void *arg)
dma_fence_put(fence);
signal:
- seqno = prandom_u32_max(data->fc.chain_length - 1);
+ seqno = get_random_u32_below(data->fc.chain_length - 1);
dma_fence_signal(data->fc.fences[seqno]);
cond_resched();
}
@@ -637,7 +637,7 @@ static void randomise_fences(struct fence_chains *fc)
while (--count) {
unsigned int swp;
- swp = prandom_u32_max(count + 1);
+ swp = get_random_u32_below(count + 1);
if (swp == count)
continue;
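
These conversions preserve the sampled ranges: prandom_u32_max(n) drew from [0, n), which is exactly get_random_u32_below(n), and the old prandom_u32_max(n) + 1 idiom becomes get_random_u32_inclusive(1, n). Sketch of the mapping as used in __find_race() above:

	/*
	 *   prandom_u32_max(n)      drew from [0, n)  ->  get_random_u32_below(n)
	 *   prandom_u32_max(n) + 1  drew from [1, n]  ->  get_random_u32_inclusive(1, n)
	 *
	 * so the 1-based seqno pick becomes:
	 */
	seqno = get_random_u32_inclusive(1, data->fc.chain_length);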
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 2bcdb935a3ac..283816fbd72f 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -2,6 +2,7 @@
#include <linux/cred.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -49,6 +50,8 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
struct udmabuf *ubuf = buf->priv;
+ dma_resv_assert_held(buf->resv);
+
if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
return -EINVAL;