author     Linus Torvalds <torvalds@linux-foundation.org>  2020-12-18 23:38:28 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-18 23:38:28 +0300
commit     c59c7588fc922e27c378a7e2a920b889bd6bf872 (patch)
tree       b96b91dfeb778e315a9fdded7438e494a9bed1c2 /drivers/dma-buf
parent     432c19a8d965aa0123e0a81492492a7cb1da5257 (diff)
parent     4efd7faba5e0687ae9143b7e6a19547ede20a7dd (diff)
download   linux-c59c7588fc922e27c378a7e2a920b889bd6bf872.tar.xz
Merge tag 'drm-next-2020-12-18' of git://anongit.freedesktop.org/drm/drm
Pull more drm updates from Daniel Vetter:
"UAPI Changes:
- Only enable char/agp uapi when CONFIG_DRM_LEGACY is set
Cross-subsystem Changes:
- vma_set_file helper to make vma->vm_file changing less brittle,
  acked by Andrew (a short sketch of the helper follows the commit
  list below)
Core Changes:
- dma-buf heaps improvements (a userspace allocation sketch follows
  the diffstat below)
- pass full atomic modeset state to driver callbacks
- shmem helpers: cached bo by default
- cleanups for fbdev, fb-helpers
- better docs for drm modes and SCALING_FILTER uapi
- ttm: fix dma32 page pool regression
Driver Changes:
- multi-hop regression fixes for amdgpu, radeon, nouveau
- lots of small amdgpu hw enabling fixes (display, pm, ...)
- fixes for imx, mcde, meson, some panels, virtio, qxl, i915, all
fairly minor
- some cleanups for legacy drm/fbdev drivers"
* tag 'drm-next-2020-12-18' of git://anongit.freedesktop.org/drm/drm: (117 commits)
drm/qxl: don't allocate a dma_address array
drm/nouveau: fix multihop when move doesn't work.
drm/i915/tgl: Fix REVID macros for TGL to fetch correct stepping
drm/i915: Fix mismatch between misplaced vma check and vma insert
drm/i915/perf: also include Gen11 in OATAILPTR workaround
Revert "drm/i915: re-order if/else ladder for hpd_irq_setup"
drm/amdgpu/disply: fix documentation warnings in display manager
drm/amdgpu: print mmhub client name for dimgrey_cavefish
drm/amdgpu: set mode1 reset as default for dimgrey_cavefish
drm/amd/display: Add get_dig_frontend implementation for DCEx
drm/radeon: remove h from printk format specifier
drm/amdgpu: remove h from printk format specifier
drm/amdgpu: Fix spelling mistake "Heterogenous" -> "Heterogeneous"
drm/amdgpu: fix regression in vbios reservation handling on headless
drm/amdgpu/SRIOV: Extend VF reset request wait period
drm/amdkfd: correct amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu log.
drm/amd/display: Adding prototype for dccg21_update_dpp_dto()
drm/amdgpu: print what method we are using for runtime pm
drm/amdgpu: simplify logic in atpx resume handling
drm/amdgpu: no need to call pci_ignore_hotplug for _PR3
...
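
The vma_set_file() helper called out in the pull message above lands on the
mm side of this pull rather than in the dma-buf diff below. As a rough sketch
of its contract (assuming the shape of the companion mm patch; not code taken
from this page): take the reference on the new file before swapping, then drop
the old one, so vma->vm_file never dangles.

void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);			/* grab the new reference first */
	swap(vma->vm_file, file);	/* vma now holds the new file */
	fput(file);			/* drop the reference to the old file */
}

With that ordering centralized, callers such as dma_buf_mmap() in the diff
below can drop their hand-rolled oldfile/fput error handling.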
Diffstat (limited to 'drivers/dma-buf')
 drivers/dma-buf/dma-buf.c            |  19
 drivers/dma-buf/dma-resv.c           |   2
 drivers/dma-buf/heaps/Makefile       |   1
 drivers/dma-buf/heaps/cma_heap.c     | 329
 drivers/dma-buf/heaps/heap-helpers.c | 274
 drivers/dma-buf/heaps/heap-helpers.h |  53
 drivers/dma-buf/heaps/system_heap.c  | 414
 7 files changed, 650 insertions(+), 442 deletions(-)
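
The heaps rework below is an implementation change only; the allocation uapi
is untouched. For orientation, a minimal userspace sketch that allocates from
the system heap (assuming a kernel exposing /dev/dma_heap/system and an
installed <linux/dma-heap.h> uapi header):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data alloc = {
		.len = 1 << 20,			/* 1 MiB */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags for the new dma-buf fd */
	};
	int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

	if (heap_fd < 0) {
		perror("open /dev/dma_heap/system");
		return 1;
	}
	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}
	/* alloc.fd is an ordinary dma-buf fd: mmap() it or pass it to a driver */
	printf("allocated dma-buf fd %u\n", alloc.fd);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}

The mmap and attach paths rewritten in the diff below are exactly what backs
the returned fd.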
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 0eb80c1ecdab..e63684d4cd90 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1166,9 +1166,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
-	struct file *oldfile;
-	int ret;
-
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
 
@@ -1186,22 +1183,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* readjust the vma */
-	get_file(dmabuf->file);
-	oldfile = vma->vm_file;
-	vma->vm_file = dmabuf->file;
+	vma_set_file(vma, dmabuf->file);
 	vma->vm_pgoff = pgoff;
 
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	if (ret) {
-		/* restore old parameters on failure */
-		vma->vm_file = oldfile;
-		fput(dmabuf->file);
-	} else {
-		if (oldfile)
-			fput(oldfile);
-	}
-	return ret;
-
+	return dmabuf->ops->mmap(dmabuf, vma);
 }
 EXPORT_SYMBOL_GPL(dma_buf_mmap);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index bb5a42b10c29..6ddbeb5dfbf6 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -200,7 +200,7 @@
 		max = max(old->shared_count + num_fences,
 			  old->shared_max * 2);
 	} else {
-		max = 4;
+		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
 
 	new = dma_resv_list_alloc(max);
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cdec3da0..974467791032 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y += heap-helpers.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384dc115b..5e7c3436310c 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -2,76 +2,305 @@
 /*
  * DMABUF CMA heap exporter
  *
- * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
  */
-
 #include <linux/cma.h>
-#include <linux/device.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
 #include <linux/dma-map-ops.h>
 #include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/scatterlist.h>
-#include <linux/sched/signal.h>
+#include <linux/slab.h>
 
-#include "heap-helpers.h"
 
 struct cma_heap {
 	struct dma_heap *heap;
 	struct cma *cma;
 };
 
-static void cma_heap_free(struct heap_helper_buffer *buffer)
+struct cma_heap_buffer {
+	struct cma_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	unsigned long len;
+	struct page *cma_pages;
+	struct page **pages;
+	pgoff_t pagecount;
+	int vmap_cnt;
+	void *vaddr;
+};
+
+struct dma_heap_attachment {
+	struct device *dev;
+	struct sg_table table;
+	struct list_head list;
+	bool mapped;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+			   struct dma_buf_attachment *attachment)
 {
-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
-	unsigned long nr_pages = buffer->pagecount;
-	struct page *cma_pages = buffer->priv_virt;
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+	int ret;
 
-	/* free page list */
-	kfree(buffer->pages);
-	/* release memory */
-	cma_release(cma_heap->cma, cma_pages, nr_pages);
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+					buffer->pagecount, 0,
+					buffer->pagecount << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret) {
+		kfree(a);
+		return ret;
+	}
+
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->mapped = false;
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+			    struct dma_buf_attachment *attachment)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(&a->table);
+	kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+	struct sg_table *table = &a->table;
+	int ret;
+
+	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	if (ret)
+		return ERR_PTR(-ENOMEM);
+	a->mapped = true;
+	return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *table,
+				   enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+
+	a->mapped = false;
+	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					     enum dma_data_direction direction)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	mutex_lock(&buffer->lock);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	mutex_lock(&buffer->lock);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff > buffer->pagecount)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = buffer->pages[vmf->pgoff];
+	get_page(vmf->page);
+
+	return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+	.fault = cma_heap_vm_fault,
+};
+
+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+		return -EINVAL;
+
+	vma->vm_ops = &dma_heap_vm_ops;
+	vma->vm_private_data = buffer;
+
+	return 0;
+}
+
+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
+{
+	void *vaddr;
+
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+	int ret = 0;
+
+	mutex_lock(&buffer->lock);
+	if (buffer->vmap_cnt) {
+		buffer->vmap_cnt++;
+		dma_buf_map_set_vaddr(map, buffer->vaddr);
+		goto out;
+	}
+
+	vaddr = cma_heap_do_vmap(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto out;
+	}
+	buffer->vaddr = vaddr;
+	buffer->vmap_cnt++;
+	dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+
+static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	dma_buf_map_clear(map);
+}
+
+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct cma_heap *cma_heap = buffer->heap;
+
+	if (buffer->vmap_cnt > 0) {
+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+
+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
 	kfree(buffer);
 }
 
-/* dmabuf heap CMA operations functions */
+static const struct dma_buf_ops cma_heap_buf_ops = {
+	.attach = cma_heap_attach,
+	.detach = cma_heap_detach,
+	.map_dma_buf = cma_heap_map_dma_buf,
+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+	.mmap = cma_heap_mmap,
+	.vmap = cma_heap_vmap,
+	.vunmap = cma_heap_vunmap,
+	.release = cma_heap_dma_buf_release,
+};
+
 static int cma_heap_allocate(struct dma_heap *heap,
-			     unsigned long len,
-			     unsigned long fd_flags,
-			     unsigned long heap_flags)
+			     unsigned long len,
+			     unsigned long fd_flags,
+			     unsigned long heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
-	struct heap_helper_buffer *helper_buffer;
-	struct page *cma_pages;
+	struct cma_heap_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	size_t size = PAGE_ALIGN(len);
-	unsigned long nr_pages = size >> PAGE_SHIFT;
+	pgoff_t pagecount = size >> PAGE_SHIFT;
 	unsigned long align = get_order(size);
+	struct page *cma_pages;
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
 	pgoff_t pg;
 
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
-	if (!helper_buffer)
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
 		return -ENOMEM;
 
-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
-	helper_buffer->heap = heap;
-	helper_buffer->size = len;
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->len = size;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
 
-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
 	if (!cma_pages)
-		goto free_buf;
+		goto free_buffer;
 
+	/* Clear the cma pages */
 	if (PageHighMem(cma_pages)) {
-		unsigned long nr_clear_pages = nr_pages;
+		unsigned long nr_clear_pages = pagecount;
 		struct page *page = cma_pages;
 
 		while (nr_clear_pages > 0) {
@@ -85,7 +314,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
 			 */
 			if (fatal_signal_pending(current))
 				goto free_cma;
-
 			page++;
 			nr_clear_pages--;
 		}
@@ -93,28 +321,30 @@ static int cma_heap_allocate(struct dma_heap *heap,
 		memset(page_address(cma_pages), 0, size);
 	}
 
-	helper_buffer->pagecount = nr_pages;
-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
-					     sizeof(*helper_buffer->pages),
-					     GFP_KERNEL);
-	if (!helper_buffer->pages) {
+	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
+	if (!buffer->pages) {
 		ret = -ENOMEM;
 		goto free_cma;
 	}
 
-	for (pg = 0; pg < helper_buffer->pagecount; pg++)
-		helper_buffer->pages[pg] = &cma_pages[pg];
+	for (pg = 0; pg < pagecount; pg++)
+		buffer->pages[pg] = &cma_pages[pg];
+
+	buffer->cma_pages = cma_pages;
+	buffer->heap = cma_heap;
+	buffer->pagecount = pagecount;
 
 	/* create the dmabuf */
-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	exp_info.ops = &cma_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
 	if (IS_ERR(dmabuf)) {
 		ret = PTR_ERR(dmabuf);
 		goto free_pages;
 	}
 
-	helper_buffer->dmabuf = dmabuf;
-	helper_buffer->priv_virt = cma_pages;
-
 	ret = dma_buf_fd(dmabuf, fd_flags);
 	if (ret < 0) {
 		dma_buf_put(dmabuf);
@@ -125,11 +355,12 @@ static int cma_heap_allocate(struct dma_heap *heap,
 	return ret;
 
 free_pages:
-	kfree(helper_buffer->pages);
+	kfree(buffer->pages);
 free_cma:
-	cma_release(cma_heap->cma, cma_pages, nr_pages);
-free_buf:
-	kfree(helper_buffer);
+	cma_release(cma_heap->cma, cma_pages, pagecount);
+free_buffer:
+	kfree(buffer);
+
 	return ret;
 }
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
deleted file mode 100644
index fcf4ce3e2cbb..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <uapi/linux/dma-heap.h>
-
-#include "heap-helpers.h"
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
-			     void (*free)(struct heap_helper_buffer *))
-{
-	buffer->priv_virt = NULL;
-	mutex_init(&buffer->lock);
-	buffer->vmap_cnt = 0;
-	buffer->vaddr = NULL;
-	buffer->pagecount = 0;
-	buffer->pages = NULL;
-	INIT_LIST_HEAD(&buffer->attachments);
-	buffer->free = free;
-}
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
-					  int fd_flags)
-{
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &heap_helper_ops;
-	exp_info.size = buffer->size;
-	exp_info.flags = fd_flags;
-	exp_info.priv = buffer;
-
-	return dma_buf_export(&exp_info);
-}
-
-static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
-{
-	void *vaddr;
-
-	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
-	if (!vaddr)
-		return ERR_PTR(-ENOMEM);
-
-	return vaddr;
-}
-
-static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
-{
-	if (buffer->vmap_cnt > 0) {
-		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
-		vunmap(buffer->vaddr);
-	}
-
-	buffer->free(buffer);
-}
-
-static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
-{
-	void *vaddr;
-
-	if (buffer->vmap_cnt) {
-		buffer->vmap_cnt++;
-		return buffer->vaddr;
-	}
-	vaddr = dma_heap_map_kernel(buffer);
-	if (IS_ERR(vaddr))
-		return vaddr;
-	buffer->vaddr = vaddr;
-	buffer->vmap_cnt++;
-	return vaddr;
-}
-
-static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
-{
-	if (!--buffer->vmap_cnt) {
-		vunmap(buffer->vaddr);
-		buffer->vaddr = NULL;
-	}
-}
-
-struct dma_heaps_attachment {
-	struct device *dev;
-	struct sg_table table;
-	struct list_head list;
-};
-
-static int dma_heap_attach(struct dma_buf *dmabuf,
-			   struct dma_buf_attachment *attachment)
-{
-	struct dma_heaps_attachment *a;
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-	int ret;
-
-	a = kzalloc(sizeof(*a), GFP_KERNEL);
-	if (!a)
-		return -ENOMEM;
-
-	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
-					buffer->pagecount, 0,
-					buffer->pagecount << PAGE_SHIFT,
-					GFP_KERNEL);
-	if (ret) {
-		kfree(a);
-		return ret;
-	}
-
-	a->dev = attachment->dev;
-	INIT_LIST_HEAD(&a->list);
-
-	attachment->priv = a;
-
-	mutex_lock(&buffer->lock);
-	list_add(&a->list, &buffer->attachments);
-	mutex_unlock(&buffer->lock);
-
-	return 0;
-}
-
-static void dma_heap_detach(struct dma_buf *dmabuf,
-			    struct dma_buf_attachment *attachment)
-{
-	struct dma_heaps_attachment *a = attachment->priv;
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-
-	mutex_lock(&buffer->lock);
-	list_del(&a->list);
-	mutex_unlock(&buffer->lock);
-
-	sg_free_table(&a->table);
-	kfree(a);
-}
-
-static
-struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
-				      enum dma_data_direction direction)
-{
-	struct dma_heaps_attachment *a = attachment->priv;
-	struct sg_table *table = &a->table;
-	int ret;
-
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
-	if (ret)
-		table = ERR_PTR(ret);
-	return table;
-}
-
-static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
-				   struct sg_table *table,
-				   enum dma_data_direction direction)
-{
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
-}
-
-static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
-{
-	struct vm_area_struct *vma = vmf->vma;
-	struct heap_helper_buffer *buffer = vma->vm_private_data;
-
-	if (vmf->pgoff > buffer->pagecount)
-		return VM_FAULT_SIGBUS;
-
-	vmf->page = buffer->pages[vmf->pgoff];
-	get_page(vmf->page);
-
-	return 0;
-}
-
-static const struct vm_operations_struct dma_heap_vm_ops = {
-	.fault = dma_heap_vm_fault,
-};
-
-static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-
-	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
-		return -EINVAL;
-
-	vma->vm_ops = &dma_heap_vm_ops;
-	vma->vm_private_data = buffer;
-
-	return 0;
-}
-
-static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-
-	dma_heap_buffer_destroy(buffer);
-}
-
-static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					     enum dma_data_direction direction)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-	struct dma_heaps_attachment *a;
-	int ret = 0;
-
-	mutex_lock(&buffer->lock);
-
-	if (buffer->vmap_cnt)
-		invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
-
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
-				    direction);
-	}
-	mutex_unlock(&buffer->lock);
-
-	return ret;
-}
-
-static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					   enum dma_data_direction direction)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-	struct dma_heaps_attachment *a;
-
-	mutex_lock(&buffer->lock);
-
-	if (buffer->vmap_cnt)
-		flush_kernel_vmap_range(buffer->vaddr, buffer->size);
-
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
-				       direction);
-	}
-	mutex_unlock(&buffer->lock);
-
-	return 0;
-}
-
-static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-	void *vaddr;
-
-	mutex_lock(&buffer->lock);
-	vaddr = dma_heap_buffer_vmap_get(buffer);
-	mutex_unlock(&buffer->lock);
-
-	if (!vaddr)
-		return -ENOMEM;
-	dma_buf_map_set_vaddr(map, vaddr);
-
-	return 0;
-}
-
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
-	struct heap_helper_buffer *buffer = dmabuf->priv;
-
-	mutex_lock(&buffer->lock);
-	dma_heap_buffer_vmap_put(buffer);
-	mutex_unlock(&buffer->lock);
-}
-
-const struct dma_buf_ops heap_helper_ops = {
-	.map_dma_buf = dma_heap_map_dma_buf,
-	.unmap_dma_buf = dma_heap_unmap_dma_buf,
-	.mmap = dma_heap_mmap,
-	.release = dma_heap_dma_buf_release,
-	.attach = dma_heap_attach,
-	.detach = dma_heap_detach,
-	.begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
-	.end_cpu_access = dma_heap_dma_buf_end_cpu_access,
-	.vmap = dma_heap_dma_buf_vmap,
-	.vunmap = dma_heap_dma_buf_vunmap,
-};
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
deleted file mode 100644
index 805d2df88024..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DMABUF Heaps helper code
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#ifndef _HEAP_HELPERS_H
-#define _HEAP_HELPERS_H
-
-#include <linux/dma-heap.h>
-#include <linux/list.h>
-
-/**
- * struct heap_helper_buffer - helper buffer metadata
- * @heap:		back pointer to the heap the buffer came from
- * @dmabuf:		backing dma-buf for this buffer
- * @size:		size of the buffer
- * @priv_virt		pointer to heap specific private value
- * @lock		mutext to protect the data in this structure
- * @vmap_cnt		count of vmap references on the buffer
- * @vaddr		vmap'ed virtual address
- * @pagecount		number of pages in the buffer
- * @pages		list of page pointers
- * @attachments		list of device attachments
- *
- * @free		heap callback to free the buffer
- */
-struct heap_helper_buffer {
-	struct dma_heap *heap;
-	struct dma_buf *dmabuf;
-	size_t size;
-
-	void *priv_virt;
-	struct mutex lock;
-	int vmap_cnt;
-	void *vaddr;
-	pgoff_t pagecount;
-	struct page **pages;
-	struct list_head attachments;
-
-	void (*free)(struct heap_helper_buffer *buffer);
-};
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
-			     void (*free)(struct heap_helper_buffer *));
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
-					  int fd_flags);
-
-extern const struct dma_buf_ops heap_helper_ops;
-#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 0bf688e3c023..17e0e9a68baf 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -3,7 +3,11 @@
  * DMABUF System heap exporter
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
+ * Copyright (C) 2019, 2020 Linaro Ltd.
+ *
+ * Portions based off of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
  */
 
 #include <linux/dma-buf.h>
@@ -15,87 +19,404 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/sched/signal.h>
-#include <asm/page.h>
+#include <linux/vmalloc.h>
+
+static struct dma_heap *sys_heap;
+
+struct system_heap_buffer {
+	struct dma_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	unsigned long len;
+	struct sg_table sg_table;
+	int vmap_cnt;
+	void *vaddr;
+};
+
+struct dma_heap_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+	bool mapped;
+};
+
+#define HIGH_ORDER_GFP  (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
+				| __GFP_COMP)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+/*
+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
+ * of order 0 pages can significantly improve the performance of many IOMMUs
+ * by reducing TLB pressure and time spent updating page tables.
+ */
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sgtable_sg(table, sg, i) {
+		sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+
+static int system_heap_attach(struct dma_buf *dmabuf,
+			      struct dma_buf_attachment *attachment)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+	struct sg_table *table;
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(&buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return -ENOMEM;
+	}
+
+	a->table = table;
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->mapped = false;
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void system_heap_detach(struct dma_buf *dmabuf,
+			       struct dma_buf_attachment *attachment)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(a->table);
+	kfree(a->table);
+	kfree(a);
+}
+
+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+						enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+	struct sg_table *table = a->table;
+	int ret;
+
+	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	if (ret)
+		return ERR_PTR(ret);
+
+	a->mapped = true;
+	return table;
+}
+
+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				      struct sg_table *table,
+				      enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+
+	a->mapped = false;
+	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+						enum dma_data_direction direction)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					      enum dma_data_direction direction)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	mutex_lock(&buffer->lock);
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
 
-#include "heap-helpers.h"
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_device(a->dev, a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct sg_table *table = &buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	struct sg_page_iter piter;
+	int ret;
+
+	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+		struct page *page = sg_page_iter_page(&piter);
+
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+				      vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+	return 0;
+}
+
+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
+{
+	struct sg_table *table = &buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+	struct sg_page_iter piter;
+	void *vaddr;
+
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	for_each_sgtable_page(table, &piter, 0) {
+		WARN_ON(tmp - pages >= npages);
+		*tmp++ = sg_page_iter_page(&piter);
+	}
+
+	vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+	vfree(pages);
+
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+	int ret = 0;
+
+	mutex_lock(&buffer->lock);
+	if (buffer->vmap_cnt) {
+		buffer->vmap_cnt++;
+		dma_buf_map_set_vaddr(map, buffer->vaddr);
+		goto out;
+	}
+
+	vaddr = system_heap_do_vmap(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto out;
+	}
+
+	buffer->vaddr = vaddr;
+	buffer->vmap_cnt++;
+	dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
 
-struct dma_heap *sys_heap;
+static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct system_heap_buffer *buffer = dmabuf->priv;
 
-static void system_heap_free(struct heap_helper_buffer *buffer)
+	mutex_lock(&buffer->lock);
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	dma_buf_map_clear(map);
+}
+
+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
 {
-	pgoff_t pg;
+	struct system_heap_buffer *buffer = dmabuf->priv;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i;
+
+	table = &buffer->sg_table;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
 
-	for (pg = 0; pg < buffer->pagecount; pg++)
-		__free_page(buffer->pages[pg]);
-	kfree(buffer->pages);
+		__free_pages(page, compound_order(page));
+	}
+	sg_free_table(table);
 	kfree(buffer);
 }
 
+static const struct dma_buf_ops system_heap_buf_ops = {
+	.attach = system_heap_attach,
+	.detach = system_heap_detach,
+	.map_dma_buf = system_heap_map_dma_buf,
+	.unmap_dma_buf = system_heap_unmap_dma_buf,
+	.begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = system_heap_dma_buf_end_cpu_access,
+	.mmap = system_heap_mmap,
+	.vmap = system_heap_vmap,
+	.vunmap = system_heap_vunmap,
+	.release = system_heap_dma_buf_release,
+};
+
+static struct page *alloc_largest_available(unsigned long size,
+					    unsigned int max_order)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		if (size < (PAGE_SIZE << orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_pages(order_flags[i], orders[i]);
+		if (!page)
+			continue;
+		return page;
+	}
+	return NULL;
+}
+
 static int system_heap_allocate(struct dma_heap *heap,
 				unsigned long len,
 				unsigned long fd_flags,
 				unsigned long heap_flags)
 {
-	struct heap_helper_buffer *helper_buffer;
+	struct system_heap_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	unsigned long size_remaining = len;
+	unsigned int max_order = orders[0];
 	struct dma_buf *dmabuf;
-	int ret = -ENOMEM;
-	pgoff_t pg;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	struct list_head pages;
+	struct page *page, *tmp_page;
+	int i, ret = -ENOMEM;
 
-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
-	if (!helper_buffer)
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
 		return -ENOMEM;
 
-	init_heap_helper_buffer(helper_buffer, system_heap_free);
-	helper_buffer->heap = heap;
-	helper_buffer->size = len;
-
-	helper_buffer->pagecount = len / PAGE_SIZE;
-	helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
-					     sizeof(*helper_buffer->pages),
-					     GFP_KERNEL);
-	if (!helper_buffer->pages) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->heap = heap;
+	buffer->len = len;
 
-	for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+	INIT_LIST_HEAD(&pages);
+	i = 0;
+	while (size_remaining > 0) {
 		/*
 		 * Avoid trying to allocate memory if the process
-		 * has been killed by by SIGKILL
+		 * has been killed by SIGKILL
 		 */
 		if (fatal_signal_pending(current))
-			goto err1;
+			goto free_buffer;
+
+		page = alloc_largest_available(size_remaining, max_order);
+		if (!page)
+			goto free_buffer;
+
+		list_add_tail(&page->lru, &pages);
+		size_remaining -= page_size(page);
+		max_order = compound_order(page);
+		i++;
+	}
+
+	table = &buffer->sg_table;
+	if (sg_alloc_table(table, i, GFP_KERNEL))
+		goto free_buffer;
 
-		helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
-		if (!helper_buffer->pages[pg])
-			goto err1;
+	sg = table->sgl;
+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+		sg_set_page(sg, page, page_size(page), 0);
+		sg = sg_next(sg);
+		list_del(&page->lru);
 	}
 
 	/* create the dmabuf */
-	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	exp_info.ops = &system_heap_buf_ops;
+	exp_info.size = buffer->len;
+	exp_info.flags = fd_flags;
+	exp_info.priv = buffer;
+	dmabuf = dma_buf_export(&exp_info);
 	if (IS_ERR(dmabuf)) {
 		ret = PTR_ERR(dmabuf);
-		goto err1;
+		goto free_pages;
 	}
 
-	helper_buffer->dmabuf = dmabuf;
-
 	ret = dma_buf_fd(dmabuf, fd_flags);
 	if (ret < 0) {
 		dma_buf_put(dmabuf);
 		/* just return, as put will call release and that will free */
 		return ret;
 	}
-
 	return ret;
 
-err1:
-	while (pg > 0)
-		__free_page(helper_buffer->pages[--pg]);
-	kfree(helper_buffer->pages);
-err0:
-	kfree(helper_buffer);
+free_pages:
+	for_each_sgtable_sg(table, sg, i) {
+		struct page *p = sg_page(sg);
+
+		__free_pages(p, compound_order(p));
+	}
+	sg_free_table(table);
+free_buffer:
+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
+		__free_pages(page, compound_order(page));
+	kfree(buffer);
 
 	return ret;
 }
@@ -107,7 +428,6 @@ static const struct dma_heap_ops system_heap_ops = {
 static int system_heap_create(void)
 {
 	struct dma_heap_export_info exp_info;
-	int ret = 0;
 
 	exp_info.name = "system";
 	exp_info.ops = &system_heap_ops;
@@ -115,9 +435,9 @@ static int system_heap_create(void)
 
 	sys_heap = dma_heap_add(&exp_info);
 	if (IS_ERR(sys_heap))
-		ret = PTR_ERR(sys_heap);
+		return PTR_ERR(sys_heap);
 
-	return ret;
+	return 0;
 }
 module_init(system_heap_create);
 MODULE_LICENSE("GPL v2");
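
The comment added in system_heap.c explains the {8, 4, 0} order selection
(1MB, 64K, 4K chunks). A standalone sketch of that order walk, in plain
userspace C; largest_fitting_order is a hypothetical stand-in for the kernel's
alloc_largest_available(), minus the actual page allocation:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static const unsigned int orders[] = {8, 4, 0};	/* 1MB, 64K, 4K */
#define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

/* Largest order that still fits in the remaining size and does not
 * exceed max_order; mirrors the loop in alloc_largest_available(). */
static int largest_fitting_order(unsigned long size, unsigned int max_order)
{
	for (unsigned int i = 0; i < NUM_ORDERS; i++) {
		if (size < (PAGE_SIZE << orders[i]))
			continue;
		if (max_order < orders[i])
			continue;
		return (int)orders[i];
	}
	return -1;
}

int main(void)
{
	unsigned long remaining = (1UL << 20) + 3 * PAGE_SIZE;	/* 1 MiB + 12 KiB */
	unsigned int max_order = orders[0];

	while (remaining > 0) {
		int order = largest_fitting_order(remaining, max_order);

		if (order < 0)
			break;
		printf("chunk: order %d (%lu KiB)\n",
		       order, (PAGE_SIZE << order) / 1024);
		remaining -= PAGE_SIZE << order;
		max_order = (unsigned int)order;	/* only ratchets downward */
	}
	return 0;
}

For a 1MiB + 12KiB request this prints one order-8 chunk and three order-0
chunks. The sketch assumes every allocation succeeds; in the kernel, a failed
high-order alloc_pages() simply falls through to the next smaller order.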