Diffstat (limited to 'sound/core/memalloc.c')
-rw-r--r--  sound/core/memalloc.c  214
1 file changed, 88 insertions(+), 126 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f901504b5afc..13b71069ae18 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -17,7 +17,17 @@
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>
-#include "memalloc_local.h"
+
+struct snd_malloc_ops {
+ void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
+ void (*free)(struct snd_dma_buffer *dmab);
+ dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
+ struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
+ unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
+ unsigned int ofs, unsigned int size);
+ int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
+ void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
+};
#define DEFAULT_GFP \
(GFP_KERNEL | \
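	 ...

The hunk truncates the DEFAULT_GFP mask at this point; a hedged sketch of the presumable full definition, matching the mask that predates this patch:

	/* Hedged sketch: the remainder of the mask is cut off by the hunk.
	 * The extra flags avoid the OOM-killer and suppress failure warnings,
	 * since failed allocations here are retried with smaller sizes. */
	#define DEFAULT_GFP \
		(GFP_KERNEL | \
		 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
		 __GFP_NOWARN)         /* no stack trace on failure */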
@@ -26,10 +36,6 @@
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
-#ifdef CONFIG_SND_DMA_SGBUF
-static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
-#endif
-
static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
{
const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
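	...

The dispatcher is cut off mid-body; presumably it null-checks the looked-up ops and forwards to the .alloc hook. A hedged sketch of the whole helper:

	/* Hedged sketch: every buffer type funnels through its snd_malloc_ops
	 * entry, looked up from the type-indexed snd_dma_ops table below. */
	static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
	{
		const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);

		if (!ops || !ops->alloc)
			return NULL;
		return ops->alloc(dmab, size);
	}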
@@ -490,15 +496,26 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
/*
* Write-combined pages
*/
-/* x86-specific allocations */
#ifdef CONFIG_SND_DMA_SGBUF
+/* x86-specific allocations */
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
- return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
+ void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
+
+ if (!p)
+ return NULL;
+ dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
+ if (dmab->addr == DMA_MAPPING_ERROR) {
+ do_free_pages(dmab->area, size, true);
+ return NULL;
+ }
+ return p;
}
static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
{
+ dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
+ DMA_BIDIRECTIONAL);
do_free_pages(dmab->area, dmab->bytes, true);
}
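The reinstated x86 path allocates plain pages and then establishes the streaming DMA mapping by hand, so a mapping failure has to unwind the page allocation. A minimal self-contained sketch of that pattern; note it checks with dma_mapping_error() and frees through the local pointer p, since dmab->area is only assigned by the caller after a successful alloc:

	/* Hedged sketch of the alloc-then-map pattern above. */
	static void *wc_alloc_sketch(struct snd_dma_buffer *dmab, size_t size)
	{
		dma_addr_t discard;
		void *p = do_alloc_pages(dmab->dev.dev, size, &discard, true);

		if (!p)
			return NULL;
		dmab->addr = dma_map_single(dmab->dev.dev, p, size,
					    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
			do_free_pages(p, size, true);	/* unwind the pages */
			return NULL;
		}
		return p;
	}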
@@ -506,7 +523,8 @@ static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
- return snd_dma_continuous_mmap(dmab, area);
+ return dma_mmap_coherent(dmab->dev.dev, area,
+ dmab->area, dmab->addr, dmab->bytes);
}
#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
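{
	...

On non-x86 configurations the body is truncated here; it presumably remains the pre-existing one-liner that delegates to the DMA API, mirroring the dma_mmap_wc() call visible in the mmap branch below:

	/* Hedged sketch of the generic branch. */
	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);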
@@ -525,7 +543,7 @@ static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
return dma_mmap_wc(dmab->dev.dev, area,
dmab->area, dmab->addr, dmab->bytes);
}
-#endif /* CONFIG_SND_DMA_SGBUF */
+#endif
static const struct snd_malloc_ops snd_dma_wc_ops = {
.alloc = snd_dma_wc_alloc,
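	...

The ops table is cut off after its first entry; it presumably just wires up the three hooks defined above:

	/* Hedged completion of the truncated table. */
	static const struct snd_malloc_ops snd_dma_wc_ops = {
		.alloc = snd_dma_wc_alloc,
		.free = snd_dma_wc_free,
		.mmap = snd_dma_wc_mmap,
	};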
@@ -541,16 +559,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
struct sg_table *sgt;
void *p;
-#ifdef CONFIG_SND_DMA_SGBUF
- if (cpu_feature_enabled(X86_FEATURE_XENPV))
- return snd_dma_sg_fallback_alloc(dmab, size);
-#endif
sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
DEFAULT_GFP, 0);
-#ifdef CONFIG_SND_DMA_SGBUF
- if (!sgt && !get_dma_ops(dmab->dev.dev))
- return snd_dma_sg_fallback_alloc(dmab, size);
-#endif
if (!sgt)
return NULL;
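	...

With the Xen PV and no-IOMMU special cases gone, the function reduces to a single dma_alloc_noncontiguous() attempt. The truncated remainder presumably vmaps the returned sg_table for CPU access, roughly:

	/* Hedged sketch of the continuation: stash the sg_table and give the
	 * CPU a linear view of the scattered pages. */
	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
	if (!p) {
		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
		return NULL;
	}
	dmab->private_data = sgt;
	return p;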
@@ -667,125 +677,64 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
-/* x86-specific SG-buffer with WC pages */
#ifdef CONFIG_SND_DMA_SGBUF
-#define sg_wc_address(it) ((unsigned long)page_address(sg_page_iter_page(it)))
-
-static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
-{
- void *p = snd_dma_noncontig_alloc(dmab, size);
- struct sg_table *sgt = dmab->private_data;
- struct sg_page_iter iter;
-
- if (!p)
- return NULL;
- if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
- return p;
- for_each_sgtable_page(sgt, &iter, 0)
- set_memory_wc(sg_wc_address(&iter), 1);
- return p;
-}
-
-static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
-{
- struct sg_table *sgt = dmab->private_data;
- struct sg_page_iter iter;
-
- for_each_sgtable_page(sgt, &iter, 0)
- set_memory_wb(sg_wc_address(&iter), 1);
- snd_dma_noncontig_free(dmab);
-}
-
-static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
- struct vm_area_struct *area)
-{
- area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
- return dma_mmap_noncontiguous(dmab->dev.dev, area,
- dmab->bytes, dmab->private_data);
-}
-
-static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
- .alloc = snd_dma_sg_wc_alloc,
- .free = snd_dma_sg_wc_free,
- .mmap = snd_dma_sg_wc_mmap,
- .sync = snd_dma_noncontig_sync,
- .get_addr = snd_dma_noncontig_get_addr,
- .get_page = snd_dma_noncontig_get_page,
- .get_chunk_size = snd_dma_noncontig_get_chunk_size,
-};
-
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
- bool use_dma_alloc_coherent;
+ struct sg_table sgt; /* used by get_addr - must be the first item */
size_t count;
struct page **pages;
- /* DMA address array; the first page contains #pages in ~PAGE_MASK */
- dma_addr_t *addrs;
+ unsigned int *npages;
};
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
struct snd_dma_sg_fallback *sgbuf)
{
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
size_t i, size;
- if (sgbuf->pages && sgbuf->addrs) {
+ if (sgbuf->pages && sgbuf->npages) {
i = 0;
while (i < sgbuf->count) {
- if (!sgbuf->pages[i] || !sgbuf->addrs[i])
- break;
- size = sgbuf->addrs[i] & ~PAGE_MASK;
- if (WARN_ON(!size))
+ size = sgbuf->npages[i];
+ if (!size)
break;
- if (sgbuf->use_dma_alloc_coherent)
- dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
- page_address(sgbuf->pages[i]),
- sgbuf->addrs[i] & PAGE_MASK);
- else
- do_free_pages(page_address(sgbuf->pages[i]),
- size << PAGE_SHIFT, false);
+ do_free_pages(page_address(sgbuf->pages[i]),
+ size << PAGE_SHIFT, wc);
i += size;
}
}
kvfree(sgbuf->pages);
- kvfree(sgbuf->addrs);
+ kvfree(sgbuf->npages);
kfree(sgbuf);
}
+/* fallback manual S/G buffer allocations */
static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
{
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
struct snd_dma_sg_fallback *sgbuf;
struct page **pagep, *curp;
- size_t chunk, npages;
- dma_addr_t *addrp;
+ size_t chunk;
dma_addr_t addr;
+ unsigned int idx, npages;
void *p;
- /* correct the type */
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG)
- dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
- else if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
- dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
-
sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (!sgbuf)
return NULL;
- sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
size = PAGE_ALIGN(size);
sgbuf->count = size >> PAGE_SHIFT;
sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
- sgbuf->addrs = kvcalloc(sgbuf->count, sizeof(*sgbuf->addrs), GFP_KERNEL);
- if (!sgbuf->pages || !sgbuf->addrs)
+ sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
+ if (!sgbuf->pages || !sgbuf->npages)
goto error;
pagep = sgbuf->pages;
- addrp = sgbuf->addrs;
- chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
+ chunk = size;
+ idx = 0;
while (size > 0) {
chunk = min(size, chunk);
- if (sgbuf->use_dma_alloc_coherent)
- p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
- else
- p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
+ p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
if (!p) {
if (chunk <= PAGE_SIZE)
goto error;
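	...

The hunk ends inside the failure branch; the elided context has presumably always halved the chunk and retried, so the loop degrades gracefully under memory fragmentation. A hedged sketch of the full loop shape:

	/* Hedged sketch: largest chunk first, halved to a power-of-two page
	 * order on each failure, giving up only once a single page fails. */
	while (size > 0) {
		chunk = min(size, chunk);
		p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
		if (!p) {
			if (chunk <= PAGE_SIZE)
				goto error;
			chunk >>= 1;
			chunk = PAGE_SIZE << get_order(chunk);
			continue;
		}
		/* ... record the chunk's pages in sgbuf, then ... */
		size -= chunk;
	}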
@@ -797,27 +746,33 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
size -= chunk;
/* fill pages */
npages = chunk >> PAGE_SHIFT;
- *addrp = npages; /* store in lower bits */
+ sgbuf->npages[idx] = npages;
+ idx += npages;
curp = virt_to_page(p);
- while (npages--) {
+ while (npages--)
*pagep++ = curp++;
- *addrp++ |= addr;
- addr += PAGE_SIZE;
- }
}
- p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
- if (!p)
+ if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
+ 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
goto error;
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
- set_pages_array_wc(sgbuf->pages, sgbuf->count);
+ if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
+ goto error_dma_map;
+
+ p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
+ if (!p)
+ goto error_vmap;
dmab->private_data = sgbuf;
/* store the first page address for convenience */
- dmab->addr = sgbuf->addrs[0] & PAGE_MASK;
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
return p;
+ error_vmap:
+ dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+ error_dma_map:
+ sg_free_table(&sgbuf->sgt);
error:
__snd_dma_sg_fallback_free(dmab, sgbuf);
return NULL;
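}

The new sgt member is commented "must be the first item" because the reused snd_dma_noncontig_get_addr() reads dmab->private_data as a bare struct sg_table *; handing over the fallback struct as-is works only while sgbuf->sgt sits at offset 0. A hedged sketch of the kind of lookup that relies on this aliasing (not the literal helper):

	/* Hedged sketch: walk the DMA-mapped table to the page holding the
	 * offset; private_data aliases sgbuf->sgt at offset 0. */
	static dma_addr_t get_addr_sketch(struct snd_dma_buffer *dmab,
					  size_t offset)
	{
		struct sg_table *sgt = dmab->private_data;
		struct sg_dma_page_iter iter;

		for_each_sgtable_dma_page(sgt, &iter, offset >> PAGE_SHIFT)
			return sg_page_iter_dma_address(&iter) +
				offset % PAGE_SIZE;
		return 0;
	}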
@@ -827,36 +782,46 @@ static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
{
struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
- set_pages_array_wb(sgbuf->pages, sgbuf->count);
vunmap(dmab->area);
+ dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(&sgbuf->sgt);
__snd_dma_sg_fallback_free(dmab, dmab->private_data);
}
-static dma_addr_t snd_dma_sg_fallback_get_addr(struct snd_dma_buffer *dmab,
- size_t offset)
-{
- struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
- size_t index = offset >> PAGE_SHIFT;
-
- return (sgbuf->addrs[index] & PAGE_MASK) | (offset & ~PAGE_MASK);
-}
-
static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
struct vm_area_struct *area)
{
struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
return vm_map_pages(area, sgbuf->pages, sgbuf->count);
}
-static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
- .alloc = snd_dma_sg_fallback_alloc,
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ int type = dmab->dev.type;
+ void *p;
+
+ /* try the standard DMA API allocation at first */
+ if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
+ else
+ dmab->dev.type = SNDRV_DMA_TYPE_DEV;
+ p = __snd_dma_alloc_pages(dmab, size);
+ if (p)
+ return p;
+
+ dmab->dev.type = type; /* restore the type */
+ return snd_dma_sg_fallback_alloc(dmab, size);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_ops = {
+ .alloc = snd_dma_sg_alloc,
.free = snd_dma_sg_fallback_free,
.mmap = snd_dma_sg_fallback_mmap,
- .get_addr = snd_dma_sg_fallback_get_addr,
+ /* reuse noncontig helper */
+ .get_addr = snd_dma_noncontig_get_addr,
/* reuse vmalloc helpers */
.get_page = snd_dma_vmalloc_get_page,
.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
@@ -927,15 +892,12 @@ static const struct snd_malloc_ops *snd_dma_ops[] = {
[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
#ifdef CONFIG_SND_DMA_SGBUF
- [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+ [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+ [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
#endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef CONFIG_SND_DMA_SGBUF
- [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
- [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
-#endif
#endif /* CONFIG_HAS_DMA */
};
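
With both SG types now routed through snd_dma_sg_ops, a driver's allocation call stays unchanged and the standard-then-fallback retry happens internally. A hypothetical usage sketch (card, dev and buffer_bytes are placeholders):

	/* Hedged usage sketch: if the DMA API path fails inside
	 * snd_dma_sg_alloc(), the manual fallback kicks in transparently. */
	struct snd_dma_buffer buf;
	int err;

	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_DEV_SG, card->dev,
				      DMA_BIDIRECTIONAL, buffer_bytes, &buf);
	if (!err) {
		dma_addr_t addr = snd_sgbuf_get_addr(&buf, 0); /* first chunk */
		/* ... program the hardware, then eventually ... */
		snd_dma_free_pages(&buf);
	}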