author     Takashi Iwai <tiwai@suse.de>   2021-10-17 10:48:59 +0300
committer  Takashi Iwai <tiwai@suse.de>   2021-10-18 14:32:13 +0300
commit     2d9ea39917a4e4293bc2caea902c7059a330b611 (patch)
tree       07b9ee3a8103a1f1f0872c63caac39321cbe5f13 /sound
parent     73325f60e2ed28f04032d43c2828b73776cfefd0 (diff)
ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type
We've had x86-specific SG-buffer handling code, but now it can be
merged gracefully with the standard non-contiguous DMA pages.
After the migration, SNDRV_DMA_TYPE_DEV_SG becomes identical with
SNDRV_DMA_TYPE_NONCONTIG on x86, while other architectures still fall
back to SNDRV_DMA_TYPE_DEV.
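The type aliasing itself happens on the header side and is therefore not
visible in this 'sound'-limited diffstat. Conceptually it boils down to
something like the sketch below (an illustrative reconstruction of the
include/sound/memalloc.h change, not the actual hunk):

    /* Sketch only: assumed shape of the buffer-type aliasing */
    #ifdef CONFIG_SND_DMA_SGBUF
    /* x86: plain SG buffers are served by the non-contiguous allocator */
    #define SNDRV_DMA_TYPE_DEV_SG	SNDRV_DMA_TYPE_NONCONTIG
    /* SNDRV_DMA_TYPE_DEV_WC_SG keeps its own type, see snd_dma_sg_wc_ops */
    #else
    /* no SG-buffer support: fall back to contiguous device buffers */
    #define SNDRV_DMA_TYPE_DEV_SG	SNDRV_DMA_TYPE_DEV
    #define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
    #endif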
The remaining problem is the SG-buffer with WC pages: the DMA core on
x86 doesn't handle it well, so we still need some special handling to
manipulate the page attributes manually. The mmap handler for
SNDRV_DMA_TYPE_DEV_WC_SG still returns -ENOENT intentionally, so that
it falls back to the default mmap handler.
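For context, the driver-facing allocation API does not change with this
conversion; a minimal sketch of how a buffer of the WC SG type is
obtained (the helper name, device pointer and size below are made up for
illustration):

    #include <sound/memalloc.h>

    /* Hypothetical example, not part of this patch: grab and release a
     * 64 kB write-combined SG buffer for the given device.  On x86 the
     * allocation now goes through the non-contiguous DMA allocator; the
     * WC variant additionally flips the page attributes as done in the
     * memalloc.c hunk below.
     */
    static int example_alloc_wc_sg(struct device *dev)
    {
    	struct snd_dma_buffer dmab;
    	int err;

    	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_WC_SG, dev,
    				  64 * 1024, &dmab);
    	if (err < 0)
    		return err;
    	/* ... use dmab.area, dmab.addr, snd_sgbuf_get_addr(), ... */
    	snd_dma_free_pages(&dmab);
    	return 0;
    }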
Link: https://lore.kernel.org/r/20211017074859.24112-4-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound')
-rw-r--r-- | sound/core/Makefile   |   1 |
-rw-r--r-- | sound/core/memalloc.c |  51 |
-rw-r--r-- | sound/core/sgbuf.c    | 201 |
3 files changed, 47 insertions, 206 deletions
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 79e1407cd0de..350d704ced98 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -19,7 +19,6 @@ snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
 snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
 		pcm_memory.o memalloc.o
 snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
-snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
 snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
 snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
 
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 99681e651223..acdebecf1a2e 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -560,6 +560,50 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
 };
 
+/* x86-specific SG-buffer with WC pages */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
+
+static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	void *p = snd_dma_noncontig_alloc(dmab, size);
+	size_t ofs;
+
+	if (!p)
+		return NULL;
+	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
+		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
+	return p;
+}
+
+static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
+{
+	size_t ofs;
+
+	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
+		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
+	snd_dma_noncontig_free(dmab);
+}
+
+static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
+			      struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	/* FIXME: dma_mmap_noncontiguous() works? */
+	return -ENOENT; /* continue with the default mmap handler */
+}
+
+const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+	.alloc = snd_dma_sg_wc_alloc,
+	.free = snd_dma_sg_wc_free,
+	.mmap = snd_dma_sg_wc_mmap,
+	.sync = snd_dma_noncontig_sync,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+#endif /* CONFIG_SND_DMA_SGBUF */
+
 /*
  * Non-coherent pages allocator
  */
@@ -619,14 +663,13 @@ static const struct snd_malloc_ops *dma_ops[] = {
 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
 	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
 	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
+#ifdef CONFIG_SND_DMA_SGBUF
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
-#endif
 };
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
deleted file mode 100644
index 8352a5cdb19f..000000000000
--- a/sound/core/sgbuf.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Scatter-Gather buffer
- *
- * Copyright (c) by Takashi Iwai <tiwai@suse.de>
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <sound/memalloc.h>
-#include "memalloc_local.h"
-
-struct snd_sg_page {
-	void *buf;
-	dma_addr_t addr;
-};
-
-struct snd_sg_buf {
-	int size;	/* allocated byte size */
-	int pages;	/* allocated pages */
-	int tblsize;	/* allocated table size */
-	struct snd_sg_page *table;	/* address table */
-	struct page **page_table;	/* page table (for vmap/vunmap) */
-	struct device *dev;
-};
-
-/* table entries are align to 32 */
-#define SGBUF_TBL_ALIGN		32
-#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
-
-static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	struct snd_dma_buffer tmpb;
-	int i;
-
-	if (!sgbuf)
-		return;
-
-	vunmap(dmab->area);
-	dmab->area = NULL;
-
-	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
-	tmpb.dev.dev = sgbuf->dev;
-	for (i = 0; i < sgbuf->pages; i++) {
-		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
-			continue; /* continuous pages */
-		tmpb.area = sgbuf->table[i].buf;
-		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
-		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
-		snd_dma_free_pages(&tmpb);
-	}
-
-	kfree(sgbuf->table);
-	kfree(sgbuf->page_table);
-	kfree(sgbuf);
-	dmab->private_data = NULL;
-}
-
-#define MAX_ALLOC_PAGES		32
-
-static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
-{
-	struct snd_sg_buf *sgbuf;
-	unsigned int i, pages, chunk, maxpages;
-	struct snd_dma_buffer tmpb;
-	struct snd_sg_page *table;
-	struct page **pgtable;
-	int type = SNDRV_DMA_TYPE_DEV;
-	pgprot_t prot = PAGE_KERNEL;
-	void *area;
-
-	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
-	if (!sgbuf)
-		return NULL;
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
-		type = SNDRV_DMA_TYPE_DEV_WC;
-#ifdef pgprot_noncached
-		prot = pgprot_noncached(PAGE_KERNEL);
-#endif
-	}
-	sgbuf->dev = dmab->dev.dev;
-	pages = snd_sgbuf_aligned_pages(size);
-	sgbuf->tblsize = sgbuf_align_table(pages);
-	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		goto _failed;
-	sgbuf->table = table;
-	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
-	if (!pgtable)
-		goto _failed;
-	sgbuf->page_table = pgtable;
-
-	/* allocate pages */
-	maxpages = MAX_ALLOC_PAGES;
-	while (pages > 0) {
-		chunk = pages;
-		/* don't be too eager to take a huge chunk */
-		if (chunk > maxpages)
-			chunk = maxpages;
-		chunk <<= PAGE_SHIFT;
-		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
-						 chunk, &tmpb) < 0) {
-			if (!sgbuf->pages)
-				goto _failed;
-			size = sgbuf->pages * PAGE_SIZE;
-			break;
-		}
-		chunk = tmpb.bytes >> PAGE_SHIFT;
-		for (i = 0; i < chunk; i++) {
-			table->buf = tmpb.area;
-			table->addr = tmpb.addr;
-			if (!i)
-				table->addr |= chunk; /* mark head */
-			table++;
-			*pgtable++ = virt_to_page(tmpb.area);
-			tmpb.area += PAGE_SIZE;
-			tmpb.addr += PAGE_SIZE;
-		}
-		sgbuf->pages += chunk;
-		pages -= chunk;
-		if (chunk < maxpages)
-			maxpages = chunk;
-	}
-
-	sgbuf->size = size;
-	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
-	if (!area)
-		goto _failed;
-	return area;
-
- _failed:
-	snd_dma_sg_free(dmab); /* free the table */
-	return NULL;
-}
-
-static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
-				      size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	dma_addr_t addr;
-
-	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
-	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
-	return addr + offset % PAGE_SIZE;
-}
-
-static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
-					size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	unsigned int idx = offset >> PAGE_SHIFT;
-
-	if (idx >= (unsigned int)sgbuf->pages)
-		return NULL;
-	return sgbuf->page_table[idx];
-}
-
-static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
-					      unsigned int ofs,
-					      unsigned int size)
-{
-	struct snd_sg_buf *sg = dmab->private_data;
-	unsigned int start, end, pg;
-
-	start = ofs >> PAGE_SHIFT;
-	end = (ofs + size - 1) >> PAGE_SHIFT;
-	/* check page continuity */
-	pg = sg->table[start].addr >> PAGE_SHIFT;
-	for (;;) {
-		start++;
-		if (start > end)
-			break;
-		pg++;
-		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
-			return (start << PAGE_SHIFT) - ofs;
-	}
-	/* ok, all on continuous pages */
-	return size;
-}
-
-static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
-			   struct vm_area_struct *area)
-{
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-	return -ENOENT; /* continue with the default mmap handler */
-}
-
-const struct snd_malloc_ops snd_dma_sg_ops = {
-	.alloc = snd_dma_sg_alloc,
-	.free = snd_dma_sg_free,
-	.get_addr = snd_dma_sg_get_addr,
-	.get_page = snd_dma_sg_get_page,
-	.get_chunk_size = snd_dma_sg_get_chunk_size,
-	.mmap = snd_dma_sg_mmap,
-};
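A note on the -ENOENT convention shared by the removed snd_dma_sg_mmap()
and the new snd_dma_sg_wc_mmap(): the hook only adjusts vm_page_prot and
then declines, so the generic mmap path performs the actual mapping. The
caller-side pattern is roughly the following (an illustrative sketch
with hypothetical names, not the actual ALSA core code):

    #include <linux/mm.h>
    #include <sound/memalloc.h>

    /* Stand-in for the generic handler (e.g. the fault-based mapping
     * used by the PCM core); hypothetical, for illustration only.
     */
    static int example_default_mmap(struct snd_dma_buffer *dmab,
    				struct vm_area_struct *area)
    {
    	return 0;
    }

    static int example_mmap_buffer(struct snd_dma_buffer *dmab,
    			       struct vm_area_struct *area,
    			       const struct snd_malloc_ops *ops)
    {
    	int err = -ENOENT;

    	if (ops && ops->mmap)
    		err = ops->mmap(dmab, area); /* may only set vm_page_prot */
    	if (err == -ENOENT)
    		err = example_default_mmap(dmab, area);
    	return err;
    }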