From d73cd42893f4cdc06e6829fea2347bb92cb789d1 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 15 Sep 2008 16:44:55 -0400
Subject: [ARM] kmap support

The kmap virtual area borrows a 2MB range at the top of the 16MB area
below PAGE_OFFSET currently reserved for kernel modules and/or the
XIP kernel.  This 2MB corresponds to the range covered by 2 consecutive
second-level page tables, or a single pmd entry as seen by the Linux
page table abstraction.  Because XIP kernels are unlikely to be seen on
systems needing highmem support, there shouldn't be any shortage of VM
space for modules (14 MB for modules is still way more than twice the
typical usage).

Because the virtual mapping of highmem pages can go away at any moment
after kunmap() is called on them, we need to bypass the delayed cache
flushing provided by flush_dcache_page() in that case.

The atomic kmap versions are based on fixmaps, and
__cpuc_flush_dcache_page() is used directly in that case.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/highmem.h | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 arch/arm/include/asm/highmem.h

diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
new file mode 100644
index 000000000000..023d5b374544
--- /dev/null
+++ b/arch/arm/include/asm/highmem.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/kmap_types.h>
+
+#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
+#define LAST_PKMAP		PTRS_PER_PTE
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot		PAGE_KERNEL
+
+#define flush_cache_kmaps()	flush_cache_all()
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page, enum km_type type);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern struct page *kmap_atomic_to_page(const void *ptr);
+
+#endif
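A quick aside on the PKMAP macros in the header above: with the usual
classic-ARM values they describe 512 page-sized slots that fill the
borrowed 2MB window exactly.  Below is a minimal standalone sketch of
that arithmetic; the concrete constants (PAGE_OFFSET, PMD_SIZE,
PAGE_SHIFT, PTRS_PER_PTE) are illustrative assumptions for a classic
ARM configuration, not values taken from any particular kernel build:

/* Standalone sketch of the PKMAP address arithmetic; compiles and
 * runs in userspace. All constant values below are assumptions. */
#include <stdio.h>

#define PAGE_OFFSET	0xC0000000UL
#define PMD_SIZE	(2UL * 1024 * 1024)
#define PAGE_SHIFT	12
#define PTRS_PER_PTE	512

#define PKMAP_BASE	(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP	PTRS_PER_PTE
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
	/* The window starts 2MB below PAGE_OFFSET. */
	printf("PKMAP_BASE = %#lx\n", PKMAP_BASE);	/* 0xbfe00000 */

	/* 512 4KB slots cover exactly those 2MB. */
	printf("slots = %d, span = %#lx\n", LAST_PKMAP,
	       (unsigned long)LAST_PKMAP << PAGE_SHIFT);	/* 512, 0x200000 */

	/* Round trip: slot 5 maps to an address that maps back to slot 5. */
	printf("PKMAP_ADDR(5) = %#lx, PKMAP_NR = %lu\n",
	       PKMAP_ADDR(5), PKMAP_NR(PKMAP_ADDR(5)));
	return 0;
}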
From 43377453af83b8ff8c1c731da1508bd6b84ebfea Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 12 Mar 2009 22:52:09 -0400
Subject: [ARM] introduce dma_cache_maint_page()

This is a helper to be used by the DMA mapping API to handle cache
maintenance for memory identified by a page structure instead of a
virtual address.

Those pages may or may not be highmem pages, and when they're highmem
pages, they may or may not be virtually mapped.  When they're not
mapped then there is no L1 cache to worry about.  But even in that
case the L2 cache must be processed since unmapped highmem pages can
still be L2 cached.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/dma-mapping.h |  4 ++-
 arch/arm/include/asm/highmem.h     |  3 ++
 arch/arm/mm/dma-mapping.c          | 72 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 77 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec3438..59fa762e9c66 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,6 +57,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+				 size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +318,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(dir));
 
 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 023d5b374544..7f36d00600b4 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -15,7 +15,10 @@
 
 extern pte_t *pkmap_page_table;
 
+#define ARCH_NEEDS_KMAP_HIGH_GET
+
 extern void *kmap_high(struct page *page);
+extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
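Before the dma-mapping.c changes below, it helps to spell out the
contract kmap_high_get() is expected to honor, since the patch relies
on it without stating it: return the page's existing kernel mapping
with its pin count raised, or NULL when no mapping currently exists,
and never sleep, so it is usable from the atomic contexts DMA mapping
operations run in.  A NULL return means there is no virtual mapping and
therefore no L1 cache lines to maintain.  This reading, and the toy
model below, are a gloss on the patch rather than text from it; all
names in the model are illustrative only:

/* Toy, standalone model of the assumed kmap_high_get() protocol:
 * hand back an existing mapping pinned, or NULL, never sleeping. */
#include <stdio.h>
#include <stdlib.h>

struct page_model {
	void *vaddr;	/* NULL when the page has no kernel mapping */
	int pin;	/* how many users hold the mapping */
};

static void *kmap_high_get_model(struct page_model *p)
{
	if (!p->vaddr)
		return NULL;	/* unmapped: caller skips L1 maintenance */
	p->pin++;		/* pin so the mapping can't vanish meanwhile */
	return p->vaddr;
}

static void kunmap_high_model(struct page_model *p)
{
	p->pin--;		/* mapping may be reclaimed once unpinned */
}

int main(void)
{
	struct page_model unmapped = { NULL, 0 };
	struct page_model mapped = { malloc(4096), 1 };

	if (!kmap_high_get_model(&unmapped))
		printf("unmapped page: no L1 lines, only L2 to process\n");

	void *vaddr = kmap_high_get_model(&mapped);
	if (vaddr) {
		printf("mapped page pinned at %p: L1 maintenance is safe\n",
		       vaddr);
		kunmap_high_model(&mapped);
	}
	free(mapped.vaddr);
	return 0;
}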
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f1ef5613ccd4..510c179b0ac8 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/memory.h>
+#include <asm/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
@@ -517,6 +518,74 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
+static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
+				       size_t size, int direction)
+{
+	void *vaddr;
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		inner_op(vaddr, vaddr + size);
+	} else {
+		vaddr = kmap_high_get(page);
+		if (vaddr) {
+			vaddr += offset;
+			inner_op(vaddr, vaddr + size);
+			kunmap_high(page);
+		}
+	}
+
+	paddr = page_to_phys(page) + offset;
+	outer_op(paddr, paddr + size);
+}
+
+void dma_cache_maint_page(struct page *page, unsigned long offset,
+			  size_t size, int dir)
+{
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
+	size_t left = size;
+	do {
+		size_t len = left;
+		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
+			if (offset >= PAGE_SIZE) {
+				page += offset / PAGE_SIZE;
+				offset %= PAGE_SIZE;
+			}
+			len = PAGE_SIZE - offset;
+		}
+		dma_cache_maint_contiguous(page, offset, len, dir);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+EXPORT_SYMBOL(dma_cache_maint_page);
+
 /**
  * dma_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -614,7 +683,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			continue;
 
 		if (!arch_is_coherent())
-			dma_cache_maint(sg_virt(s), s->length, dir);
+			dma_cache_maint_page(sg_page(s), s->offset,
+					     s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
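The splitting loop in dma_cache_maint_page() is easy to check in
isolation.  Here is a standalone harness replicating its arithmetic,
with simplifications: pages are modeled as plain indices, PAGE_SIZE is
assumed to be 4096, the highmem case is taken unconditionally, and
maint() is a hypothetical stand-in for dma_cache_maint_contiguous():

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in for dma_cache_maint_contiguous(): just report the chunk. */
static void maint(unsigned long page, unsigned long offset, size_t len)
{
	printf("page %lu: offset %lu, len %zu\n", page, offset, len);
}

static void maint_page(unsigned long page, unsigned long offset, size_t size)
{
	size_t left = size;
	do {
		size_t len = left;
		/* Assume a highmem page: never cross a page boundary. */
		if (len + offset > PAGE_SIZE) {
			/* An sg offset may start past the first page. */
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		maint(page, offset, len);
		offset = 0;	/* later pages start at their beginning */
		page++;
		left -= len;
	} while (left);
}

int main(void)
{
	/* 10000 bytes starting 3000 bytes into page 0 splits into:
	 *   page 0: offset 3000, len 1096
	 *   page 1: offset 0,    len 4096
	 *   page 2: offset 0,    len 4096
	 *   page 3: offset 0,    len 712
	 */
	maint_page(0, 3000, 10000);
	return 0;
}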