From f00a75c094c340c4e7435665816c3273c870e849 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:17:45 +0100 Subject: ARM: Pass VMA to copy_user_highpage() implementations Our copy_user_highpage() implementations may require cache maintenance. Ensure that implementations have all necessary details to perform this maintenance. Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 2 +- arch/arm/mm/copypage-v3.c | 2 +- arch/arm/mm/copypage-v4mc.c | 2 +- arch/arm/mm/copypage-v4wb.c | 2 +- arch/arm/mm/copypage-v4wt.c | 2 +- arch/arm/mm/copypage-v6.c | 4 ++-- arch/arm/mm/copypage-xsc3.c | 2 +- arch/arm/mm/copypage-xscale.c | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 70997d5bee2d..e2e8d29c548b 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -68,7 +68,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) } void feroceon_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index de9c06854ad7..f72303e1d804 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom) } void v3_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7370a7142b04..598c51ad5071 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to) } void v4_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9ab098414227..e9920f68b76b 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -48,7 +48,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) } void v4wb_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 300efafd6643..172e6a55458e 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) } void v4wt_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 4127a7bddfe5..334d5602770e 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock); * attack the kernel's existing mapping of these pages. */ static void v6_copy_user_highpage_nonaliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; @@ -73,7 +73,7 @@ static void discard_old_kernel_data(void *kto) * Copy the page, taking account of the cache colour.
*/ static void v6_copy_user_highpage_aliasing(struct page *to, - struct page *from, unsigned long vaddr) + struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long kfrom, kto; diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index bc4525f5ab23..18ae05d5829c 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -71,7 +71,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto, *kfrom; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 76824d3e966a..9920c0ae2096 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to) } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, - unsigned long vaddr) + unsigned long vaddr, struct vm_area_struct *vma) { void *kto = kmap_atomic(to, KM_USER1); -- cgit v1.2.3 From 2725898fc9bb2121ac0fb1b5e4faf4fc09014729 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 5 Oct 2009 15:34:22 +0100 Subject: ARM: Flush user mapping on VIVT processors when copying a page Steven Walter writes: > I've been tracking down an instance of userspace data corruption, > and I believe I have found a window during fork where data can be > lost. The corruption is occurring on an ARMv5 system with VIVT > caches. Here's the scenario in question. Thread A is forking, > Thread B is running in userspace: > > Thread A: flush_cache_mm() (dup_mmap) > Thread B: writes to a page in the above mm > Thread A: pte_wrprotect() the above page (copy_one_pte) > Thread B: writes to the same page again > > During thread B's second write, he'll take a fault and enter the > do_wp_page() case. We'll end up calling copy_page(), which notably > uses the kernel virtual addresses for the old and new pages. This > means that the new page does not necessarily have the data from the > first write. Now there are two conflicting copies of the same > cache-line in dcache. If the userspace cache-line flushes before > the kernel cache-line, we lose the changes made during the first > write. do_wp_page does call flush_dcache_page on the newly-copied > page, but there's still a window where the CPU could flush the > userspace cache-line before then. Resolve this by flushing the user mapping before copying the page on processors with a writeback VIVT cache. Note: this does have a performance impact, and so needs further consideration before being merged - can we optimize out some of the cache flushes if, eg, we know that the page isn't yet mapped? 
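For illustration, here is the Feroceon variant of copy_user_highpage() after this change, reconstructed from the hunk below (all names are taken from the patch itself): the user-space alias of the source page is written back while both atomic kernel mappings are held, immediately before the copy through the kernel mapping, so the kernel-virtual copy observes thread B's first write.

	void feroceon_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		void *kto, *kfrom;

		kto = kmap_atomic(to, KM_USER0);
		kfrom = kmap_atomic(from, KM_USER1);
		/* write back the dirty user-space alias of 'from' so that
		   the copy via the kernel mapping sees the latest data */
		flush_cache_page(vma, vaddr, page_to_pfn(from));
		feroceon_copy_user_page(kto, kfrom);
		kunmap_atomic(kfrom, KM_USER1);
		kunmap_atomic(kto, KM_USER0);
	}
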
Thread: Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 1 + arch/arm/mm/copypage-v4wb.c | 1 + arch/arm/mm/copypage-xsc3.c | 1 + 3 files changed, 3 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index e2e8d29c548b..5eb4fd93893d 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -74,6 +74,7 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); feroceon_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index e9920f68b76b..7c2eb55cd4a9 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -54,6 +54,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); v4wb_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 18ae05d5829c..747ad4140fc7 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -77,6 +77,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, kto = kmap_atomic(to, KM_USER0); kfrom = kmap_atomic(from, KM_USER1); + flush_cache_page(vma, vaddr, page_to_pfn(from)); xsc3_mc_copy_user_page(kto, kfrom); kunmap_atomic(kfrom, KM_USER1); kunmap_atomic(kto, KM_USER0); -- cgit v1.2.3 From 2ef7f3dbd7a70a48c3f09b498df528cb00ea03a4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 5 Nov 2009 13:29:36 +0000 Subject: ARM: Fix ptrace accesses Signed-off-by: Russell King --- arch/arm/include/asm/cacheflush.h | 24 ++---------------- arch/arm/include/asm/smp_plat.h | 5 ++++ arch/arm/mm/flush.c | 51 +++++++++++++++++++++++++++++++++------ 3 files changed, 50 insertions(+), 30 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 730aefcfbee3..3d2ef54c7cb9 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end) * processes address space. Really, we want to allow our "user * space" model to handle this. 
*/ -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - do { \ - memcpy(dst, src, len); \ - flush_ptrace_access(vma, page, vaddr, dst, len, 1);\ - } while (0) - +extern void copy_to_user_page(struct vm_area_struct *, struct page *, + unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ @@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig } } -static inline void -vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) -{ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { - unsigned long addr = (unsigned long)kaddr; - __cpuc_coherent_kern_range(addr, addr + len); - } -} - #ifndef CONFIG_CPU_CACHE_VIPT #define flush_cache_mm(mm) \ vivt_flush_cache_mm(mm) @@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page, vivt_flush_cache_range(vma,start,end) #define flush_cache_page(vma,addr,pfn) \ vivt_flush_cache_page(vma,addr,pfn) -#define flush_ptrace_access(vma,page,ua,ka,len,write) \ - vivt_flush_ptrace_access(vma,page,ua,ka,len,write) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); -extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index 59303e200845..e6215305544a 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void) return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; } +static inline int cache_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1; +} + #endif diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 6f3a4b7a3b82..e34f095e2090 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) __flush_icache_all(); } +#else +#define flush_pfn_alias(pfn,vaddr) do { } while (0) +#endif +#ifdef CONFIG_SMP +static void flush_ptrace_access_other(void *args) +{ + __flush_icache_all(); +} +#endif + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, - unsigned long uaddr, void *kaddr, - unsigned long len, int write) + unsigned long uaddr, void *kaddr, unsigned long len) { if (cache_is_vivt()) { - vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write); + if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { + unsigned long addr = (unsigned long)kaddr; + __cpuc_coherent_kern_range(addr, addr + len); + } return; } @@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, } /* VIPT non-aliasing cache */ - if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && - vma->vm_flags & VM_EXEC) { + if (vma->vm_flags & VM_EXEC) { unsigned long addr = (unsigned long)kaddr; - /* only flushing the kernel mapping on non-aliasing VIPT 
*/ __cpuc_coherent_kern_range(addr, addr + len); +#ifdef CONFIG_SMP + if (cache_ops_need_broadcast()) + smp_call_function(flush_ptrace_access_other, + NULL, 1); +#endif } } + +/* + * Copy user data from/to a page which is mapped into a different + * processes address space. Really, we want to allow our "user + * space" model to handle this. + * + * Note that this code needs to run on the current CPU. + */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ +#ifdef CONFIG_SMP + preempt_disable(); +#endif + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +#ifdef CONFIG_SMP + preempt_enable(); +#endif +} void __flush_dcache_page(struct address_space *mapping, struct page *page) { -- cgit v1.2.3 From dc8601a224d546bb321b058fc5ecabdb688a3582 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 30 Dec 2009 02:27:24 -0500 Subject: [ARM] pxa: do not enable L2 after MMU is enabled The outer cache code checked whether L2 was enabled and, if it was not, enabled it. On XSC3 this is dangerous and can hang the system. The XSC3 core documentation says: "Following reset, the L2 Unified Cache Enable bit is cleared. To enable the L2 Cache, software may set the bit to a '1' before or at the same time as enabling the MMU. Enabling the L2 Cache after the MMU has been enabled or disabling the L2 Cache after the L2 Cache has been enabled, may result in unpredictable behavior of the processor." By the time the outer cache is initialized, the MMU is already enabled, so we cannot enable L2 there. Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mm/cache-xsc3l2.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 5d180cb0bd94..c3154928bccd 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c @@ -221,15 +221,14 @@ static int __init xsc3_l2_init(void) if (!cpu_is_xsc3() || !xsc3_l2_present()) return 0; - if (!(get_cr() & CR_L2)) { + if (get_cr() & CR_L2) { pr_info("XScale3 L2 cache enabled.\n"); - adjust_cr(CR_L2, CR_L2); xsc3_l2_inv_all(); - } - outer_cache.inv_range = xsc3_l2_inv_range; - outer_cache.clean_range = xsc3_l2_clean_range; - outer_cache.flush_range = xsc3_l2_flush_range; + outer_cache.inv_range = xsc3_l2_inv_range; + outer_cache.clean_range = xsc3_l2_clean_range; + outer_cache.flush_range = xsc3_l2_flush_range; + } return 0; } -- cgit v1.2.3 From 548c6af4627f4dcb24512a381193206e09bd6d31 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 30 Dec 2009 10:02:57 -0500 Subject: [ARM] pxa: enable L2 if present in XSC3 Check whether L2 is present on XSC3 and, if it is, enable it immediately. Disabling L2 after it has been enabled may result in unpredictable behavior of the XSC3 processor.
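As a rough illustration only, the check added to __xsc3_setup (see the assembly hunk below) amounts to the following C sketch. The helper name here is hypothetical; the coprocessor read, the 0xf8 presence mask and the CR_L2 bit position (bit 26) are taken from the hunk:

	/* hypothetical C rendering of the __xsc3_setup change below */
	static unsigned long xsc3_l2_enable_bit(unsigned long cr_set)
	{
		unsigned long l2ctype;

		/* get L2 present information (CP15 c0, c0, 1) */
		asm("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));
		if (l2ctype & 0xf8)
			cr_set |= 1 << 26;	/* enable L2 together with the MMU */
		return cr_set;
	}
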
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mm/proc-xsc3.S | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 96456f548798..8e4f6dca8997 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -407,6 +407,13 @@ __xsc3_setup: adr r5, xsc3_crval ldmia r5, {r5, r6} + +#ifdef CONFIG_CACHE_XSC3L2 + mrc p15, 1, r0, c0, c0, 1 @ get L2 present information + ands r0, r0, #0xf8 + orrne r6, r6, #(1 << 26) @ enable L2 if present +#endif + mrc p15, 0, r0, c1, c0, 0 @ get control register bic r0, r0, r5 @ ..V. ..R. .... ..A. orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) -- cgit v1.2.3 From 070f1f178c4377c09f72e414513aeacd8403f6d6 Mon Sep 17 00:00:00 2001 From: Bahadir Balban Date: Fri, 25 Dec 2009 14:25:48 +0100 Subject: ARM: 5858/1: Remove unused vma_vm_flags macro from v7wbi_flush_user_tlb_range Signed-off-by: Bahadir Balban Signed-off-by: Russell King --- arch/arm/mm/tlb-v7.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index a26a605b73bd..0cb1848bd876 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S @@ -40,7 +40,6 @@ ENTRY(v7wbi_flush_user_tlb_range) asid r3, r3 @ mask ASID orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT - vma_vm_flags r2, r2 @ get vma->vm_flags 1: #ifdef CONFIG_SMP mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) -- cgit v1.2.3 From 4b529401c5089cf33f7165607cbc2fde43357bfb Mon Sep 17 00:00:00 2001 From: Andreas Fenkart Date: Fri, 8 Jan 2010 14:42:31 -0800 Subject: mm: make totalhigh_pages unsigned long Makes it consistent with the extern declaration, used when CONFIG_HIGHMEM is set Removes redundant casts in printout messages Signed-off-by: Andreas Fenkart Acked-by: Russell King Cc: Ralf Baechle Cc: David Howells Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Chen Liqin Cc: Lennox Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/init.c | 2 +- arch/mips/mm/init.c | 2 +- arch/mips/sgi-ip27/ip27-memory.c | 2 +- arch/mn10300/mm/init.c | 3 +-- arch/score/mm/init.c | 2 +- arch/x86/mm/init_32.c | 3 +-- include/linux/highmem.h | 2 +- 7 files changed, 7 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 52c40d155672..a04ffbbbe253 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -616,7 +616,7 @@ void __init mem_init(void) "%dK data, %dK init, %luK highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); + totalhigh_pages << (PAGE_SHIFT-10)); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 9e8d00389eef..1651942f7feb 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -424,7 +424,7 @@ void __init mem_init(void) reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); + totalhigh_pages << (PAGE_SHIFT-10)); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index f61c164d1e67..bc1297109cc5 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -505,5 +505,5 @@ void __init mem_init(void) (num_physpages - tmp) << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); + totalhigh_pages << (PAGE_SHIFT-10)); } diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index ec1420562dc7..dd27a9a35152 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -118,8 +118,7 @@ void __init mem_init(void) reservedpages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10)) - ); + totalhigh_pages << (PAGE_SHIFT - 10)); } /* diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c index 8c15b2c85d5a..dfaf458d6702 100644 --- a/arch/score/mm/init.c +++ b/arch/score/mm/init.c @@ -106,7 +106,7 @@ void __init mem_init(void) ram << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); + totalhigh_pages << (PAGE_SHIFT-10)); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c973f8e2a6cf..9a0c258a86be 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -892,8 +892,7 @@ void __init mem_init(void) reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, - (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) - ); + totalhigh_pages << (PAGE_SHIFT-10)); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 211ff4497269..ab2cc20e21a5 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -46,7 +46,7 @@ void kmap_flush_unused(void); static inline unsigned int nr_free_highpages(void) { return 0; } -#define totalhigh_pages 0 +#define totalhigh_pages 0UL #ifndef ARCH_HAS_KMAP static inline void *kmap(struct page *page) -- cgit v1.2.3 From aff7b4f86737f1ae364bf5ece9a9b8586ddb2db4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 12 Jan 2010 19:02:05 +0000 Subject: ARM: Ensure 
ARMv6/7 mm files are built using appropriate assembler options A kernel with both ARMv6 and ARMv7 selected results in build errors. Fix this by specifying the proper architectures for these assembly files. Signed-off-by: Russell King --- arch/arm/mm/Makefile | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 827e238e5d4a..e8d34a80851c 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -27,6 +27,9 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o +AFLAGS_abort-ev6.o :=-Wa,-march=armv6k +AFLAGS_abort-ev7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o @@ -39,6 +42,9 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o +AFLAGS_cache-v6.o :=-Wa,-march=armv6 +AFLAGS_cache-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o @@ -58,6 +64,9 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o +AFLAGS_tlb-v6.o :=-Wa,-march=armv6 +AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o @@ -84,6 +93,9 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o obj-$(CONFIG_CPU_V6) += proc-v6.o obj-$(CONFIG_CPU_V7) += proc-v7.o +AFLAGS_proc-v6.o :=-Wa,-march=armv6 +AFLAGS_proc-v7.o :=-Wa,-march=armv7-a + obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o -- cgit v1.2.3 From c26c20b823d48addbde9cb5709d80655c6fadf18 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:21:35 +0000 Subject: ARM: make_coherent: split adjust_pte() in two adjust_pte() walks the page tables, and do_adjust_pte() does the page table manipulation. Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 52 +++++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 20 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 56ee15321b00..074e6bb54eb3 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; * Therefore those configurations which might call adjust_pte (those * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ -static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep) { - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte, entry; + pte_t entry = *ptep; int ret; - pgd = pgd_offset(vma->vm_mm, address); - if (pgd_none(*pgd)) - goto no_pgd; - if (pgd_bad(*pgd)) - goto bad_pgd; - - pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) - goto no_pmd; - if (pmd_bad(*pmd)) - goto bad_pmd; - - pte = pte_offset_map(pmd, address); - entry = *pte; - /* * If this page is present, it's actually being shared. 
*/ @@ -74,10 +58,38 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) (pfn << PAGE_SHIFT) + PAGE_SIZE); pte_val(entry) &= ~L_PTE_MT_MASK; pte_val(entry) |= shared_pte_mask; - set_pte_at(vma->vm_mm, address, pte, entry); + set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_page(vma, address); } + + return ret; +} + +static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int ret; + + pgd = pgd_offset(vma->vm_mm, address); + if (pgd_none(*pgd)) + goto no_pgd; + if (pgd_bad(*pgd)) + goto bad_pgd; + + pmd = pmd_offset(pgd, address); + if (pmd_none(*pmd)) + goto no_pmd; + if (pmd_bad(*pmd)) + goto bad_pmd; + + pte = pte_offset_map(pmd, address); + + ret = do_adjust_pte(vma, address, pte); + pte_unmap(pte); + return ret; bad_pgd: -- cgit v1.2.3 From f8a85f1164a33e3eb5b421b137ced793ed53ee33 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:23:44 +0000 Subject: ARM: make_coherent: convert adjust_pte() to use p*d_none_or_clear_bad() Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 074e6bb54eb3..7a8efe1b37d8 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -73,16 +73,12 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) int ret; pgd = pgd_offset(vma->vm_mm, address); - if (pgd_none(*pgd)) - goto no_pgd; - if (pgd_bad(*pgd)) - goto bad_pgd; + if (pgd_none_or_clear_bad(pgd)) + return 0; pmd = pmd_offset(pgd, address); - if (pmd_none(*pmd)) - goto no_pmd; - if (pmd_bad(*pmd)) - goto bad_pmd; + if (pmd_none_or_clear_bad(pmd)) + return 0; pte = pte_offset_map(pmd, address); @@ -91,18 +87,6 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) pte_unmap(pte); return ret; - -bad_pgd: - pgd_ERROR(*pgd); - pgd_clear(pgd); -no_pgd: - return 0; - -bad_pmd: - pmd_ERROR(*pmd); - pmd_clear(pmd); -no_pmd: - return 0; } static void -- cgit v1.2.3 From 56dd47098abe1fdde598a8d8b7c04d775506f456 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:24:34 +0000 Subject: ARM: make_coherent: fix problems with highpte, part 1 update_mmu_cache() is called with a page table already mapped. We call make_coherent(), which then calls adjust_pte() which wants to map other page tables. This causes kmap_atomic() to BUG() because the slot its trying to use is already taken. Since do_adjust_pte() modifies the page tables, we are also missing any form of locking, so we're risking corrupting the page tables. Fix this by using pte_offset_map_nested(), and taking the pte page table lock around do_adjust_pte(). 
Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 7a8efe1b37d8..8e9bc517132e 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -67,6 +67,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, static int adjust_pte(struct vm_area_struct *vma, unsigned long address) { + spinlock_t *ptl; pgd_t *pgd; pmd_t *pmd; pte_t *pte; @@ -80,11 +81,19 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) if (pmd_none_or_clear_bad(pmd)) return 0; - pte = pte_offset_map(pmd, address); + /* + * This is called while another page table is mapped, so we + * must use the nested version. This also means we need to + * open-code the spin-locking. + */ + ptl = pte_lockptr(vma->vm_mm, pmd); + pte = pte_offset_map_nested(pmd, address); + spin_lock(ptl); ret = do_adjust_pte(vma, address, pte); - pte_unmap(pte); + spin_unlock(ptl); + pte_unmap_nested(pte); return ret; } -- cgit v1.2.3 From ed42acaef1a9d51631a31b55e9ed52d400430492 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:31:38 +0000 Subject: ARM: make_coherent: avoid recalculating the pfn for the modified page We already know the pfn for the page to be modified in make_coherent, so let's stop recalculating it unnecessarily. Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 8e9bc517132e..ae88f2c3a6df 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -37,7 +37,7 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. */ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep) + unsigned long pfn, pte_t *ptep) { pte_t entry = *ptep; int ret; @@ -52,7 +52,6 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, * fault (ie, is old), we can safely ignore any issues. 
*/ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { - unsigned long pfn = pte_pfn(entry); flush_cache_page(vma, address, pfn); outer_flush_range((pfn << PAGE_SHIFT), (pfn << PAGE_SHIFT) + PAGE_SIZE); @@ -65,7 +64,8 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, return ret; } -static int adjust_pte(struct vm_area_struct *vma, unsigned long address) +static int adjust_pte(struct vm_area_struct *vma, unsigned long address, + unsigned long pfn) { spinlock_t *ptl; pgd_t *pgd; @@ -90,7 +90,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) pte = pte_offset_map_nested(pmd, address); spin_lock(ptl); - ret = do_adjust_pte(vma, address, pte); + ret = do_adjust_pte(vma, address, pfn, pte); spin_unlock(ptl); pte_unmap_nested(pte); @@ -127,11 +127,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - aliases += adjust_pte(mpnt, mpnt->vm_start + offset); + aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); } flush_dcache_mmap_unlock(mapping); if (aliases) - adjust_pte(vma, addr); + adjust_pte(vma, addr, pfn); else flush_cache_page(vma, addr, pfn); } -- cgit v1.2.3 From 18eabe2347ae7a11b3db768695913724166dfb0e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: introduce the idea of buffer ownership The DMA API has the notion of buffer ownership; make it explicit in the ARM implementation of this API. This gives us a set of hooks to allow us to deal with CPU cache issues arising from non-cache coherent DMA. Signed-off-by: Russell King Tested-By: Santosh Shilimkar Tested-By: Jamie Iles --- arch/arm/common/dmabounce.c | 4 ++- arch/arm/include/asm/dma-mapping.h | 64 ++++++++++++++++++++++++++++---------- arch/arm/mm/dma-mapping.c | 13 +++++--- 3 files changed, 58 insertions(+), 23 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index bc90364a96c7..51499d68b161 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, * We don't need to sync the DMA buffer since * it was allocated via the coherent allocators. */ - dma_cache_maint(ptr, size, dir); + __dma_single_cpu_to_dev(ptr, size, dir); } return dma_addr; @@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, __cpuc_flush_kernel_dcache_area(ptr, size); } free_safe_buffer(dev->archdata.dmabounce, buf); + } else { + __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); } } diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a96300bf83fd..e850f5c1607b 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -57,19 +57,48 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) #endif /* - * DMA-consistent mapping functions. These allocate/free a region of - * uncached, unwrite-buffered mapped memory space for use with DMA - * devices. This is the "generic" version. The PCI specific version - * is in pci.h - * - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. - * Use the driver DMA support - see dma-mapping.h (dma_sync_*) + * Private support functions: these are not part of the API and are + * liable to change. 
Drivers must not use these. */ extern void dma_cache_maint(const void *kaddr, size_t size, int rw); extern void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int rw); +/* + * The DMA API is built upon the notion of "buffer ownership". A buffer + * is either exclusively owned by the CPU (and therefore may be accessed + * by it) or exclusively owned by the DMA device. These helper functions + * represent the transitions between these two ownership states. + * + * As above, these are private support functions and not part of the API. + * Drivers must not use these. + */ +static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint(kaddr, size, dir); +} + +static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} + +static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + if (!arch_is_coherent()) + dma_cache_maint_page(page, off, size, dir); +} + +static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} + /* * Return whether the given device DMA address mask can be supported * properly. For example, if your device can only drive the low 24-bits @@ -304,8 +333,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint(cpu_addr, size, dir); + __dma_single_cpu_to_dev(cpu_addr, size, dir); return virt_to_dma(dev, cpu_addr); } @@ -329,8 +357,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, { BUG_ON(!valid_dma_direction(dir)); - if (!arch_is_coherent()) - dma_cache_maint_page(page, offset, size, dir); + __dma_page_cpu_to_dev(page, offset, size, dir); return page_to_dma(dev, page) + offset; } @@ -352,7 +379,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); } /** @@ -372,7 +399,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, static inline void dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK, + size, dir); } #endif /* CONFIG_DMABOUNCE */ @@ -400,7 +428,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev, { BUG_ON(!valid_dma_direction(dir)); - dmabounce_sync_for_cpu(dev, handle, offset, size, dir); + if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir)) + return; + + __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_range_for_device(struct device *dev, @@ -412,8 +443,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev, if (!dmabounce_sync_for_device(dev, handle, offset, size, dir)) return; - if (!arch_is_coherent()) - dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir); + __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir); } static inline void dma_sync_single_for_cpu(struct device *dev, diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 
26325cb5d368..a316c9459526 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -573,8 +573,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int i; for_each_sg(sg, s, nents, i) { - dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, - sg_dma_len(s), dir); + if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, + sg_dma_len(s), dir)) + continue; + + __dma_page_dev_to_cpu(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_cpu); @@ -597,9 +601,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sg_dma_len(s), dir)) continue; - if (!arch_is_coherent()) - dma_cache_maint_page(sg_page(s), s->offset, - s->length, dir); + __dma_page_cpu_to_dev(sg_page(s), s->offset, + s->length, dir); } } EXPORT_SYMBOL(dma_sync_sg_for_device); -- cgit v1.2.3 From 4ea0d7371e808628d11154b0d44140b70f05b998 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 16:27:17 +0000 Subject: ARM: dma-mapping: push buffer ownership down into dma-mapping.c Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/dma-mapping.h | 39 ++++++++++++++++++++++++-------------- arch/arm/mm/dma-mapping.c | 34 +++++++++++++++++++++++++++++---- 2 files changed, 55 insertions(+), 18 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index e850f5c1607b..256ee1c9f51a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -56,47 +56,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) } #endif -/* - * Private support functions: these are not part of the API and are - * liable to change. Drivers must not use these. - */ -extern void dma_cache_maint(const void *kaddr, size_t size, int rw); -extern void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int rw); - /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed * by it) or exclusively owned by the DMA device. These helper functions * represent the transitions between these two ownership states. * - * As above, these are private support functions and not part of the API. - * Drivers must not use these. + * Note, however, that on later ARMs, this notion does not work due to + * speculative prefetches. We model our approach on the assumption that + * the CPU does do speculative prefetches, which means we clean caches + * before transfers and delay cache invalidation until transfer completion. + * + * Private support functions: these are not part of the API and are + * liable to change. Drivers must not use these. 
*/ static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + extern void ___dma_single_cpu_to_dev(const void *, size_t, + enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint(kaddr, size, dir); + ___dma_single_cpu_to_dev(kaddr, size, dir); } static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_single_dev_to_cpu(const void *, size_t, + enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_single_dev_to_cpu(kaddr, size, dir); } static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { + extern void ___dma_page_cpu_to_dev(struct page *, unsigned long, + size_t, enum dma_data_direction); + if (!arch_is_coherent()) - dma_cache_maint_page(page, off, size, dir); + ___dma_page_cpu_to_dev(page, off, size, dir); } static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + extern void ___dma_page_dev_to_cpu(struct page *, unsigned long, + size_t, enum dma_data_direction); + + if (!arch_is_coherent()) + ___dma_page_dev_to_cpu(page, off, size, dir); } /* diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index a316c9459526..bbf87880b915 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,7 +404,7 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -void dma_cache_maint(const void *start, size_t size, int direction) +static void dma_cache_maint(const void *start, size_t size, int direction) { void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); @@ -431,7 +431,20 @@ void dma_cache_maint(const void *start, size_t size, int direction) inner_op(start, start + size); outer_op(__pa(start), __pa(start) + size); } -EXPORT_SYMBOL(dma_cache_maint); + +void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + dma_cache_maint(kaddr, size, dir); +} +EXPORT_SYMBOL(___dma_single_cpu_to_dev); + +void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, + enum dma_data_direction dir) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, size_t size, int direction) @@ -474,7 +487,7 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, outer_op(paddr, paddr + size); } -void dma_cache_maint_page(struct page *page, unsigned long offset, +static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, int dir) { /* @@ -499,7 +512,20 @@ void dma_cache_maint_page(struct page *page, unsigned long offset, left -= len; } while (left); } -EXPORT_SYMBOL(dma_cache_maint_page); + +void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + dma_cache_maint_page(page, off, size, dir); +} +EXPORT_SYMBOL(___dma_page_cpu_to_dev); + +void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, + size_t size, enum dma_data_direction dir) +{ + /* nothing to do */ +} +EXPORT_SYMBOL(___dma_page_dev_to_cpu); /** * dma_map_sg - map a set of SG buffers for streaming mode DMA -- cgit v1.2.3 From 65af191a0414d0e1145f67c153e1b63d122dfbb4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 17:53:33 
+0000 Subject: ARM: dma-mapping: move selection of page ops out of dma_cache_maint_contiguous Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/dma-mapping.c | 59 ++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 29 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index bbf87880b915..77dc483e64c1 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, - size_t size, int direction) + size_t size, void (*op)(const void *, const void *)) { void *vaddr; - unsigned long paddr; - void (*inner_op)(const void *, const void *); - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; - outer_op = outer_flush_range; - break; - default: - BUG(); - } if (!PageHighMem(page)) { vaddr = page_address(page) + offset; - inner_op(vaddr, vaddr + size); + op(vaddr, vaddr + size); } else { vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; - inner_op(vaddr, vaddr + size); + op(vaddr, vaddr + size); kunmap_high(page); } } - - paddr = page_to_phys(page) + offset; - outer_op(paddr, paddr + size); } static void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, int dir) + size_t size, void (*op)(const void *, const void *)) { /* * A single sg entry may refer to multiple physically contiguous @@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, } len = PAGE_SIZE - offset; } - dma_cache_maint_contiguous(page, offset, len, dir); + dma_cache_maint_contiguous(page, offset, len, op); offset = 0; page++; left -= len; @@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - dma_cache_maint_page(page, off, size, dir); + unsigned long paddr; + void (*inner_op)(const void *, const void *); + void (*outer_op)(unsigned long, unsigned long); + + switch (direction) { + case DMA_FROM_DEVICE: /* invalidate only */ + inner_op = dmac_inv_range; + outer_op = outer_inv_range; + break; + case DMA_TO_DEVICE: /* writeback only */ + inner_op = dmac_clean_range; + outer_op = outer_clean_range; + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + inner_op = dmac_flush_range; + outer_op = outer_flush_range; + break; + default: + BUG(); + } + + dma_cache_maint_page(page, off, size, inner_op); + + paddr = page_to_phys(page) + off; + outer_op(paddr, paddr + size); } EXPORT_SYMBOL(___dma_page_cpu_to_dev); -- cgit v1.2.3 From 93f1d629e22b08642eb713ad96ac2cb9ade0641c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 24 Nov 2009 14:41:01 +0000 Subject: ARM: dma-mapping: simplify dma_cache_maint_page dma_cache_maint_contiguous is now simple enough to live inside dma_cache_maint_page, so move it there. 
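A condensed sketch of the merged loop, mirroring the hunk below: the buffer is walked a page at a time, highmem pages are temporarily mapped with kmap_high_get() (which may return NULL if the page currently has no kmap) just long enough to apply the cache operation, and lowmem pages use their permanent kernel mapping directly.

	static void dma_cache_maint_page(struct page *page, unsigned long offset,
		size_t size, void (*op)(const void *, const void *))
	{
		size_t left = size;

		do {
			size_t len = left;
			void *vaddr;

			if (PageHighMem(page)) {
				if (len + offset > PAGE_SIZE) {
					if (offset >= PAGE_SIZE) {
						page += offset / PAGE_SIZE;
						offset %= PAGE_SIZE;
					}
					len = PAGE_SIZE - offset;
				}
				vaddr = kmap_high_get(page);
				if (vaddr) {
					vaddr += offset;
					op(vaddr, vaddr + len);
					kunmap_high(page);
				}
			} else {
				vaddr = page_address(page) + offset;
				op(vaddr, vaddr + len);
			}
			offset = 0;
			page++;
			left -= len;
		} while (left);
	}
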
Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/dma-mapping.c | 42 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 24 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 77dc483e64c1..0d68d2c83cda 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -446,24 +446,6 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, } EXPORT_SYMBOL(___dma_single_dev_to_cpu); -static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, - size_t size, void (*op)(const void *, const void *)) -{ - void *vaddr; - - if (!PageHighMem(page)) { - vaddr = page_address(page) + offset; - op(vaddr, vaddr + size); - } else { - vaddr = kmap_high_get(page); - if (vaddr) { - vaddr += offset; - op(vaddr, vaddr + size); - kunmap_high(page); - } - } -} - static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, void (*op)(const void *, const void *)) { @@ -476,14 +458,26 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t left = size; do { size_t len = left; - if (PageHighMem(page) && len + offset > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset / PAGE_SIZE; - offset %= PAGE_SIZE; + void *vaddr; + + if (PageHighMem(page)) { + if (len + offset > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + } + len = PAGE_SIZE - offset; + } + vaddr = kmap_high_get(page); + if (vaddr) { + vaddr += offset; + op(vaddr, vaddr + len); + kunmap_high(page); } - len = PAGE_SIZE - offset; + } else { + vaddr = page_address(page) + offset; + op(vaddr, vaddr + len); } - dma_cache_maint_contiguous(page, offset, len, op); offset = 0; page++; left -= len; -- cgit v1.2.3 From a9c9147eb9b1dba0ce567a41897c7773b4d1b0bc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:19:58 +0000 Subject: ARM: dma-mapping: provide per-cpu type map/unmap functions Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 9 +++++++++ arch/arm/kernel/asm-offsets.c | 5 +++++ arch/arm/mm/cache-fa.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v3.S | 24 ++++++++++++++++++++++ arch/arm/mm/cache-v4.S | 24 ++++++++++++++++++++++ arch/arm/mm/cache-v4wb.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v4wt.S | 25 +++++++++++++++++++++++ arch/arm/mm/cache-v6.S | 26 ++++++++++++++++++++++++ arch/arm/mm/cache-v7.S | 26 ++++++++++++++++++++++++ arch/arm/mm/dma-mapping.c | 29 +++++++++++---------------- arch/arm/mm/proc-arm1020.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1020e.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1022.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm1026.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm920.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm922.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm925.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm926.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm940.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-arm946.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-feroceon.S | 42 +++++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-mohawk.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-xsc3.S | 26 ++++++++++++++++++++++++ arch/arm/mm/proc-xscale.S | 41 ++++++++++++++++++++++++++++++++++++++ 24 files changed, 598 insertions(+), 17 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/cacheflush.h 
b/arch/arm/include/asm/cacheflush.h index 730aefcfbee3..4c733236e342 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -213,6 +213,9 @@ struct cpu_cache_fns { void (*coherent_user_range)(unsigned long, unsigned long); void (*flush_kern_dcache_area)(void *, size_t); + void (*dma_map_area)(const void *, size_t, int); + void (*dma_unmap_area)(const void *, size_t, int); + void (*dma_inv_range)(const void *, const void *); void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); @@ -244,6 +247,8 @@ extern struct cpu_cache_fns cpu_cache; * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ +#define dmac_map_area cpu_cache.dma_map_area +#define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_inv_range cpu_cache.dma_inv_range #define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range @@ -270,10 +275,14 @@ extern void __cpuc_flush_dcache_area(void *, size_t); * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ +#define dmac_map_area __glue(_CACHE,_dma_map_area) +#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) #define dmac_inv_range __glue(_CACHE,_dma_inv_range) #define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) +extern void dmac_map_area(const void *, size_t, int); +extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_inv_range(const void *, const void *); extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4a881258bb17..883511522fca 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include @@ -112,5 +113,9 @@ int main(void) #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif + BLANK(); + DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; } diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index a89444a3c016..8ebffdd6fcff 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq fa_dma_clean_range + bcs fa_dma_inv_range + b fa_dma_flush_range +ENDPROC(fa_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(fa_dma_unmap_area) + mov pc, lr +ENDPROC(fa_dma_unmap_area) + __INITDATA .type fa_cache_fns, #object @@ -215,6 +239,8 @@ ENTRY(fa_cache_fns) .long fa_coherent_kern_range .long fa_coherent_user_range .long fa_flush_kern_dcache_area + .long fa_dma_map_area + .long fa_dma_unmap_area .long fa_dma_inv_range .long fa_dma_clean_range .long fa_dma_flush_range diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2a482731ea36..6df52dc014be 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -121,6 +121,28 @@ ENTRY(v3_dma_flush_range) ENTRY(v3_dma_clean_range) mov pc, 
lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v3_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v3_dma_map_area) + mov pc, lr +ENDPROC(v3_dma_unmap_area) +ENDPROC(v3_dma_map_area) + __INITDATA .type v3_cache_fns, #object @@ -131,6 +153,8 @@ ENTRY(v3_cache_fns) .long v3_coherent_kern_range .long v3_coherent_user_range .long v3_flush_kern_dcache_area + .long v3_dma_map_area + .long v3_dma_unmap_area .long v3_dma_inv_range .long v3_dma_clean_range .long v3_dma_flush_range diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 5c7da3e372e9..df3b423713b9 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -133,6 +133,28 @@ ENTRY(v4_dma_flush_range) ENTRY(v4_dma_clean_range) mov pc, lr +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_unmap_area) + teq r2, #DMA_TO_DEVICE + bne v4_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4_dma_map_area) + mov pc, lr +ENDPROC(v4_dma_unmap_area) +ENDPROC(v4_dma_map_area) + __INITDATA .type v4_cache_fns, #object @@ -143,6 +165,8 @@ ENTRY(v4_cache_fns) .long v4_coherent_kern_range .long v4_coherent_user_range .long v4_flush_kern_dcache_area + .long v4_dma_map_area + .long v4_dma_unmap_area .long v4_dma_inv_range .long v4_dma_clean_range .long v4_dma_flush_range diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 3dbedf1ec0e7..32e7a7448496 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range) .globl v4wb_dma_flush_range .set v4wb_dma_flush_range, v4wb_coherent_kern_range +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v4wb_dma_clean_range + bcs v4wb_dma_inv_range + b v4wb_dma_flush_range +ENDPROC(v4wb_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wb_dma_unmap_area) + mov pc, lr +ENDPROC(v4wb_dma_unmap_area) + __INITDATA .type v4wb_cache_fns, #object @@ -226,6 +250,8 @@ ENTRY(v4wb_cache_fns) .long v4wb_coherent_kern_range .long v4wb_coherent_user_range .long v4wb_flush_kern_dcache_area + .long v4wb_dma_map_area + .long v4wb_dma_unmap_area .long v4wb_dma_inv_range .long v4wb_dma_clean_range .long v4wb_dma_flush_range diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index b3b7410270b4..3d8dad5b2650 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -172,6 +172,29 @@ ENTRY(v4wt_dma_clean_range) .globl v4wt_dma_flush_range .equ v4wt_dma_flush_range, v4wt_dma_inv_range +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v4wt_dma_inv_range + /* FALLTHROUGH */ + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - 
size - size of region + * - dir - DMA direction + */ +ENTRY(v4wt_dma_map_area) + mov pc, lr +ENDPROC(v4wt_dma_unmap_area) +ENDPROC(v4wt_dma_map_area) + __INITDATA .type v4wt_cache_fns, #object @@ -182,6 +205,8 @@ ENTRY(v4wt_cache_fns) .long v4wt_coherent_kern_range .long v4wt_coherent_user_range .long v4wt_flush_kern_dcache_area + .long v4wt_dma_map_area + .long v4wt_dma_unmap_area .long v4wt_dma_inv_range .long v4wt_dma_clean_range .long v4wt_dma_flush_range diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 4ba0a24ce6f5..6f926dd0e0f7 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -263,6 +263,30 @@ ENTRY(v6_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain write buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v6_dma_clean_range + bcs v6_dma_inv_range + b v6_dma_flush_range +ENDPROC(v6_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v6_dma_unmap_area) + mov pc, lr +ENDPROC(v6_dma_unmap_area) + __INITDATA .type v6_cache_fns, #object @@ -273,6 +297,8 @@ ENTRY(v6_cache_fns) .long v6_coherent_kern_range .long v6_coherent_user_range .long v6_flush_kern_dcache_area + .long v6_dma_map_area + .long v6_dma_unmap_area .long v6_dma_inv_range .long v6_dma_clean_range .long v6_dma_flush_range diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 9073db849fb4..e30d8bc67182 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -271,6 +271,30 @@ ENTRY(v7_dma_flush_range) mov pc, lr ENDPROC(v7_dma_flush_range) +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq v7_dma_clean_range + bcs v7_dma_inv_range + b v7_dma_flush_range +ENDPROC(v7_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(v7_dma_unmap_area) + mov pc, lr +ENDPROC(v7_dma_unmap_area) + __INITDATA .type v7_cache_fns, #object @@ -281,6 +305,8 @@ ENTRY(v7_cache_fns) .long v7_coherent_kern_range .long v7_coherent_user_range .long v7_flush_kern_dcache_area + .long v7_dma_map_area + .long v7_dma_unmap_area .long v7_dma_inv_range .long v7_dma_clean_range .long v7_dma_flush_range diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0d68d2c83cda..efa8efa33f5e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -406,35 +406,31 @@ EXPORT_SYMBOL(dma_free_coherent); */ static void dma_cache_maint(const void *start, size_t size, int direction) { - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); - BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1)); - switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - inner_op(start, start + size); outer_op(__pa(start), __pa(start) + size); } void 
___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_map_area(kaddr, size, dir); dma_cache_maint(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -442,12 +438,15 @@ EXPORT_SYMBOL(___dma_single_cpu_to_dev); void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + + dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); static void dma_cache_maint_page(struct page *page, unsigned long offset, - size_t size, void (*op)(const void *, const void *)) + size_t size, enum dma_data_direction dir, + void (*op)(const void *, size_t, int)) { /* * A single sg entry may refer to multiple physically contiguous @@ -471,12 +470,12 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); kunmap_high(page); } } else { vaddr = page_address(page) + offset; - op(vaddr, vaddr + len); + op(vaddr, len, dir); } offset = 0; page++; @@ -488,27 +487,23 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; - void (*inner_op)(const void *, const void *); void (*outer_op)(unsigned long, unsigned long); switch (direction) { case DMA_FROM_DEVICE: /* invalidate only */ - inner_op = dmac_inv_range; outer_op = outer_inv_range; break; case DMA_TO_DEVICE: /* writeback only */ - inner_op = dmac_clean_range; outer_op = outer_clean_range; break; case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - inner_op = dmac_flush_range; outer_op = outer_flush_range; break; default: BUG(); } - dma_cache_maint_page(page, off, size, inner_op); + dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; outer_op(paddr, paddr + size); @@ -518,7 +513,7 @@ EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { - /* nothing to do */ + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 8012e24282b2..c85f5eb42634 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020_dma_clean_range + bcs arm1020_dma_inv_range + b arm1020_dma_flush_range +ENDPROC(arm1020_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020_dma_unmap_area) + ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_cache_all .long arm1020_flush_user_cache_all @@ -337,6 +361,8 @@ ENTRY(arm1020_cache_fns) .long arm1020_coherent_kern_range .long arm1020_coherent_user_range .long arm1020_flush_kern_dcache_area + .long arm1020_dma_map_area + .long arm1020_dma_unmap_area .long arm1020_dma_inv_range .long arm1020_dma_clean_range .long 
arm1020_dma_flush_range diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 41fe25d234f5..5a3cf7620a2c 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1020e_dma_clean_range + bcs arm1020e_dma_inv_range + b arm1020e_dma_flush_range +ENDPROC(arm1020e_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1020e_dma_unmap_area) + mov pc, lr +ENDPROC(arm1020e_dma_unmap_area) + ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_cache_all .long arm1020e_flush_user_cache_all @@ -323,6 +347,8 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_coherent_kern_range .long arm1020e_coherent_user_range .long arm1020e_flush_kern_dcache_area + .long arm1020e_dma_map_area + .long arm1020e_dma_unmap_area .long arm1020e_dma_inv_range .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 20a5b1b31a70..fec8f5878438 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1022_dma_clean_range + bcs arm1022_dma_inv_range + b arm1022_dma_flush_range +ENDPROC(arm1022_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1022_dma_unmap_area) + mov pc, lr +ENDPROC(arm1022_dma_unmap_area) + ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_cache_all .long arm1022_flush_user_cache_all @@ -312,6 +336,8 @@ ENTRY(arm1022_cache_fns) .long arm1022_coherent_kern_range .long arm1022_coherent_user_range .long arm1022_flush_kern_dcache_area + .long arm1022_dma_map_area + .long arm1022_dma_unmap_area .long arm1022_dma_inv_range .long arm1022_dma_clean_range .long arm1022_dma_flush_range diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 96aedb10fcc4..9ece6f666497 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm1026_dma_clean_range + bcs arm1026_dma_inv_range + b arm1026_dma_flush_range +ENDPROC(arm1026_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm1026_dma_unmap_area) + mov pc, lr +ENDPROC(arm1026_dma_unmap_area) + ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_cache_all .long arm1026_flush_user_cache_all @@ -306,6 +330,8 @@ ENTRY(arm1026_cache_fns) .long arm1026_coherent_kern_range .long arm1026_coherent_user_range .long 
arm1026_flush_kern_dcache_area + .long arm1026_dma_map_area + .long arm1026_dma_unmap_area .long arm1026_dma_inv_range .long arm1026_dma_clean_range .long arm1026_dma_flush_range diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 471669e2d7cb..6f6ab2747da6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm920_dma_clean_range + bcs arm920_dma_inv_range + b arm920_dma_flush_range +ENDPROC(arm920_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm920_dma_unmap_area) + mov pc, lr +ENDPROC(arm920_dma_unmap_area) + ENTRY(arm920_cache_fns) .long arm920_flush_kern_cache_all .long arm920_flush_user_cache_all @@ -295,6 +319,8 @@ ENTRY(arm920_cache_fns) .long arm920_coherent_kern_range .long arm920_coherent_user_range .long arm920_flush_kern_dcache_area + .long arm920_dma_map_area + .long arm920_dma_unmap_area .long arm920_dma_inv_range .long arm920_dma_clean_range .long arm920_dma_flush_range diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index ee111b00fa41..4e4396b121ca 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm922_dma_clean_range + bcs arm922_dma_inv_range + b arm922_dma_flush_range +ENDPROC(arm922_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm922_dma_unmap_area) + mov pc, lr +ENDPROC(arm922_dma_unmap_area) + ENTRY(arm922_cache_fns) .long arm922_flush_kern_cache_all .long arm922_flush_user_cache_all @@ -297,6 +321,8 @@ ENTRY(arm922_cache_fns) .long arm922_coherent_kern_range .long arm922_coherent_user_range .long arm922_flush_kern_dcache_area + .long arm922_dma_map_area + .long arm922_dma_unmap_area .long arm922_dma_inv_range .long arm922_dma_clean_range .long arm922_dma_flush_range diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 8deb5bde58e4..7c01c5d1108c 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm925_dma_clean_range + bcs arm925_dma_inv_range + b arm925_dma_flush_range +ENDPROC(arm925_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm925_dma_unmap_area) + mov pc, lr +ENDPROC(arm925_dma_unmap_area) + ENTRY(arm925_cache_fns) .long arm925_flush_kern_cache_all .long arm925_flush_user_cache_all @@ -348,6 +372,8 @@ ENTRY(arm925_cache_fns) .long 
arm925_coherent_kern_range .long arm925_coherent_user_range .long arm925_flush_kern_dcache_area + .long arm925_dma_map_area + .long arm925_dma_unmap_area .long arm925_dma_inv_range .long arm925_dma_clean_range .long arm925_dma_flush_range diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 64db6e275a44..72a01a4b80ab 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm926_dma_clean_range + bcs arm926_dma_inv_range + b arm926_dma_flush_range +ENDPROC(arm926_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm926_dma_unmap_area) + mov pc, lr +ENDPROC(arm926_dma_unmap_area) + ENTRY(arm926_cache_fns) .long arm926_flush_kern_cache_all .long arm926_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(arm926_cache_fns) .long arm926_coherent_kern_range .long arm926_coherent_user_range .long arm926_flush_kern_dcache_area + .long arm926_dma_map_area + .long arm926_dma_unmap_area .long arm926_dma_inv_range .long arm926_dma_clean_range .long arm926_dma_flush_range diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8196b9f401fb..6bb58fca7270 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range) mcr p15, 0, ip, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm940_dma_clean_range + bcs arm940_dma_inv_range + b arm940_dma_flush_range +ENDPROC(arm940_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm940_dma_unmap_area) + mov pc, lr +ENDPROC(arm940_dma_unmap_area) + ENTRY(arm940_cache_fns) .long arm940_flush_kern_cache_all .long arm940_flush_user_cache_all @@ -240,6 +264,8 @@ ENTRY(arm940_cache_fns) .long arm940_coherent_kern_range .long arm940_coherent_user_range .long arm940_flush_kern_dcache_area + .long arm940_dma_map_area + .long arm940_dma_unmap_area .long arm940_dma_inv_range .long arm940_dma_clean_range .long arm940_dma_flush_range diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 9a951239c86c..ac0f9ba719d7 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq arm946_dma_clean_range + bcs arm946_dma_inv_range + b arm946_dma_flush_range +ENDPROC(arm946_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(arm946_dma_unmap_area) + mov pc, lr +ENDPROC(arm946_dma_unmap_area) + ENTRY(arm946_cache_fns) .long arm946_flush_kern_cache_all .long arm946_flush_user_cache_all @@ -282,6 
+306,8 @@ ENTRY(arm946_cache_fns) .long arm946_coherent_kern_range .long arm946_coherent_user_range .long arm946_flush_kern_dcache_area + .long arm946_dma_map_area + .long arm946_dma_unmap_area .long arm946_dma_inv_range .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index dbc39383e66a..97e1d784f152 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_dma_clean_range + bcs feroceon_dma_inv_range + b feroceon_dma_flush_range +ENDPROC(feroceon_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_range_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq feroceon_range_dma_clean_range + bcs feroceon_range_dma_inv_range + b feroceon_range_dma_flush_range +ENDPROC(feroceon_range_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(feroceon_dma_unmap_area) + mov pc, lr +ENDPROC(feroceon_dma_unmap_area) + ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_cache_all .long feroceon_flush_user_cache_all @@ -374,6 +412,8 @@ ENTRY(feroceon_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_flush_kern_dcache_area + .long feroceon_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_dma_inv_range .long feroceon_dma_clean_range .long feroceon_dma_flush_range @@ -385,6 +425,8 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_coherent_kern_range .long feroceon_coherent_user_range .long feroceon_range_flush_kern_dcache_area + .long feroceon_range_dma_map_area + .long feroceon_dma_unmap_area .long feroceon_range_dma_inv_range .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9674d36cc97d..55b7fbec6548 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ drain WB mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq mohawk_dma_clean_range + bcs mohawk_dma_inv_range + b mohawk_dma_flush_range +ENDPROC(mohawk_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(mohawk_dma_unmap_area) + mov pc, lr +ENDPROC(mohawk_dma_unmap_area) + ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_cache_all .long mohawk_flush_user_cache_all @@ -275,6 +299,8 @@ ENTRY(mohawk_cache_fns) .long mohawk_coherent_kern_range .long mohawk_coherent_user_range .long mohawk_flush_kern_dcache_area + .long mohawk_dma_map_area + .long mohawk_dma_unmap_area .long mohawk_dma_inv_range .long mohawk_dma_clean_range .long mohawk_dma_flush_range diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 96456f548798..4e4ce889b3e6 100644 --- 
a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ data write barrier mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xsc3_dma_clean_range + bcs xsc3_dma_inv_range + b xsc3_dma_flush_range +ENDPROC(xsc3_dma_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xsc3_dma_unmap_area) + mov pc, lr +ENDPROC(xsc3_dma_unmap_area) + ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_cache_all .long xsc3_flush_user_cache_all @@ -311,6 +335,8 @@ ENTRY(xsc3_cache_fns) .long xsc3_coherent_kern_range .long xsc3_coherent_user_range .long xsc3_flush_kern_dcache_area + .long xsc3_dma_map_area + .long xsc3_dma_unmap_area .long xsc3_dma_inv_range .long xsc3_dma_clean_range .long xsc3_dma_flush_range diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 93df47265f2d..a7999f94bf27 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range) mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer mov pc, lr +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_map_area) + add r1, r1, r0 + cmp r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + bcs xscale_dma_inv_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_map_area) + +/* + * dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_a0_map_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + beq xscale_dma_clean_range + b xscale_dma_flush_range +ENDPROC(xscale_dma_a0_map_area) + +/* + * dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +ENTRY(xscale_dma_unmap_area) + mov pc, lr +ENDPROC(xscale_dma_unmap_area) + ENTRY(xscale_cache_fns) .long xscale_flush_kern_cache_all .long xscale_flush_user_cache_all @@ -370,6 +407,8 @@ ENTRY(xscale_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_map_area + .long xscale_dma_unmap_area .long xscale_dma_inv_range .long xscale_dma_clean_range .long xscale_dma_flush_range @@ -394,6 +433,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_coherent_kern_range .long xscale_coherent_user_range .long xscale_flush_kern_dcache_area + .long xscale_dma_a0_map_area + .long xscale_dma_unmap_area .long xscale_dma_flush_range .long xscale_dma_clean_range .long xscale_dma_flush_range -- cgit v1.2.3 From 702b94bff3c50542a6e4ab9a4f4cef093262fe65 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 26 Nov 2009 16:24:19 +0000 Subject: ARM: dma-mapping: remove dmac_clean_range and dmac_inv_range These are now unused, and so can be removed.
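For context, an illustrative sketch (not part of the commit) of why these two operations are now dead: the per-direction dma_map_area entry points added earlier in this series already dispatch to the same per-CPU routines, so no C code needs to pick an inner cache operation by hand any more. Roughly, for the writeback CPUs (the write-through caches take a simpler path, as their assembly shows):

	/* Sketch only: what each writeback CPU's dma_map_area implements
	 * in assembly, expressed in terms of the operations being removed. */
	static void dma_map_area_sketch(const void *start, size_t size, int dir)
	{
		const void *end = start + size;

		if (dir == DMA_TO_DEVICE)
			dmac_clean_range(start, end);	/* writeback only */
		else if (dir == DMA_FROM_DEVICE)
			dmac_inv_range(start, end);	/* invalidate only */
		else
			dmac_flush_range(start, end);	/* bidirectional: writeback + invalidate */
	}

With every caller converted to dmac_map_area()/dmac_unmap_area(), nothing references dmac_inv_range() or dmac_clean_range() directly.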
Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/include/asm/cacheflush.h | 23 ----------------------- arch/arm/mm/cache-fa.S | 6 ++---- arch/arm/mm/cache-v3.S | 29 +---------------------------- arch/arm/mm/cache-v4.S | 29 +---------------------------- arch/arm/mm/cache-v4wb.S | 6 ++---- arch/arm/mm/cache-v4wt.S | 15 +-------------- arch/arm/mm/cache-v6.S | 6 ++---- arch/arm/mm/cache-v7.S | 6 ++---- arch/arm/mm/proc-arm1020.S | 6 ++---- arch/arm/mm/proc-arm1020e.S | 6 ++---- arch/arm/mm/proc-arm1022.S | 6 ++---- arch/arm/mm/proc-arm1026.S | 6 ++---- arch/arm/mm/proc-arm920.S | 6 ++---- arch/arm/mm/proc-arm922.S | 6 ++---- arch/arm/mm/proc-arm925.S | 6 ++---- arch/arm/mm/proc-arm926.S | 6 ++---- arch/arm/mm/proc-arm940.S | 6 ++---- arch/arm/mm/proc-arm946.S | 6 ++---- arch/arm/mm/proc-feroceon.S | 12 ++++-------- arch/arm/mm/proc-mohawk.S | 6 ++---- arch/arm/mm/proc-xsc3.S | 6 ++---- arch/arm/mm/proc-xscale.S | 8 ++------ 22 files changed, 41 insertions(+), 171 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index 4c733236e342..e29088587412 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -182,21 +182,6 @@ * DMA Cache Coherency * =================== * - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - start - virtual start address - * - end - virtual end address - * - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - start - virtual start address - * - end - virtual end address - * * dma_flush_range(start, end) * * Clean and invalidate the specified virtual address range. 
@@ -216,8 +201,6 @@ struct cpu_cache_fns { void (*dma_map_area)(const void *, size_t, int); void (*dma_unmap_area)(const void *, size_t, int); - void (*dma_inv_range)(const void *, const void *); - void (*dma_clean_range)(const void *, const void *); void (*dma_flush_range)(const void *, const void *); }; @@ -249,8 +232,6 @@ extern struct cpu_cache_fns cpu_cache; */ #define dmac_map_area cpu_cache.dma_map_area #define dmac_unmap_area cpu_cache.dma_unmap_area -#define dmac_inv_range cpu_cache.dma_inv_range -#define dmac_clean_range cpu_cache.dma_clean_range #define dmac_flush_range cpu_cache.dma_flush_range #else @@ -277,14 +258,10 @@ extern void __cpuc_flush_dcache_area(void *, size_t); */ #define dmac_map_area __glue(_CACHE,_dma_map_area) #define dmac_unmap_area __glue(_CACHE,_dma_unmap_area) -#define dmac_inv_range __glue(_CACHE,_dma_inv_range) -#define dmac_clean_range __glue(_CACHE,_dma_clean_range) #define dmac_flush_range __glue(_CACHE,_dma_flush_range) extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); -extern void dmac_inv_range(const void *, const void *); -extern void dmac_clean_range(const void *, const void *); extern void dmac_flush_range(const void *, const void *); #endif diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 8ebffdd6fcff..7148e53e6078 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S @@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_inv_range) +fa_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry @@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(fa_dma_clean_range) +fa_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -241,7 +241,5 @@ ENTRY(fa_cache_fns) .long fa_flush_kern_dcache_area .long fa_dma_map_area .long fa_dma_unmap_area - .long fa_dma_inv_range - .long fa_dma_clean_range .long fa_dma_flush_range .size fa_cache_fns, . - fa_cache_fns diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 6df52dc014be..c2ff3c599fee 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S @@ -83,20 +83,6 @@ ENTRY(v3_coherent_user_range) ENTRY(v3_flush_kern_dcache_area) /* FALLTHROUGH */ -/* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_inv_range) - /* FALLTHROUGH */ - /* * dma_flush_range(start, end) * @@ -108,17 +94,6 @@ ENTRY(v3_dma_inv_range) ENTRY(v3_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c0, 0 @ flush ID cache - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v3_dma_clean_range) mov pc, lr /* @@ -129,7 +104,7 @@ ENTRY(v3_dma_clean_range) */ ENTRY(v3_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v3_dma_inv_range + bne v3_dma_flush_range /* FALLTHROUGH */ /* @@ -155,7 +130,5 @@ ENTRY(v3_cache_fns) .long v3_flush_kern_dcache_area .long v3_dma_map_area .long v3_dma_unmap_area - .long v3_dma_inv_range - .long v3_dma_clean_range .long v3_dma_flush_range .size v3_cache_fns, . - v3_cache_fns diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index df3b423713b9..4810f7e3e813 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S @@ -93,20 +93,6 @@ ENTRY(v4_coherent_user_range) ENTRY(v4_flush_kern_dcache_area) /* FALLTHROUGH */ -/* - * dma_inv_range(start, end) - * - * Invalidate (discard) the specified virtual address range. - * May not write back any entries. If 'start' or 'end' - * are not cache line aligned, those lines must be written - * back. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_inv_range) - /* FALLTHROUGH */ - /* * dma_flush_range(start, end) * @@ -120,17 +106,6 @@ ENTRY(v4_dma_flush_range) mov r0, #0 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache #endif - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean (write back) the specified virtual address range. - * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4_dma_clean_range) mov pc, lr /* @@ -141,7 +116,7 @@ ENTRY(v4_dma_clean_range) */ ENTRY(v4_dma_unmap_area) teq r2, #DMA_TO_DEVICE - bne v4_dma_inv_range + bne v4_dma_flush_range /* FALLTHROUGH */ /* @@ -167,7 +142,5 @@ ENTRY(v4_cache_fns) .long v4_flush_kern_dcache_area .long v4_dma_map_area .long v4_dma_unmap_area - .long v4_dma_inv_range - .long v4_dma_clean_range .long v4_dma_flush_range .size v4_cache_fns, . - v4_cache_fns diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index 32e7a7448496..df8368afa102 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_inv_range) +v4wb_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wb_dma_clean_range) +v4wb_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -252,7 +252,5 @@ ENTRY(v4wb_cache_fns) .long v4wb_flush_kern_dcache_area .long v4wb_dma_map_area .long v4wb_dma_unmap_area - .long v4wb_dma_inv_range - .long v4wb_dma_clean_range .long v4wb_dma_flush_range .size v4wb_cache_fns, . - v4wb_cache_fns diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 3d8dad5b2650..45c70312f43b 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(v4wt_dma_inv_range) +v4wt_dma_inv_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry add r0, r0, #CACHE_DLINESIZE cmp r0, r1 blo 1b - /* FALLTHROUGH */ - -/* - * dma_clean_range(start, end) - * - * Clean the specified virtual address range. 
- * - * - start - virtual start address - * - end - virtual end address - */ -ENTRY(v4wt_dma_clean_range) mov pc, lr /* @@ -207,7 +196,5 @@ ENTRY(v4wt_cache_fns) .long v4wt_flush_kern_dcache_area .long v4wt_dma_map_area .long v4wt_dma_unmap_area - .long v4wt_dma_inv_range - .long v4wt_dma_clean_range .long v4wt_dma_flush_range .size v4wt_cache_fns, . - v4wt_cache_fns diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 6f926dd0e0f7..a11934e53fbd 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_inv_range) +v6_dma_inv_range: tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE @@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v6_dma_clean_range) +v6_dma_clean_range: bic r0, r0, #D_CACHE_LINE_SIZE - 1 1: #ifdef HARVARD_CACHE @@ -299,7 +299,5 @@ ENTRY(v6_cache_fns) .long v6_flush_kern_dcache_area .long v6_dma_map_area .long v6_dma_unmap_area - .long v6_dma_inv_range - .long v6_dma_clean_range .long v6_dma_flush_range .size v6_cache_fns, . - v6_cache_fns diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index e30d8bc67182..b1cd0fd91207 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_inv_range) +v7_dma_inv_range: dcache_line_size r2, r3 sub r3, r2, #1 tst r0, r3 @@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range) * - start - virtual start address of region * - end - virtual end address of region */ -ENTRY(v7_dma_clean_range) +v7_dma_clean_range: dcache_line_size r2, r3 sub r3, r2, #1 bic r0, r0, r3 @@ -307,7 +307,5 @@ ENTRY(v7_cache_fns) .long v7_flush_kern_dcache_area .long v7_dma_map_area .long v7_dma_unmap_area - .long v7_dma_inv_range - .long v7_dma_clean_range .long v7_dma_flush_range .size v7_cache_fns, . 
- v7_cache_fns diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index c85f5eb42634..72507c630ceb 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020_dma_inv_range) +arm1020_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020_dma_clean_range) +arm1020_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -363,8 +363,6 @@ ENTRY(arm1020_cache_fns) .long arm1020_flush_kern_dcache_area .long arm1020_dma_map_area .long arm1020_dma_unmap_area - .long arm1020_dma_inv_range - .long arm1020_dma_clean_range .long arm1020_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 5a3cf7620a2c..d27829805609 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1020e_dma_inv_range) +arm1020e_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1020e_dma_clean_range) +arm1020e_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -349,8 +349,6 @@ ENTRY(arm1020e_cache_fns) .long arm1020e_flush_kern_dcache_area .long arm1020e_dma_map_area .long arm1020e_dma_unmap_area - .long arm1020e_dma_inv_range - .long arm1020e_dma_clean_range .long arm1020e_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index fec8f5878438..ce13e4a827de 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1022_dma_inv_range) +arm1022_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1022_dma_clean_range) +arm1022_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -338,8 +338,6 @@ ENTRY(arm1022_cache_fns) .long arm1022_flush_kern_dcache_area .long arm1022_dma_map_area .long arm1022_dma_unmap_area - .long arm1022_dma_inv_range - .long arm1022_dma_clean_range .long arm1022_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 9ece6f666497..636672a29c6d 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm1026_dma_inv_range) +arm1026_dma_inv_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE tst r0, #CACHE_DLINESIZE - 1 @@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm1026_dma_clean_range) +arm1026_dma_clean_range: mov ip, #0 #ifndef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CACHE_DLINESIZE - 1 @@ -332,8 +332,6 @@ ENTRY(arm1026_cache_fns) .long arm1026_flush_kern_dcache_area .long arm1026_dma_map_area .long arm1026_dma_unmap_area - .long arm1026_dma_inv_range - .long arm1026_dma_clean_range .long arm1026_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 6f6ab2747da6..8be81992645d 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -239,7 +239,7 @@ 
ENTRY(arm920_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm920_dma_inv_range) +arm920_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm920_dma_clean_range) +arm920_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -321,8 +321,6 @@ ENTRY(arm920_cache_fns) .long arm920_flush_kern_dcache_area .long arm920_dma_map_area .long arm920_dma_unmap_area - .long arm920_dma_inv_range - .long arm920_dma_clean_range .long arm920_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 4e4396b121ca..c0ff8e4b1074 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm922_dma_inv_range) +arm922_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm922_dma_clean_range) +arm922_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -323,8 +323,6 @@ ENTRY(arm922_cache_fns) .long arm922_flush_kern_dcache_area .long arm922_dma_map_area .long arm922_dma_unmap_area - .long arm922_dma_inv_range - .long arm922_dma_clean_range .long arm922_dma_flush_range #endif diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 7c01c5d1108c..3c6cffe400f6 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm925_dma_inv_range) +arm925_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm925_dma_clean_range) +arm925_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -374,8 +374,6 @@ ENTRY(arm925_cache_fns) .long arm925_flush_kern_dcache_area .long arm925_dma_map_area .long arm925_dma_unmap_area - .long arm925_dma_inv_range - .long arm925_dma_clean_range .long arm925_dma_flush_range ENTRY(cpu_arm925_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 72a01a4b80ab..75b707c9cce1 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(arm926_dma_inv_range) +arm926_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range) * * (same as v4wb) */ -ENTRY(arm926_dma_clean_range) +arm926_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -337,8 +337,6 @@ ENTRY(arm926_cache_fns) .long arm926_flush_kern_dcache_area .long arm926_dma_map_area .long arm926_dma_unmap_area - .long arm926_dma_inv_range - .long arm926_dma_clean_range .long arm926_dma_flush_range ENTRY(cpu_arm926_dcache_clean_area) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 6bb58fca7270..1af1657819eb 100644 --- a/arch/arm/mm/proc-arm940.S +++ 
b/arch/arm/mm/proc-arm940.S @@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_inv_range) +arm940_dma_inv_range: mov ip, #0 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries @@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(arm940_dma_clean_range) +arm940_dma_clean_range: ENTRY(cpu_arm940_dcache_clean_area) mov ip, #0 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH @@ -266,8 +266,6 @@ ENTRY(arm940_cache_fns) .long arm940_flush_kern_dcache_area .long arm940_dma_map_area .long arm940_dma_unmap_area - .long arm940_dma_inv_range - .long arm940_dma_clean_range .long arm940_dma_flush_range __INIT diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index ac0f9ba719d7..1664b6aaff79 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area) * - end - virtual end address * (same as arm926) */ -ENTRY(arm946_dma_inv_range) +arm946_dma_inv_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range) * * (same as arm926) */ -ENTRY(arm946_dma_clean_range) +arm946_dma_clean_range: #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry @@ -308,8 +308,6 @@ ENTRY(arm946_cache_fns) .long arm946_flush_kern_dcache_area .long arm946_dma_map_area .long arm946_dma_unmap_area - .long arm946_dma_inv_range - .long arm946_dma_clean_range .long arm946_dma_flush_range diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 97e1d784f152..53e632343849 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_inv_range) +feroceon_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 bic r0, r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_inv_range) +feroceon_range_dma_inv_range: mrs r2, cpsr tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) * (same as v4wb) */ .align 5 -ENTRY(feroceon_dma_clean_range) +feroceon_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) mov pc, lr .align 5 -ENTRY(feroceon_range_dma_clean_range) +feroceon_range_dma_clean_range: mrs r2, cpsr cmp r1, r0 subne r1, r1, #1 @ top address is inclusive @@ -414,8 +414,6 @@ ENTRY(feroceon_cache_fns) .long feroceon_flush_kern_dcache_area .long feroceon_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_dma_inv_range - .long feroceon_dma_clean_range .long feroceon_dma_flush_range ENTRY(feroceon_range_cache_fns) @@ -427,8 +425,6 @@ ENTRY(feroceon_range_cache_fns) .long feroceon_range_flush_kern_dcache_area .long feroceon_range_dma_map_area .long feroceon_dma_unmap_area - .long feroceon_range_dma_inv_range - .long feroceon_range_dma_clean_range .long feroceon_range_dma_flush_range .align 5 diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 55b7fbec6548..caa31154e7db 100644 --- a/arch/arm/mm/proc-mohawk.S +++ 
b/arch/arm/mm/proc-mohawk.S @@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area) * * (same as v4wb) */ -ENTRY(mohawk_dma_inv_range) +mohawk_dma_inv_range: tst r0, #CACHE_DLINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry tst r1, #CACHE_DLINESIZE - 1 @@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) * * (same as v4wb) */ -ENTRY(mohawk_dma_clean_range) +mohawk_dma_clean_range: bic r0, r0, #CACHE_DLINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHE_DLINESIZE @@ -301,8 +301,6 @@ ENTRY(mohawk_cache_fns) .long mohawk_flush_kern_dcache_area .long mohawk_dma_map_area .long mohawk_dma_unmap_area - .long mohawk_dma_inv_range - .long mohawk_dma_clean_range .long mohawk_dma_flush_range ENTRY(cpu_mohawk_dcache_clean_area) diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 4e4ce889b3e6..046b3d88955e 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_inv_range) +xsc3_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line @@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xsc3_dma_clean_range) +xsc3_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line add r0, r0, #CACHELINESIZE @@ -337,8 +337,6 @@ ENTRY(xsc3_cache_fns) .long xsc3_flush_kern_dcache_area .long xsc3_dma_map_area .long xsc3_dma_unmap_area - .long xsc3_dma_inv_range - .long xsc3_dma_clean_range .long xsc3_dma_flush_range ENTRY(cpu_xsc3_dcache_clean_area) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index a7999f94bf27..63037e2162f2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_inv_range) +xscale_dma_inv_range: tst r0, #CACHELINESIZE - 1 bic r0, r0, #CACHELINESIZE - 1 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry @@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range) * - start - virtual start address * - end - virtual end address */ -ENTRY(xscale_dma_clean_range) +xscale_dma_clean_range: bic r0, r0, #CACHELINESIZE - 1 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry add r0, r0, #CACHELINESIZE @@ -409,8 +409,6 @@ ENTRY(xscale_cache_fns) .long xscale_flush_kern_dcache_area .long xscale_dma_map_area .long xscale_dma_unmap_area - .long xscale_dma_inv_range - .long xscale_dma_clean_range .long xscale_dma_flush_range /* @@ -436,8 +434,6 @@ ENTRY(xscale_80200_A0_A1_cache_fns) .long xscale_dma_a0_map_area .long xscale_dma_unmap_area .long xscale_dma_flush_range - .long xscale_dma_clean_range - .long xscale_dma_flush_range ENTRY(cpu_xscale_dcache_clean_area) 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry -- cgit v1.2.3 From 2ffe2da3e71652d4f4cae19539b5c78c2a239136 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 31 Oct 2009 16:52:16 +0000 Subject: ARM: dma-mapping: fix for speculative prefetching ARMv6 and ARMv7 CPUs can perform speculative prefetching, which makes DMA cache coherency handling slightly more interesting. Rather than being able to rely upon the CPU not accessing the DMA buffer until DMA has completed, we now must expect that the cache could be loaded with possibly stale data from the DMA buffer. 
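In C terms, the resulting ownership rules (spelled out in the next paragraph) look roughly like this. This is an illustrative sketch only: clean() and inv() are hypothetical stand-ins for the per-CPU clean/invalidate range operations, and the real logic lives in the v6/v7 assembly changes below.

	/* Sketch only: cache maintenance around DMA on a speculating CPU;
	 * clean() and inv() are stand-in names, not real kernel functions. */
	static void map_for_dma(const void *buf, size_t size, int dir)
	{
		if (dir == DMA_FROM_DEVICE)
			inv(buf, size);		/* discard dirty lines so no writeback corrupts device data */
		else
			clean(buf, size);	/* write back the data the device will read */
	}

	static void unmap_after_dma(const void *buf, size_t size, int dir)
	{
		if (dir != DMA_TO_DEVICE)
			inv(buf, size);		/* drop lines speculatively loaded while DMA ran */
	}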
Where DMA involves data being transferred to the device, we clean the cache before handing it over for DMA, otherwise we invalidate the buffer to get rid of potential writebacks. On DMA Completion, if data was transferred from the device, we invalidate the buffer to get rid of any stale speculative prefetches. Signed-off-by: Russell King Tested-By: Santosh Shilimkar --- arch/arm/mm/cache-v6.S | 10 ++++--- arch/arm/mm/cache-v7.S | 10 ++++--- arch/arm/mm/dma-mapping.c | 68 +++++++++++++++++++++-------------------------- 3 files changed, 42 insertions(+), 46 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index a11934e53fbd..9d89c67a1cc3 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -271,10 +271,9 @@ ENTRY(v6_dma_flush_range) */ ENTRY(v6_dma_map_area) add r1, r1, r0 - cmp r2, #DMA_TO_DEVICE - beq v6_dma_clean_range - bcs v6_dma_inv_range - b v6_dma_flush_range + teq r2, #DMA_FROM_DEVICE + beq v6_dma_inv_range + b v6_dma_clean_range ENDPROC(v6_dma_map_area) /* @@ -284,6 +283,9 @@ ENDPROC(v6_dma_map_area) * - dir - DMA direction */ ENTRY(v6_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v6_dma_inv_range mov pc, lr ENDPROC(v6_dma_unmap_area) diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index b1cd0fd91207..bcd64f265870 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -279,10 +279,9 @@ ENDPROC(v7_dma_flush_range) */ ENTRY(v7_dma_map_area) add r1, r1, r0 - cmp r2, #DMA_TO_DEVICE - beq v7_dma_clean_range - bcs v7_dma_inv_range - b v7_dma_flush_range + teq r2, #DMA_FROM_DEVICE + beq v7_dma_inv_range + b v7_dma_clean_range ENDPROC(v7_dma_map_area) /* @@ -292,6 +291,9 @@ ENDPROC(v7_dma_map_area) * - dir - DMA direction */ ENTRY(v7_dma_unmap_area) + add r1, r1, r0 + teq r2, #DMA_TO_DEVICE + bne v7_dma_inv_range mov pc, lr ENDPROC(v7_dma_unmap_area) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index efa8efa33f5e..64daef2173bd 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -404,34 +404,22 @@ EXPORT_SYMBOL(dma_free_coherent); * platforms with CONFIG_DMABOUNCE. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ -static void dma_cache_maint(const void *start, size_t size, int direction) -{ - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - outer_op = outer_flush_range; - break; - default: - BUG(); - } - - outer_op(__pa(start), __pa(start) + size); -} - void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, enum dma_data_direction dir) { + unsigned long paddr; + BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); dmac_map_area(kaddr, size, dir); - dma_cache_maint(kaddr, size, dir); + + paddr = __pa(kaddr); + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? 
*/ } EXPORT_SYMBOL(___dma_single_cpu_to_dev); @@ -440,6 +428,13 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, { BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) { + unsigned long paddr = __pa(kaddr); + outer_inv_range(paddr, paddr + size); + } + dmac_unmap_area(kaddr, size, dir); } EXPORT_SYMBOL(___dma_single_dev_to_cpu); @@ -487,32 +482,29 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { unsigned long paddr; - void (*outer_op)(unsigned long, unsigned long); - - switch (direction) { - case DMA_FROM_DEVICE: /* invalidate only */ - outer_op = outer_inv_range; - break; - case DMA_TO_DEVICE: /* writeback only */ - outer_op = outer_clean_range; - break; - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ - outer_op = outer_flush_range; - break; - default: - BUG(); - } dma_cache_maint_page(page, off, size, dir, dmac_map_area); paddr = page_to_phys(page) + off; - outer_op(paddr, paddr + size); + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(paddr, paddr + size); + } else { + outer_clean_range(paddr, paddr + size); + } + /* FIXME: non-speculating: flush on bidirectional mappings? */ } EXPORT_SYMBOL(___dma_page_cpu_to_dev); void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, size_t size, enum dma_data_direction dir) { + unsigned long paddr = page_to_phys(page) + off; + + /* FIXME: non-speculating: not required */ + /* don't bother invalidating if DMA to device */ + if (dir != DMA_TO_DEVICE) + outer_inv_range(paddr, paddr + size); + dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); } EXPORT_SYMBOL(___dma_page_dev_to_cpu); -- cgit v1.2.3 From 4b3073e1c53a256275f1079c0fbfbe85883d9275 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:40:18 +0000 Subject: MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable. This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache(). Ralf Beache suggested getting rid of the PTE value passed to update_mmu_cache(): On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables to construct a pointer to the pte again. Passing a pte_t * is much more elegant. Maybe we might even replace the pte argument with the pte_t? Ben Herrenschmidt would also like the pte pointer for PowerPC: Passing the ptep in there is exactly what I want. I want that -instead- of the PTE value, because I have issue on some ppc cases, for I$/D$ coherency, where set_pte_at() may decide to mask out the _PAGE_EXEC. So, pass in the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit. 
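For orientation, the shape of the interface change, with the prototypes as they appear in the diffs that follow (the MIPS body is quoted from the diff below; this is a summary, not an additional change):

	/* Old: the PTE is passed by value, so an implementation cannot
	 * get back at the (possibly highmem-mapped) page table entry. */
	void update_mmu_cache(struct vm_area_struct *vma,
			      unsigned long address, pte_t pte);

	/* New: the mapped PTE pointer is passed; implementations that
	 * want the value simply dereference it, e.g. MIPS: */
	static inline void update_mmu_cache(struct vm_area_struct *vma,
					    unsigned long address, pte_t *ptep)
	{
		pte_t pte = *ptep;
		__update_tlb(vma, address, pte);
		__update_cache(vma, address, pte);
	}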
Includes a fix from Stephen Rothwell: sparc: fix fallout from update_mmu_cache API change Signed-off-by: Stephen Rothwell Acked-by: Benjamin Herrenschmidt Signed-off-by: Russell King --- Documentation/cachetlb.txt | 6 +++--- arch/alpha/include/asm/pgtable.h | 2 +- arch/arm/include/asm/tlbflush.h | 3 ++- arch/arm/mm/fault-armv.c | 5 +++-- arch/avr32/include/asm/pgtable.h | 2 +- arch/avr32/mm/tlb.c | 4 ++-- arch/cris/include/asm/pgtable.h | 2 +- arch/frv/include/asm/pgtable.h | 2 +- arch/ia64/include/asm/pgtable.h | 2 +- arch/m32r/include/asm/tlbflush.h | 2 +- arch/m32r/mm/fault-nommu.c | 2 +- arch/m32r/mm/fault.c | 6 +++--- arch/m68k/include/asm/pgtable_mm.h | 2 +- arch/microblaze/include/asm/tlbflush.h | 2 +- arch/mips/include/asm/pgtable.h | 3 ++- arch/mn10300/include/asm/pgtable.h | 2 +- arch/mn10300/mm/mmu-context.c | 3 ++- arch/parisc/include/asm/pgtable.h | 2 +- arch/parisc/kernel/cache.c | 4 ++-- arch/powerpc/include/asm/pgtable.h | 2 +- arch/powerpc/mm/mem.c | 4 ++-- arch/s390/include/asm/pgtable.h | 2 +- arch/score/include/asm/pgtable.h | 3 ++- arch/sh/include/asm/pgtable.h | 3 ++- arch/sh/mm/fault_32.c | 2 +- arch/sparc/include/asm/pgtable_32.h | 4 ++-- arch/sparc/include/asm/pgtable_64.h | 2 +- arch/sparc/mm/fault_32.c | 4 ++-- arch/sparc/mm/init_64.c | 3 ++- arch/sparc/mm/nosun4c.c | 2 +- arch/sparc/mm/srmmu.c | 6 +++--- arch/sparc/mm/sun4c.c | 6 +++--- arch/um/include/asm/pgtable.h | 2 +- arch/x86/include/asm/pgtable_32.h | 2 +- arch/x86/include/asm/pgtable_64.h | 2 +- arch/xtensa/include/asm/pgtable.h | 2 +- arch/xtensa/mm/cache.c | 4 ++-- mm/hugetlb.c | 4 ++-- mm/memory.c | 14 +++++++------- mm/migrate.c | 2 +- 40 files changed, 69 insertions(+), 62 deletions(-) (limited to 'arch/arm/mm') diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt index da42ab414c48..74a8b6fefa29 100644 --- a/Documentation/cachetlb.txt +++ b/Documentation/cachetlb.txt @@ -88,12 +88,12 @@ changes occur: This is used primarily during fault processing. 5) void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) At the end of every page fault, this routine is invoked to tell the architecture specific code that a translation - described by "pte" now exists at virtual address "address" - for address space "vma->vm_mm", in the software page tables. + now exists at virtual address "address" for address space + "vma->vm_mm", in the software page tables. A port may use this information in any way it so chooses. For example, it could use this event to pre-load TLB diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 3f0c59f6d8aa..71a243294142 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -329,7 +329,7 @@ extern pgd_t swapper_pg_dir[1024]; * tables contain all the necessary information. */ extern inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { } diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index c2f1605de359..e085e2c545eb 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); * cache entries for the kernels virtual memory range are written * back to the page. 
*/ -extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); #endif diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index ae88f2c3a6df..c45f9bb318ad 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -149,9 +149,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne * * Note that the pte lock will be held. */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) { - unsigned long pfn = pte_pfn(pte); + unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; diff --git a/arch/avr32/include/asm/pgtable.h b/arch/avr32/include/asm/pgtable.h index fecdda16f444..a9ae30c41e74 100644 --- a/arch/avr32/include/asm/pgtable.h +++ b/arch/avr32/include/asm/pgtable.h @@ -325,7 +325,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) struct vm_area_struct; extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); + unsigned long address, pte_t *ptep); /* * Encode and decode a swap entry diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c index 06677be98ffb..0da23109f817 100644 --- a/arch/avr32/mm/tlb.c +++ b/arch/avr32/mm/tlb.c @@ -101,7 +101,7 @@ static void update_dtlb(unsigned long address, pte_t pte) } void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { unsigned long flags; @@ -110,7 +110,7 @@ void update_mmu_cache(struct vm_area_struct *vma, return; local_irq_save(flags); - update_dtlb(address, pte); + update_dtlb(address, *ptep); local_irq_restore(flags); } diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index 1fcce00f01f4..99ea6cd1b143 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -270,7 +270,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */ * Actually I am not sure on what this could be used for. 
*/ static inline void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { } diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 22c60692b551..c18b0d32e636 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -505,7 +505,7 @@ static inline int pte_file(pte_t pte) /* * preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache */ -static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; unsigned long ampr; diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 69bf13857a9f..c3286f42e501 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -462,7 +462,7 @@ pte_same (pte_t a, pte_t b) return pte_val(a) == pte_val(b); } -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; extern void paging_init (void); diff --git a/arch/m32r/include/asm/tlbflush.h b/arch/m32r/include/asm/tlbflush.h index 0ef95307784e..92614b0ccf17 100644 --- a/arch/m32r/include/asm/tlbflush.h +++ b/arch/m32r/include/asm/tlbflush.h @@ -92,6 +92,6 @@ static __inline__ void __flush_tlb_all(void) ); } -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); #endif /* _ASM_M32R_TLBFLUSH_H */ diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c index 88469178ea6b..888aab1157ed 100644 --- a/arch/m32r/mm/fault-nommu.c +++ b/arch/m32r/mm/fault-nommu.c @@ -95,7 +95,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, * update_mmu_cache() *======================================================================*/ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, - pte_t pte) + pte_t *ptep) { BUG(); } diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 7274b47f4c22..28ee389e5f5a 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -336,7 +336,7 @@ vmalloc_fault: addr = (address & PAGE_MASK); set_thread_fault_code(error_code); - update_mmu_cache(NULL, addr, *pte_k); + update_mmu_cache(NULL, addr, pte_k); set_thread_fault_code(0); return; } @@ -349,7 +349,7 @@ vmalloc_fault: #define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8)) #define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8)) void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, - pte_t pte) + pte_t *ptep) { volatile unsigned long *entry1, *entry2; unsigned long pte_data, flags; @@ -365,7 +365,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr, vaddr = (vaddr & PAGE_MASK) | get_asid(); - pte_data = pte_val(pte); + pte_data = pte_val(*ptep); #ifdef CONFIG_CHIP_OPSP entry1 = (unsigned long *)ITLB_BASE; diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index aca0e28581c7..87174c904d2b 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -115,7 +115,7 @@ extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); * they are updated on demand. 
*/ static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { } diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h index eb31a0e8a772..10ec70cd8735 100644 --- a/arch/microblaze/include/asm/tlbflush.h +++ b/arch/microblaze/include/asm/tlbflush.h @@ -38,7 +38,7 @@ static inline void local_flush_tlb_range(struct vm_area_struct *vma, #define flush_tlb_kernel_range(start, end) do { } while (0) -#define update_mmu_cache(vma, addr, pte) do { } while (0) +#define update_mmu_cache(vma, addr, ptep) do { } while (0) #define flush_tlb_all local_flush_tlb_all #define flush_tlb_mm local_flush_tlb_mm diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 1854336e56a2..c56bf8afc099 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -362,8 +362,9 @@ extern void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { + pte_t pte = *ptep; __update_tlb(vma, address, pte); __update_cache(vma, address, pte); } diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index 6dc30fc827c4..16d88577f3e0 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h @@ -466,7 +466,7 @@ static inline int set_kernel_exec(unsigned long vaddr, int enable) * the kernel page tables containing the necessary information by tlb-mn10300.S */ extern void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte); + unsigned long address, pte_t *ptep); #endif /* !__ASSEMBLY__ */ diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c index 31c9d27a75ae..36ba02191d40 100644 --- a/arch/mn10300/mm/mmu-context.c +++ b/arch/mn10300/mm/mmu-context.c @@ -51,9 +51,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) /* * preemptively set a TLB entry */ -void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pteu, ptel, cnx, flags; + pte_t pte = *ptep; addr &= PAGE_MASK; ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2); diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index a27d2e200fb2..01c15035e783 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -410,7 +410,7 @@ extern void paging_init (void); #define PG_dcache_dirty PG_arch_1 -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); /* Encode and de-code a swap entry */ diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index b6ed34de14e1..1054baa2fc69 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -68,9 +68,9 @@ flush_cache_all_local(void) EXPORT_SYMBOL(flush_cache_all_local); void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - struct page *page = pte_page(pte); + struct page *page = pte_page(*ptep); if (pfn_valid(page_to_pfn(page)) && page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) { diff --git a/arch/powerpc/include/asm/pgtable.h 
b/arch/powerpc/include/asm/pgtable.h index 21207e54825b..89f158731ce3 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -209,7 +209,7 @@ extern void paging_init(void); * corresponding HPTE into the hash table ahead of time, instead of * waiting for the inevitable extra hash-table miss exception. */ -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index b9b152558f9c..311224cdb7ad 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -494,13 +494,13 @@ EXPORT_SYMBOL(flush_icache_user_range); * This must always be called with the pte lock held. */ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, - pte_t pte) + pte_t *ptep) { #ifdef CONFIG_PPC_STD_MMU unsigned long access = 0, trap; /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */ - if (!pte_young(pte) || address >= TASK_SIZE) + if (!pte_young(*ptep) || address >= TASK_SIZE) return; /* We try to figure out if we are coming from an instruction diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index e2fa79cf0614..9b5b9189c15e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -43,7 +43,7 @@ extern void vmem_map_init(void); * The S390 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. */ -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) /* * ZERO_PAGE is a global shared page that is always zero: used diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 674934b40170..ccf38f06c57d 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -272,8 +272,9 @@ extern void __update_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte); static inline void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) + unsigned long address, pte_t *ptep) { + pte_t pte = *ptep; __update_tlb(vma, address, pte); __update_cache(vma, address, pte); } diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index ba3046e4f06f..1ff93ac1aa44 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -165,8 +165,9 @@ extern void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte); static inline void -update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + pte_t pte = *ptep; __update_cache(vma, address, pte); __update_tlb(vma, address, pte); } diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 47530104e0ad..1677b5ee191d 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -371,7 +371,7 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, local_flush_tlb_one(get_asid(), address & PAGE_MASK); #endif - update_mmu_cache(NULL, address, entry); + update_mmu_cache(NULL, address, pte); return 0; } diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index e0cabe790ec1..77f906d8cc21 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ 
b/arch/sparc/include/asm/pgtable_32.h @@ -330,9 +330,9 @@ BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *) #define FAULT_CODE_WRITE 0x2 #define FAULT_CODE_USER 0x4 -BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t) +BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *) -#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte) +#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep) BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long, unsigned long, unsigned int) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index f3cb790fa2ae..f5b5fa76c02d 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -706,7 +706,7 @@ extern unsigned long find_ecache_flush_span(unsigned long size); #define mmu_unlockarea(vaddr, len) do { } while(0) struct vm_area_struct; -extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); +extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); /* Encode and de-code a swap entry */ #define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL) diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index b99f81c4906f..43e20efb2511 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -370,7 +370,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { extern void sun4c_update_mmu_cache(struct vm_area_struct *, - unsigned long,pte_t); + unsigned long,pte_t *); extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long); struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; @@ -447,7 +447,7 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write, * on the CPU and doing a shrink_mmap() on this vma. */ sun4c_update_mmu_cache (find_vma(current->mm, address), address, - *ptep); + ptep); else do_sparc_fault(regs, text_fault, write, address); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 1886d37d411b..9245a822a2f1 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -289,12 +289,13 @@ static void flush_dcache(unsigned long pfn) } } -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm; struct tsb *tsb; unsigned long tag, flags; unsigned long tsb_index, tsb_hash_shift; + pte_t pte = *ptep; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c index 196263f895b7..4e62c27147c4 100644 --- a/arch/sparc/mm/nosun4c.c +++ b/arch/sparc/mm/nosun4c.c @@ -62,7 +62,7 @@ pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address) return NULL; } -void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 367321a030dd..df49b200ca4c 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -694,7 +694,7 @@ extern void tsunami_setup_blockops(void); * The following code is a deadwood that may be necessary when * we start to make precise page flushes again. 
--zaitcev */ -static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) +static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep) { #if 0 static unsigned long last; @@ -703,10 +703,10 @@ static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad if (address == last) { val = srmmu_hwprobe(address); - if (val != 0 && pte_val(pte) != val) { + if (val != 0 && pte_val(*ptep) != val) { printk("swift_update_mmu_cache: " "addr %lx put %08x probed %08x from %p\n", - address, pte_val(pte), val, + address, pte_val(*ptep), val, __builtin_return_address(0)); srmmu_flush_whole_tlb(); } diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index a89baf0d875a..18652534b91a 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1887,7 +1887,7 @@ static void sun4c_check_pgt_cache(int low, int high) /* An experiment, turn off by default for now... -DaveM */ #define SUN4C_PRELOAD_PSEG -void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) +void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { unsigned long flags; int pseg; @@ -1929,7 +1929,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p start += PAGE_SIZE; } #ifndef SUN4C_PRELOAD_PSEG - sun4c_put_pte(address, pte_val(pte)); + sun4c_put_pte(address, pte_val(*ptep)); #endif local_irq_restore(flags); return; @@ -1940,7 +1940,7 @@ void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, p add_lru(entry); } - sun4c_put_pte(address, pte_val(pte)); + sun4c_put_pte(address, pte_val(*ptep)); local_irq_restore(flags); } diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index 9ce3f165111a..a9f7251b4a8d 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -345,7 +345,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) struct mm_struct; extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); -#define update_mmu_cache(vma,address,pte) do ; while (0) +#define update_mmu_cache(vma,address,ptep) do ; while (0) /* Encode and de-code a swap entry */ #define __swp_type(x) (((x).val >> 4) & 0x3f) diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 01fd9461d323..a28668396508 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -80,7 +80,7 @@ do { \ * The i386 doesn't have any external MMU info: the kernel page * tables contain all the necessary information. 
*/ -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) #endif /* !__ASSEMBLY__ */ diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index c57a30117149..181be528c612 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -129,7 +129,7 @@ static inline int pgd_large(pgd_t pgd) { return 0; } #define pte_unmap(pte) /* NOP */ #define pte_unmap_nested(pte) /* NOP */ -#define update_mmu_cache(vma, address, pte) do { } while (0) +#define update_mmu_cache(vma, address, ptep) do { } while (0) /* Encode and de-code a swap entry */ #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index a138770c358e..76bf35554117 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -394,7 +394,7 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) #define kern_addr_valid(addr) (1) extern void update_mmu_cache(struct vm_area_struct * vma, - unsigned long address, pte_t pte); + unsigned long address, pte_t *ptep); /* * remap a physical page `pfn' of size `size' with page protection `prot' diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 3ba990c67676..85df4655d326 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -147,9 +147,9 @@ void flush_cache_page(struct vm_area_struct* vma, unsigned long address, #endif void -update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte) +update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) { - unsigned long pfn = pte_pfn(pte); + unsigned long pfn = pte_pfn(*ptep); struct page *page; if (!pfn_valid(pfn)) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e91b81b63670..94cd94df56e3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2088,7 +2088,7 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep))); if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) { - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, ptep); } } @@ -2559,7 +2559,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, entry = pte_mkyoung(entry); if (huge_ptep_set_access_flags(vma, address, ptep, entry, flags & FAULT_FLAG_WRITE)) - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, ptep); out_page_table_lock: spin_unlock(&mm->page_table_lock); diff --git a/mm/memory.c b/mm/memory.c index 09e4b1be7b67..72fb5f39bccc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1593,7 +1593,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, /* Ok, finally just insert the thing.. */ entry = pte_mkspecial(pfn_pte(pfn, prot)); set_pte_at(mm, addr, pte, entry); - update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */ + update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ retval = 0; out_unlock: @@ -2116,7 +2116,7 @@ reuse: entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, address, page_table, entry,1)) - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, page_table); ret |= VM_FAULT_WRITE; goto unlock; } @@ -2185,7 +2185,7 @@ gotten: * new page to be mapped directly into the secondary page table. 
*/ set_pte_at_notify(mm, address, page_table, entry); - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, page_table); if (old_page) { /* * Only after switching the pte to the new page may @@ -2629,7 +2629,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, } /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, pte); + update_mmu_cache(vma, address, page_table); unlock: pte_unmap_unlock(page_table, ptl); out: @@ -2694,7 +2694,7 @@ setpte: set_pte_at(mm, address, page_table, entry); /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, page_table); unlock: pte_unmap_unlock(page_table, ptl); return 0; @@ -2855,7 +2855,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, set_pte_at(mm, address, page_table, entry); /* no need to invalidate: a not-present page won't be cached */ - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, page_table); } else { if (charged) mem_cgroup_uncharge_page(page); @@ -2992,7 +2992,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, } entry = pte_mkyoung(entry); if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { - update_mmu_cache(vma, address, entry); + update_mmu_cache(vma, address, pte); } else { /* * This is needed only for protection faults but the arch code diff --git a/mm/migrate.c b/mm/migrate.c index efddbf0926b2..e58e5da25b91 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -134,7 +134,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, page_add_file_rmap(new); /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, addr, pte); + update_mmu_cache(vma, addr, ptep); unlock: pte_unmap_unlock(ptep, ptl); out: -- cgit v1.2.3 From ae1402022edbeef3991f1e4bae8fa99558be291b Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 16:43:57 +0000 Subject: ARM: make_coherent(): fix problems with highpte, part 2 update_mmu_cache() is called with the page table for the faulted-in page still mapped. We need to modify the PTE for this page to ensure coherency with other shared mappings when multiple shared mappings exist within a MM. 
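[Editor's note: to make the mechanics concrete before the diff below, here is a minimal sketch — not part of the original patch — of the shape of the fix. update_mmu_cache() now receives the still-mapped PTE slot (pte_t *ptep) and threads it down to make_coherent(), which can rewrite that entry directly instead of re-walking (and, with highpte, re-kmapping) the page tables. do_adjust_pte() is the helper introduced in part 1 of this series; locking and the VMA scan are elided.]

/*
 * Sketch only, simplified from arch/arm/mm/fault-armv.c.
 */
static void make_coherent(struct address_space *mapping,
			  struct vm_area_struct *vma,
			  unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	int aliases = 0;

	/*
	 * Walk the other shared user mappings of this page and count
	 * aliases; the real code does this under
	 * flush_dcache_mmap_lock() (scan elided in this sketch).
	 */

	if (aliases)
		/* Fix up through the PTE slot we were handed. */
		do_adjust_pte(vma, addr, pfn, ptep);
	else
		flush_cache_page(vma, addr, pfn);
}
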
Signed-off-by: Russell King --- arch/arm/mm/fault-armv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index c45f9bb318ad..c9b97e9836a2 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -99,7 +99,8 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, } static void -make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) +make_coherent(struct address_space *mapping, struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; @@ -131,7 +132,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne } flush_dcache_mmap_unlock(mapping); if (aliases) - adjust_pte(vma, addr, pfn); + do_adjust_pte(vma, addr, pfn, ptep); else flush_cache_page(vma, addr, pfn); } @@ -174,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, #endif if (mapping) { if (cache_is_vivt()) - make_coherent(mapping, vma, addr, pfn); + make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } -- cgit v1.2.3
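[Editor's note: taken together, the patches above change the update_mmu_cache() contract: architectures now receive a pointer to the still-mapped PTE rather than a PTE value. For most ports the conversion is mechanical — dereference the pointer once at the top — but passing the pointer is what allows ARM's VIVT alias handling to modify the entry in place. A minimal sketch of the two sides of the contract follows, modelled on the MIPS/score conversions shown in the diff; the _old/_new suffixes exist only so both versions can appear side by side.]

/* Old contract: the hook saw only a copy of the PTE and could
 * observe the new translation but never change it. */
static inline void update_mmu_cache_old(struct vm_area_struct *vma,
					unsigned long address, pte_t pte)
{
	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

/* New contract: the hook sees the PTE slot itself.  Read-only users
 * simply dereference once; an architecture such as ARM may also
 * write back through ptep (e.g. to mark the page uncacheable when a
 * VIVT alias is detected). */
static inline void update_mmu_cache_new(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;	/* the value is still one dereference away */

	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}

Callers change correspondingly: generic mm code now passes the page-table slot it already holds, e.g. update_mmu_cache(vma, address, entry) becomes update_mmu_cache(vma, address, page_table) in the mm/memory.c hunks above.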