| author | Richard Purdie <rpurdie@rpsys.net> | 2006-12-30 18:08:50 +0300 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2006-12-30 20:05:08 +0300 |
| commit | 1c9d3df5e88ad7db23f5b22f4341c39722a904a4 (patch) | |
| tree | dbabefd52a5f8a5f35216bda33f29e4b9b398569 /arch/arm | |
| parent | b0b1d60a64054697ef828e0565f006cc0f823590 (diff) | |
| download | linux-1c9d3df5e88ad7db23f5b22f4341c39722a904a4.tar.xz | |
[ARM] 4078/1: Fix ARM copypage cache coherency problems
If PG_dcache_dirty is set for a page, we need to flush the source page
before performing any copypage operation using a different virtual address.
This fixes the copypage implementations for XScale, StrongARM and ARMv6.
This patch fixes segmentation faults seen in the dynamic linker under
the usage patterns in glibc 2.4/2.5.
Signed-off-by: Richard Purdie <rpurdie@rpsys.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
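
Each of the three hunks below adds the same guard at the top of the copypage routine, before the temporary (minicache or aliased) mapping of the source page is set up. A condensed sketch of that guard is shown here; the helper name `flush_src_if_dirty` is hypothetical and introduced only for illustration, since the patch open-codes this sequence in each function:

```c
/*
 * Hypothetical helper summarising the guard this patch adds (the patch
 * itself repeats these lines in each copypage implementation, within
 * arch/arm/mm and with <asm/cacheflush.h> included as in the hunks below).
 */
static inline void flush_src_if_dirty(const void *kfrom)
{
	/* Look up the struct page backing the source kernel address. */
	struct page *page = virt_to_page(kfrom);

	/*
	 * If the page's D-cache lines are still marked dirty, flush them now
	 * so the copy performed through a different virtual address sees
	 * up-to-date data rather than stale memory contents.
	 */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page_mapping(page), page);
}
```

After this guard, each function proceeds exactly as before: the v4mc and XScale variants take `minicache_lock` and map the source page into the minicache, and the ARMv6 aliasing variant sets up its colour-matched kernel mappings.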
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/mm/copypage-v4mc.c | 6
-rw-r--r-- | arch/arm/mm/copypage-v6.c | 4
-rw-r--r-- | arch/arm/mm/copypage-xscale.c | 6
3 files changed, 16 insertions, 0 deletions
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 408b05ae6b9b..ded0e96d069d 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -69,6 +70,11 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 865777dec161..3adb79257f43 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -53,6 +53,10 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
 
 	/*
 	 * Discard data in the kernel mapping for the new page.
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index aea5da723596..2e455f82a4d5 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
 
@@ -91,6 +92,11 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);