author     Eugene Surovegin <ebs@ebshome.net>       2006-03-28 22:13:12 +0400
committer  Paul Mackerras <paulus@samba.org>        2006-03-29 06:44:15 +0400
commit     bab70a4af737f623de5b034976a311055308ab86 (patch)
tree       f8c0e9463de01323db2cb1a62f4eb83adb5ce7ca /arch
parent     bac30d1a78d0f11c613968fc8b351a91ed465386 (diff)
download   linux-bab70a4af737f623de5b034976a311055308ab86.tar.xz
[PATCH] lock PTE before updating it in 440/BookE page fault handler

Fix the 44x and BookE page fault handlers to correctly lock the PTE before
trying to pte_update() it; otherwise this PTE might be swapped out after the
pte_present() check but before the pte_update() call, resulting in a
corrupted PTE. This can happen with preemption enabled under low-memory
conditions.

Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
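In outline, the fix moves the pte_present() check and the pte_update() under
the per-PMD PTE lock. A condensed sketch of the pattern the hunks below apply
(an illustration, not the literal patch; get_pteptr() here already takes the
extra pmd_t ** argument the patch introduces):

	pte_t *ptep = NULL;
	pmd_t *pmdp;

	if (get_pteptr(mm, address, &ptep, &pmdp)) {
		spinlock_t *ptl = pte_lockptr(mm, pmdp); /* per-PMD PTE lock */

		spin_lock(ptl);
		/* Re-check under the lock: without it, a swap-out racing
		 * with this (preemptible) path can clear the PTE between
		 * the pte_present() check and the pte_update() call. */
		if (pte_present(*ptep)) {
			pte_update(ptep, 0, _PAGE_HWEXEC);
			_tlbie(address);
		}
		pte_unmap_unlock(ptep, ptl);
	}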
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/fault.c       30
-rw-r--r--  arch/powerpc/mm/pgtable_32.c   6
-rw-r--r--  arch/ppc/mm/fault.c           30
-rw-r--r--  arch/ppc/mm/pgtable.c          6

4 files changed, 42 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ec4adcb4bc28..5aea0909a5ec 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -267,25 +267,29 @@ good_area:
#endif
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
pte_t *ptep;
+ pmd_t *pmdp;

/* Since 4xx/Book-E supports per-page execute permission,
* we lazily flush dcache to icache. */
ptep = NULL;
- if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
- struct page *page = pte_page(*ptep);
-
- if (! test_bit(PG_arch_1, &page->flags)) {
- flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ if (get_pteptr(mm, address, &ptep, &pmdp)) {
+ spinlock_t *ptl = pte_lockptr(mm, pmdp);
+ spin_lock(ptl);
+ if (pte_present(*ptep)) {
+ struct page *page = pte_page(*ptep);
+
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+ pte_update(ptep, 0, _PAGE_HWEXEC);
+ _tlbie(address);
+ pte_unmap_unlock(ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return 0;
}
- pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
- pte_unmap(ptep);
- up_read(&mm->mmap_sem);
- return 0;
+ pte_unmap_unlock(ptep, ptl);
}
- if (ptep != NULL)
- pte_unmap(ptep);
#endif
/* a write */
} else if (is_write) {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d296eb6b4545..90628601fac7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
* the PTE pointer is unmodified if PTE is not found.
*/
int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
pgd_t *pgd;
pmd_t *pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
if (pte) {
retval = 1;
*ptep = pte;
+ if (pmdp)
+ *pmdp = pmd;
/* XXX caller needs to do pte_unmap, yuck */
}
}
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
mm = &init_mm;

pa = 0;
- if (get_pteptr(mm, addr, &pte)) {
+ if (get_pteptr(mm, addr, &pte, NULL)) {
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
pte_unmap(pte);
}
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 0217188ef465..8e08ca32531a 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -202,6 +202,7 @@ good_area:
/* an exec - 4xx/Book-E allows for per-page execute permission */
} else if (TRAP(regs) == 0x400) {
pte_t *ptep;
+ pmd_t *pmdp;

#if 0
/* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ good_area:
/* Since 4xx/Book-E supports per-page execute permission,
* we lazily flush dcache to icache. */
ptep = NULL;
- if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
- struct page *page = pte_page(*ptep);
-
- if (! test_bit(PG_arch_1, &page->flags)) {
- flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ if (get_pteptr(mm, address, &ptep, &pmdp)) {
+ spinlock_t *ptl = pte_lockptr(mm, pmdp);
+ spin_lock(ptl);
+ if (pte_present(*ptep)) {
+ struct page *page = pte_page(*ptep);
+
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+ pte_update(ptep, 0, _PAGE_HWEXEC);
+ _tlbie(address);
+ pte_unmap_unlock(ptep, ptl);
+ up_read(&mm->mmap_sem);
+ return 0;
}
- pte_update(ptep, 0, _PAGE_HWEXEC);
- _tlbie(address);
- pte_unmap(ptep);
- up_read(&mm->mmap_sem);
- return 0;
+ pte_unmap_unlock(ptep, ptl);
}
- if (ptep != NULL)
- pte_unmap(ptep);
#endif
/* a read */
} else {
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index a1924876cad6..706bca8eb144 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
* the PTE pointer is unmodified if PTE is not found.
*/
int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
pgd_t *pgd;
pmd_t *pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
if (pte) {
retval = 1;
*ptep = pte;
+ if (pmdp)
+ *pmdp = pmd;
/* XXX caller needs to do pte_unmap, yuck */
}
}
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
mm = &init_mm;

pa = 0;
- if (get_pteptr(mm, addr, &pte)) {
+ if (get_pteptr(mm, addr, &pte, NULL)) {
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
pte_unmap(pte);
}