author     Zachary Amsden <zach@vmware.com>                        2007-04-09 03:04:01 +0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-04-09 06:47:55 +0400
commit     49f19710512c825aaea73b9207b3a848027cda1d
tree       06da31bd9a84273e12aa43f536f90eb8146ff92e
parent     a5bfffac645a7b2d8119f8bbae34df5c94832799
download   linux-49f19710512c825aaea73b9207b3a848027cda1d.tar.xz
[PATCH] Proper fix for highmem kmap_atomic functions for VMI for 2.6.21
Since lazy MMU batching mode still allows interrupts to come in, it is
possible for an interrupt handler to use kmap_atomic, which fails while
lazy mode is active because the PTE update for the highmem fixmap slot is
still queued. The best workaround is to issue an explicit flush in the
kmap_atomic functions themselves; they are the only place where nested PTE
updates can happen from interrupt context.
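To illustrate the race described above, here is a minimal user-space toy model (not part of the patch; names such as set_pte_model() and kmap_atomic_model() are invented for illustration). It shows that a PTE write made under lazy batching stays invisible until a flush, which is why kmap_atomic needs the explicit arch_flush_lazy_mmu_mode() call:

#include <stdio.h>

#define NPTES 4

static unsigned long page_table[NPTES]; /* PTEs as the "hardware" would see them */
static unsigned long queue_val[NPTES];  /* updates batched but not yet applied */
static int queue_idx[NPTES];
static int queued;
static int lazy;

static void set_pte_model(int idx, unsigned long val)
{
        if (lazy) {                      /* lazy MMU mode: defer the update */
                queue_idx[queued] = idx;
                queue_val[queued] = val;
                queued++;
        } else {
                page_table[idx] = val;   /* immediate update */
        }
}

static void flush_lazy_model(void)       /* stands in for arch_flush_lazy_mmu_mode() */
{
        int i;

        for (i = 0; i < queued; i++)
                page_table[queue_idx[i]] = queue_val[i];
        queued = 0;
}

/* The guarantee kmap_atomic() needs when called from an interrupt:
 * the fixmap PTE it just wrote must be visible before the mapping is used. */
static unsigned long kmap_atomic_model(int idx, unsigned long pte)
{
        set_pte_model(idx, pte);
        flush_lazy_model();              /* the explicit flush this patch adds */
        return page_table[idx];          /* 0 here would mean a stale mapping */
}

int main(void)
{
        lazy = 1;                        /* process context entered lazy MMU mode */
        set_pte_model(0, 0x1000);        /* batched; not yet in page_table[] */

        /* interrupt arrives here and maps a highmem page via kmap_atomic() */
        printf("interrupt observes PTE %#lx\n", kmap_atomic_model(1, 0x2000));
        return 0;
}

Compiled with gcc, this prints 0x2000; removing the flush_lazy_model() call makes the interrupt-path read return 0, i.e. a stale mapping, which is the failure the patch avoids.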
Thanks to Jeremy Fitzhardinge for noting the bug and for his suggestions on a fix.
This patch gets reverted again when 2.6.22 opens, where the bug is fixed
differently.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   arch/i386/kernel/vmi.c         | 23
-rw-r--r--   arch/i386/mm/highmem.c         |  2
-rw-r--r--   include/asm-generic/pgtable.h  |  2
-rw-r--r--   include/asm-i386/paravirt.h    |  3
4 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index fb07a1aad225..edc339fa5038 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -69,6 +69,7 @@ struct {
 	void (*flush_tlb)(int);
 	void (*set_initial_ap_state)(int, int);
 	void (*halt)(void);
+	void (*set_lazy_mode)(int mode);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -574,6 +575,26 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
+static void vmi_set_lazy_mode(int mode)
+{
+	static DEFINE_PER_CPU(int, lazy_mode);
+
+	if (!vmi_ops.set_lazy_mode)
+		return;
+
+	/* Modes should never nest or overlap */
+	BUG_ON(__get_cpu_var(lazy_mode) && !(mode == PARAVIRT_LAZY_NONE ||
+					     mode == PARAVIRT_LAZY_FLUSH));
+
+	if (mode == PARAVIRT_LAZY_FLUSH) {
+		vmi_ops.set_lazy_mode(0);
+		vmi_ops.set_lazy_mode(__get_cpu_var(lazy_mode));
+	} else {
+		vmi_ops.set_lazy_mode(mode);
+		__get_cpu_var(lazy_mode) = mode;
+	}
+}
+
 static inline int __init check_vmi_rom(struct vrom_header *rom)
 {
 	struct pci_header *pci;
@@ -804,7 +825,7 @@ static inline int __init activate_vmi(void)
 	para_wrap(load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(set_iopl_mask, SetIOPLMask);
 	para_fill(io_delay, IODelay);
-	para_fill(set_lazy_mode, SetLazyMode);
+	para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
 	para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB);
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index bb2de1089add..ac70d09df7ee 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
@@ -82,6 +83,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void*) vaddr;
 }
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 00c23433b39f..6d7e279b1490 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -180,6 +180,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 #define arch_enter_lazy_mmu_mode()	do {} while (0)
 #define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
 #endif
 
 /*
@@ -193,6 +194,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
 #define arch_enter_lazy_cpu_mode()	do {} while (0)
 #define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
 #endif
 
 /*
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 46dc34ca887a..e63f1e444fcf 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -421,14 +421,17 @@ static inline void pmd_clear(pmd_t *pmdp)
 #define PARAVIRT_LAZY_NONE 0
 #define PARAVIRT_LAZY_MMU  1
 #define PARAVIRT_LAZY_CPU  2
+#define PARAVIRT_LAZY_FLUSH 3
 
 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
 #define arch_enter_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_CPU)
 #define arch_leave_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+#define arch_flush_lazy_cpu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)
 
 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 #define arch_enter_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_MMU)
 #define arch_leave_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_NONE)
+#define arch_flush_lazy_mmu_mode() paravirt_ops.set_lazy_mode(PARAVIRT_LAZY_FLUSH)
 
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch {