author      Linus Torvalds <torvalds@linux-foundation.org>   2019-05-19 20:23:24 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>   2019-05-19 20:23:24 +0300
commit      1335d9a1fb2abbe5022de3c517989cc7c7161dee (patch)
tree        23e4a27cb22bb09fefea09a62e419a8c5bcc32cd
parent      4c4a5c99af7f479a14759196f8df9467128f3baf (diff)
parent      8ea58f1e8b11cca3087b294779bf5959bf89cc10 (diff)
download    linux-1335d9a1fb2abbe5022de3c517989cc7c7161dee.tar.xz
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core fixes from Ingo Molnar:
"This fixes a particularly thorny munmap() bug with MPX, plus fixes a
host build environment assumption in objtool"
* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
objtool: Allow AR to be overridden with HOSTAR
x86/mpx, mm/core: Fix recursive munmap() corruption
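
Usage note (not part of the commit itself): with AR plumbed through the same way as CC and LD, objtool can be built with a non-binutils host toolchain by overriding the variables on the make command line. A minimal sketch, assuming an LLVM host toolchain is installed (the tool names are only examples):

    make -C tools/objtool HOSTCC=clang HOSTLD=ld.lld HOSTAR=llvm-ar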
-rw-r--r--   arch/powerpc/include/asm/mmu_context.h      1
-rw-r--r--   arch/um/include/asm/mmu_context.h            1
-rw-r--r--   arch/unicore32/include/asm/mmu_context.h     1
-rw-r--r--   arch/x86/include/asm/mmu_context.h           6
-rw-r--r--   arch/x86/include/asm/mpx.h                  15
-rw-r--r--   arch/x86/mm/mpx.c                           10
-rw-r--r--   include/asm-generic/mm_hooks.h               1
-rw-r--r--   mm/mmap.c                                   15
-rw-r--r--   tools/objtool/Makefile                       3

9 files changed, 27 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 611204e588b9..58efca934311 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -232,7 +232,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 extern void arch_exit_mmap(struct mm_struct *mm);
 
 static inline void arch_unmap(struct mm_struct *mm,
-			      struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end)
 {
 	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index fca34b2177e2..9f4b4bb78120 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 }
 extern void arch_exit_mmap(struct mm_struct *mm);
 static inline void arch_unmap(struct mm_struct *mm,
-			struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
 }
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index 5c205a9cb5a6..9f06ea5466dd 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-		struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
 }
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 93dff1963337..9024236693d2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -278,8 +278,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 	mpx_mm_init(mm);
 }
 
-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-			      unsigned long start, unsigned long end)
+static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
+			      unsigned long end)
 {
 	/*
 	 * mpx_notify_unmap() goes and reads a rarely-hot
@@ -299,7 +299,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * consistently wrong.
 	 */
 	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
-		mpx_notify_unmap(mm, vma, start, end);
+		mpx_notify_unmap(mm, start, end);
 }
 
 /*
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
index d0b1434fb0b6..143a5c193ed3 100644
--- a/arch/x86/include/asm/mpx.h
+++ b/arch/x86/include/asm/mpx.h
@@ -64,12 +64,15 @@ struct mpx_fault_info {
 };
 
 #ifdef CONFIG_X86_INTEL_MPX
-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
-int mpx_handle_bd_fault(void);
+
+extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+extern int mpx_handle_bd_fault(void);
+
 static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
 {
 	return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
 }
+
 static inline void mpx_mm_init(struct mm_struct *mm)
 {
 	/*
@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
 	 */
 	mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
 }
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-		      unsigned long start, unsigned long end);
-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
-		unsigned long flags);
+
+extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
+
 #else
 static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
 {
@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
 {
 }
 static inline void mpx_notify_unmap(struct mm_struct *mm,
-				    struct vm_area_struct *vma,
 				    unsigned long start, unsigned long end)
 {
 }
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 59726aaf4671..0d1c47cbbdd6 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
+		      unsigned long end)
 {
+	struct vm_area_struct *vma;
 	int ret;
 
 	/*
@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * which should not occur normally.  Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
 	 */
-	do {
+	vma = find_vma(mm, start);
+	while (vma && vma->vm_start < end) {
 		if (vma->vm_flags & VM_MPX)
 			return;
 		vma = vma->vm_next;
-	} while (vma && vma->vm_start < end);
+	}
 
 	ret = mpx_unmap_tables(mm, start, end);
 	if (ret)
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 8ac4e68a12f0..6736ed2f632b 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -18,7 +18,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-			struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index bd7b9f293b39..2d6a6662edb9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2735,9 +2735,17 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 		return -EINVAL;
 
 	len = PAGE_ALIGN(len);
+	end = start + len;
 	if (len == 0)
 		return -EINVAL;
 
+	/*
+	 * arch_unmap() might do unmaps itself.  It must be called
+	 * and finish any rbtree manipulation before this code
+	 * runs and also starts to manipulate the rbtree.
+	 */
+	arch_unmap(mm, start, end);
+
 	/* Find the first overlapping VMA */
 	vma = find_vma(mm, start);
 	if (!vma)
@@ -2746,7 +2754,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/* we have start < vma->vm_end  */
 
 	/* if it doesn't overlap, we have nothing.. */
-	end = start + len;
 	if (vma->vm_start >= end)
 		return 0;
@@ -2816,12 +2823,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	/* Detach vmas from rbtree */
 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
 
-	/*
-	 * mpx unmap needs to be called with mmap_sem held for write.
-	 * It is safe to call it before unmap_region().
-	 */
-	arch_unmap(mm, vma, start, end);
-
 	if (downgrade)
 		downgrade_write(&mm->mmap_sem);
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 53f8be0f4a1f..88158239622b 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -7,11 +7,12 @@ ARCH := x86
 endif
 
 # always use the host compiler
+HOSTAR	?= ar
 HOSTCC	?= gcc
 HOSTLD	?= ld
+AR	 = $(HOSTAR)
 CC	 = $(HOSTCC)
 LD	 = $(HOSTLD)
-AR	 = ar
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
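
For context, the core of the MPX fix is an ordering change in __do_munmap(): mpx_notify_unmap() may itself unmap the VMAs holding MPX bounds tables, so arch_unmap() must run, and finish, before __do_munmap() starts splitting VMAs and detaching them from the rbtree. A simplified call-order sketch of the hunks above (an illustration, not kernel source):

    old order (buggy):
        __do_munmap()
          find_vma() / split / detach_vmas_to_be_unmapped()
          arch_unmap(mm, vma, start, end)
            mpx_notify_unmap()        -> may recursively munmap bound-table
                                         VMAs while the rbtree is already
                                         being modified (corruption)
          unmap_region()

    new order (this merge):
        __do_munmap()
          arch_unmap(mm, start, end)
            mpx_notify_unmap()        -> any recursive unmap completes here,
                                         before the rbtree is touched
          find_vma() / split / detach_vmas_to_be_unmapped()
          unmap_region()

Because arch_unmap() no longer receives a vma, mpx_notify_unmap() now looks up the first VMA itself with find_vma(mm, start), which is why the do/while loop over the caller-supplied vma becomes a while loop in arch/x86/mm/mpx.c.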