author		Ingo Molnar <mingo@kernel.org>	2015-08-25 10:59:19 +0300
committer	Ingo Molnar <mingo@kernel.org>	2015-08-25 10:59:19 +0300
commit		8d58b66ed2b000f27658c88a4ed70e8042e86a58 (patch)
tree		5bae2c74f932b5d863f72cb698a6f71260340b26 /arch/x86/mm
parent		13fe86f465b72fc9328d4f5ebc33223c011852ae (diff)
parent		c13dcf9f2d6f5f06ef1bf79ec456df614c5e058b (diff)
download	linux-8d58b66ed2b000f27658c88a4ed70e8042e86a58.tar.xz
Merge tag 'v4.2-rc8' into x86/mm, before applying new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/fault.c		|   5
-rw-r--r--	arch/x86/mm/highmem_32.c	|   3
-rw-r--r--	arch/x86/mm/init_32.c		|   2
-rw-r--r--	arch/x86/mm/iomap_32.c		|   2
-rw-r--r--	arch/x86/mm/ioremap.c		|  30
-rw-r--r--	arch/x86/mm/kasan_init_64.c	|  47
-rw-r--r--	arch/x86/mm/mmap.c		|   7
-rw-r--r--	arch/x86/mm/mpx.c		| 527
-rw-r--r--	arch/x86/mm/tlb.c		|   2
9 files changed, 384 insertions, 241 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 181c53bac3a7..9dc909841739 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>		/* hstate_index_to_shift */
 #include <linux/prefetch.h>		/* prefetchw */
 #include <linux/context_tracking.h>	/* exception_enter(), ... */
+#include <linux/uaccess.h>		/* faulthandler_disabled() */
 
 #include <asm/traps.h>			/* dotraplinkage, ... */
 #include <asm/pgalloc.h>		/* pgd_*(), ... */
@@ -1126,9 +1127,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
-	 * in an atomic region then we must not take the fault:
+	 * in a region with pagefaults disabled then we must not take the fault
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(faulthandler_disabled() || !mm)) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 4500142bc4aa..eecb207a2037 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c23ab1ee3a9a..68aec42545c2 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -434,7 +434,7 @@ void __init add_highpages_with_active_regions(int nid,
 	phys_addr_t start, end;
 	u64 i;
 
-	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
 		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
 					    start_pfn, end_pfn);
 		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index a9dc7a37e6a2..9c0ff045fdd4 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8405c0c6a535..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 		    !PageReserved(pfn_to_page(start_pfn + i)))
 			return 1;
 
-	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
 	return 0;
 }
 
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	pgprot_t prot;
 	int retval;
 	void __iomem *ret_addr;
-	int ram_region;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	/* First check if whole region can be identified as RAM or not */
-	ram_region = region_is_ram(phys_addr, size);
-	if (ram_region > 0) {
-		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-				(unsigned long int)phys_addr,
-				(unsigned long int)last_addr);
+	pfn = phys_addr >> PAGE_SHIFT;
+	last_pfn = last_addr >> PAGE_SHIFT;
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
 		return NULL;
 	}
 
-	/* If could not be identified(-1), check page by page */
-	if (ram_region < 0) {
-		pfn = phys_addr >> PAGE_SHIFT;
-		last_pfn = last_addr >> PAGE_SHIFT;
-		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-					  __ioremap_check_ram) == 1)
-			return NULL;
-	}
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -408,18 +397,18 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
 	unsigned long start = phys & PAGE_MASK;
 	unsigned long offset = phys & ~PAGE_MASK;
-	unsigned long vaddr;
+	void *vaddr;
 
 	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
 	if (page_is_ram(start >> PAGE_SHIFT))
 		return __va(phys);
 
-	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+	vaddr = ioremap_cache(start, PAGE_SIZE);
 	/* Only add the offset on success and return NULL if the ioremap() failed: */
 	if (vaddr)
 		vaddr += offset;
 
-	return (void *)vaddr;
+	return vaddr;
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
@@ -428,7 +417,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 		return;
 
 	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
-	return;
 }
 
 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..e1840f3db5b5 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
@@ -11,7 +12,19 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page used as early shadow. We don't use empty_zero_page
+ * at early stages, stack instrumentation could write some garbage
+ * to this page.
+ * Latter we reuse it as zero shadow for large ranges of memory
+ * that allowed to access, but not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
 
 static int __init map_range(struct range *range)
 {
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
 		pgd_clear(pgd_offset_k(start));
 }
 
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
 	int i;
 	unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
 		WARN_ON(!pmd_none(*pmd));
 		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PMD_SIZE;
 		pmd = pmd_offset(pud, addr);
 	}
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
 		WARN_ON(!pud_none(*pud));
 		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PUD_SIZE;
 		pud = pud_offset(pgd, addr);
 	}
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
 	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
 		WARN_ON(!pgd_none(*pgd));
 		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-					| __PAGE_KERNEL_RO));
+					| _KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 		pgd = pgd_offset_k(addr);
 	}
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
 };
 #endif
 
+void __init kasan_early_init(void)
+{
+	int i;
+	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		kasan_zero_pte[i] = __pte(pte_val);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		kasan_zero_pmd[i] = __pmd(pmd_val);
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		kasan_zero_pud[i] = __pud(pud_val);
+
+	kasan_map_early_shadow(early_level4_pgt);
+	kasan_map_early_shadow(init_level4_pgt);
+}
+
 void __init kasan_init(void)
 {
 	int i;
@@ -176,6 +209,7 @@ void __init kasan_init(void)
 
 	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
 	load_cr3(early_level4_pgt);
+	__flush_tlb_all();
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
@@ -202,5 +236,8 @@ void __init kasan_init(void)
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 
 	load_cr3(init_level4_pgt);
+	__flush_tlb_all();
 	init_task.kasan_depth = 0;
+
+	pr_info("Kernel address sanitizer initialized\n");
 }
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_MPX)
+		return "[mpx]";
+	return NULL;
+}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index c439ec478216..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -10,34 +10,35 @@
 #include <linux/syscalls.h>
 #include <linux/sched/sysctl.h>
 
-#include <asm/i387.h>
 #include <asm/insn.h>
 #include <asm/mman.h>
 #include <asm/mmu_context.h>
 #include <asm/mpx.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
+#define CREATE_TRACE_POINTS
+#include <asm/trace/mpx.h>
+
+static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
 {
-	return "[mpx]";
+	if (is_64bit_mm(mm))
+		return MPX_BD_SIZE_BYTES_64;
+	else
+		return MPX_BD_SIZE_BYTES_32;
 }
 
-static struct vm_operations_struct mpx_vma_ops = {
-	.name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
+static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
 {
-	return (vma->vm_ops == &mpx_vma_ops);
+	if (is_64bit_mm(mm))
+		return MPX_BT_SIZE_BYTES_64;
+	else
+		return MPX_BT_SIZE_BYTES_32;
 }
 
 /*
  * This is really a simplified "vm_mmap". it only handles MPX
  * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
  */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -47,8 +48,8 @@ static unsigned long mpx_mmap(unsigned long len)
 	vm_flags_t vm_flags;
 	struct vm_area_struct *vma;
 
-	/* Only bounds table and bounds directory can be allocated here */
-	if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
+	/* Only bounds table can be allocated here */
+	if (len != mpx_bt_size_bytes(mm))
 		return -EINVAL;
 
 	down_write(&mm->mmap_sem);
@@ -83,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
 		ret = -ENOMEM;
 		goto out;
 	}
-	vma->vm_ops = &mpx_vma_ops;
 
 	if (vm_flags & VM_LOCKED) {
 		up_write(&mm->mmap_sem);
@@ -272,10 +272,9 @@ bad_opcode:
  *
  * The caller is expected to kfree() the returned siginfo_t.
  */
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-				struct xsave_struct *xsave_buf)
+siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 {
-	struct bndreg *bndregs, *bndreg;
+	const struct bndreg *bndregs, *bndreg;
 	siginfo_t *info = NULL;
 	struct insn insn;
 	uint8_t bndregno;
@@ -295,8 +294,8 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
 		err = -EINVAL;
 		goto err_out;
 	}
-	/* get the bndregs _area_ of the xsave structure */
-	bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS);
+	/* get bndregs field from current task's xsave area */
+	bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
 	if (!bndregs) {
 		err = -EINVAL;
 		goto err_out;
@@ -334,6 +333,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
 		err = -EINVAL;
 		goto err_out;
 	}
+	trace_mpx_bounds_register_exception(info->si_addr, bndreg);
 
 	return info;
 
 err_out:
 	/* info might be NULL, but kfree() handles that */
@@ -341,25 +341,18 @@ err_out:
 	return ERR_PTR(err);
 }
 
-static __user void *task_get_bounds_dir(struct task_struct *tsk)
+static __user void *mpx_get_bounds_dir(void)
 {
-	struct bndcsr *bndcsr;
+	const struct bndcsr *bndcsr;
 
 	if (!cpu_feature_enabled(X86_FEATURE_MPX))
 		return MPX_INVALID_BOUNDS_DIR;
 
 	/*
-	 * 32-bit binaries on 64-bit kernels are currently
-	 * unsupported.
-	 */
-	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
-		return MPX_INVALID_BOUNDS_DIR;
-	/*
 	 * The bounds directory pointer is stored in a register
 	 * only accessible if we first do an xsave.
 	 */
-	fpu_save_init(&tsk->thread.fpu);
-	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
+	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
 	if (!bndcsr)
 		return MPX_INVALID_BOUNDS_DIR;
 
@@ -378,10 +371,10 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
 		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
 }
 
-int mpx_enable_management(struct task_struct *tsk)
+int mpx_enable_management(void)
 {
 	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
-	struct mm_struct *mm = tsk->mm;
+	struct mm_struct *mm = current->mm;
 	int ret = 0;
 
 	/*
@@ -390,11 +383,12 @@ int mpx_enable_management(struct task_struct *tsk)
 	 * directory into XSAVE/XRSTOR Save Area and enable MPX through
 	 * XRSTOR instruction.
 	 *
-	 * fpu_xsave() is expected to be very expensive. Storing the bounds
-	 * directory here means that we do not have to do xsave in the unmap
-	 * path; we can just use mm->bd_addr instead.
+	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
+	 * expected to be relatively expensive. Storing the bounds
+	 * directory here means that we do not have to do xsave in the
+	 * unmap path; we can just use mm->bd_addr instead.
 	 */
-	bd_base = task_get_bounds_dir(tsk);
+	bd_base = mpx_get_bounds_dir();
 	down_write(&mm->mmap_sem);
 	mm->bd_addr = bd_base;
 	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
@@ -404,7 +398,7 @@ int mpx_enable_management(struct task_struct *tsk)
 	return ret;
 }
 
-int mpx_disable_management(struct task_struct *tsk)
+int mpx_disable_management(void)
 {
 	struct mm_struct *mm = current->mm;
 
@@ -417,29 +411,59 @@ int mpx_disable_management(struct task_struct *tsk)
 	return 0;
 }
 
+static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
+				unsigned long *curval,
+				unsigned long __user *addr,
+				unsigned long old_val, unsigned long new_val)
+{
+	int ret;
+	/*
+	 * user_atomic_cmpxchg_inatomic() actually uses sizeof()
+	 * the pointer that we pass to it to figure out how much
+	 * data to cmpxchg. We have to be careful here not to
+	 * pass a pointer to a 64-bit data type when we only want
+	 * a 32-bit copy.
+	 */
+	if (is_64bit_mm(mm)) {
+		ret = user_atomic_cmpxchg_inatomic(curval,
+				addr, old_val, new_val);
+	} else {
+		u32 uninitialized_var(curval_32);
+		u32 old_val_32 = old_val;
+		u32 new_val_32 = new_val;
+		u32 __user *addr_32 = (u32 __user *)addr;
+
+		ret = user_atomic_cmpxchg_inatomic(&curval_32,
+				addr_32, old_val_32, new_val_32);
+		*curval = curval_32;
+	}
+	return ret;
+}
+
 /*
- * With 32-bit mode, MPX_BT_SIZE_BYTES is 4MB, and the size of each
- * bounds table is 16KB. With 64-bit mode, MPX_BT_SIZE_BYTES is 2GB,
+ * With 32-bit mode, a bounds directory is 4MB, and the size of each
+ * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
  * and the size of each bounds table is 4MB.
 */
-static int allocate_bt(long __user *bd_entry)
+static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
 {
 	unsigned long expected_old_val = 0;
 	unsigned long actual_old_val = 0;
 	unsigned long bt_addr;
+	unsigned long bd_new_entry;
 	int ret = 0;
 
 	/*
 	 * Carve the virtual space out of userspace for the new
 	 * bounds table:
 	 */
-	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
+	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
 	if (IS_ERR((void *)bt_addr))
 		return PTR_ERR((void *)bt_addr);
 	/*
 	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
 	 */
-	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
+	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
 
 	/*
 	 * Go poke the address of the new bounds table in to the
@@ -452,8 +476,8 @@ static int allocate_bt(long __user *bd_entry)
 	 * mmap_sem at this point, unlike some of the other part
 	 * of the MPX code that have to pagefault_disable().
 	 */
-	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
-					   expected_old_val, bt_addr);
+	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
+				   expected_old_val, bd_new_entry);
 	if (ret)
 		goto out_unmap;
 
@@ -481,9 +505,10 @@ static int allocate_bt(long __user *bd_entry)
 		ret = -EINVAL;
 		goto out_unmap;
 	}
+	trace_mpx_new_bounds_table(bt_addr);
 	return 0;
 out_unmap:
-	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
+	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
 	return ret;
 }
 
@@ -498,12 +523,13 @@ out_unmap:
 * bound table is 16KB. With 64-bit mode, the size of BD is 2GB,
 * and the size of each bound table is 4MB.
 */
-static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
+static int do_mpx_bt_fault(void)
 {
 	unsigned long bd_entry, bd_base;
-	struct bndcsr *bndcsr;
+	const struct bndcsr *bndcsr;
+	struct mm_struct *mm = current->mm;
 
-	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
+	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
 	if (!bndcsr)
 		return -EINVAL;
 	/*
@@ -520,13 +546,13 @@ static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
 	 * the directory is.
 	 */
 	if ((bd_entry < bd_base) ||
-	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
+	    (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
 		return -EINVAL;
 
-	return allocate_bt((long __user *)bd_entry);
+	return allocate_bt(mm, (long __user *)bd_entry);
 }
 
-int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
+int mpx_handle_bd_fault(void)
 {
 	/*
 	 * Userspace never asked us to manage the bounds tables,
@@ -535,7 +561,7 @@ int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
 	if (!kernel_managing_mpx_tables(current->mm))
 		return -EINVAL;
 
-	if (do_mpx_bt_fault(xsave_buf)) {
+	if (do_mpx_bt_fault()) {
 		force_sig(SIGSEGV, current);
 		/*
 		 * The force_sig() is essentially "handling" this
@@ -572,29 +598,55 @@ static int mpx_resolve_fault(long __user *addr, int write)
 	return 0;
 }
 
+static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
+					     unsigned long bd_entry)
+{
+	unsigned long bt_addr = bd_entry;
+	int align_to_bytes;
+	/*
+	 * Bit 0 in a bt_entry is always the valid bit.
+	 */
+	bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
+	/*
+	 * Tables are naturally aligned at 8-byte boundaries
+	 * on 64-bit and 4-byte boundaries on 32-bit. The
+	 * documentation makes it appear that the low bits
+	 * are ignored by the hardware, so we do the same.
+	 */
+	if (is_64bit_mm(mm))
+		align_to_bytes = 8;
+	else
+		align_to_bytes = 4;
+	bt_addr &= ~(align_to_bytes-1);
+	return bt_addr;
+}
+
 /*
 * Get the base of bounds tables pointed by specific bounds
 * directory entry.
 */
 static int get_bt_addr(struct mm_struct *mm,
-			long __user *bd_entry, unsigned long *bt_addr)
+		       long __user *bd_entry_ptr,
+		       unsigned long *bt_addr_result)
 {
 	int ret;
 	int valid_bit;
+	unsigned long bd_entry;
+	unsigned long bt_addr;
 
-	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
+	if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
 		return -EFAULT;
 
 	while (1) {
 		int need_write = 0;
 
 		pagefault_disable();
-		ret = get_user(*bt_addr, bd_entry);
+		ret = get_user(bd_entry, bd_entry_ptr);
 		pagefault_enable();
 		if (!ret)
 			break;
 		if (ret == -EFAULT)
-			ret = mpx_resolve_fault(bd_entry, need_write);
+			ret = mpx_resolve_fault(bd_entry_ptr, need_write);
 		/*
 		 * If we could not resolve the fault, consider it
 		 * userspace's fault and error out.
@@ -603,8 +655,8 @@ static int get_bt_addr(struct mm_struct *mm,
 			return ret;
 	}
 
-	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
-	*bt_addr &= MPX_BT_ADDR_MASK;
+	valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
+	bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);
 
 	/*
 	 * When the kernel is managing bounds tables, a bounds directory
@@ -613,7 +665,7 @@ static int get_bt_addr(struct mm_struct *mm,
 	 * data in the address field, we know something is wrong. This
 	 * -EINVAL return will cause a SIGSEGV.
 	 */
-	if (!valid_bit && *bt_addr)
+	if (!valid_bit && bt_addr)
 		return -EINVAL;
 	/*
 	 * Do we have an completely zeroed bt entry? That is OK. It
@@ -624,19 +676,100 @@ static int get_bt_addr(struct mm_struct *mm,
 	if (!valid_bit)
 		return -ENOENT;
 
+	*bt_addr_result = bt_addr;
 	return 0;
 }
 
+static inline int bt_entry_size_bytes(struct mm_struct *mm)
+{
+	if (is_64bit_mm(mm))
+		return MPX_BT_ENTRY_BYTES_64;
+	else
+		return MPX_BT_ENTRY_BYTES_32;
+}
+
+/*
+ * Take a virtual address and turns it in to the offset in bytes
+ * inside of the bounds table where the bounds table entry
+ * controlling 'addr' can be found.
+ */
+static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
+		unsigned long addr)
+{
+	unsigned long bt_table_nr_entries;
+	unsigned long offset = addr;
+
+	if (is_64bit_mm(mm)) {
+		/* Bottom 3 bits are ignored on 64-bit */
+		offset >>= 3;
+		bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
+	} else {
+		/* Bottom 2 bits are ignored on 32-bit */
+		offset >>= 2;
+		bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
+	}
+	/*
+	 * We know the size of the table in to which we are
+	 * indexing, and we have eliminated all the low bits
+	 * which are ignored for indexing.
+	 *
+	 * Mask out all the high bits which we do not need
+	 * to index in to the table. Note that the tables
+	 * are always powers of two so this gives us a proper
+	 * mask.
+	 */
+	offset &= (bt_table_nr_entries-1);
+	/*
+	 * We now have an entry offset in terms of *entries* in
+	 * the table. We need to scale it back up to bytes.
+	 */
+	offset *= bt_entry_size_bytes(mm);
+	return offset;
+}
+
+/*
+ * How much virtual address space does a single bounds
+ * directory entry cover?
+ *
+ * Note, we need a long long because 4GB doesn't fit in
+ * to a long on 32-bit.
+ */
+static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
+{
+	unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+	if (is_64bit_mm(mm))
+		return virt_space / MPX_BD_NR_ENTRIES_64;
+	else
+		return virt_space / MPX_BD_NR_ENTRIES_32;
+}
+
 /*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
-static int zap_bt_entries(struct mm_struct *mm,
+static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
 		unsigned long bt_addr,
-		unsigned long start, unsigned long end)
+		unsigned long start_mapping, unsigned long end_mapping)
 {
 	struct vm_area_struct *vma;
 	unsigned long addr, len;
+	unsigned long start;
+	unsigned long end;
+
+	/*
+	 * if we 'end' on a boundary, the offset will be 0 which
+	 * is not what we want. Back it up a byte to get the
+	 * last bt entry. Then once we have the entry itself,
+	 * move 'end' back up by the table entry size.
+	 */
+	start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
+	end = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
+	/*
+	 * Move end back up by one entry. Among other things
+	 * this ensures that it remains page-aligned and does
+	 * not screw up zap_page_range()
+	 */
+	end += bt_entry_size_bytes(mm);
 
 	/*
 	 * Find the first overlapping vma. If vma->vm_start > start, there
@@ -648,7 +781,7 @@ static int zap_bt_entries(struct mm_struct *mm,
 		return -EINVAL;
 
 	/*
-	 * A NUMA policy on a VM_MPX VMA could cause this bouds table to
+	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
 	 * be split. So we need to look across the entire 'start -> end'
 	 * range of this bounds table, find all of the VM_MPX VMAs, and
 	 * zap only those.
@@ -661,32 +794,70 @@ static int zap_bt_entries(struct mm_struct *mm,
 		 * so stop immediately and return an error. This
 		 * probably results in a SIGSEGV.
 		 */
-		if (!is_mpx_vma(vma))
+		if (!(vma->vm_flags & VM_MPX))
 			return -EINVAL;
 
 		len = min(vma->vm_end, end) - addr;
 		zap_page_range(vma, addr, len, NULL);
+		trace_mpx_unmap_zap(addr, addr+len);
 
 		vma = vma->vm_next;
 		addr = vma->vm_start;
 	}
-
 	return 0;
 }
 
-static int unmap_single_bt(struct mm_struct *mm,
+static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
+		unsigned long addr)
+{
+	/*
+	 * There are several ways to derive the bd offsets. We
+	 * use the following approach here:
+	 * 1. We know the size of the virtual address space
+	 * 2. We know the number of entries in a bounds table
+	 * 3. We know that each entry covers a fixed amount of
+	 *    virtual address space.
+	 * So, we can just divide the virtual address by the
+	 * virtual space used by one entry to determine which
+	 * entry "controls" the given virtual address.
+	 */
+	if (is_64bit_mm(mm)) {
+		int bd_entry_size = 8; /* 64-bit pointer */
+		/*
+		 * Take the 64-bit addressing hole in to account.
+		 */
+		addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
+		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
+	} else {
+		int bd_entry_size = 4; /* 32-bit pointer */
+		/*
+		 * 32-bit has no hole so this case needs no mask
+		 */
+		return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
+	}
+	/*
+	 * The two return calls above are exact copies. If we
+	 * pull out a single copy and put it in here, gcc won't
+	 * realize that we're doing a power-of-2 divide and use
+	 * shifts. It uses a real divide. If we put them up
+	 * there, it manages to figure it out (gcc 4.8.3).
+	 */
+}
+
+static int unmap_entire_bt(struct mm_struct *mm,
 		long __user *bd_entry, unsigned long bt_addr)
 {
 	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
-	unsigned long actual_old_val = 0;
+	unsigned long uninitialized_var(actual_old_val);
 	int ret;
 
 	while (1) {
 		int need_write = 1;
+		unsigned long cleared_bd_entry = 0;
 
 		pagefault_disable();
-		ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
-						   expected_old_val, 0);
+		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
+				bd_entry, expected_old_val, cleared_bd_entry);
 		pagefault_enable();
 		if (!ret)
 			break;
@@ -705,9 +876,8 @@ static int unmap_single_bt(struct mm_struct *mm,
 	if (actual_old_val != expected_old_val) {
 		/*
 		 * Someone else raced with us to unmap the table.
-		 * There was no bounds table pointed to by the
-		 * directory, so declare success. Somebody freed
-		 * it.
+		 * That is OK, since we were both trying to do
+		 * the same thing. Declare success.
 		 */
 		if (!actual_old_val)
 			return 0;
@@ -720,176 +890,113 @@ static int unmap_single_bt(struct mm_struct *mm,
 		 */
 		return -EINVAL;
 	}
-
 	/*
 	 * Note, we are likely being called under do_munmap() already. To
 	 * avoid recursion, do_munmap() will check whether it comes
 	 * from one bounds table through VM_MPX flag.
 	 */
-	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
+	return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
 }
 
-/*
- * If the bounds table pointed by bounds directory 'bd_entry' is
- * not shared, unmap this whole bounds table. Otherwise, only free
- * those backing physical pages of bounds table entries covered
- * in this virtual address region start...end.
- */
-static int unmap_shared_bt(struct mm_struct *mm,
-		long __user *bd_entry, unsigned long start,
-		unsigned long end, bool prev_shared, bool next_shared)
+static int try_unmap_single_bt(struct mm_struct *mm,
+	       unsigned long start, unsigned long end)
 {
-	unsigned long bt_addr;
-	int ret;
-
-	ret = get_bt_addr(mm, bd_entry, &bt_addr);
+	struct vm_area_struct *next;
+	struct vm_area_struct *prev;
 	/*
-	 * We could see an "error" ret for not-present bounds
-	 * tables (not really an error), or actual errors, but
-	 * stop unmapping either way.
+	 * "bta" == Bounds Table Area: the area controlled by the
+	 * bounds table that we are unmapping.
 	 */
-	if (ret)
-		return ret;
-
-	if (prev_shared && next_shared)
-		ret = zap_bt_entries(mm, bt_addr,
-				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
-				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
-	else if (prev_shared)
-		ret = zap_bt_entries(mm, bt_addr,
-				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
-				bt_addr+MPX_BT_SIZE_BYTES);
-	else if (next_shared)
-		ret = zap_bt_entries(mm, bt_addr, bt_addr,
-				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
-	else
-		ret = unmap_single_bt(mm, bd_entry, bt_addr);
-
-	return ret;
-}
-
-/*
- * A virtual address region being munmap()ed might share bounds table
- * with adjacent VMAs. We only need to free the backing physical
- * memory of these shared bounds tables entries covered in this virtual
- * address region.
- */
-static int unmap_edge_bts(struct mm_struct *mm,
-		unsigned long start, unsigned long end)
-{
+	unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
+	unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
+	unsigned long uninitialized_var(bt_addr);
+	void __user *bde_vaddr;
 	int ret;
-	long __user *bde_start, *bde_end;
-	struct vm_area_struct *prev, *next;
-	bool prev_shared = false, next_shared = false;
-
-	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
-	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
-
 	/*
-	 * Check whether bde_start and bde_end are shared with adjacent
-	 * VMAs.
-	 *
-	 * We already unliked the VMAs from the mm's rbtree so 'start'
+	 * We already unlinked the VMAs from the mm's rbtree so 'start'
 	 * is guaranteed to be in a hole. This gets us the first VMA
 	 * before the hole in to 'prev' and the next VMA after the hole
 	 * in to 'next'.
 	 */
 	next = find_vma_prev(mm, start, &prev);
-	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
-			== bde_start)
-		prev_shared = true;
-	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
-			== bde_end)
-		next_shared = true;
-
 	/*
-	 * This virtual address region being munmap()ed is only
-	 * covered by one bounds table.
-	 *
-	 * In this case, if this table is also shared with adjacent
-	 * VMAs, only part of the backing physical memory of the bounds
-	 * table need be freeed. Otherwise the whole bounds table need
-	 * be unmapped.
-	 */
-	if (bde_start == bde_end) {
-		return unmap_shared_bt(mm, bde_start, start, end,
-				prev_shared, next_shared);
+	 * Do not count other MPX bounds table VMAs as neighbors.
+	 * Although theoretically possible, we do not allow bounds
+	 * tables for bounds tables so our heads do not explode.
+	 * If we count them as neighbors here, we may end up with
+	 * lots of tables even though we have no actual table
+	 * entries in use.
+	 */
+	while (next && (next->vm_flags & VM_MPX))
+		next = next->vm_next;
+	while (prev && (prev->vm_flags & VM_MPX))
+		prev = prev->vm_prev;
+	/*
+	 * We know 'start' and 'end' lie within an area controlled
+	 * by a single bounds table. See if there are any other
+	 * VMAs controlled by that bounds table. If there are not
+	 * then we can "expand" the are we are unmapping to possibly
+	 * cover the entire table.
+	 */
+	next = find_vma_prev(mm, start, &prev);
+	if ((!prev || prev->vm_end <= bta_start_vaddr) &&
+	    (!next || next->vm_start >= bta_end_vaddr)) {
+		/*
+		 * No neighbor VMAs controlled by same bounds
+		 * table. Try to unmap the whole thing
+		 */
+		start = bta_start_vaddr;
+		end = bta_end_vaddr;
 	}
 
+	bde_vaddr = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
+	ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
 	/*
-	 * If more than one bounds tables are covered in this virtual
-	 * address region being munmap()ed, we need to separately check
-	 * whether bde_start and bde_end are shared with adjacent VMAs.
+	 * No bounds table there, so nothing to unmap.
 	 */
-	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
-	if (ret)
-		return ret;
-	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
+	if (ret == -ENOENT) {
+		ret = 0;
+		return 0;
+	}
 	if (ret)
 		return ret;
-
-	return 0;
+	/*
+	 * We are unmapping an entire table. Either because the
+	 * unmap that started this whole process was large enough
+	 * to cover an entire table, or that the unmap was small
+	 * but was the area covered by a bounds table.
+	 */
+	if ((start == bta_start_vaddr) &&
+	    (end == bta_end_vaddr))
+		return unmap_entire_bt(mm, bde_vaddr, bt_addr);
+	return zap_bt_entries_mapping(mm, bt_addr, start, end);
 }
 
 static int mpx_unmap_tables(struct mm_struct *mm,
 		unsigned long start, unsigned long end)
 {
-	int ret;
-	long __user *bd_entry, *bde_start, *bde_end;
-	unsigned long bt_addr;
-
-	/*
-	 * "Edge" bounds tables are those which are being used by the region
-	 * (start -> end), but that may be shared with adjacent areas. If they
-	 * turn out to be completely unshared, they will be freed. If they are
-	 * shared, we will free the backing store (like an MADV_DONTNEED) for
-	 * areas used by this region.
-	 */
-	ret = unmap_edge_bts(mm, start, end);
-	switch (ret) {
-		/* non-present tables are OK */
-		case 0:
-		case -ENOENT:
-			/* Success, or no tables to unmap */
-			break;
-		case -EINVAL:
-		case -EFAULT:
-		default:
-			return ret;
-	}
-
-	/*
-	 * Only unmap the bounds table that are
-	 *   1. fully covered
-	 *   2. not at the edges of the mapping, even if full aligned
-	 */
-	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
-	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
-	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
-		ret = get_bt_addr(mm, bd_entry, &bt_addr);
-		switch (ret) {
-			case 0:
-				break;
-			case -ENOENT:
-				/* No table here, try the next one */
-				continue;
-			case -EINVAL:
-			case -EFAULT:
-			default:
-				/*
-				 * Note: we are being strict here.
-				 * Any time we run in to an issue
-				 * unmapping tables, we stop and
-				 * SIGSEGV.
-				 */
-				return ret;
-		}
-
-		ret = unmap_single_bt(mm, bd_entry, bt_addr);
+	unsigned long one_unmap_start;
+	trace_mpx_unmap_search(start, end);
+
+	one_unmap_start = start;
+	while (one_unmap_start < end) {
+		int ret;
+		unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
+						       bd_entry_virt_space(mm));
+		unsigned long one_unmap_end = end;
+		/*
+		 * if the end is beyond the current bounds table,
+		 * move it back so we only deal with a single one
		 * at a time
+		 */
+		if (one_unmap_end > next_unmap_start)
+			one_unmap_end = next_unmap_start;
+		ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
 		if (ret)
 			return ret;
-	}
+		one_unmap_start = next_unmap_start;
 	}
 	return 0;
 }
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
 	} else {
 		unsigned long addr;
 		unsigned long nr_pages =
-			f->flush_end - f->flush_start / PAGE_SIZE;
+			(f->flush_end - f->flush_start) / PAGE_SIZE;
 		addr = f->flush_start;
 		while (addr < f->flush_end) {
 			__flush_tlb_single(addr);