author		Linus Torvalds <torvalds@linux-foundation.org>	2017-11-05 23:14:50 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-11-05 23:14:50 +0300
commit		9b3499d752fc292f37503e5c3a5e5df4f7e76d42 (patch)
tree		07f2c8c9d191be7b2b0ad836136be1ef753697fa /arch
parent		b21172cf6dc6e2af21346e774ea2e7c784de30bd (diff)
parent		eda9cec4c9a12208a6f69fbe68f72a6311d50032 (diff)
download	linux-9b3499d752fc292f37503e5c3a5e5df4f7e76d42.tar.xz
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
"Two fixes:
- A PCID related revert that fixes power management and performance
regressions.
- The module loader robustization and sanity check commit is rather
fresh, but it looked like a good idea to apply because of the
hidden data corruption problem such invalid modules could cause"
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/module: Detect and skip invalid relocations
Revert "x86/mm: Stop calling leave_mm() in idle code"
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/include/asm/acpi.h |  2
-rw-r--r--  arch/x86/include/asm/acpi.h  |  2
-rw-r--r--  arch/x86/kernel/module.c     | 13
-rw-r--r--  arch/x86/mm/tlb.c            | 17
4 files changed, 31 insertions, 3 deletions
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index c86a947f5368..a3d0211970e9 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -112,6 +112,8 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
 	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
 }
 
+#define acpi_unlazy_tlb(x)
+
 #ifdef CONFIG_ACPI_NUMA
 extern cpumask_t early_cpu_possible_map;
 #define for_each_possible_early_cpu(cpu)	\
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 72d867f6b518..8d0ec9df1cbe 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -150,6 +150,8 @@ static inline void disable_acpi(void) { }
 extern int x86_acpi_numa_init(void);
 #endif /* CONFIG_ACPI_NUMA */
 
+#define acpi_unlazy_tlb(x)	leave_mm(x)
+
 #ifdef CONFIG_ACPI_APEI
 static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
 {
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 62e7d70aadd5..da0c160e5589 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -172,19 +172,27 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		case R_X86_64_NONE:
 			break;
 		case R_X86_64_64:
+			if (*(u64 *)loc != 0)
+				goto invalid_relocation;
 			*(u64 *)loc = val;
 			break;
 		case R_X86_64_32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			*(u32 *)loc = val;
 			if (val != *(u32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_32S:
+			if (*(s32 *)loc != 0)
+				goto invalid_relocation;
 			*(s32 *)loc = val;
 			if ((s64)val != *(s32 *)loc)
 				goto overflow;
 			break;
 		case R_X86_64_PC32:
+			if (*(u32 *)loc != 0)
+				goto invalid_relocation;
 			val -= (u64)loc;
 			*(u32 *)loc = val;
 #if 0
@@ -200,6 +208,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 	}
 	return 0;
 
+invalid_relocation:
+	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
+	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
+	return -ENOEXEC;
+
 overflow:
 	pr_err("overflow in relocation type %d val %Lx\n",
 	       (int)ELF64_R_TYPE(rel[i].r_info), val);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 0f3d0cea4d00..3118392cdf75 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -85,6 +85,7 @@ void leave_mm(int cpu)
 
 	switch_mm(NULL, &init_mm, NULL);
 }
+EXPORT_SYMBOL_GPL(leave_mm);
 
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
@@ -195,12 +196,22 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		write_cr3(build_cr3(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
-				TLB_FLUSH_ALL);
+
+		/*
+		 * NB: This gets called via leave_mm() in the idle path
+		 * where RCU functions differently.  Tracing normally
+		 * uses RCU, so we need to use the _rcuidle variant.
+		 *
+		 * (There is no good reason for this.  The idle code should
+		 *  be rearranged to call this before rcu_idle_enter().)
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	} else {
 		/* The new ASID is already up to date. */
 		write_cr3(build_cr3_noflush(next, new_asid));
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, 0);
+
+		/* See above wrt _rcuidle. */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
 	this_cpu_write(cpu_tlbstate.loaded_mm, next);
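
The revert puts back the acpi_unlazy_tlb() hook and re-exports leave_mm() so the ACPI idle path can again switch an about-to-idle CPU off its user mm; per the pull message above, dropping that call had caused power management and performance regressions. Below is a stand-alone sketch of just the per-architecture macro shape — leave_mm() is stubbed out and CONFIG_X86 is hard-coded purely for the demo; the real caller lives outside arch/ and is not part of this diffstat.

/* unlazy_tlb_sketch.c: simplified illustration, not the kernel code. */
#include <stdio.h>

static void leave_mm(int cpu)		/* stub standing in for the kernel's leave_mm() */
{
	printf("cpu %d: switched to init_mm, lazy TLB state dropped\n", cpu);
}

#define CONFIG_X86 1			/* pick the x86 flavour of the hook for this demo */

#ifdef CONFIG_X86
#define acpi_unlazy_tlb(x)	leave_mm(x)	/* x86: drop the lazy mm before idling */
#else
#define acpi_unlazy_tlb(x)			/* ia64: the hook compiles away */
#endif

int main(void)
{
	/* A (hypothetical) idle-entry path would call this before a deep C-state. */
	acpi_unlazy_tlb(0);
	return 0;
}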