author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-27 04:03:51 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-27 04:03:51 +0300
commit    | 312a466155108329c458049dc76a21ad56106960 (patch)
tree      | d195001a1d258992820b7d7eaf1c0f7941e9de59 /arch/x86
parent    | 6e54df001ac9262e3b78b34b87390fcb54677a0d (diff)
parent    | 4b1bacab61aa252d15dde99cd0440a965bd863e5 (diff)
download  | linux-312a466155108329c458049dc76a21ad56106960.tar.xz
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
"Misc cleanups"
* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/kprobes: Remove trampoline_handler() prototype
x86/kernel: Fix more -Wmissing-prototypes warnings
x86: Fix various typos in comments
x86/headers: Fix -Wmissing-prototypes warning
x86/process: Avoid unnecessary NULL check in get_wchan()
x86/traps: Complete prototype declarations
x86/mce: Fix -Wmissing-prototypes warnings
x86/gart: Rewrite early_gart_iommu_check() comment
Diffstat (limited to 'arch/x86')
63 files changed, 151 insertions, 76 deletions
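Several of the commits in this pull (x86/kernel, x86/headers, x86/traps, x86/mce) silence -Wmissing-prototypes warnings in one of two ways, both visible in the hunks below: publish a prototype in a shared header for functions that are entered from assembly, or mark file-local helpers static. The following is a minimal sketch of that pattern outside the kernel tree; the identifiers (smp_demo_interrupt, demo_local_helper, demo_entry_point) are made up for illustration and are not part of the patch.

/*
 * Minimal -Wmissing-prototypes illustration; all names here are
 * hypothetical, not identifiers from the patch.
 *
 * Build: gcc -Wall -Wmissing-prototypes -c wmissing_demo.c
 */

struct pt_regs;          /* opaque; the kernel gets the real definition elsewhere */

/*
 * Route 1 (what the patch does in asm/irq.h, asm/smp.h, asm/traps.h, ...):
 * publish a prototype for handlers that have no C caller because they are
 * entered from assembly.  Without this declaration, gcc warns
 * "no previous prototype for 'smp_demo_interrupt'" at the definition below,
 * since the function has external linkage but no declaration in scope.
 */
void smp_demo_interrupt(struct pt_regs *regs);

void smp_demo_interrupt(struct pt_regs *regs)
{
        (void)regs;      /* a real handler would acknowledge the interrupt here */
}

/*
 * Route 2 (what the patch does for trampoline_handler(), uv_init_hub_info(),
 * ...): if nothing outside this file calls the function, give it internal
 * linkage instead -- static functions need no prototype.
 */
static int demo_local_helper(void)
{
        return 0;
}

int demo_entry_point(void);             /* prototype so this one does not warn either */

int demo_entry_point(void)
{
        smp_demo_interrupt((struct pt_regs *)0);
        return demo_local_helper();
}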
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 41034745d6a2..d1ce49119da8 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -1,5 +1,5 @@ /* - * Glue Code for the AVX assembler implemention of the Cast5 Cipher + * Glue Code for the AVX assembler implementation of the Cast5 Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c index 9fb66b5e94b2..18965c39305e 100644 --- a/arch/x86/crypto/cast6_avx_glue.c +++ b/arch/x86/crypto/cast6_avx_glue.c @@ -1,5 +1,5 @@ /* - * Glue Code for the AVX assembler implemention of the Cast6 Cipher + * Glue Code for the AVX assembler implementation of the Cast6 Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 3b2490b81918..7bc105f47d21 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) /* * In order to return to user mode, we need to have IRQs off with * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags - * can be set at any time on preemptable kernels if we have IRQs on, + * can be set at any time on preemptible kernels if we have IRQs on, * so we need to loop. Disabling preemption wouldn't help: doing the * work to clear some of the flags can sleep. */ diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 7eb878561910..babc4e7a519c 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr) * abusing from userspace install_speciall_mapping, which may * not do accounting and rlimit right. * We could search vma near context.vdso, but it's a slowpath, - * so let's explicitely check all VMAs to be completely sure. + * so let's explicitly check all VMAs to be completely sure. */ for (vma = mm->mmap; vma; vma = vma->vm_next) { if (vma_is_special_mapping(vma, &vdso_mapping) || diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 24ffa1e88cf9..a01ef1b0f883 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -589,7 +589,7 @@ static __init int bts_init(void) * the AUX buffer. * * However, since this driver supports per-CPU and per-task inherit - * we cannot use the user mapping since it will not be availble + * we cannot use the user mapping since it will not be available * if we're not running the owning process. * * With PTI we can't use the kernal map either, because its not diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index ecc3e34ca955..40e12cfc87f6 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added) * in sequence on the same PMC or on different PMCs. * * In practise it appears some of these events do in fact count, and - * we need to programm all 4 events. + * we need to program all 4 events. 
*/ static void intel_pmu_nhm_workaround(void) { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b7b01d762d32..e9acf1d2e7b2 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event, /* * We must however always use iregs for the unwinder to stay sane; the * record BP,SP,IP can point into thin air when the record is from a - * previous PMI context or an (I)RET happend between the record and + * previous PMI context or an (I)RET happened between the record and * PMI. */ if (sample_type & PERF_SAMPLE_CALLCHAIN) diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c index d32c0eed38ca..dee579efb2b2 100644 --- a/arch/x86/events/intel/p4.c +++ b/arch/x86/events/intel/p4.c @@ -1259,7 +1259,7 @@ again: } /* * Perf does test runs to see if a whole group can be assigned - * together succesfully. There can be multiple rounds of this. + * together successfully. There can be multiple rounds of this. * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config * bits, such that the next round of group assignments will * cause the above p4_should_swap_ts to pass instead of fail. diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 4cd6a3b71824..0660e14690c8 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -174,7 +174,7 @@ static inline int alternatives_text_reserved(void *start, void *end) /* * Alternative inline assembly with input. * - * Pecularities: + * Peculiarities: * No memory clobber here. * Argument numbers start with 1. * Best is to use constraints that are fixed size (like (%1) ... "r") diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index bfb85e5844ab..a8bfac131256 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -7,7 +7,7 @@ #include <asm/alternative.h> /* Provides LOCK_PREFIX */ /* - * Non-existant functions to indicate usage errors at link time + * Non-existent functions to indicate usage errors at link time * (or compile-time if the compiler implements __compiletime_error(). */ extern void __xchg_wrong_size(void) diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h index a7adb2bfbf0b..0acf5ee45a21 100644 --- a/arch/x86/include/asm/crash.h +++ b/arch/x86/include/asm/crash.h @@ -6,5 +6,6 @@ int crash_load_segments(struct kimage *image); int crash_copy_backup_region(struct kimage *image); int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params); +void crash_smp_send_stop(void); #endif /* _ASM_X86_CRASH_H */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index d1e64ac80b9c..42982a6cc6cf 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -19,7 +19,7 @@ * This is the main reason why we're doing stable VA mappings for RT * services. 
* - * This flag is used in conjuction with a chicken bit called + * This flag is used in conjunction with a chicken bit called * "efi=old_map" which can be used as a fallback to the old runtime * services mapping method in case there's some b0rkage with a * particular EFI implementation (haha, it is hard to hold up the diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 2395bb794c7b..fbb16e6b6c18 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -30,6 +30,9 @@ extern void fixup_irqs(void); #ifdef CONFIG_HAVE_KVM extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); +extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs); +extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs); +extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs); #endif extern void (*x86_platform_ipi_callback)(void); @@ -41,9 +44,13 @@ extern __visible unsigned int do_IRQ(struct pt_regs *regs); extern void init_ISA_irqs(void); +extern void __init init_IRQ(void); + #ifdef CONFIG_X86_LOCAL_APIC void arch_trigger_cpumask_backtrace(const struct cpumask *mask, bool exclude_self); + +extern __visible void smp_x86_platform_ipi(struct pt_regs *regs); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace #endif diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h index 800ffce0db29..80b35e3adf03 100644 --- a/arch/x86/include/asm/irq_work.h +++ b/arch/x86/include/asm/irq_work.h @@ -10,6 +10,7 @@ static inline bool arch_irq_work_has_interrupt(void) return boot_cpu_has(X86_FEATURE_APIC); } extern void arch_irq_work_raise(void); +extern __visible void smp_irq_work_interrupt(struct pt_regs *regs); #else static inline bool arch_irq_work_has_interrupt(void) { diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 4c723632c036..5ed3cf1c3934 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -92,6 +92,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); void kvm_async_pf_task_wake(u32 token); u32 kvm_read_and_reset_pf_reason(void); extern void kvm_disable_steal_time(void); +void do_async_page_fault(struct pt_regs *regs, unsigned long error_code); #ifdef CONFIG_PARAVIRT_SPINLOCKS void __init kvm_spinlock_init(void); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 4bf42f9e4eea..a97f28d914d5 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -26,6 +26,11 @@ struct static_key; extern struct static_key paravirt_steal_enabled; extern struct static_key paravirt_steal_rq_enabled; +__visible void __native_queued_spin_unlock(struct qspinlock *lock); +bool pv_is_native_spin_unlock(void); +__visible bool __native_vcpu_is_preempted(long cpu); +bool pv_is_native_vcpu_is_preempted(void); + static inline u64 paravirt_steal_clock(int cpu) { return PVOP_CALL1(u64, time.steal_clock, cpu); diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index a671a1145906..04c17be9b5fd 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h @@ -26,6 +26,7 @@ void __noreturn machine_real_restart(unsigned int type); #define MRR_APM 1 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); +void nmi_panic_self_stop(struct pt_regs *regs); void nmi_shootdown_cpus(nmi_shootdown_cb callback); void run_crash_ipi_callback(struct pt_regs *regs); diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index 
ae13bc974416..ed8ec011a9fd 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -46,6 +46,9 @@ extern unsigned long saved_video_mode; extern void reserve_standard_io_resources(void); extern void i386_reserve_resources(void); +extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp); +extern unsigned long __startup_secondary_64(void); +extern int early_make_pgtable(unsigned long address); #ifdef CONFIG_X86_INTEL_MID extern void x86_intel_mid_early_setup(void); diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h index bd26834724e5..2fcbd6f33ef7 100644 --- a/arch/x86/include/asm/sighandling.h +++ b/arch/x86/include/asm/sighandling.h @@ -17,4 +17,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where); int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate, struct pt_regs *regs, unsigned long mask); + +#ifdef CONFIG_X86_X32_ABI +asmlinkage long sys32_x32_rt_sigreturn(void); +#endif + #endif /* _ASM_X86_SIGHANDLING_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 547c4fe50711..2e95b6c1bca3 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -148,6 +148,12 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle); void smp_store_boot_cpu_info(void); void smp_store_cpu_info(int id); + +asmlinkage __visible void smp_reboot_interrupt(void); +__visible void smp_reschedule_interrupt(struct pt_regs *regs); +__visible void smp_call_function_interrupt(struct pt_regs *regs); +__visible void smp_call_function_single_interrupt(struct pt_regs *r); + #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu) #define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu) diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h index 69615e387973..e0e6d7f21399 100644 --- a/arch/x86/include/asm/trace/exceptions.h +++ b/arch/x86/include/asm/trace/exceptions.h @@ -45,6 +45,7 @@ DEFINE_PAGE_FAULT_EVENT(page_fault_user); DEFINE_PAGE_FAULT_EVENT(page_fault_kernel); #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE exceptions #endif /* _TRACE_PAGE_FAULT_H */ diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 0af81b590a0c..33b9d0f0aafe 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -389,6 +389,7 @@ TRACE_EVENT(vector_free_moved, #endif /* CONFIG_X86_LOCAL_APIC */ #undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . 
#define TRACE_INCLUDE_FILE irq_vectors #endif /* _TRACE_IRQ_VECTORS_H */ diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 3de69330e6c5..7d6f3f3fad78 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -61,34 +61,38 @@ asmlinkage void xen_machine_check(void); asmlinkage void xen_simd_coprocessor_error(void); #endif -dotraplinkage void do_divide_error(struct pt_regs *, long); -dotraplinkage void do_debug(struct pt_regs *, long); -dotraplinkage void do_nmi(struct pt_regs *, long); -dotraplinkage void do_int3(struct pt_regs *, long); -dotraplinkage void do_overflow(struct pt_regs *, long); -dotraplinkage void do_bounds(struct pt_regs *, long); -dotraplinkage void do_invalid_op(struct pt_regs *, long); -dotraplinkage void do_device_not_available(struct pt_regs *, long); -dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long); -dotraplinkage void do_invalid_TSS(struct pt_regs *, long); -dotraplinkage void do_segment_not_present(struct pt_regs *, long); -dotraplinkage void do_stack_segment(struct pt_regs *, long); +dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code); +dotraplinkage void do_debug(struct pt_regs *regs, long error_code); +dotraplinkage void do_nmi(struct pt_regs *regs, long error_code); +dotraplinkage void do_int3(struct pt_regs *regs, long error_code); +dotraplinkage void do_overflow(struct pt_regs *regs, long error_code); +dotraplinkage void do_bounds(struct pt_regs *regs, long error_code); +dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code); +dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code); +dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code); +dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code); +dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code); +dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_64 -dotraplinkage void do_double_fault(struct pt_regs *, long); +dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code); +asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs); +asmlinkage __visible notrace +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s); +void __init trap_init(void); #endif -dotraplinkage void do_general_protection(struct pt_regs *, long); -dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); -dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long); -dotraplinkage void do_coprocessor_error(struct pt_regs *, long); -dotraplinkage void do_alignment_check(struct pt_regs *, long); +dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code); +dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code); +dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code); +dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code); +dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_MCE -dotraplinkage void do_machine_check(struct pt_regs *, long); +dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code); #endif -dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); +dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code); #ifdef CONFIG_X86_32 -dotraplinkage void do_iret_error(struct pt_regs *, long); +dotraplinkage void 
do_iret_error(struct pt_regs *regs, long error_code); #endif -dotraplinkage void do_mce(struct pt_regs *, long); +dotraplinkage void do_mce(struct pt_regs *regs, long error_code); static inline int get_si_code(unsigned long condition) { @@ -104,11 +108,16 @@ extern int panic_on_unrecovered_nmi; void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 -asmlinkage void smp_thermal_interrupt(void); -asmlinkage void smp_threshold_interrupt(void); -asmlinkage void smp_deferred_error_interrupt(void); +asmlinkage void smp_thermal_interrupt(struct pt_regs *regs); +asmlinkage void smp_threshold_interrupt(struct pt_regs *regs); +asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs); #endif +void smp_apic_timer_interrupt(struct pt_regs *regs); +void smp_spurious_interrupt(struct pt_regs *regs); +void smp_error_interrupt(struct pt_regs *regs); +asmlinkage void smp_irq_move_cleanup_interrupt(void); + extern void ist_enter(struct pt_regs *regs); extern void ist_exit(struct pt_regs *regs); extern void ist_begin_non_atomic(struct pt_regs *regs); diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index eb5bbfeccb66..8a0c25c6bf09 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -35,6 +35,7 @@ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); extern void tsc_early_init(void); extern void tsc_init(void); +extern unsigned long calibrate_delay_is_known(void); extern void mark_tsc_unstable(char *reason); extern int unsynchronized_tsc(void); extern int check_tsc_unstable(void); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 06635fbca81c..2624de16cd7a 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic); /** * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base * has been registered - * @handle: ACPI handle of the IOAPIC deivce + * @handle: ACPI handle of the IOAPIC device * @gsi_base: GSI base associated with the IOAPIC * * Assume caller holds some type of lock to serialize acpi_ioapic_registered() diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index 2c4d5ece7456..58176b56354e 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c @@ -264,18 +264,23 @@ static int __init parse_gart_mem(char *p) } early_param("gart_fix_e820", parse_gart_mem); +/* + * With kexec/kdump, if the first kernel doesn't shut down the GART and the + * second kernel allocates a different GART region, there might be two + * overlapping GART regions present: + * + * - the first still used by the GART initialized in the first kernel. + * - (sub-)set of it used as normal RAM by the second kernel. + * + * which leads to memory corruptions and a kernel panic eventually. + * + * This can also happen if the BIOS has forgotten to mark the GART region + * as reserved. + * + * Try to update the e820 map to mark that new region as reserved. + */ void __init early_gart_iommu_check(void) { - /* - * in case it is enabled before, esp for kexec/kdump, - * previous kernel already enable that. memset called - * by allocate_aperture/__alloc_bootmem_nopanic cause restart. - * or second kernel have different position for GART hole. and new - * kernel could use hole as RAM that is still used by GART set by - * first kernel - * or BIOS forget to put that in reserved. - * try to update e820 to make that region as reserved. 
- */ u32 agp_aper_order = 0; int i, fix, slot, valid_agp = 0; u32 ctl; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 32b2b7a41ef5..b7bcdd781651 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -44,6 +44,7 @@ #include <asm/mpspec.h> #include <asm/i8259.h> #include <asm/proto.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/io_apic.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index e84c9eb4e5b4..0005c284a5c5 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -8,6 +8,7 @@ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and * James Cleverdon. */ +#include <linux/acpi.h> #include <linux/errno.h> #include <linux/threads.h> #include <linux/cpumask.h> @@ -16,13 +17,13 @@ #include <linux/ctype.h> #include <linux/hardirq.h> #include <linux/export.h> + #include <asm/smp.h> -#include <asm/apic.h> #include <asm/ipi.h> +#include <asm/apic.h> +#include <asm/apic_flat_64.h> #include <asm/jailhouse_para.h> -#include <linux/acpi.h> - static struct apic apic_physflat; static struct apic apic_flat; diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 652e7ffa9b9d..3173e07d3791 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <asm/irqdomain.h> #include <asm/hw_irq.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/i8259.h> #include <asm/desc.h> diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 391f358ebb4c..a555da094157 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1079,7 +1079,7 @@ late_initcall(uv_init_heartbeat); #endif /* !CONFIG_HOTPLUG_CPU */ /* Direct Legacy VGA I/O traffic to designated IOH */ -int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) +static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags) { int domain, bus, rc; @@ -1148,7 +1148,7 @@ static void get_mn(struct mn *mnp) mnp->m_shift = mnp->m_val ? 
64 - mnp->m_val : 0; } -void __init uv_init_hub_info(struct uv_hub_info_s *hi) +static void __init uv_init_hub_info(struct uv_hub_info_s *hi) { union uvh_node_id_u node_id; struct mn mn; diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index 72adf6c335dc..168543d077d7 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -29,7 +29,8 @@ # include "asm-offsets_64.c" #endif -void common(void) { +static void __used common(void) +{ BLANK(); OFFSET(TASK_threadsp, task_struct, thread.sp); #ifdef CONFIG_STACKPROTECTOR diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 1979a76bfadd..5136e6818da8 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -9,6 +9,7 @@ #include <linux/memblock.h> #include <asm/proto.h> +#include <asm/setup.h> /* * Some BIOSes seem to corrupt the low 64k of memory during events @@ -136,7 +137,7 @@ void __init setup_bios_corruption_check(void) } -void check_for_bios_corruption(void) +static void check_for_bios_corruption(void) { int i; int corruption = 0; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index eeea634bee0a..69f6bbb41be0 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -15,6 +15,7 @@ #include <asm/smp.h> #include <asm/pci-direct.h> #include <asm/delay.h> +#include <asm/debugreg.h> #ifdef CONFIG_X86_64 # include <asm/mmconfig.h> diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 7eba34df54c3..804c49493938 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -12,6 +12,7 @@ #include <linux/ktime.h> #include <linux/math64.h> #include <linux/percpu.h> +#include <linux/cpufreq.h> #include <linux/smp.h> #include "cpu.h" diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 77bf22546ddd..8654b8b0c848 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -32,6 +32,8 @@ #include <asm/e820/api.h> #include <asm/hypervisor.h> +#include "cpu.h" + static void __init spectre_v2_select_mitigation(void); static void __init ssb_select_mitigation(void); static void __init l1tf_select_mitigation(void); diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index dc1b9342e9c4..c4d1023fb0ab 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -17,6 +17,7 @@ #include <linux/pci.h> #include <asm/cpufeature.h> +#include <asm/cacheinfo.h> #include <asm/amd_nb.h> #include <asm/smp.h> diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 4a2fb59a372e..89298c83de53 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -23,6 +23,7 @@ #include <linux/string.h> #include <asm/amd_nb.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/msr.h> @@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = [0 ... MAX_NR_BANKS - 1] = { [0 ... 
NR_BLOCKS - 1] = -1 } }; -const char *smca_get_name(enum smca_bank_types t) +static const char *smca_get_name(enum smca_bank_types t) { if (t >= N_SMCA_BANK_TYPES) return NULL; @@ -824,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc) mce_log(&m); } -asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void) +asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs) { entering_irq(); trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index ad8f62a0c706..672c7225cb1b 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -684,7 +684,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count); * errors here. However this would be quite problematic -- * we would need to reimplement the Monarch handling and * it would mess up the exclusion between exception handler - * and poll hander -- * so we skip this for now. + * and poll handler -- * so we skip this for now. * These cases should not happen anyways, or only when the CPU * is already totally * confused. In this case it's likely it will * not fully execute the machine check handler either. diff --git a/arch/x86/kernel/cpu/mce/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c index df01ff8513a5..10a3b0599300 100644 --- a/arch/x86/kernel/cpu/mce/therm_throt.c +++ b/arch/x86/kernel/cpu/mce/therm_throt.c @@ -25,6 +25,7 @@ #include <linux/cpu.h> #include <asm/processor.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/msr.h> @@ -392,7 +393,7 @@ static void unexpected_thermal_interrupt(void) static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; -asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r) +asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs) { entering_irq(); trace_thermal_apic_entry(THERMAL_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/mce/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c index 10586a85c23f..28812cc15300 100644 --- a/arch/x86/kernel/cpu/mce/threshold.c +++ b/arch/x86/kernel/cpu/mce/threshold.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <asm/irq_vectors.h> +#include <asm/traps.h> #include <asm/apic.h> #include <asm/mce.h> #include <asm/trace/irq_vectors.h> @@ -20,7 +21,7 @@ static void default_threshold_interrupt(void) void (*mce_threshold_vector)(void) = default_threshold_interrupt; -asmlinkage __visible void __irq_entry smp_threshold_interrupt(void) +asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs) { entering_irq(); trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR); diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index a4d74d616222..0277267239f2 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -5,9 +5,10 @@ #include <linux/cpu.h> #include <asm/pat.h> +#include <asm/apic.h> #include <asm/processor.h> -#include <asm/apic.h> +#include "cpu.h" struct cpuid_bit { u16 feature; diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 71ca064e3794..8f6c784141d1 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c @@ -10,6 +10,8 @@ #include <asm/pat.h> #include <asm/processor.h> +#include "cpu.h" + /* leaf 0xb SMT level */ #define SMT_LEVEL 0 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index f631a3f15587..c8b07d8ea5a2 100644 --- a/arch/x86/kernel/crash.c +++ 
b/arch/x86/kernel/crash.c @@ -37,6 +37,7 @@ #include <asm/reboot.h> #include <asm/virtext.h> #include <asm/intel_pt.h> +#include <asm/crash.h> /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index eb8ab3915268..22369dd5de3b 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, /** * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the - * memory with the encryption mask set to accomodate kdump on SME-enabled + * memory with the encryption mask set to accommodate kdump on SME-enabled * machines. */ ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 7299dcbf8e85..8d85e00bb40a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -23,6 +23,7 @@ #include <asm/pci_x86.h> #include <asm/setup.h> #include <asm/i8259.h> +#include <asm/prom.h> __initdata u64 initial_dtb; char __initdata cmd_line[COMMAND_LINE_SIZE]; diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 87a57b7642d3..cd3956fc8158 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -811,7 +811,7 @@ void fpu__resume_cpu(void) * * Note: does not work for compacted buffers. */ -void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) +static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask) { int feature_nr = fls64(xstate_feature_mask) - 1; diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index 108c48d0d40e..1b2ee55a2dfb 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -19,6 +19,7 @@ #include <asm/pci_x86.h> #include <asm/reboot.h> #include <asm/setup.h> +#include <asm/jailhouse_para.h> static __initdata struct jailhouse_setup_data setup_data; static unsigned int precalibrated_tsc_khz; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index d5f88fe57064..4ba75afba527 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -751,7 +751,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used void *trampoline_handler(struct pt_regs *regs) +static __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7d31192296a8..90ae0ca51083 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -22,6 +22,8 @@ #include <linux/utsname.h> #include <linux/stackprotector.h> #include <linux/cpuidle.h> +#include <linux/acpi.h> +#include <linux/elf-randomize.h> #include <trace/events/power.h> #include <linux/hw_breakpoint.h> #include <asm/cpu.h> @@ -39,6 +41,7 @@ #include <asm/desc.h> #include <asm/prctl.h> #include <asm/spec-ctrl.h> +#include <asm/proto.h> #include "process.h" @@ -793,7 +796,7 @@ unsigned long get_wchan(struct task_struct *p) unsigned long start, bottom, top, sp, fp, ip, ret = 0; int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) + if (p == current || p->state == TASK_RUNNING) return 0; if (!try_get_task_stack(p)) diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 60783b318936..721d02bd2d0d 100644 
--- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -646,7 +646,7 @@ void set_personality_64bit(void) /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that - 32bit childs are affected again. */ + 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 736348ead421..8451f38ad399 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c @@ -7,6 +7,7 @@ #include <linux/irq.h> #include <asm/hpet.h> +#include <asm/setup.h> #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI) diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c index 623965e86b65..fa51723571c8 100644 --- a/arch/x86/kernel/sysfb_efi.c +++ b/arch/x86/kernel/sysfb_efi.c @@ -19,12 +19,15 @@ #include <linux/dmi.h> #include <linux/err.h> +#include <linux/efi.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/screen_info.h> #include <video/vga.h> + +#include <asm/efi.h> #include <asm/sysfb.h> enum { diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c index 5bd30c442794..496748ed266a 100644 --- a/arch/x86/kernel/tracepoint.c +++ b/arch/x86/kernel/tracepoint.c @@ -10,6 +10,8 @@ #include <asm/hw_irq.h> #include <asm/desc.h> +#include <asm/trace/exceptions.h> +#include <asm/trace/irq_vectors.h> DEFINE_STATIC_KEY_FALSE(trace_pagefault_key); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 41d6f7081ff7..4d39f731bc33 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2494,7 +2494,7 @@ static __init int alloc_kvm_area(void) * vmcs->revision_id to KVM_EVMCS_VERSION instead of * revision_id reported by MSR_IA32_VMX_BASIC. * - * However, even though not explictly documented by + * However, even though not explicitly documented by * TLFS, VMXArea passed as VMXON argument should * still be marked with revision_id reported by * physical CPU. diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 61bc7d1800d7..e44fe1a63f72 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -1712,7 +1712,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, } else if (!(in_flag & CPA_PAGES_ARRAY)) { /* * in_flag of CPA_PAGES_ARRAY implies it is aligned. 
- * No need to cehck in that case + * No need to check in that case */ if (*addr & ~PAGE_MASK) { *addr &= PAGE_MASK; diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index ce4b06733c09..b3233b1835ea 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c @@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value) } static void ce4100_serial_fixup(int port, struct uart_port *up, - u32 *capabilites) + u32 *capabilities) { #ifdef CONFIG_EARLY_PRINTK /* @@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up, up->serial_in = ce4100_mem_serial_in; up->serial_out = ce4100_mem_serial_out; - *capabilites |= (1 << 12); + *capabilities |= (1 << 12); } static __init void sdv_serial_fixup(void) diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c index dbfc5cf2aa93..96f438d4b026 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c @@ -1,5 +1,5 @@ /* - * platform_bcm43xx.c: bcm43xx platform data initilization file + * platform_bcm43xx.c: bcm43xx platform data initialization file * * (C) Copyright 2016 Intel Corporation * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c index 27186ad654c9..7a7fc54c449b 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c @@ -1,5 +1,5 @@ /* - * spidev platform data initilization file + * spidev platform data initialization file * * (C) Copyright 2014, 2016 Intel Corporation * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com> diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c index 429a94192671..8344d5a928c9 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c @@ -1,5 +1,5 @@ /* - * PCAL9555a platform data initilization file + * PCAL9555a platform data initialization file * * Copyright (C) 2016, Intel Corporation * diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c index 2e569d10f2d0..a9f2e888e135 100644 --- a/arch/x86/platform/intel/iosf_mbi.c +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -13,7 +13,7 @@ * * * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a - * mailbox interface (MBI) to communicate with mutiple devices. This + * mailbox interface (MBI) to communicate with multiple devices. This * driver implements access to this interface for those platforms that can * enumerate the device using PCI. */ diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c index 7fa8b3b53bc0..d9b8a1c1ab0f 100644 --- a/arch/x86/platform/olpc/olpc-xo1-sci.c +++ b/arch/x86/platform/olpc/olpc-xo1-sci.c @@ -109,7 +109,7 @@ static void detect_lid_state(void) * the edge detector hookup on the gpio inputs on the geode is * odd, to say the least. See http://dev.laptop.org/ticket/5703 * for details, but in a nutshell: we don't use the edge - * detectors. instead, we make use of an anomoly: with the both + * detectors. 
instead, we make use of an anomaly: with the both * edge detectors turned off, we still get an edge event on a * positive edge transition. to take advantage of this, we use the * front-end inverter to ensure that that's the edge we're always diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 5f64f30873e2..b21a932c220c 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu) } } -/* Ping non-responding CPU's attemping to force them into the NMI handler */ +/* Ping non-responding CPU's attempting to force them into the NMI handler */ static void uv_nmi_nr_cpus_ping(void) { int cpu; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 075ed47993bb..d5f303c0e656 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, * The remap information (which mfn remap to which pfn) is contained in the * to be remapped memory itself in a linked list anchored at xen_remap_mfn. * This scheme allows to remap the different chunks in arbitrary order while - * the resulting mapping will be independant from the order. + * the resulting mapping will be independent from the order. */ void __init xen_remap_memory(void) { |
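A short aside on the cmpxchg.h hunk above: the comment it corrects ("Non-existent functions to indicate usage errors at link time") describes an idiom worth spelling out. Below is a standalone sketch of that idiom under made-up names (demo_xchg_wrong_size, DEMO_XCHG); it is not the kernel's macro, and it relies on the optimizer discarding the impossible switch branch before link time, so build it with -O2 as the kernel effectively does.

/*
 * Sketch of the link-time-error idiom referenced in the cmpxchg.h comment.
 * Build: gcc -O2 -Wall demo_xchg.c   (optimization is needed so the dead
 * "default" branch is removed before the linker ever sees it).
 */
#include <stdio.h>

/* Deliberately declared but never defined: reaching a call to it is a link error. */
extern void demo_xchg_wrong_size(void);

#define DEMO_XCHG(ptr, val)                                             \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        switch (sizeof(*(ptr))) {                                       \
        case 4:                                                         \
        case 8:                                                         \
                __ret = __sync_lock_test_and_set((ptr), (val));         \
                break;                                                  \
        default:                                                        \
                /* unsupported operand size: unresolved symbol */       \
                demo_xchg_wrong_size();                                 \
                __ret = (val);                                          \
                break;                                                  \
        }                                                               \
        __ret;                                                          \
})

int main(void)
{
        long v = 1;
        long old = DEMO_XCHG(&v, 2L);   /* 4- or 8-byte operand: links fine */

        /*
         * char c = 0;
         * DEMO_XCHG(&c, (char)1);      // 1-byte operand: the default branch
         *                              // survives and the link fails with an
         *                              // undefined reference to
         *                              // demo_xchg_wrong_size
         */
        printf("old=%ld new=%ld\n", old, v);
        return 0;
}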