-rw-r--r--   arch/x86/entry/calling.h            |  5
-rw-r--r--   arch/x86/entry/entry_32.S           | 33
-rw-r--r--   arch/x86/entry/entry_64.S           | 40
-rw-r--r--   arch/x86/include/asm/entry_arch.h   |  2
-rw-r--r--   arch/x86/include/asm/hw_irq.h       |  1
-rw-r--r--   arch/x86/include/asm/idtentry.h     | 48
-rw-r--r--   arch/x86/include/asm/irq.h          |  2
-rw-r--r--   arch/x86/include/asm/traps.h        |  3
-rw-r--r--   arch/x86/kernel/apic/apic.c         | 31
-rw-r--r--   arch/x86/kernel/idt.c               |  2
-rw-r--r--   arch/x86/kernel/irq.c               | 14
11 files changed, 103 insertions, 78 deletions
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 1c7f13bb6728..98da0d3c0b1a 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -341,7 +341,10 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm

-#endif /* CONFIG_X86_64 */
+#else /* CONFIG_X86_64 */
+# undef UNWIND_HINT_IRET_REGS
+# define UNWIND_HINT_IRET_REGS
+#endif /* !CONFIG_X86_64 */

 .macro STACKLEAK_ERASE
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 158a5250ebc5..40092c81dcb8 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1215,40 +1215,15 @@ SYM_FUNC_END(entry_INT80_32)
 #endif
 .endm

-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
-        .align 8
-SYM_CODE_START(irq_entries_start)
-    vector=FIRST_EXTERNAL_VECTOR
-    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-        pushl $(~vector+0x80)           /* Note: always in signed byte range */
-    vector=vector+1
-        jmp common_interrupt
-        .align 8
-    .endr
-SYM_CODE_END(irq_entries_start)
-
 #ifdef CONFIG_X86_LOCAL_APIC
-        .align 8
-SYM_CODE_START(spurious_entries_start)
-    vector=FIRST_SYSTEM_VECTOR
-    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
-        pushl $(~vector+0x80)           /* Note: always in signed byte range */
-    vector=vector+1
-        jmp common_spurious
-        .align 8
-    .endr
-SYM_CODE_END(spurious_entries_start)
-
 SYM_CODE_START_LOCAL(common_spurious)
         ASM_CLAC
-        addl $-0x80, (%esp)             /* Adjust vector into the [-256, -1] range */
         SAVE_ALL switch_stacks=1
         ENCODE_FRAME_POINTER
         TRACE_IRQS_OFF
         movl %esp, %eax
+        movl PT_ORIG_EAX(%esp), %edx    /* get the vector from stack */
+        movl $-1, PT_ORIG_EAX(%esp)     /* no syscall to restart */
         call smp_spurious_interrupt
         jmp ret_from_intr
 SYM_CODE_END(common_spurious)
@@ -1261,12 +1236,12 @@ SYM_CODE_END(common_spurious)
         .p2align CONFIG_X86_L1_CACHE_SHIFT
 SYM_CODE_START_LOCAL(common_interrupt)
         ASM_CLAC
-        addl $-0x80, (%esp)             /* Adjust vector into the [-256, -1] range */
-
         SAVE_ALL switch_stacks=1
         ENCODE_FRAME_POINTER
         TRACE_IRQS_OFF
         movl %esp, %eax
+        movl PT_ORIG_EAX(%esp), %edx    /* get the vector from stack */
+        movl $-1, PT_ORIG_EAX(%esp)     /* no syscall to restart */
         call do_IRQ
         jmp ret_from_intr
 SYM_CODE_END(common_interrupt)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 76993591fdf6..e7434cda9a38 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -358,34 +358,6 @@ SYM_CODE_START(ret_from_fork)
 SYM_CODE_END(ret_from_fork)
 .popsection

-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
-        .align 8
-SYM_CODE_START(irq_entries_start)
-    vector=FIRST_EXTERNAL_VECTOR
-    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-        UNWIND_HINT_IRET_REGS
-        pushq $(~vector+0x80)           /* Note: always in signed byte range */
-        jmp common_interrupt
-        .align 8
-        vector=vector+1
-    .endr
-SYM_CODE_END(irq_entries_start)
-
-        .align 8
-SYM_CODE_START(spurious_entries_start)
-    vector=FIRST_SYSTEM_VECTOR
-    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
-        UNWIND_HINT_IRET_REGS
-        pushq $(~vector+0x80)           /* Note: always in signed byte range */
-        jmp common_spurious
-        .align 8
-        vector=vector+1
-    .endr
-SYM_CODE_END(spurious_entries_start)
-
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
         pushq %rax
@@ -755,13 +727,14 @@ _ASM_NOKPROBE(interrupt_entry)

 /* Interrupt entry/exit. */

 /*
- * The interrupt stubs push (~vector+0x80) onto the stack and
+ * The interrupt stubs push vector onto the stack and
  * then jump to common_spurious/interrupt.
  */
 SYM_CODE_START_LOCAL(common_spurious)
-        addq $-0x80, (%rsp)             /* Adjust vector to [-256, -1] range */
         call interrupt_entry
         UNWIND_HINT_REGS indirect=1
+        movq ORIG_RAX(%rdi), %rsi       /* get vector from stack */
+        movq $-1, ORIG_RAX(%rdi)        /* no syscall to restart */
         call smp_spurious_interrupt     /* rdi points to pt_regs */
         jmp ret_from_intr
 SYM_CODE_END(common_spurious)
@@ -770,10 +743,11 @@ _ASM_NOKPROBE(common_spurious)
 /* common_interrupt is a hotpath. Align it */
         .p2align CONFIG_X86_L1_CACHE_SHIFT
 SYM_CODE_START_LOCAL(common_interrupt)
-        addq $-0x80, (%rsp)             /* Adjust vector to [-256, -1] range */
         call interrupt_entry
         UNWIND_HINT_REGS indirect=1
-        call do_IRQ                     /* rdi points to pt_regs */
+        movq ORIG_RAX(%rdi), %rsi       /* get vector from stack */
+        movq $-1, ORIG_RAX(%rdi)        /* no syscall to restart */
+        call do_IRQ                     /* rdi points to pt_regs */
         /* 0(%rsp): old RSP */
 ret_from_intr:
         DISABLE_INTERRUPTS(CLBR_ANY)
@@ -1022,7 +996,7 @@ apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt
 #endif

 apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt
-apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt
+apicinterrupt SPURIOUS_APIC_VECTOR spurious_apic_interrupt smp_spurious_apic_interrupt

 #ifdef CONFIG_IRQ_WORK
 apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 416422762845..cd57ce6134c9 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -35,7 +35,7 @@ BUILD_INTERRUPT(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR)

 BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+BUILD_INTERRUPT(spurious_apic_interrupt,SPURIOUS_APIC_VECTOR)
 BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)

 #ifdef CONFIG_IRQ_WORK
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 4154bc5f6a4e..0ffe80792b2d 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -39,6 +39,7 @@ extern asmlinkage void irq_work_interrupt(void);
 extern asmlinkage void uv_bau_message_intr1(void);

 extern asmlinkage void spurious_interrupt(void);
+extern asmlinkage void spurious_apic_interrupt(void);
 extern asmlinkage void thermal_interrupt(void);
 extern asmlinkage void reschedule_interrupt(void);

diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 36e5b929389b..2fc0dc8af2a4 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -347,6 +347,54 @@ __visible noinstr void func(struct pt_regs *regs,                      \
 #define DECLARE_IDTENTRY_XEN(vector, func)                             \
         idtentry vector asm_exc_xen##func exc_##func has_error_code=0

+/*
+ * ASM code to emit the common vector entry stubs where each stub is
+ * packed into 8 bytes.
+ *
+ * Note, that the 'pushq imm8' is emitted via '.byte 0x6a, vector' because
+ * GCC treats the local vector variable as unsigned int and would expand
+ * all vectors above 0x7F to a 5 byte push. The original code did an
+ * adjustment of the vector number to be in the signed byte range to avoid
+ * this. While clever it's mindboggling counterintuitive and requires the
+ * odd conversion back to a real vector number in the C entry points. Using
+ * .byte achieves the same thing and the only fixup needed in the C entry
+ * point is to mask off the bits above bit 7 because the push is sign
+ * extending.
+ */
+        .align 8
+SYM_CODE_START(irq_entries_start)
+    vector=FIRST_EXTERNAL_VECTOR
+    pos = .
+    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+        UNWIND_HINT_IRET_REGS
+        .byte 0x6a, vector
+        jmp common_interrupt
+        nop
+        /* Ensure that the above is 8 bytes max */
+        . = pos + 8
+    pos=pos+8
+    vector=vector+1
+    .endr
+SYM_CODE_END(irq_entries_start)
+
+#ifdef CONFIG_X86_LOCAL_APIC
+        .align 8
+SYM_CODE_START(spurious_entries_start)
+    vector=FIRST_SYSTEM_VECTOR
+    pos = .
+    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+        UNWIND_HINT_IRET_REGS
+        .byte 0x6a, vector
+        jmp common_spurious
+        nop
+        /* Ensure that the above is 8 bytes max */
+        . = pos + 8
+    pos=pos+8
+    vector=vector+1
+    .endr
+SYM_CODE_END(spurious_entries_start)
+#endif
+
 #endif /* __ASSEMBLY__ */

 /*
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 72fba0eeeb30..74690a373c58 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,7 +36,7 @@ extern void native_init_IRQ(void);

 extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);

-extern __visible void do_IRQ(struct pt_regs *regs);
+extern __visible void do_IRQ(struct pt_regs *regs, unsigned long vector);

 extern void init_ISA_irqs(void);

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index d7de360eec74..32b2becf7806 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -41,8 +41,9 @@ asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
 #endif

 void smp_apic_timer_interrupt(struct pt_regs *regs);
-void smp_spurious_interrupt(struct pt_regs *regs);
 void smp_error_interrupt(struct pt_regs *regs);
+void smp_spurious_apic_interrupt(struct pt_regs *regs);
+void smp_spurious_interrupt(struct pt_regs *regs, unsigned long vector);
 asmlinkage void smp_irq_move_cleanup_interrupt(void);

 #ifdef CONFIG_VMAP_STACK
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 4b1d31be50b4..6c2b807a7eae 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2120,15 +2120,29 @@ void __init register_lapic_address(unsigned long address)
  * Local APIC interrupts
  */

-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
+/**
+ * smp_spurious_interrupt - Catch all for interrupts raised on unused vectors
+ * @regs:   Pointer to pt_regs on stack
+ * @vector: The vector number is in the lower 8 bits
+ *
+ * This is invoked from ASM entry code to catch all interrupts which
+ * trigger on an entry which is routed to the common_spurious idtentry
+ * point.
+ *
+ * Also called from smp_spurious_apic_interrupt().
  */
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs,
+                                                  unsigned long vector)
 {
-        u8 vector = ~regs->orig_ax;
         u32 v;

         entering_irq();
+        /*
+         * The push in the entry ASM code which stores the vector number on
+         * the stack in the error code slot is sign expanding. Just use the
+         * lower 8 bits.
+         */
+        vector &= 0xFF;
         trace_spurious_apic_entry(vector);
         inc_irq_stat(irq_spurious_count);

@@ -2149,11 +2163,11 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
          */
         v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
         if (v & (1 << (vector & 0x1f))) {
-                pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
+                pr_info("Spurious interrupt (vector 0x%02lx) on CPU#%d. Acked\n",
                         vector, smp_processor_id());
                 ack_APIC_irq();
         } else {
-                pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
+                pr_info("Spurious interrupt (vector 0x%02lx) on CPU#%d. Not pending!\n",
                         vector, smp_processor_id());
         }
 out:
@@ -2161,6 +2175,11 @@ out:
         exiting_irq();
 }

+__visible void smp_spurious_apic_interrupt(struct pt_regs *regs)
+{
+        smp_spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
+}
+
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index ddb11154aeee..20408e31c18d 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -145,7 +145,7 @@ static const __initconst struct idt_data apic_idts[] = {
 #ifdef CONFIG_X86_UV
         INTG(UV_BAU_MESSAGE, uv_bau_message_intr1),
 #endif
-        INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt),
+        INTG(SPURIOUS_APIC_VECTOR, spurious_apic_interrupt),
         INTG(ERROR_APIC_VECTOR, error_interrupt),
 #endif
 };
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 252065d32ab5..c7669363251a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -227,14 +227,18 @@ u64 arch_irq_stat(void)
  * SMP cross-CPU interrupts have their own specific
  * handlers).
  */
-__visible void __irq_entry do_IRQ(struct pt_regs *regs)
+__visible void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long vector)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
-        struct irq_desc * desc;
-        /* high bit used in ret_from_ code */
-        unsigned vector = ~regs->orig_ax;
+        struct irq_desc *desc;

         entering_irq();
+        /*
+         * The push in the entry ASM code which stores the vector number on
+         * the stack in the error code slot is sign expanding. Just use the
+         * lower 8 bits.
+         */
+        vector &= 0xFF;

         /* entering_irq() tells RCU that we're not quiescent. Check it. */
         RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
@@ -249,7 +253,7 @@ __visible void __irq_entry do_IRQ(struct pt_regs *regs)
                 ack_APIC_irq();

                 if (desc == VECTOR_UNUSED) {
-                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
+                        pr_emerg_ratelimited("%s: %d.%lu No irq handler for vector\n",
                                              __func__, smp_processor_id(),
                                              vector);
                 } else {