author     Linus Torvalds <torvalds@linux-foundation.org>   2019-11-26 21:42:40 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-11-26 21:42:40 +0300
commit     1d87200446f1d10dfe9672ca8edb027a82612f8c (patch)
tree       45cc71ff8e4d1bcde9b07ce8203277f2b8982941 /arch/x86
parent     5c4a1c090d8676d8b84e2ac40671602be44afdfc (diff)
parent     f01ec4fca8207e31b59a010c3de679c833f3a877 (diff)
download   linux-1d87200446f1d10dfe9672ca8edb027a82612f8c.tar.xz
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
"The main changes in this cycle were:
- Cross-arch changes to move the linker sections for NOTES and
EXCEPTION_TABLE into the RO_DATA area, where they belong on most
architectures. (Kees Cook)
- Switch the x86 linker fill byte from 0x90 (NOP) to 0xcc (INT3), to
trap jumps into the middle of those padding areas instead of
sliding execution. (Kees Cook)
- A thorough cleanup of symbol definitions within x86 assembler code.
The rather randomly named macros got streamlined around a
(hopefully) straightforward naming scheme:
SYM_START(name, linkage, align...)
SYM_END(name, sym_type)
SYM_FUNC_START(name)
SYM_FUNC_END(name)
SYM_CODE_START(name)
SYM_CODE_END(name)
SYM_DATA_START(name)
SYM_DATA_END(name)
etc - with about three times as many variants of these basic primitives,
covering label, local-symbol and attribute forms, expressed via postfixes.
No change in functionality intended. (Jiri Slaby)
- Misc other changes, cleanups and smaller fixes"
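The first two items above are linker-script changes. A minimal sketch of the idea in
vmlinux.lds.S terms (illustrative only: the section names, alignment values and fill
constants below are assumptions, not the exact upstream hunks):

    /* Before the series: each architecture placed these sections itself,
     * and the exception table often ended up outside the read-only area. */
    RO_DATA(PAGE_SIZE)
    EXCEPTION_TABLE(16)
    NOTES

    /* After: RO_DATA() pulls in NOTES and EXCEPTION_TABLE, so both land
     * in the read-only segment with no per-architecture boilerplate. */
    RO_DATA(PAGE_SIZE)

    /* x86 fill-byte change: padding the linker inserts between input
     * sections becomes INT3 (0xcc) instead of NOP (0x90), so a stray jump
     * into the padding traps instead of sliding into the next symbol. */
    .text : AT(ADDR(.text) - LOAD_OFFSET) {
            ...
    } :text =0xcccc        /* previously a 0x90-based fill */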
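The symbol-annotation cleanup (the third item above) is what most of the diff below
mechanically applies. A before/after sketch with placeholder symbol names (my_func,
my_helper and my_flag are made up for illustration, not symbols from this series):

    /* Old style, <linux/linkage.h> ENTRY()/ENDPROC() */
    ENTRY(my_func)                   # global, C-callable function
            ret
    ENDPROC(my_func)

    /* New style: linkage and symbol type are explicit in the macro name */
    SYM_FUNC_START(my_func)          # global function
            ret
    SYM_FUNC_END(my_func)

    SYM_FUNC_START_LOCAL(my_helper)  # file-local helper, was a bare label
            ret
    SYM_FUNC_END(my_helper)

    SYM_DATA(my_flag, .quad 0)       # data object, was "my_flag: .quad 0"

Functions with the C calling convention get SYM_FUNC_*, other code gets SYM_CODE_*,
and data objects get SYM_DATA_*; the _LOCAL and _NOALIGN postfixes select linkage and
alignment, which is the pattern visible throughout the conversions below.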
* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (67 commits)
x86/entry/64: Remove pointless jump in paranoid_exit
x86/entry/32: Remove unused resume_userspace label
x86/build/vdso: Remove meaningless CFLAGS_REMOVE_*.o
m68k: Convert missed RODATA to RO_DATA
x86/vmlinux: Use INT3 instead of NOP for linker fill bytes
x86/mm: Report actual image regions in /proc/iomem
x86/mm: Report which part of kernel image is freed
x86/mm: Remove redundant address-of operators on addresses
xtensa: Move EXCEPTION_TABLE to RO_DATA segment
powerpc: Move EXCEPTION_TABLE to RO_DATA segment
parisc: Move EXCEPTION_TABLE to RO_DATA segment
microblaze: Move EXCEPTION_TABLE to RO_DATA segment
ia64: Move EXCEPTION_TABLE to RO_DATA segment
h8300: Move EXCEPTION_TABLE to RO_DATA segment
c6x: Move EXCEPTION_TABLE to RO_DATA segment
arm64: Move EXCEPTION_TABLE to RO_DATA segment
alpha: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Move EXCEPTION_TABLE to RO_DATA segment
x86/vmlinux: Actually use _etext for the end of the text segment
vmlinux.lds.h: Allow EXCEPTION_TABLE to live in RO_DATA
...
Diffstat (limited to 'arch/x86')
132 files changed, 1059 insertions, 1027 deletions
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index c30a9b642a86..95410d6ee2ff 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -67,6 +67,7 @@ clean-files += cpustr.h KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) GCOV_PROFILE := n UBSAN_SANITIZE := n diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index fad3b18e2cc3..aa976adb7094 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -38,6 +38,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += -Wno-pointer-sign +KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ GCOV_PROFILE := n diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S index 257e341fd2c8..ed6c351d34ed 100644 --- a/arch/x86/boot/compressed/efi_stub_32.S +++ b/arch/x86/boot/compressed/efi_stub_32.S @@ -24,7 +24,7 @@ */ .text -ENTRY(efi_call_phys) +SYM_FUNC_START(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found @@ -77,7 +77,7 @@ ENTRY(efi_call_phys) movl saved_return_addr(%edx), %ecx pushl %ecx ret -ENDPROC(efi_call_phys) +SYM_FUNC_END(efi_call_phys) .previous .data diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index bff9ab7c6317..593913692d16 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -23,7 +23,7 @@ .code64 .text -ENTRY(efi64_thunk) +SYM_FUNC_START(efi64_thunk) push %rbp push %rbx @@ -97,14 +97,14 @@ ENTRY(efi64_thunk) pop %rbx pop %rbp ret -ENDPROC(efi64_thunk) +SYM_FUNC_END(efi64_thunk) -ENTRY(efi_exit32) +SYM_FUNC_START_LOCAL(efi_exit32) movq func_rt_ptr(%rip), %rax push %rax mov %rdi, %rax ret -ENDPROC(efi_exit32) +SYM_FUNC_END(efi_exit32) .code32 /* @@ -112,7 +112,7 @@ ENDPROC(efi_exit32) * * The stack should represent the 32-bit calling convention. 
*/ -ENTRY(efi_enter32) +SYM_FUNC_START_LOCAL(efi_enter32) movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es @@ -172,20 +172,23 @@ ENTRY(efi_enter32) btsl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 lret -ENDPROC(efi_enter32) +SYM_FUNC_END(efi_enter32) .data .balign 8 - .global efi32_boot_gdt -efi32_boot_gdt: .word 0 - .quad 0 +SYM_DATA_START(efi32_boot_gdt) + .word 0 + .quad 0 +SYM_DATA_END(efi32_boot_gdt) + +SYM_DATA_START_LOCAL(save_gdt) + .word 0 + .quad 0 +SYM_DATA_END(save_gdt) -save_gdt: .word 0 - .quad 0 -func_rt_ptr: .quad 0 +SYM_DATA_LOCAL(func_rt_ptr, .quad 0) - .global efi_gdt64 -efi_gdt64: +SYM_DATA_START(efi_gdt64) .word efi_gdt64_end - efi_gdt64 .long 0 /* Filled out by user */ .word 0 @@ -194,4 +197,4 @@ efi_gdt64: .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ -efi_gdt64_end: +SYM_DATA_END_LABEL(efi_gdt64, SYM_L_LOCAL, efi_gdt64_end) diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index 5e30eaaf8576..f2dfd6d083ef 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -61,7 +61,7 @@ .hidden _egot __HEAD -ENTRY(startup_32) +SYM_FUNC_START(startup_32) cld /* * Test KEEP_SEGMENTS flag to see if the bootloader is asking @@ -142,14 +142,14 @@ ENTRY(startup_32) */ leal .Lrelocated(%ebx), %eax jmp *%eax -ENDPROC(startup_32) +SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_STUB /* * We don't need the return address, so set up the stack so efi_main() can find * its arguments. */ -ENTRY(efi_pe_entry) +SYM_FUNC_START(efi_pe_entry) add $0x4, %esp call 1f @@ -174,9 +174,9 @@ ENTRY(efi_pe_entry) pushl %eax pushl %ecx jmp 2f /* Skip efi_config initialization */ -ENDPROC(efi_pe_entry) +SYM_FUNC_END(efi_pe_entry) -ENTRY(efi32_stub_entry) +SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp popl %ecx popl %edx @@ -205,11 +205,11 @@ fail: movl BP_code32_start(%esi), %eax leal startup_32(%eax), %eax jmp *%eax -ENDPROC(efi32_stub_entry) +SYM_FUNC_END(efi32_stub_entry) #endif .text -.Lrelocated: +SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) /* * Clear BSS (stack is currently empty) @@ -260,6 +260,7 @@ ENDPROC(efi32_stub_entry) */ xorl %ebx, %ebx jmp *%eax +SYM_FUNC_END(.Lrelocated) #ifdef CONFIG_EFI_STUB .data diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index d98cd483377e..58a512e33d8d 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -45,7 +45,7 @@ __HEAD .code32 -ENTRY(startup_32) +SYM_FUNC_START(startup_32) /* * 32bit entry is 0 and it is ABI so immutable! * If we come here directly from a bootloader, @@ -222,11 +222,11 @@ ENTRY(startup_32) /* Jump from 32bit compatibility mode into 64bit mode. */ lret -ENDPROC(startup_32) +SYM_FUNC_END(startup_32) #ifdef CONFIG_EFI_MIXED .org 0x190 -ENTRY(efi32_stub_entry) +SYM_FUNC_START(efi32_stub_entry) add $0x4, %esp /* Discard return address */ popl %ecx popl %edx @@ -245,12 +245,12 @@ ENTRY(efi32_stub_entry) movl %eax, efi_config(%ebp) jmp startup_32 -ENDPROC(efi32_stub_entry) +SYM_FUNC_END(efi32_stub_entry) #endif .code64 .org 0x200 -ENTRY(startup_64) +SYM_CODE_START(startup_64) /* * 64bit entry is 0x200 and it is ABI so immutable! * We come here either from startup_32 or directly from a @@ -442,11 +442,12 @@ trampoline_return: */ leaq .Lrelocated(%rbx), %rax jmp *%rax +SYM_CODE_END(startup_64) #ifdef CONFIG_EFI_STUB /* The entry point for the PE/COFF executable is efi_pe_entry. 
*/ -ENTRY(efi_pe_entry) +SYM_FUNC_START(efi_pe_entry) movq %rcx, efi64_config(%rip) /* Handle */ movq %rdx, efi64_config+8(%rip) /* EFI System table pointer */ @@ -495,10 +496,10 @@ fail: movl BP_code32_start(%esi), %eax leaq startup_64(%rax), %rax jmp *%rax -ENDPROC(efi_pe_entry) +SYM_FUNC_END(efi_pe_entry) .org 0x390 -ENTRY(efi64_stub_entry) +SYM_FUNC_START(efi64_stub_entry) movq %rdi, efi64_config(%rip) /* Handle */ movq %rsi, efi64_config+8(%rip) /* EFI System table pointer */ @@ -507,11 +508,11 @@ ENTRY(efi64_stub_entry) movq %rdx, %rsi jmp handover_entry -ENDPROC(efi64_stub_entry) +SYM_FUNC_END(efi64_stub_entry) #endif .text -.Lrelocated: +SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) /* * Clear BSS (stack is currently empty) @@ -540,6 +541,7 @@ ENDPROC(efi64_stub_entry) * Jump to the decompressed kernel. */ jmp *%rax +SYM_FUNC_END(.Lrelocated) /* * Adjust the global offset table @@ -570,7 +572,7 @@ ENDPROC(efi64_stub_entry) * ECX contains the base address of the trampoline memory. * Non zero RDX means trampoline needs to enable 5-level paging. */ -ENTRY(trampoline_32bit_src) +SYM_CODE_START(trampoline_32bit_src) /* Set up data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds @@ -633,11 +635,13 @@ ENTRY(trampoline_32bit_src) movl %eax, %cr0 lret +SYM_CODE_END(trampoline_32bit_src) .code64 -.Lpaging_enabled: +SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled) /* Return from the trampoline */ jmp *%rdi +SYM_FUNC_END(.Lpaging_enabled) /* * The trampoline code has a size limit. @@ -647,20 +651,22 @@ ENTRY(trampoline_32bit_src) .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE .code32 -.Lno_longmode: +SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode) /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ 1: hlt jmp 1b +SYM_FUNC_END(.Lno_longmode) #include "../../kernel/verify_cpu.S" .data -gdt64: +SYM_DATA_START_LOCAL(gdt64) .word gdt_end - gdt .quad 0 +SYM_DATA_END(gdt64) .balign 8 -gdt: +SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt .long gdt .word 0 @@ -669,25 +675,24 @@ gdt: .quad 0x00cf92000000ffff /* __KERNEL_DS */ .quad 0x0080890000000000 /* TS descriptor */ .quad 0x0000000000000000 /* TS continued */ -gdt_end: +SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) #ifdef CONFIG_EFI_STUB -efi_config: - .quad 0 +SYM_DATA_LOCAL(efi_config, .quad 0) #ifdef CONFIG_EFI_MIXED - .global efi32_config -efi32_config: +SYM_DATA_START(efi32_config) .fill 5,8,0 .quad efi64_thunk .byte 0 +SYM_DATA_END(efi32_config) #endif - .global efi64_config -efi64_config: +SYM_DATA_START(efi64_config) .fill 5,8,0 .quad efi_call .byte 1 +SYM_DATA_END(efi64_config) #endif /* CONFIG_EFI_STUB */ /* @@ -695,23 +700,21 @@ efi64_config: */ .bss .balign 4 -boot_heap: - .fill BOOT_HEAP_SIZE, 1, 0 -boot_stack: +SYM_DATA_LOCAL(boot_heap, .fill BOOT_HEAP_SIZE, 1, 0) + +SYM_DATA_START_LOCAL(boot_stack) .fill BOOT_STACK_SIZE, 1, 0 -boot_stack_end: +SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end) /* * Space for page tables (not in .bss so not zeroed) */ .section ".pgtable","a",@nobits .balign 4096 -pgtable: - .fill BOOT_PGT_SIZE, 1, 0 +SYM_DATA_LOCAL(pgtable, .fill BOOT_PGT_SIZE, 1, 0) /* * The page table is going to be used instead of page table in the trampoline * memory. 
*/ -top_pgtable: - .fill PAGE_SIZE, 1, 0 +SYM_DATA_LOCAL(top_pgtable, .fill PAGE_SIZE, 1, 0) diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index 6afb7130a387..dd07e7b41b11 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -15,7 +15,7 @@ .text .code32 -ENTRY(get_sev_encryption_bit) +SYM_FUNC_START(get_sev_encryption_bit) xor %eax, %eax #ifdef CONFIG_AMD_MEM_ENCRYPT @@ -65,10 +65,10 @@ ENTRY(get_sev_encryption_bit) #endif /* CONFIG_AMD_MEM_ENCRYPT */ ret -ENDPROC(get_sev_encryption_bit) +SYM_FUNC_END(get_sev_encryption_bit) .code64 -ENTRY(set_sev_encryption_mask) +SYM_FUNC_START(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp push %rdx @@ -90,12 +90,11 @@ ENTRY(set_sev_encryption_mask) xor %rax, %rax ret -ENDPROC(set_sev_encryption_mask) +SYM_FUNC_END(set_sev_encryption_mask) .data #ifdef CONFIG_AMD_MEM_ENCRYPT .balign 8 -GLOBAL(sme_me_mask) - .quad 0 +SYM_DATA(sme_me_mask, .quad 0) #endif diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S index 4c5f4f4ad035..6afd05e819d2 100644 --- a/arch/x86/boot/copy.S +++ b/arch/x86/boot/copy.S @@ -15,7 +15,7 @@ .code16 .text -GLOBAL(memcpy) +SYM_FUNC_START_NOALIGN(memcpy) pushw %si pushw %di movw %ax, %di @@ -29,9 +29,9 @@ GLOBAL(memcpy) popw %di popw %si retl -ENDPROC(memcpy) +SYM_FUNC_END(memcpy) -GLOBAL(memset) +SYM_FUNC_START_NOALIGN(memset) pushw %di movw %ax, %di movzbl %dl, %eax @@ -44,22 +44,22 @@ GLOBAL(memset) rep; stosb popw %di retl -ENDPROC(memset) +SYM_FUNC_END(memset) -GLOBAL(copy_from_fs) +SYM_FUNC_START_NOALIGN(copy_from_fs) pushw %ds pushw %fs popw %ds calll memcpy popw %ds retl -ENDPROC(copy_from_fs) +SYM_FUNC_END(copy_from_fs) -GLOBAL(copy_to_fs) +SYM_FUNC_START_NOALIGN(copy_to_fs) pushw %es pushw %fs popw %es calll memcpy popw %es retl -ENDPROC(copy_to_fs) +SYM_FUNC_END(copy_to_fs) diff --git a/arch/x86/boot/pmjump.S b/arch/x86/boot/pmjump.S index c22f9a7d1aeb..cbec8bd0841f 100644 --- a/arch/x86/boot/pmjump.S +++ b/arch/x86/boot/pmjump.S @@ -21,7 +21,7 @@ /* * void protected_mode_jump(u32 entrypoint, u32 bootparams); */ -GLOBAL(protected_mode_jump) +SYM_FUNC_START_NOALIGN(protected_mode_jump) movl %edx, %esi # Pointer to boot_params table xorl %ebx, %ebx @@ -40,13 +40,13 @@ GLOBAL(protected_mode_jump) # Transition to 32-bit mode .byte 0x66, 0xea # ljmpl opcode -2: .long in_pm32 # offset +2: .long .Lin_pm32 # offset .word __BOOT_CS # segment -ENDPROC(protected_mode_jump) +SYM_FUNC_END(protected_mode_jump) .code32 .section ".text32","ax" -GLOBAL(in_pm32) +SYM_FUNC_START_LOCAL_NOALIGN(.Lin_pm32) # Set up data segments for flat 32-bit mode movl %ecx, %ds movl %ecx, %es @@ -72,4 +72,4 @@ GLOBAL(in_pm32) lldt %cx jmpl *%eax # Jump to the 32-bit entrypoint -ENDPROC(in_pm32) +SYM_FUNC_END(.Lin_pm32) diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index 4434607e366d..51d46d93efbc 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -71,7 +71,7 @@ * %r8 * %r9 */ -__load_partial: +SYM_FUNC_START_LOCAL(__load_partial) xor %r9d, %r9d pxor MSG, MSG @@ -123,7 +123,7 @@ __load_partial: .Lld_partial_8: ret -ENDPROC(__load_partial) +SYM_FUNC_END(__load_partial) /* * __store_partial: internal ABI @@ -137,7 +137,7 @@ ENDPROC(__load_partial) * %r9 * %r10 */ -__store_partial: +SYM_FUNC_START_LOCAL(__store_partial) mov LEN, %r8 mov DST, %r9 @@ -181,12 +181,12 @@ __store_partial: .Lst_partial_1: ret -ENDPROC(__store_partial) +SYM_FUNC_END(__store_partial) 
/* * void crypto_aegis128_aesni_init(void *state, const void *key, const void *iv); */ -ENTRY(crypto_aegis128_aesni_init) +SYM_FUNC_START(crypto_aegis128_aesni_init) FRAME_BEGIN /* load IV: */ @@ -226,13 +226,13 @@ ENTRY(crypto_aegis128_aesni_init) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_init) +SYM_FUNC_END(crypto_aegis128_aesni_init) /* * void crypto_aegis128_aesni_ad(void *state, unsigned int length, * const void *data); */ -ENTRY(crypto_aegis128_aesni_ad) +SYM_FUNC_START(crypto_aegis128_aesni_ad) FRAME_BEGIN cmp $0x10, LEN @@ -378,7 +378,7 @@ ENTRY(crypto_aegis128_aesni_ad) .Lad_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_ad) +SYM_FUNC_END(crypto_aegis128_aesni_ad) .macro encrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG @@ -402,7 +402,7 @@ ENDPROC(crypto_aegis128_aesni_ad) * void crypto_aegis128_aesni_enc(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_enc) +SYM_FUNC_START(crypto_aegis128_aesni_enc) FRAME_BEGIN cmp $0x10, LEN @@ -493,13 +493,13 @@ ENTRY(crypto_aegis128_aesni_enc) .Lenc_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_enc) +SYM_FUNC_END(crypto_aegis128_aesni_enc) /* * void crypto_aegis128_aesni_enc_tail(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_enc_tail) +SYM_FUNC_START(crypto_aegis128_aesni_enc_tail) FRAME_BEGIN /* load the state: */ @@ -533,7 +533,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_enc_tail) +SYM_FUNC_END(crypto_aegis128_aesni_enc_tail) .macro decrypt_block a s0 s1 s2 s3 s4 i movdq\a (\i * 0x10)(SRC), MSG @@ -556,7 +556,7 @@ ENDPROC(crypto_aegis128_aesni_enc_tail) * void crypto_aegis128_aesni_dec(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_dec) +SYM_FUNC_START(crypto_aegis128_aesni_dec) FRAME_BEGIN cmp $0x10, LEN @@ -647,13 +647,13 @@ ENTRY(crypto_aegis128_aesni_dec) .Ldec_out: FRAME_END ret -ENDPROC(crypto_aegis128_aesni_dec) +SYM_FUNC_END(crypto_aegis128_aesni_dec) /* * void crypto_aegis128_aesni_dec_tail(void *state, unsigned int length, * const void *src, void *dst); */ -ENTRY(crypto_aegis128_aesni_dec_tail) +SYM_FUNC_START(crypto_aegis128_aesni_dec_tail) FRAME_BEGIN /* load the state: */ @@ -697,13 +697,13 @@ ENTRY(crypto_aegis128_aesni_dec_tail) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_dec_tail) +SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) /* * void crypto_aegis128_aesni_final(void *state, void *tag_xor, * u64 assoclen, u64 cryptlen); */ -ENTRY(crypto_aegis128_aesni_final) +SYM_FUNC_START(crypto_aegis128_aesni_final) FRAME_BEGIN /* load the state: */ @@ -744,4 +744,4 @@ ENTRY(crypto_aegis128_aesni_final) FRAME_END ret -ENDPROC(crypto_aegis128_aesni_final) +SYM_FUNC_END(crypto_aegis128_aesni_final) diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 5f6a5af9c489..ec437db1fa54 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -544,11 +544,11 @@ ddq_add_8: * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ -ENTRY(aes_ctr_enc_128_avx_by8) +SYM_FUNC_START(aes_ctr_enc_128_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_128 -ENDPROC(aes_ctr_enc_128_avx_by8) +SYM_FUNC_END(aes_ctr_enc_128_avx_by8) /* * routine to do AES192 CTR enc/decrypt "by8" @@ -557,11 +557,11 @@ ENDPROC(aes_ctr_enc_128_avx_by8) * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int 
num_bytes) */ -ENTRY(aes_ctr_enc_192_avx_by8) +SYM_FUNC_START(aes_ctr_enc_192_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_192 -ENDPROC(aes_ctr_enc_192_avx_by8) +SYM_FUNC_END(aes_ctr_enc_192_avx_by8) /* * routine to do AES256 CTR enc/decrypt "by8" @@ -570,8 +570,8 @@ ENDPROC(aes_ctr_enc_192_avx_by8) * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, * unsigned int num_bytes) */ -ENTRY(aes_ctr_enc_256_avx_by8) +SYM_FUNC_START(aes_ctr_enc_256_avx_by8) /* call the aes main loop */ do_aes_ctrmain KEY_256 -ENDPROC(aes_ctr_enc_256_avx_by8) +SYM_FUNC_END(aes_ctr_enc_256_avx_by8) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index e40bdf024ba7..d28503f99f58 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -1592,7 +1592,7 @@ _esb_loop_\@: * poly = x^128 + x^127 + x^126 + x^121 + 1 * *****************************************************************************/ -ENTRY(aesni_gcm_dec) +SYM_FUNC_START(aesni_gcm_dec) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 @@ -1600,7 +1600,7 @@ ENTRY(aesni_gcm_dec) GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec) +SYM_FUNC_END(aesni_gcm_dec) /***************************************************************************** @@ -1680,7 +1680,7 @@ ENDPROC(aesni_gcm_dec) * * poly = x^128 + x^127 + x^126 + x^121 + 1 ***************************************************************************/ -ENTRY(aesni_gcm_enc) +SYM_FUNC_START(aesni_gcm_enc) FUNC_SAVE GCM_INIT %arg6, arg7, arg8, arg9 @@ -1689,7 +1689,7 @@ ENTRY(aesni_gcm_enc) GCM_COMPLETE arg10, arg11 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc) +SYM_FUNC_END(aesni_gcm_enc) /***************************************************************************** * void aesni_gcm_init(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1702,12 +1702,12 @@ ENDPROC(aesni_gcm_enc) * const u8 *aad, // Additional Authentication Data (AAD) * u64 aad_len) // Length of AAD in bytes. */ -ENTRY(aesni_gcm_init) +SYM_FUNC_START(aesni_gcm_init) FUNC_SAVE GCM_INIT %arg3, %arg4,%arg5, %arg6 FUNC_RESTORE ret -ENDPROC(aesni_gcm_init) +SYM_FUNC_END(aesni_gcm_init) /***************************************************************************** * void aesni_gcm_enc_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1717,12 +1717,12 @@ ENDPROC(aesni_gcm_init) * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ -ENTRY(aesni_gcm_enc_update) +SYM_FUNC_START(aesni_gcm_enc_update) FUNC_SAVE GCM_ENC_DEC enc FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update) +SYM_FUNC_END(aesni_gcm_enc_update) /***************************************************************************** * void aesni_gcm_dec_update(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1732,12 +1732,12 @@ ENDPROC(aesni_gcm_enc_update) * const u8 *in, // Plaintext input * u64 plaintext_len, // Length of data in bytes for encryption. */ -ENTRY(aesni_gcm_dec_update) +SYM_FUNC_START(aesni_gcm_dec_update) FUNC_SAVE GCM_ENC_DEC dec FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update) +SYM_FUNC_END(aesni_gcm_dec_update) /***************************************************************************** * void aesni_gcm_finalize(void *aes_ctx, // AES Key schedule. Starts on a 16 byte boundary. @@ -1747,19 +1747,18 @@ ENDPROC(aesni_gcm_dec_update) * u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16 (most likely), * // 12 or 8. 
*/ -ENTRY(aesni_gcm_finalize) +SYM_FUNC_START(aesni_gcm_finalize) FUNC_SAVE GCM_COMPLETE %arg3 %arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize) +SYM_FUNC_END(aesni_gcm_finalize) #endif -.align 4 -_key_expansion_128: -_key_expansion_256a: +SYM_FUNC_START_LOCAL_ALIAS(_key_expansion_128) +SYM_FUNC_START_LOCAL(_key_expansion_256a) pshufd $0b11111111, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1769,11 +1768,10 @@ _key_expansion_256a: movaps %xmm0, (TKEYP) add $0x10, TKEYP ret -ENDPROC(_key_expansion_128) -ENDPROC(_key_expansion_256a) +SYM_FUNC_END(_key_expansion_256a) +SYM_FUNC_END_ALIAS(_key_expansion_128) -.align 4 -_key_expansion_192a: +SYM_FUNC_START_LOCAL(_key_expansion_192a) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1795,10 +1793,9 @@ _key_expansion_192a: movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP ret -ENDPROC(_key_expansion_192a) +SYM_FUNC_END(_key_expansion_192a) -.align 4 -_key_expansion_192b: +SYM_FUNC_START_LOCAL(_key_expansion_192b) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1815,10 +1812,9 @@ _key_expansion_192b: movaps %xmm0, (TKEYP) add $0x10, TKEYP ret -ENDPROC(_key_expansion_192b) +SYM_FUNC_END(_key_expansion_192b) -.align 4 -_key_expansion_256b: +SYM_FUNC_START_LOCAL(_key_expansion_256b) pshufd $0b10101010, %xmm1, %xmm1 shufps $0b00010000, %xmm2, %xmm4 pxor %xmm4, %xmm2 @@ -1828,13 +1824,13 @@ _key_expansion_256b: movaps %xmm2, (TKEYP) add $0x10, TKEYP ret -ENDPROC(_key_expansion_256b) +SYM_FUNC_END(_key_expansion_256b) /* * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, * unsigned int key_len) */ -ENTRY(aesni_set_key) +SYM_FUNC_START(aesni_set_key) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -1943,12 +1939,12 @@ ENTRY(aesni_set_key) #endif FRAME_END ret -ENDPROC(aesni_set_key) +SYM_FUNC_END(aesni_set_key) /* * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_enc) +SYM_FUNC_START(aesni_enc) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -1967,7 +1963,7 @@ ENTRY(aesni_enc) #endif FRAME_END ret -ENDPROC(aesni_enc) +SYM_FUNC_END(aesni_enc) /* * _aesni_enc1: internal ABI @@ -1981,8 +1977,7 @@ ENDPROC(aesni_enc) * KEY * TKEYP (T1) */ -.align 4 -_aesni_enc1: +SYM_FUNC_START_LOCAL(_aesni_enc1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 @@ -2025,7 +2020,7 @@ _aesni_enc1: movaps 0x70(TKEYP), KEY AESENCLAST KEY STATE ret -ENDPROC(_aesni_enc1) +SYM_FUNC_END(_aesni_enc1) /* * _aesni_enc4: internal ABI @@ -2045,8 +2040,7 @@ ENDPROC(_aesni_enc1) * KEY * TKEYP (T1) */ -.align 4 -_aesni_enc4: +SYM_FUNC_START_LOCAL(_aesni_enc4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 @@ -2134,12 +2128,12 @@ _aesni_enc4: AESENCLAST KEY STATE3 AESENCLAST KEY STATE4 ret -ENDPROC(_aesni_enc4) +SYM_FUNC_END(_aesni_enc4) /* * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) */ -ENTRY(aesni_dec) +SYM_FUNC_START(aesni_dec) FRAME_BEGIN #ifndef __x86_64__ pushl KEYP @@ -2159,7 +2153,7 @@ ENTRY(aesni_dec) #endif FRAME_END ret -ENDPROC(aesni_dec) +SYM_FUNC_END(aesni_dec) /* * _aesni_dec1: internal ABI @@ -2173,8 +2167,7 @@ ENDPROC(aesni_dec) * KEY * TKEYP (T1) */ -.align 4 -_aesni_dec1: +SYM_FUNC_START_LOCAL(_aesni_dec1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 @@ -2217,7 +2210,7 @@ _aesni_dec1: movaps 0x70(TKEYP), KEY AESDECLAST KEY STATE ret -ENDPROC(_aesni_dec1) +SYM_FUNC_END(_aesni_dec1) /* * _aesni_dec4: internal ABI @@ -2237,8 +2230,7 @@ 
ENDPROC(_aesni_dec1) * KEY * TKEYP (T1) */ -.align 4 -_aesni_dec4: +SYM_FUNC_START_LOCAL(_aesni_dec4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 @@ -2326,13 +2318,13 @@ _aesni_dec4: AESDECLAST KEY STATE3 AESDECLAST KEY STATE4 ret -ENDPROC(_aesni_dec4) +SYM_FUNC_END(_aesni_dec4) /* * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len) */ -ENTRY(aesni_ecb_enc) +SYM_FUNC_START(aesni_ecb_enc) FRAME_BEGIN #ifndef __x86_64__ pushl LEN @@ -2386,13 +2378,13 @@ ENTRY(aesni_ecb_enc) #endif FRAME_END ret -ENDPROC(aesni_ecb_enc) +SYM_FUNC_END(aesni_ecb_enc) /* * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len); */ -ENTRY(aesni_ecb_dec) +SYM_FUNC_START(aesni_ecb_dec) FRAME_BEGIN #ifndef __x86_64__ pushl LEN @@ -2447,13 +2439,13 @@ ENTRY(aesni_ecb_dec) #endif FRAME_END ret -ENDPROC(aesni_ecb_dec) +SYM_FUNC_END(aesni_ecb_dec) /* * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_cbc_enc) +SYM_FUNC_START(aesni_cbc_enc) FRAME_BEGIN #ifndef __x86_64__ pushl IVP @@ -2491,13 +2483,13 @@ ENTRY(aesni_cbc_enc) #endif FRAME_END ret -ENDPROC(aesni_cbc_enc) +SYM_FUNC_END(aesni_cbc_enc) /* * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_cbc_dec) +SYM_FUNC_START(aesni_cbc_dec) FRAME_BEGIN #ifndef __x86_64__ pushl IVP @@ -2584,7 +2576,7 @@ ENTRY(aesni_cbc_dec) #endif FRAME_END ret -ENDPROC(aesni_cbc_dec) +SYM_FUNC_END(aesni_cbc_dec) #ifdef __x86_64__ .pushsection .rodata @@ -2604,8 +2596,7 @@ ENDPROC(aesni_cbc_dec) * INC: == 1, in little endian * BSWAP_MASK == endian swapping mask */ -.align 4 -_aesni_inc_init: +SYM_FUNC_START_LOCAL(_aesni_inc_init) movaps .Lbswap_mask, BSWAP_MASK movaps IV, CTR PSHUFB_XMM BSWAP_MASK CTR @@ -2613,7 +2604,7 @@ _aesni_inc_init: MOVQ_R64_XMM TCTR_LOW INC MOVQ_R64_XMM CTR TCTR_LOW ret -ENDPROC(_aesni_inc_init) +SYM_FUNC_END(_aesni_inc_init) /* * _aesni_inc: internal ABI @@ -2630,8 +2621,7 @@ ENDPROC(_aesni_inc_init) * CTR: == output IV, in little endian * TCTR_LOW: == lower qword of CTR */ -.align 4 -_aesni_inc: +SYM_FUNC_START_LOCAL(_aesni_inc) paddq INC, CTR add $1, TCTR_LOW jnc .Linc_low @@ -2642,13 +2632,13 @@ _aesni_inc: movaps CTR, IV PSHUFB_XMM BSWAP_MASK IV ret -ENDPROC(_aesni_inc) +SYM_FUNC_END(_aesni_inc) /* * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * size_t len, u8 *iv) */ -ENTRY(aesni_ctr_enc) +SYM_FUNC_START(aesni_ctr_enc) FRAME_BEGIN cmp $16, LEN jb .Lctr_enc_just_ret @@ -2705,7 +2695,7 @@ ENTRY(aesni_ctr_enc) .Lctr_enc_just_ret: FRAME_END ret -ENDPROC(aesni_ctr_enc) +SYM_FUNC_END(aesni_ctr_enc) /* * _aesni_gf128mul_x_ble: internal ABI @@ -2729,7 +2719,7 @@ ENDPROC(aesni_ctr_enc) * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src, * bool enc, u8 *iv) */ -ENTRY(aesni_xts_crypt8) +SYM_FUNC_START(aesni_xts_crypt8) FRAME_BEGIN cmpb $0, %cl movl $0, %ecx @@ -2833,6 +2823,6 @@ ENTRY(aesni_xts_crypt8) FRAME_END ret -ENDPROC(aesni_xts_crypt8) +SYM_FUNC_END(aesni_xts_crypt8) #endif diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index 91c039ab5699..bfa1c0b3e5b4 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -1775,12 +1775,12 @@ _initial_blocks_done\@: # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. 
With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_init_avx_gen2) +SYM_FUNC_START(aesni_gcm_init_avx_gen2) FUNC_SAVE INIT GHASH_MUL_AVX, PRECOMPUTE_AVX FUNC_RESTORE ret -ENDPROC(aesni_gcm_init_avx_gen2) +SYM_FUNC_END(aesni_gcm_init_avx_gen2) ############################################################################### #void aesni_gcm_enc_update_avx_gen2( @@ -1790,7 +1790,7 @@ ENDPROC(aesni_gcm_init_avx_gen2) # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_enc_update_avx_gen2) +SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2) FUNC_SAVE mov keysize, %eax cmp $32, %eax @@ -1809,7 +1809,7 @@ key_256_enc_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update_avx_gen2) +SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2) ############################################################################### #void aesni_gcm_dec_update_avx_gen2( @@ -1819,7 +1819,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen2) # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_dec_update_avx_gen2) +SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -1838,7 +1838,7 @@ key_256_dec_update: GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update_avx_gen2) +SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2) ############################################################################### #void aesni_gcm_finalize_avx_gen2( @@ -1848,7 +1848,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen2) # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### -ENTRY(aesni_gcm_finalize_avx_gen2) +SYM_FUNC_START(aesni_gcm_finalize_avx_gen2) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -1867,7 +1867,7 @@ key_256_finalize: GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize_avx_gen2) +SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) #endif /* CONFIG_AS_AVX */ @@ -2746,12 +2746,12 @@ _initial_blocks_done\@: # const u8 *aad, /* Additional Authentication Data (AAD)*/ # u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */ ############################################################# -ENTRY(aesni_gcm_init_avx_gen4) +SYM_FUNC_START(aesni_gcm_init_avx_gen4) FUNC_SAVE INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2 FUNC_RESTORE ret -ENDPROC(aesni_gcm_init_avx_gen4) +SYM_FUNC_END(aesni_gcm_init_avx_gen4) ############################################################################### #void aesni_gcm_enc_avx_gen4( @@ -2761,7 +2761,7 @@ ENDPROC(aesni_gcm_init_avx_gen4) # const u8 *in, /* Plaintext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. 
*/ ############################################################################### -ENTRY(aesni_gcm_enc_update_avx_gen4) +SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2780,7 +2780,7 @@ key_256_enc_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_enc_update_avx_gen4) +SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4) ############################################################################### #void aesni_gcm_dec_update_avx_gen4( @@ -2790,7 +2790,7 @@ ENDPROC(aesni_gcm_enc_update_avx_gen4) # const u8 *in, /* Ciphertext input */ # u64 plaintext_len) /* Length of data in Bytes for encryption. */ ############################################################################### -ENTRY(aesni_gcm_dec_update_avx_gen4) +SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2809,7 +2809,7 @@ key_256_dec_update4: GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13 FUNC_RESTORE ret -ENDPROC(aesni_gcm_dec_update_avx_gen4) +SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4) ############################################################################### #void aesni_gcm_finalize_avx_gen4( @@ -2819,7 +2819,7 @@ ENDPROC(aesni_gcm_dec_update_avx_gen4) # u64 auth_tag_len)# /* Authenticated Tag Length in bytes. # Valid values are 16 (most likely), 12 or 8. */ ############################################################################### -ENTRY(aesni_gcm_finalize_avx_gen4) +SYM_FUNC_START(aesni_gcm_finalize_avx_gen4) FUNC_SAVE mov keysize,%eax cmp $32, %eax @@ -2838,6 +2838,6 @@ key_256_finalize4: GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4 FUNC_RESTORE ret -ENDPROC(aesni_gcm_finalize_avx_gen4) +SYM_FUNC_END(aesni_gcm_finalize_avx_gen4) #endif /* CONFIG_AS_AVX2 */ diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S index 8591938eee26..24910b766bdd 100644 --- a/arch/x86/crypto/blake2s-core.S +++ b/arch/x86/crypto/blake2s-core.S @@ -47,7 +47,7 @@ SIGMA2: .text #ifdef CONFIG_AS_SSSE3 -ENTRY(blake2s_compress_ssse3) +SYM_FUNC_START(blake2s_compress_ssse3) testq %rdx,%rdx je .Lendofloop movdqu (%rdi),%xmm0 @@ -173,11 +173,11 @@ ENTRY(blake2s_compress_ssse3) movdqu %xmm14,0x20(%rdi) .Lendofloop: ret -ENDPROC(blake2s_compress_ssse3) +SYM_FUNC_END(blake2s_compress_ssse3) #endif /* CONFIG_AS_SSSE3 */ #ifdef CONFIG_AS_AVX512 -ENTRY(blake2s_compress_avx512) +SYM_FUNC_START(blake2s_compress_avx512) vmovdqu (%rdi),%xmm0 vmovdqu 0x10(%rdi),%xmm1 vmovdqu 0x20(%rdi),%xmm4 @@ -254,5 +254,5 @@ ENTRY(blake2s_compress_avx512) vmovdqu %xmm4,0x20(%rdi) vzeroupper retq -ENDPROC(blake2s_compress_avx512) +SYM_FUNC_END(blake2s_compress_avx512) #endif /* CONFIG_AS_AVX512 */ diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S index 330db7a48af8..4222ac6d6584 100644 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S @@ -103,7 +103,7 @@ bswapq RX0; \ xorq RX0, (RIO); -ENTRY(__blowfish_enc_blk) +SYM_FUNC_START(__blowfish_enc_blk) /* input: * %rdi: ctx * %rsi: dst @@ -139,9 +139,9 @@ ENTRY(__blowfish_enc_blk) .L__enc_xor: xor_block(); ret; -ENDPROC(__blowfish_enc_blk) +SYM_FUNC_END(__blowfish_enc_blk) -ENTRY(blowfish_dec_blk) +SYM_FUNC_START(blowfish_dec_blk) /* input: * %rdi: ctx * %rsi: dst @@ -171,7 +171,7 @@ ENTRY(blowfish_dec_blk) movq %r11, %r12; ret; -ENDPROC(blowfish_dec_blk) 
+SYM_FUNC_END(blowfish_dec_blk) /********************************************************************** 4-way blowfish, four blocks parallel @@ -283,7 +283,7 @@ ENDPROC(blowfish_dec_blk) bswapq RX3; \ xorq RX3, 24(RIO); -ENTRY(__blowfish_enc_blk_4way) +SYM_FUNC_START(__blowfish_enc_blk_4way) /* input: * %rdi: ctx * %rsi: dst @@ -330,9 +330,9 @@ ENTRY(__blowfish_enc_blk_4way) popq %rbx; popq %r12; ret; -ENDPROC(__blowfish_enc_blk_4way) +SYM_FUNC_END(__blowfish_enc_blk_4way) -ENTRY(blowfish_dec_blk_4way) +SYM_FUNC_START(blowfish_dec_blk_4way) /* input: * %rdi: ctx * %rsi: dst @@ -365,4 +365,4 @@ ENTRY(blowfish_dec_blk_4way) popq %r12; ret; -ENDPROC(blowfish_dec_blk_4way) +SYM_FUNC_END(blowfish_dec_blk_4way) diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index a14af6eb09cb..d01ddd73de65 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S @@ -189,20 +189,20 @@ * larger and would only be 0.5% faster (on sandy-bridge). */ .align 8 -roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: +SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rcx, (%r9)); ret; -ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) +SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 -roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: +SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, %rax, (%r9)); ret; -ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) +SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: @@ -722,7 +722,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .text .align 8 -__camellia_enc_blk16: +SYM_FUNC_START_LOCAL(__camellia_enc_blk16) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes @@ -806,10 +806,10 @@ __camellia_enc_blk16: %xmm15, %rax, %rcx, 24); jmp .Lenc_done; -ENDPROC(__camellia_enc_blk16) +SYM_FUNC_END(__camellia_enc_blk16) .align 8 -__camellia_dec_blk16: +SYM_FUNC_START_LOCAL(__camellia_dec_blk16) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes @@ -891,9 +891,9 @@ __camellia_dec_blk16: ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; -ENDPROC(__camellia_dec_blk16) +SYM_FUNC_END(__camellia_dec_blk16) -ENTRY(camellia_ecb_enc_16way) +SYM_FUNC_START(camellia_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -916,9 +916,9 @@ ENTRY(camellia_ecb_enc_16way) FRAME_END ret; -ENDPROC(camellia_ecb_enc_16way) +SYM_FUNC_END(camellia_ecb_enc_16way) -ENTRY(camellia_ecb_dec_16way) +SYM_FUNC_START(camellia_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -946,9 +946,9 @@ ENTRY(camellia_ecb_dec_16way) FRAME_END ret; -ENDPROC(camellia_ecb_dec_16way) +SYM_FUNC_END(camellia_ecb_dec_16way) -ENTRY(camellia_cbc_dec_16way) +SYM_FUNC_START(camellia_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -997,7 +997,7 @@ ENTRY(camellia_cbc_dec_16way) FRAME_END ret; -ENDPROC(camellia_cbc_dec_16way) +SYM_FUNC_END(camellia_cbc_dec_16way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ @@ -1005,7 +1005,7 @@ ENDPROC(camellia_cbc_dec_16way) vpslldq $8, 
tmp, tmp; \ vpsubq tmp, x, x; -ENTRY(camellia_ctr_16way) +SYM_FUNC_START(camellia_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1110,7 +1110,7 @@ ENTRY(camellia_ctr_16way) FRAME_END ret; -ENDPROC(camellia_ctr_16way) +SYM_FUNC_END(camellia_ctr_16way) #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ @@ -1120,7 +1120,7 @@ ENDPROC(camellia_ctr_16way) vpxor tmp, iv, iv; .align 8 -camellia_xts_crypt_16way: +SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1254,9 +1254,9 @@ camellia_xts_crypt_16way: FRAME_END ret; -ENDPROC(camellia_xts_crypt_16way) +SYM_FUNC_END(camellia_xts_crypt_16way) -ENTRY(camellia_xts_enc_16way) +SYM_FUNC_START(camellia_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1268,9 +1268,9 @@ ENTRY(camellia_xts_enc_16way) leaq __camellia_enc_blk16, %r9; jmp camellia_xts_crypt_16way; -ENDPROC(camellia_xts_enc_16way) +SYM_FUNC_END(camellia_xts_enc_16way) -ENTRY(camellia_xts_dec_16way) +SYM_FUNC_START(camellia_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -1286,4 +1286,4 @@ ENTRY(camellia_xts_dec_16way) leaq __camellia_dec_blk16, %r9; jmp camellia_xts_crypt_16way; -ENDPROC(camellia_xts_dec_16way) +SYM_FUNC_END(camellia_xts_dec_16way) diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index 4be4c7c3ba27..563ef6e83cdd 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S @@ -223,20 +223,20 @@ * larger and would only marginally faster. */ .align 8 -roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: +SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rcx, (%r9)); ret; -ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) +SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) .align 8 -roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: +SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, %rax, (%r9)); ret; -ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) +SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) /* * IN/OUT: @@ -760,7 +760,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) .text .align 8 -__camellia_enc_blk32: +SYM_FUNC_START_LOCAL(__camellia_enc_blk32) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes @@ -844,10 +844,10 @@ __camellia_enc_blk32: %ymm15, %rax, %rcx, 24); jmp .Lenc_done; -ENDPROC(__camellia_enc_blk32) +SYM_FUNC_END(__camellia_enc_blk32) .align 8 -__camellia_dec_blk32: +SYM_FUNC_START_LOCAL(__camellia_dec_blk32) /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes @@ -929,9 +929,9 @@ __camellia_dec_blk32: ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; -ENDPROC(__camellia_dec_blk32) +SYM_FUNC_END(__camellia_dec_blk32) -ENTRY(camellia_ecb_enc_32way) +SYM_FUNC_START(camellia_ecb_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -958,9 +958,9 @@ ENTRY(camellia_ecb_enc_32way) FRAME_END ret; -ENDPROC(camellia_ecb_enc_32way) +SYM_FUNC_END(camellia_ecb_enc_32way) -ENTRY(camellia_ecb_dec_32way) +SYM_FUNC_START(camellia_ecb_dec_32way) /* input: * %rdi: 
ctx, CTX * %rsi: dst (32 blocks) @@ -992,9 +992,9 @@ ENTRY(camellia_ecb_dec_32way) FRAME_END ret; -ENDPROC(camellia_ecb_dec_32way) +SYM_FUNC_END(camellia_ecb_dec_32way) -ENTRY(camellia_cbc_dec_32way) +SYM_FUNC_START(camellia_cbc_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1060,7 +1060,7 @@ ENTRY(camellia_cbc_dec_32way) FRAME_END ret; -ENDPROC(camellia_cbc_dec_32way) +SYM_FUNC_END(camellia_cbc_dec_32way) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ @@ -1076,7 +1076,7 @@ ENDPROC(camellia_cbc_dec_32way) vpslldq $8, tmp1, tmp1; \ vpsubq tmp1, x, x; -ENTRY(camellia_ctr_32way) +SYM_FUNC_START(camellia_ctr_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1200,7 +1200,7 @@ ENTRY(camellia_ctr_32way) FRAME_END ret; -ENDPROC(camellia_ctr_32way) +SYM_FUNC_END(camellia_ctr_32way) #define gf128mul_x_ble(iv, mask, tmp) \ vpsrad $31, iv, tmp; \ @@ -1222,7 +1222,7 @@ ENDPROC(camellia_ctr_32way) vpxor tmp1, iv, iv; .align 8 -camellia_xts_crypt_32way: +SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1367,9 +1367,9 @@ camellia_xts_crypt_32way: FRAME_END ret; -ENDPROC(camellia_xts_crypt_32way) +SYM_FUNC_END(camellia_xts_crypt_32way) -ENTRY(camellia_xts_enc_32way) +SYM_FUNC_START(camellia_xts_enc_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1382,9 +1382,9 @@ ENTRY(camellia_xts_enc_32way) leaq __camellia_enc_blk32, %r9; jmp camellia_xts_crypt_32way; -ENDPROC(camellia_xts_enc_32way) +SYM_FUNC_END(camellia_xts_enc_32way) -ENTRY(camellia_xts_dec_32way) +SYM_FUNC_START(camellia_xts_dec_32way) /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) @@ -1400,4 +1400,4 @@ ENTRY(camellia_xts_dec_32way) leaq __camellia_dec_blk32, %r9; jmp camellia_xts_crypt_32way; -ENDPROC(camellia_xts_dec_32way) +SYM_FUNC_END(camellia_xts_dec_32way) diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S index 23528bc18fc6..1372e6408850 100644 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S @@ -175,7 +175,7 @@ bswapq RAB0; \ movq RAB0, 4*2(RIO); -ENTRY(__camellia_enc_blk) +SYM_FUNC_START(__camellia_enc_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -220,9 +220,9 @@ ENTRY(__camellia_enc_blk) movq RR12, %r12; ret; -ENDPROC(__camellia_enc_blk) +SYM_FUNC_END(__camellia_enc_blk) -ENTRY(camellia_dec_blk) +SYM_FUNC_START(camellia_dec_blk) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -258,7 +258,7 @@ ENTRY(camellia_dec_blk) movq RR12, %r12; ret; -ENDPROC(camellia_dec_blk) +SYM_FUNC_END(camellia_dec_blk) /********************************************************************** 2-way camellia @@ -409,7 +409,7 @@ ENDPROC(camellia_dec_blk) bswapq RAB1; \ movq RAB1, 12*2(RIO); -ENTRY(__camellia_enc_blk_2way) +SYM_FUNC_START(__camellia_enc_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -456,9 +456,9 @@ ENTRY(__camellia_enc_blk_2way) movq RR12, %r12; popq %rbx; ret; -ENDPROC(__camellia_enc_blk_2way) +SYM_FUNC_END(__camellia_enc_blk_2way) -ENTRY(camellia_dec_blk_2way) +SYM_FUNC_START(camellia_dec_blk_2way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -496,4 +496,4 @@ ENTRY(camellia_dec_blk_2way) movq RR12, %r12; movq RXOR, %rbx; ret; -ENDPROC(camellia_dec_blk_2way) +SYM_FUNC_END(camellia_dec_blk_2way) diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S index dc55c3332fcc..8a6181b08b59 100644 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S @@ -209,7 
+209,7 @@ .text .align 16 -__cast5_enc_blk16: +SYM_FUNC_START_LOCAL(__cast5_enc_blk16) /* input: * %rdi: ctx * RL1: blocks 1 and 2 @@ -280,10 +280,10 @@ __cast5_enc_blk16: outunpack_blocks(RR4, RL4, RTMP, RX, RKM); ret; -ENDPROC(__cast5_enc_blk16) +SYM_FUNC_END(__cast5_enc_blk16) .align 16 -__cast5_dec_blk16: +SYM_FUNC_START_LOCAL(__cast5_dec_blk16) /* input: * %rdi: ctx * RL1: encrypted blocks 1 and 2 @@ -357,9 +357,9 @@ __cast5_dec_blk16: .L__skip_dec: vpsrldq $4, RKR, RKR; jmp .L__dec_tail; -ENDPROC(__cast5_dec_blk16) +SYM_FUNC_END(__cast5_dec_blk16) -ENTRY(cast5_ecb_enc_16way) +SYM_FUNC_START(cast5_ecb_enc_16way) /* input: * %rdi: ctx * %rsi: dst @@ -394,9 +394,9 @@ ENTRY(cast5_ecb_enc_16way) popq %r15; FRAME_END ret; -ENDPROC(cast5_ecb_enc_16way) +SYM_FUNC_END(cast5_ecb_enc_16way) -ENTRY(cast5_ecb_dec_16way) +SYM_FUNC_START(cast5_ecb_dec_16way) /* input: * %rdi: ctx * %rsi: dst @@ -432,9 +432,9 @@ ENTRY(cast5_ecb_dec_16way) popq %r15; FRAME_END ret; -ENDPROC(cast5_ecb_dec_16way) +SYM_FUNC_END(cast5_ecb_dec_16way) -ENTRY(cast5_cbc_dec_16way) +SYM_FUNC_START(cast5_cbc_dec_16way) /* input: * %rdi: ctx * %rsi: dst @@ -484,9 +484,9 @@ ENTRY(cast5_cbc_dec_16way) popq %r12; FRAME_END ret; -ENDPROC(cast5_cbc_dec_16way) +SYM_FUNC_END(cast5_cbc_dec_16way) -ENTRY(cast5_ctr_16way) +SYM_FUNC_START(cast5_ctr_16way) /* input: * %rdi: ctx * %rsi: dst @@ -560,4 +560,4 @@ ENTRY(cast5_ctr_16way) popq %r12; FRAME_END ret; -ENDPROC(cast5_ctr_16way) +SYM_FUNC_END(cast5_ctr_16way) diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S index 4f0a7cdb94d9..932a3ce32a88 100644 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S @@ -247,7 +247,7 @@ .text .align 8 -__cast6_enc_blk8: +SYM_FUNC_START_LOCAL(__cast6_enc_blk8) /* input: * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks @@ -292,10 +292,10 @@ __cast6_enc_blk8: outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); ret; -ENDPROC(__cast6_enc_blk8) +SYM_FUNC_END(__cast6_enc_blk8) .align 8 -__cast6_dec_blk8: +SYM_FUNC_START_LOCAL(__cast6_dec_blk8) /* input: * %rdi: ctx * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks @@ -339,9 +339,9 @@ __cast6_dec_blk8: outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); ret; -ENDPROC(__cast6_dec_blk8) +SYM_FUNC_END(__cast6_dec_blk8) -ENTRY(cast6_ecb_enc_8way) +SYM_FUNC_START(cast6_ecb_enc_8way) /* input: * %rdi: ctx * %rsi: dst @@ -362,9 +362,9 @@ ENTRY(cast6_ecb_enc_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_ecb_enc_8way) +SYM_FUNC_END(cast6_ecb_enc_8way) -ENTRY(cast6_ecb_dec_8way) +SYM_FUNC_START(cast6_ecb_dec_8way) /* input: * %rdi: ctx * %rsi: dst @@ -385,9 +385,9 @@ ENTRY(cast6_ecb_dec_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_ecb_dec_8way) +SYM_FUNC_END(cast6_ecb_dec_8way) -ENTRY(cast6_cbc_dec_8way) +SYM_FUNC_START(cast6_cbc_dec_8way) /* input: * %rdi: ctx * %rsi: dst @@ -411,9 +411,9 @@ ENTRY(cast6_cbc_dec_8way) popq %r12; FRAME_END ret; -ENDPROC(cast6_cbc_dec_8way) +SYM_FUNC_END(cast6_cbc_dec_8way) -ENTRY(cast6_ctr_8way) +SYM_FUNC_START(cast6_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -439,9 +439,9 @@ ENTRY(cast6_ctr_8way) popq %r12; FRAME_END ret; -ENDPROC(cast6_ctr_8way) +SYM_FUNC_END(cast6_ctr_8way) -ENTRY(cast6_xts_enc_8way) +SYM_FUNC_START(cast6_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -466,9 +466,9 @@ ENTRY(cast6_xts_enc_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_xts_enc_8way) +SYM_FUNC_END(cast6_xts_enc_8way) -ENTRY(cast6_xts_dec_8way) 
+SYM_FUNC_START(cast6_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -493,4 +493,4 @@ ENTRY(cast6_xts_dec_8way) popq %r15; FRAME_END ret; -ENDPROC(cast6_xts_dec_8way) +SYM_FUNC_END(cast6_xts_dec_8way) diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S index 831e4434fc20..ee9a40ab4109 100644 --- a/arch/x86/crypto/chacha-avx2-x86_64.S +++ b/arch/x86/crypto/chacha-avx2-x86_64.S @@ -34,7 +34,7 @@ CTR4BL: .octa 0x00000000000000000000000000000002 .text -ENTRY(chacha_2block_xor_avx2) +SYM_FUNC_START(chacha_2block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i @@ -224,9 +224,9 @@ ENTRY(chacha_2block_xor_avx2) lea -8(%r10),%rsp jmp .Ldone2 -ENDPROC(chacha_2block_xor_avx2) +SYM_FUNC_END(chacha_2block_xor_avx2) -ENTRY(chacha_4block_xor_avx2) +SYM_FUNC_START(chacha_4block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -529,9 +529,9 @@ ENTRY(chacha_4block_xor_avx2) lea -8(%r10),%rsp jmp .Ldone4 -ENDPROC(chacha_4block_xor_avx2) +SYM_FUNC_END(chacha_4block_xor_avx2) -ENTRY(chacha_8block_xor_avx2) +SYM_FUNC_START(chacha_8block_xor_avx2) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i @@ -1018,4 +1018,4 @@ ENTRY(chacha_8block_xor_avx2) jmp .Ldone8 -ENDPROC(chacha_8block_xor_avx2) +SYM_FUNC_END(chacha_8block_xor_avx2) diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S index 848f9c75fd4f..bb193fde123a 100644 --- a/arch/x86/crypto/chacha-avx512vl-x86_64.S +++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S @@ -24,7 +24,7 @@ CTR8BL: .octa 0x00000003000000020000000100000000 .text -ENTRY(chacha_2block_xor_avx512vl) +SYM_FUNC_START(chacha_2block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 2 data blocks output, o # %rdx: up to 2 data blocks input, i @@ -187,9 +187,9 @@ ENTRY(chacha_2block_xor_avx512vl) jmp .Ldone2 -ENDPROC(chacha_2block_xor_avx512vl) +SYM_FUNC_END(chacha_2block_xor_avx512vl) -ENTRY(chacha_4block_xor_avx512vl) +SYM_FUNC_START(chacha_4block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -453,9 +453,9 @@ ENTRY(chacha_4block_xor_avx512vl) jmp .Ldone4 -ENDPROC(chacha_4block_xor_avx512vl) +SYM_FUNC_END(chacha_4block_xor_avx512vl) -ENTRY(chacha_8block_xor_avx512vl) +SYM_FUNC_START(chacha_8block_xor_avx512vl) # %rdi: Input state matrix, s # %rsi: up to 8 data blocks output, o # %rdx: up to 8 data blocks input, i @@ -833,4 +833,4 @@ ENTRY(chacha_8block_xor_avx512vl) jmp .Ldone8 -ENDPROC(chacha_8block_xor_avx512vl) +SYM_FUNC_END(chacha_8block_xor_avx512vl) diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S index 2d86c7d6dc88..a38ab2512a6f 100644 --- a/arch/x86/crypto/chacha-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha-ssse3-x86_64.S @@ -33,7 +33,7 @@ CTRINC: .octa 0x00000003000000020000000100000000 * * Clobbers: %r8d, %xmm4-%xmm7 */ -chacha_permute: +SYM_FUNC_START_LOCAL(chacha_permute) movdqa ROT8(%rip),%xmm4 movdqa ROT16(%rip),%xmm5 @@ -109,9 +109,9 @@ chacha_permute: jnz .Ldoubleround ret -ENDPROC(chacha_permute) +SYM_FUNC_END(chacha_permute) -ENTRY(chacha_block_xor_ssse3) +SYM_FUNC_START(chacha_block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 1 data block output, o # %rdx: up to 1 data block input, i @@ -197,9 +197,9 @@ ENTRY(chacha_block_xor_ssse3) lea -8(%r10),%rsp jmp .Ldone 
-ENDPROC(chacha_block_xor_ssse3) +SYM_FUNC_END(chacha_block_xor_ssse3) -ENTRY(hchacha_block_ssse3) +SYM_FUNC_START(hchacha_block_ssse3) # %rdi: Input state matrix, s # %rsi: output (8 32-bit words) # %edx: nrounds @@ -218,9 +218,9 @@ ENTRY(hchacha_block_ssse3) FRAME_END ret -ENDPROC(hchacha_block_ssse3) +SYM_FUNC_END(hchacha_block_ssse3) -ENTRY(chacha_4block_xor_ssse3) +SYM_FUNC_START(chacha_4block_xor_ssse3) # %rdi: Input state matrix, s # %rsi: up to 4 data blocks output, o # %rdx: up to 4 data blocks input, i @@ -788,4 +788,4 @@ ENTRY(chacha_4block_xor_ssse3) jmp .Ldone4 -ENDPROC(chacha_4block_xor_ssse3) +SYM_FUNC_END(chacha_4block_xor_ssse3) diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S index 1c099dc08cc3..9fd28ff65bc2 100644 --- a/arch/x86/crypto/crc32-pclmul_asm.S +++ b/arch/x86/crypto/crc32-pclmul_asm.S @@ -103,7 +103,7 @@ * size_t len, uint crc32) */ -ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ +SYM_FUNC_START(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */ movdqa (BUF), %xmm1 movdqa 0x10(BUF), %xmm2 movdqa 0x20(BUF), %xmm3 @@ -238,4 +238,4 @@ fold_64: PEXTRD 0x01, %xmm1, %eax ret -ENDPROC(crc32_pclmul_le_16) +SYM_FUNC_END(crc32_pclmul_le_16) diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index d9b734d0c8cc..0e6690e3618c 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S @@ -74,7 +74,7 @@ # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); .text -ENTRY(crc_pcl) +SYM_FUNC_START(crc_pcl) #define bufp %rdi #define bufp_dw %edi #define bufp_w %di @@ -311,7 +311,7 @@ do_return: popq %rdi popq %rbx ret -ENDPROC(crc_pcl) +SYM_FUNC_END(crc_pcl) .section .rodata, "a", @progbits ################################################################ diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index 3d873e67749d..b2533d63030e 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -95,7 +95,7 @@ # Assumes len >= 16. 
# .align 16 -ENTRY(crc_t10dif_pcl) +SYM_FUNC_START(crc_t10dif_pcl) movdqa .Lbswap_mask(%rip), BSWAP_MASK @@ -280,7 +280,7 @@ ENTRY(crc_t10dif_pcl) jge .Lfold_16_bytes_loop # 32 <= len <= 255 add $16, len jmp .Lhandle_partial_segment # 17 <= len <= 31 -ENDPROC(crc_t10dif_pcl) +SYM_FUNC_END(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S index 7fca43099a5f..fac0fdc3f25d 100644 --- a/arch/x86/crypto/des3_ede-asm_64.S +++ b/arch/x86/crypto/des3_ede-asm_64.S @@ -162,7 +162,7 @@ movl left##d, (io); \ movl right##d, 4(io); -ENTRY(des3_ede_x86_64_crypt_blk) +SYM_FUNC_START(des3_ede_x86_64_crypt_blk) /* input: * %rdi: round keys, CTX * %rsi: dst @@ -244,7 +244,7 @@ ENTRY(des3_ede_x86_64_crypt_blk) popq %rbx; ret; -ENDPROC(des3_ede_x86_64_crypt_blk) +SYM_FUNC_END(des3_ede_x86_64_crypt_blk) /*********************************************************************** * 3-way 3DES @@ -418,7 +418,7 @@ ENDPROC(des3_ede_x86_64_crypt_blk) #define __movq(src, dst) \ movq src, dst; -ENTRY(des3_ede_x86_64_crypt_blk_3way) +SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way) /* input: * %rdi: ctx, round keys * %rsi: dst (3 blocks) @@ -529,7 +529,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way) popq %rbx; ret; -ENDPROC(des3_ede_x86_64_crypt_blk_3way) +SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way) .section .rodata, "a", @progbits .align 16 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index 5d53effe8abe..bb9735fbb865 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -44,7 +44,7 @@ * T2 * T3 */ -__clmul_gf128mul_ble: +SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble) movaps DATA, T1 pshufd $0b01001110, DATA, T2 pshufd $0b01001110, SHASH, T3 @@ -87,10 +87,10 @@ __clmul_gf128mul_ble: pxor T2, T1 pxor T1, DATA ret -ENDPROC(__clmul_gf128mul_ble) +SYM_FUNC_END(__clmul_gf128mul_ble) /* void clmul_ghash_mul(char *dst, const u128 *shash) */ -ENTRY(clmul_ghash_mul) +SYM_FUNC_START(clmul_ghash_mul) FRAME_BEGIN movups (%rdi), DATA movups (%rsi), SHASH @@ -101,13 +101,13 @@ ENTRY(clmul_ghash_mul) movups DATA, (%rdi) FRAME_END ret -ENDPROC(clmul_ghash_mul) +SYM_FUNC_END(clmul_ghash_mul) /* * void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, * const u128 *shash); */ -ENTRY(clmul_ghash_update) +SYM_FUNC_START(clmul_ghash_update) FRAME_BEGIN cmp $16, %rdx jb .Lupdate_just_ret # check length @@ -130,4 +130,4 @@ ENTRY(clmul_ghash_update) .Lupdate_just_ret: FRAME_END ret -ENDPROC(clmul_ghash_update) +SYM_FUNC_END(clmul_ghash_update) diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S index f7946ea1b704..b22c7b936272 100644 --- a/arch/x86/crypto/nh-avx2-x86_64.S +++ b/arch/x86/crypto/nh-avx2-x86_64.S @@ -69,7 +69,7 @@ * * It's guaranteed that message_len % 16 == 0. */ -ENTRY(nh_avx2) +SYM_FUNC_START(nh_avx2) vmovdqu 0x00(KEY), K0 vmovdqu 0x10(KEY), K1 @@ -154,4 +154,4 @@ ENTRY(nh_avx2) vpaddq T4, T0, T0 vmovdqu T0, (HASH) ret -ENDPROC(nh_avx2) +SYM_FUNC_END(nh_avx2) diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S index 51f52d4ab4bb..d7ae22dd6683 100644 --- a/arch/x86/crypto/nh-sse2-x86_64.S +++ b/arch/x86/crypto/nh-sse2-x86_64.S @@ -71,7 +71,7 @@ * * It's guaranteed that message_len % 16 == 0. 
*/ -ENTRY(nh_sse2) +SYM_FUNC_START(nh_sse2) movdqu 0x00(KEY), K0 movdqu 0x10(KEY), K1 @@ -120,4 +120,4 @@ ENTRY(nh_sse2) movdqu T0, 0x00(HASH) movdqu T1, 0x10(HASH) ret -ENDPROC(nh_sse2) +SYM_FUNC_END(nh_sse2) diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S index 8b341bc29d41..d6063feda9da 100644 --- a/arch/x86/crypto/poly1305-avx2-x86_64.S +++ b/arch/x86/crypto/poly1305-avx2-x86_64.S @@ -79,7 +79,7 @@ ORMASK: .octa 0x00000000010000000000000001000000 #define d3 %r12 #define d4 %r13 -ENTRY(poly1305_4block_avx2) +SYM_FUNC_START(poly1305_4block_avx2) # %rdi: Accumulator h[5] # %rsi: 64 byte input block m # %rdx: Poly1305 key r[5] @@ -387,4 +387,4 @@ ENTRY(poly1305_4block_avx2) pop %r12 pop %rbx ret -ENDPROC(poly1305_4block_avx2) +SYM_FUNC_END(poly1305_4block_avx2) diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index 5578f846e622..d8ea29b96640 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -46,7 +46,7 @@ ORMASK: .octa 0x00000000010000000000000001000000 #define d3 %r11 #define d4 %r12 -ENTRY(poly1305_block_sse2) +SYM_FUNC_START(poly1305_block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] @@ -276,7 +276,7 @@ ENTRY(poly1305_block_sse2) pop %r12 pop %rbx ret -ENDPROC(poly1305_block_sse2) +SYM_FUNC_END(poly1305_block_sse2) #define u0 0x00(%r8) @@ -301,7 +301,7 @@ ENDPROC(poly1305_block_sse2) #undef d0 #define d0 %r13 -ENTRY(poly1305_2block_sse2) +SYM_FUNC_START(poly1305_2block_sse2) # %rdi: Accumulator h[5] # %rsi: 16 byte input block m # %rdx: Poly1305 key r[5] @@ -587,4 +587,4 @@ ENTRY(poly1305_2block_sse2) pop %r12 pop %rbx ret -ENDPROC(poly1305_2block_sse2) +SYM_FUNC_END(poly1305_2block_sse2) diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S index ddc51dbba3af..ba9e4c1e7f5c 100644 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S @@ -555,7 +555,7 @@ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 -__serpent_enc_blk8_avx: +SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks @@ -606,10 +606,10 @@ __serpent_enc_blk8_avx: write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_enc_blk8_avx) +SYM_FUNC_END(__serpent_enc_blk8_avx) .align 8 -__serpent_dec_blk8_avx: +SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks @@ -660,9 +660,9 @@ __serpent_dec_blk8_avx: write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_dec_blk8_avx) +SYM_FUNC_END(__serpent_dec_blk8_avx) -ENTRY(serpent_ecb_enc_8way_avx) +SYM_FUNC_START(serpent_ecb_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -678,9 +678,9 @@ ENTRY(serpent_ecb_enc_8way_avx) FRAME_END ret; -ENDPROC(serpent_ecb_enc_8way_avx) +SYM_FUNC_END(serpent_ecb_enc_8way_avx) -ENTRY(serpent_ecb_dec_8way_avx) +SYM_FUNC_START(serpent_ecb_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -696,9 +696,9 @@ ENTRY(serpent_ecb_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_ecb_dec_8way_avx) +SYM_FUNC_END(serpent_ecb_dec_8way_avx) -ENTRY(serpent_cbc_dec_8way_avx) +SYM_FUNC_START(serpent_cbc_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -714,9 +714,9 @@ ENTRY(serpent_cbc_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_cbc_dec_8way_avx) +SYM_FUNC_END(serpent_cbc_dec_8way_avx) 
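
[Editor's aside, not part of the patch: the serpent conversions above show the whole pattern of this series in one place, so a minimal sketch of the new annotations may help readers who are skimming the hunks. The symbol names below are invented purely for illustration; only the macros themselves (SYM_FUNC_START, SYM_FUNC_START_LOCAL, SYM_FUNC_END, provided via <linux/linkage.h> once this series is applied) are taken from the patch. SYM_FUNC_START/SYM_FUNC_END replace ENTRY/ENDPROC for exported functions, while SYM_FUNC_START_LOCAL marks helpers such as __serpent_enc_blk8_avx that used to be bare labels, keeping them file-local while still giving them a proper type and size.]

    /* sketch only: "my_helper" and "my_entry" are made-up names */
    #include <linux/linkage.h>

        .text

    /* internal helper, local symbol: previously a plain label */
    SYM_FUNC_START_LOCAL(my_helper)
        addq    %rsi, %rdi              /* rdi += rsi */
        movq    %rdi, %rax              /* return value in rax */
        ret
    SYM_FUNC_END(my_helper)

    /* externally visible function: previously ENTRY()/ENDPROC() */
    SYM_FUNC_START(my_entry)
        call    my_helper
        ret
    SYM_FUNC_END(my_entry)

[The practical gain, as far as I can tell from the series, is uniformity: every function, local or global, gets a matching START/END pair, so the *_END macro can emit the symbol's type and size and tools that walk symbol tables see consistent function boundaries. End of aside; the diff resumes below.]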
-ENTRY(serpent_ctr_8way_avx) +SYM_FUNC_START(serpent_ctr_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -734,9 +734,9 @@ ENTRY(serpent_ctr_8way_avx) FRAME_END ret; -ENDPROC(serpent_ctr_8way_avx) +SYM_FUNC_END(serpent_ctr_8way_avx) -ENTRY(serpent_xts_enc_8way_avx) +SYM_FUNC_START(serpent_xts_enc_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -756,9 +756,9 @@ ENTRY(serpent_xts_enc_8way_avx) FRAME_END ret; -ENDPROC(serpent_xts_enc_8way_avx) +SYM_FUNC_END(serpent_xts_enc_8way_avx) -ENTRY(serpent_xts_dec_8way_avx) +SYM_FUNC_START(serpent_xts_dec_8way_avx) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -778,4 +778,4 @@ ENTRY(serpent_xts_dec_8way_avx) FRAME_END ret; -ENDPROC(serpent_xts_dec_8way_avx) +SYM_FUNC_END(serpent_xts_dec_8way_avx) diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S index 37bc1d48106c..c9648aeae705 100644 --- a/arch/x86/crypto/serpent-avx2-asm_64.S +++ b/arch/x86/crypto/serpent-avx2-asm_64.S @@ -561,7 +561,7 @@ transpose_4x4(x0, x1, x2, x3, t0, t1, t2) .align 8 -__serpent_enc_blk16: +SYM_FUNC_START_LOCAL(__serpent_enc_blk16) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: plaintext @@ -612,10 +612,10 @@ __serpent_enc_blk16: write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_enc_blk16) +SYM_FUNC_END(__serpent_enc_blk16) .align 8 -__serpent_dec_blk16: +SYM_FUNC_START_LOCAL(__serpent_dec_blk16) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext @@ -666,9 +666,9 @@ __serpent_dec_blk16: write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_dec_blk16) +SYM_FUNC_END(__serpent_dec_blk16) -ENTRY(serpent_ecb_enc_16way) +SYM_FUNC_START(serpent_ecb_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -688,9 +688,9 @@ ENTRY(serpent_ecb_enc_16way) FRAME_END ret; -ENDPROC(serpent_ecb_enc_16way) +SYM_FUNC_END(serpent_ecb_enc_16way) -ENTRY(serpent_ecb_dec_16way) +SYM_FUNC_START(serpent_ecb_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -710,9 +710,9 @@ ENTRY(serpent_ecb_dec_16way) FRAME_END ret; -ENDPROC(serpent_ecb_dec_16way) +SYM_FUNC_END(serpent_ecb_dec_16way) -ENTRY(serpent_cbc_dec_16way) +SYM_FUNC_START(serpent_cbc_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -733,9 +733,9 @@ ENTRY(serpent_cbc_dec_16way) FRAME_END ret; -ENDPROC(serpent_cbc_dec_16way) +SYM_FUNC_END(serpent_cbc_dec_16way) -ENTRY(serpent_ctr_16way) +SYM_FUNC_START(serpent_ctr_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -758,9 +758,9 @@ ENTRY(serpent_ctr_16way) FRAME_END ret; -ENDPROC(serpent_ctr_16way) +SYM_FUNC_END(serpent_ctr_16way) -ENTRY(serpent_xts_enc_16way) +SYM_FUNC_START(serpent_xts_enc_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -784,9 +784,9 @@ ENTRY(serpent_xts_enc_16way) FRAME_END ret; -ENDPROC(serpent_xts_enc_16way) +SYM_FUNC_END(serpent_xts_enc_16way) -ENTRY(serpent_xts_dec_16way) +SYM_FUNC_START(serpent_xts_dec_16way) /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) @@ -810,4 +810,4 @@ ENTRY(serpent_xts_dec_16way) FRAME_END ret; -ENDPROC(serpent_xts_dec_16way) +SYM_FUNC_END(serpent_xts_dec_16way) diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S index e5c4a4690ca9..6379b99cb722 100644 --- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S +++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S @@ -497,7 +497,7 @@ pxor t0, x3; \ movdqu x3, (3*4*4)(out); -ENTRY(__serpent_enc_blk_4way) +SYM_FUNC_START(__serpent_enc_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst @@ 
-559,9 +559,9 @@ ENTRY(__serpent_enc_blk_4way) xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE); ret; -ENDPROC(__serpent_enc_blk_4way) +SYM_FUNC_END(__serpent_enc_blk_4way) -ENTRY(serpent_dec_blk_4way) +SYM_FUNC_START(serpent_dec_blk_4way) /* input: * arg_ctx(%esp): ctx, CTX * arg_dst(%esp): dst @@ -613,4 +613,4 @@ ENTRY(serpent_dec_blk_4way) write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA); ret; -ENDPROC(serpent_dec_blk_4way) +SYM_FUNC_END(serpent_dec_blk_4way) diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S index 5e0b3a3e97af..efb6dc17dc90 100644 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S @@ -619,7 +619,7 @@ pxor t0, x3; \ movdqu x3, (3*4*4)(out); -ENTRY(__serpent_enc_blk_8way) +SYM_FUNC_START(__serpent_enc_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -682,9 +682,9 @@ ENTRY(__serpent_enc_blk_8way) xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); ret; -ENDPROC(__serpent_enc_blk_8way) +SYM_FUNC_END(__serpent_enc_blk_8way) -ENTRY(serpent_dec_blk_8way) +SYM_FUNC_START(serpent_dec_blk_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -736,4 +736,4 @@ ENTRY(serpent_dec_blk_8way) write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); ret; -ENDPROC(serpent_dec_blk_8way) +SYM_FUNC_END(serpent_dec_blk_8way) diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 9f712a7dfd79..6decc85ef7b7 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -634,7 +634,7 @@ _loop3: * param: function's name */ .macro SHA1_VECTOR_ASM name - ENTRY(\name) + SYM_FUNC_START(\name) push %rbx push %r12 @@ -676,7 +676,7 @@ _loop3: ret - ENDPROC(\name) + SYM_FUNC_END(\name) .endm .section .rodata diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S index ebbdba72ae07..11efe3a45a1f 100644 --- a/arch/x86/crypto/sha1_ni_asm.S +++ b/arch/x86/crypto/sha1_ni_asm.S @@ -95,7 +95,7 @@ */ .text .align 32 -ENTRY(sha1_ni_transform) +SYM_FUNC_START(sha1_ni_transform) mov %rsp, RSPSAVE sub $FRAME_SIZE, %rsp and $~0xF, %rsp @@ -291,7 +291,7 @@ ENTRY(sha1_ni_transform) mov RSPSAVE, %rsp ret -ENDPROC(sha1_ni_transform) +SYM_FUNC_END(sha1_ni_transform) .section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16 .align 16 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index 99c5b8c4dc38..5d03c1173690 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -67,7 +67,7 @@ * param: function's name */ .macro SHA1_VECTOR_ASM name - ENTRY(\name) + SYM_FUNC_START(\name) push %rbx push %r12 @@ -101,7 +101,7 @@ pop %rbx ret - ENDPROC(\name) + SYM_FUNC_END(\name) .endm /* diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index 001bbcf93c79..22e14c8dd2e4 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -347,7 +347,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_avx) +SYM_FUNC_START(sha256_transform_avx) .align 32 pushq %rbx pushq %r12 @@ -460,7 +460,7 @@ done_hash: popq %r12 popq %rbx ret -ENDPROC(sha256_transform_avx) +SYM_FUNC_END(sha256_transform_avx) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 1420db15dcdd..519b551ad576 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ 
b/arch/x86/crypto/sha256-avx2-asm.S @@ -526,7 +526,7 @@ STACK_SIZE = _RSP + _RSP_SIZE ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_rorx) +SYM_FUNC_START(sha256_transform_rorx) .align 32 pushq %rbx pushq %r12 @@ -713,7 +713,7 @@ done_hash: popq %r12 popq %rbx ret -ENDPROC(sha256_transform_rorx) +SYM_FUNC_END(sha256_transform_rorx) .section .rodata.cst512.K256, "aM", @progbits, 512 .align 64 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S index c6c05ed2c16a..69cc2f91dc4c 100644 --- a/arch/x86/crypto/sha256-ssse3-asm.S +++ b/arch/x86/crypto/sha256-ssse3-asm.S @@ -353,7 +353,7 @@ a = TMP_ ## arg 3 : Num blocks ######################################################################## .text -ENTRY(sha256_transform_ssse3) +SYM_FUNC_START(sha256_transform_ssse3) .align 32 pushq %rbx pushq %r12 @@ -471,7 +471,7 @@ done_hash: popq %rbx ret -ENDPROC(sha256_transform_ssse3) +SYM_FUNC_END(sha256_transform_ssse3) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S index fb58f58ecfbc..7abade04a3a3 100644 --- a/arch/x86/crypto/sha256_ni_asm.S +++ b/arch/x86/crypto/sha256_ni_asm.S @@ -97,7 +97,7 @@ .text .align 32 -ENTRY(sha256_ni_transform) +SYM_FUNC_START(sha256_ni_transform) shl $6, NUM_BLKS /* convert to bytes */ jz .Ldone_hash @@ -327,7 +327,7 @@ ENTRY(sha256_ni_transform) .Ldone_hash: ret -ENDPROC(sha256_ni_transform) +SYM_FUNC_END(sha256_ni_transform) .section .rodata.cst256.K256, "aM", @progbits, 256 .align 64 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 39235fefe6f7..3704ddd7e5d5 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -277,7 +277,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_avx) +SYM_FUNC_START(sha512_transform_avx) cmp $0, msglen je nowork @@ -365,7 +365,7 @@ updateblock: nowork: ret -ENDPROC(sha512_transform_avx) +SYM_FUNC_END(sha512_transform_avx) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index b16d56005162..80d830e7ee09 100644 --- a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -569,7 +569,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks ######################################################################## -ENTRY(sha512_transform_rorx) +SYM_FUNC_START(sha512_transform_rorx) # Allocate Stack Space mov %rsp, %rax sub $frame_size, %rsp @@ -682,7 +682,7 @@ done_hash: # Restore Stack Pointer mov frame_RSPSAVE(%rsp), %rsp ret -ENDPROC(sha512_transform_rorx) +SYM_FUNC_END(sha512_transform_rorx) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S index 66bbd9058a90..838f984e95d9 100644 --- a/arch/x86/crypto/sha512-ssse3-asm.S +++ b/arch/x86/crypto/sha512-ssse3-asm.S @@ -275,7 +275,7 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE # message blocks. # L is the message length in SHA512 blocks. 
######################################################################## -ENTRY(sha512_transform_ssse3) +SYM_FUNC_START(sha512_transform_ssse3) cmp $0, msglen je nowork @@ -364,7 +364,7 @@ updateblock: nowork: ret -ENDPROC(sha512_transform_ssse3) +SYM_FUNC_END(sha512_transform_ssse3) ######################################################################## ### Binary Data diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S index 698b8f2a56e2..a5151393bb2f 100644 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S @@ -234,7 +234,7 @@ vpxor x3, wkey, x3; .align 8 -__twofish_enc_blk8: +SYM_FUNC_START_LOCAL(__twofish_enc_blk8) /* input: * %rdi: ctx, CTX * RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks @@ -273,10 +273,10 @@ __twofish_enc_blk8: outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); ret; -ENDPROC(__twofish_enc_blk8) +SYM_FUNC_END(__twofish_enc_blk8) .align 8 -__twofish_dec_blk8: +SYM_FUNC_START_LOCAL(__twofish_dec_blk8) /* input: * %rdi: ctx, CTX * RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks @@ -313,9 +313,9 @@ __twofish_dec_blk8: outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); ret; -ENDPROC(__twofish_dec_blk8) +SYM_FUNC_END(__twofish_dec_blk8) -ENTRY(twofish_ecb_enc_8way) +SYM_FUNC_START(twofish_ecb_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -333,9 +333,9 @@ ENTRY(twofish_ecb_enc_8way) FRAME_END ret; -ENDPROC(twofish_ecb_enc_8way) +SYM_FUNC_END(twofish_ecb_enc_8way) -ENTRY(twofish_ecb_dec_8way) +SYM_FUNC_START(twofish_ecb_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -353,9 +353,9 @@ ENTRY(twofish_ecb_dec_8way) FRAME_END ret; -ENDPROC(twofish_ecb_dec_8way) +SYM_FUNC_END(twofish_ecb_dec_8way) -ENTRY(twofish_cbc_dec_8way) +SYM_FUNC_START(twofish_cbc_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -378,9 +378,9 @@ ENTRY(twofish_cbc_dec_8way) FRAME_END ret; -ENDPROC(twofish_cbc_dec_8way) +SYM_FUNC_END(twofish_cbc_dec_8way) -ENTRY(twofish_ctr_8way) +SYM_FUNC_START(twofish_ctr_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -405,9 +405,9 @@ ENTRY(twofish_ctr_8way) FRAME_END ret; -ENDPROC(twofish_ctr_8way) +SYM_FUNC_END(twofish_ctr_8way) -ENTRY(twofish_xts_enc_8way) +SYM_FUNC_START(twofish_xts_enc_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -429,9 +429,9 @@ ENTRY(twofish_xts_enc_8way) FRAME_END ret; -ENDPROC(twofish_xts_enc_8way) +SYM_FUNC_END(twofish_xts_enc_8way) -ENTRY(twofish_xts_dec_8way) +SYM_FUNC_START(twofish_xts_dec_8way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -453,4 +453,4 @@ ENTRY(twofish_xts_dec_8way) FRAME_END ret; -ENDPROC(twofish_xts_dec_8way) +SYM_FUNC_END(twofish_xts_dec_8way) diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S index 290cc4e9a6fe..a6f09e4f2e46 100644 --- a/arch/x86/crypto/twofish-i586-asm_32.S +++ b/arch/x86/crypto/twofish-i586-asm_32.S @@ -207,7 +207,7 @@ xor %esi, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +SYM_FUNC_START(twofish_enc_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -261,9 +261,9 @@ ENTRY(twofish_enc_blk) pop %ebp mov $1, %eax ret -ENDPROC(twofish_enc_blk) +SYM_FUNC_END(twofish_enc_blk) -ENTRY(twofish_dec_blk) +SYM_FUNC_START(twofish_dec_blk) push %ebp /* save registers according to calling convention*/ push %ebx push %esi @@ -318,4 +318,4 @@ ENTRY(twofish_dec_blk) pop %ebp mov $1, %eax ret -ENDPROC(twofish_dec_blk) +SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S 
b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S index e495e07c7f1b..fc23552afe37 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S @@ -220,7 +220,7 @@ rorq $32, RAB2; \ outunpack3(mov, RIO, 2, RAB, 2); -ENTRY(__twofish_enc_blk_3way) +SYM_FUNC_START(__twofish_enc_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -267,9 +267,9 @@ ENTRY(__twofish_enc_blk_3way) popq %r12; popq %r13; ret; -ENDPROC(__twofish_enc_blk_3way) +SYM_FUNC_END(__twofish_enc_blk_3way) -ENTRY(twofish_dec_blk_3way) +SYM_FUNC_START(twofish_dec_blk_3way) /* input: * %rdi: ctx, CTX * %rsi: dst @@ -302,4 +302,4 @@ ENTRY(twofish_dec_blk_3way) popq %r12; popq %r13; ret; -ENDPROC(twofish_dec_blk_3way) +SYM_FUNC_END(twofish_dec_blk_3way) diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S index ecef2cb9f43f..d2e56232494a 100644 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S @@ -202,7 +202,7 @@ xor %r8d, d ## D;\ ror $1, d ## D; -ENTRY(twofish_enc_blk) +SYM_FUNC_START(twofish_enc_blk) pushq R1 /* %rdi contains the ctx address */ @@ -253,9 +253,9 @@ ENTRY(twofish_enc_blk) popq R1 movl $1,%eax ret -ENDPROC(twofish_enc_blk) +SYM_FUNC_END(twofish_enc_blk) -ENTRY(twofish_dec_blk) +SYM_FUNC_START(twofish_dec_blk) pushq R1 /* %rdi contains the ctx address */ @@ -305,4 +305,4 @@ ENTRY(twofish_dec_blk) popq R1 movl $1,%eax ret -ENDPROC(twofish_dec_blk) +SYM_FUNC_END(twofish_dec_blk) diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index f07baf0388bc..09fe5606a118 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -730,7 +730,7 @@ * %eax: prev task * %edx: next task */ -ENTRY(__switch_to_asm) +SYM_CODE_START(__switch_to_asm) /* * Save callee-saved registers * This must match the order in struct inactive_task_frame @@ -769,7 +769,7 @@ ENTRY(__switch_to_asm) popl %ebp jmp __switch_to -END(__switch_to_asm) +SYM_CODE_END(__switch_to_asm) /* * The unwinder expects the last frame on the stack to always be at the same @@ -778,7 +778,7 @@ END(__switch_to_asm) * asmlinkage function so its argument has to be pushed on the stack. This * wrapper creates a proper "end of stack" frame header before the call. */ -ENTRY(schedule_tail_wrapper) +SYM_FUNC_START(schedule_tail_wrapper) FRAME_BEGIN pushl %eax @@ -787,7 +787,7 @@ ENTRY(schedule_tail_wrapper) FRAME_END ret -ENDPROC(schedule_tail_wrapper) +SYM_FUNC_END(schedule_tail_wrapper) /* * A newly forked process directly context switches into this address. 
* @@ -795,7 +795,7 @@ ENDPROC(schedule_tail_wrapper) * ebx: kernel thread func (NULL for user thread) * edi: kernel thread arg */ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) call schedule_tail_wrapper testl %ebx, %ebx @@ -818,7 +818,7 @@ ENTRY(ret_from_fork) */ movl $0, PT_EAX(%esp) jmp 2b -END(ret_from_fork) +SYM_CODE_END(ret_from_fork) /* * Return to user mode is not as complex as all this looks, @@ -828,8 +828,7 @@ END(ret_from_fork) */ # userspace resumption stub bypassing syscall exit tracing - ALIGN -ret_from_exception: +SYM_CODE_START_LOCAL(ret_from_exception) preempt_stop(CLBR_ANY) ret_from_intr: #ifdef CONFIG_VM86 @@ -846,15 +845,14 @@ ret_from_intr: cmpl $USER_RPL, %eax jb restore_all_kernel # not returning to v8086 or userspace -ENTRY(resume_userspace) DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF movl %esp, %eax call prepare_exit_to_usermode jmp restore_all -END(ret_from_exception) +SYM_CODE_END(ret_from_exception) -GLOBAL(__begin_SYSENTER_singlestep_region) +SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) /* * All code from here through __end_SYSENTER_singlestep_region is subject * to being single-stepped if a user program sets TF and executes SYSENTER. @@ -869,9 +867,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region) * Xen doesn't set %esp to be precisely what the normal SYSENTER * entry point expects, so fix it up before using the normal path. */ -ENTRY(xen_sysenter_target) +SYM_CODE_START(xen_sysenter_target) addl $5*4, %esp /* remove xen-provided frame */ jmp .Lsysenter_past_esp +SYM_CODE_END(xen_sysenter_target) #endif /* @@ -906,7 +905,7 @@ ENTRY(xen_sysenter_target) * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_32) +SYM_FUNC_START(entry_SYSENTER_32) /* * On entry-stack with all userspace-regs live - save and * restore eflags and %eax to use it as scratch-reg for the cr3 @@ -1033,8 +1032,8 @@ ENTRY(entry_SYSENTER_32) pushl $X86_EFLAGS_FIXED popfl jmp .Lsysenter_flags_fixed -GLOBAL(__end_SYSENTER_singlestep_region) -ENDPROC(entry_SYSENTER_32) +SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE) +SYM_FUNC_END(entry_SYSENTER_32) /* * 32-bit legacy system call entry. @@ -1064,7 +1063,7 @@ ENDPROC(entry_SYSENTER_32) * edi arg5 * ebp arg6 */ -ENTRY(entry_INT80_32) +SYM_FUNC_START(entry_INT80_32) ASM_CLAC pushl %eax /* pt_regs->orig_ax */ @@ -1120,7 +1119,7 @@ restore_all_kernel: jmp .Lirq_return .section .fixup, "ax" -ENTRY(iret_exc ) +SYM_CODE_START(iret_exc) pushl $0 # no error code pushl $do_iret_error @@ -1137,9 +1136,10 @@ ENTRY(iret_exc ) #endif jmp common_exception +SYM_CODE_END(iret_exc) .previous _ASM_EXTABLE(.Lirq_return, iret_exc) -ENDPROC(entry_INT80_32) +SYM_FUNC_END(entry_INT80_32) .macro FIXUP_ESPFIX_STACK /* @@ -1193,7 +1193,7 @@ ENDPROC(entry_INT80_32) * We pack 1 stub into every 8-byte block. 
*/ .align 8 -ENTRY(irq_entries_start) +SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1201,11 +1201,11 @@ ENTRY(irq_entries_start) jmp common_interrupt .align 8 .endr -END(irq_entries_start) +SYM_CODE_END(irq_entries_start) #ifdef CONFIG_X86_LOCAL_APIC .align 8 -ENTRY(spurious_entries_start) +SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) pushl $(~vector+0x80) /* Note: always in signed byte range */ @@ -1213,9 +1213,9 @@ ENTRY(spurious_entries_start) jmp common_spurious .align 8 .endr -END(spurious_entries_start) +SYM_CODE_END(spurious_entries_start) -common_spurious: +SYM_CODE_START_LOCAL(common_spurious) ASM_CLAC addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ SAVE_ALL switch_stacks=1 @@ -1224,7 +1224,7 @@ common_spurious: movl %esp, %eax call smp_spurious_interrupt jmp ret_from_intr -ENDPROC(common_spurious) +SYM_CODE_END(common_spurious) #endif /* @@ -1232,7 +1232,7 @@ ENDPROC(common_spurious) * so IRQ-flags tracing has to follow that: */ .p2align CONFIG_X86_L1_CACHE_SHIFT -common_interrupt: +SYM_CODE_START_LOCAL(common_interrupt) ASM_CLAC addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ @@ -1242,10 +1242,10 @@ common_interrupt: movl %esp, %eax call do_IRQ jmp ret_from_intr -ENDPROC(common_interrupt) +SYM_CODE_END(common_interrupt) #define BUILD_INTERRUPT3(name, nr, fn) \ -ENTRY(name) \ +SYM_FUNC_START(name) \ ASM_CLAC; \ pushl $~(nr); \ SAVE_ALL switch_stacks=1; \ @@ -1254,7 +1254,7 @@ ENTRY(name) \ movl %esp, %eax; \ call fn; \ jmp ret_from_intr; \ -ENDPROC(name) +SYM_FUNC_END(name) #define BUILD_INTERRUPT(name, nr) \ BUILD_INTERRUPT3(name, nr, smp_##name); \ @@ -1262,14 +1262,14 @@ ENDPROC(name) /* The include is where all of the SMP etc. 
interrupts come from */ #include <asm/entry_arch.h> -ENTRY(coprocessor_error) +SYM_CODE_START(coprocessor_error) ASM_CLAC pushl $0 pushl $do_coprocessor_error jmp common_exception -END(coprocessor_error) +SYM_CODE_END(coprocessor_error) -ENTRY(simd_coprocessor_error) +SYM_CODE_START(simd_coprocessor_error) ASM_CLAC pushl $0 #ifdef CONFIG_X86_INVD_BUG @@ -1281,99 +1281,99 @@ ENTRY(simd_coprocessor_error) pushl $do_simd_coprocessor_error #endif jmp common_exception -END(simd_coprocessor_error) +SYM_CODE_END(simd_coprocessor_error) -ENTRY(device_not_available) +SYM_CODE_START(device_not_available) ASM_CLAC pushl $-1 # mark this as an int pushl $do_device_not_available jmp common_exception -END(device_not_available) +SYM_CODE_END(device_not_available) #ifdef CONFIG_PARAVIRT -ENTRY(native_iret) +SYM_CODE_START(native_iret) iret _ASM_EXTABLE(native_iret, iret_exc) -END(native_iret) +SYM_CODE_END(native_iret) #endif -ENTRY(overflow) +SYM_CODE_START(overflow) ASM_CLAC pushl $0 pushl $do_overflow jmp common_exception -END(overflow) +SYM_CODE_END(overflow) -ENTRY(bounds) +SYM_CODE_START(bounds) ASM_CLAC pushl $0 pushl $do_bounds jmp common_exception -END(bounds) +SYM_CODE_END(bounds) -ENTRY(invalid_op) +SYM_CODE_START(invalid_op) ASM_CLAC pushl $0 pushl $do_invalid_op jmp common_exception -END(invalid_op) +SYM_CODE_END(invalid_op) -ENTRY(coprocessor_segment_overrun) +SYM_CODE_START(coprocessor_segment_overrun) ASM_CLAC pushl $0 pushl $do_coprocessor_segment_overrun jmp common_exception -END(coprocessor_segment_overrun) +SYM_CODE_END(coprocessor_segment_overrun) -ENTRY(invalid_TSS) +SYM_CODE_START(invalid_TSS) ASM_CLAC pushl $do_invalid_TSS jmp common_exception -END(invalid_TSS) +SYM_CODE_END(invalid_TSS) -ENTRY(segment_not_present) +SYM_CODE_START(segment_not_present) ASM_CLAC pushl $do_segment_not_present jmp common_exception -END(segment_not_present) +SYM_CODE_END(segment_not_present) -ENTRY(stack_segment) +SYM_CODE_START(stack_segment) ASM_CLAC pushl $do_stack_segment jmp common_exception -END(stack_segment) +SYM_CODE_END(stack_segment) -ENTRY(alignment_check) +SYM_CODE_START(alignment_check) ASM_CLAC pushl $do_alignment_check jmp common_exception -END(alignment_check) +SYM_CODE_END(alignment_check) -ENTRY(divide_error) +SYM_CODE_START(divide_error) ASM_CLAC pushl $0 # no error code pushl $do_divide_error jmp common_exception -END(divide_error) +SYM_CODE_END(divide_error) #ifdef CONFIG_X86_MCE -ENTRY(machine_check) +SYM_CODE_START(machine_check) ASM_CLAC pushl $0 pushl machine_check_vector jmp common_exception -END(machine_check) +SYM_CODE_END(machine_check) #endif -ENTRY(spurious_interrupt_bug) +SYM_CODE_START(spurious_interrupt_bug) ASM_CLAC pushl $0 pushl $do_spurious_interrupt_bug jmp common_exception -END(spurious_interrupt_bug) +SYM_CODE_END(spurious_interrupt_bug) #ifdef CONFIG_XEN_PV -ENTRY(xen_hypervisor_callback) +SYM_FUNC_START(xen_hypervisor_callback) /* * Check to see if we got the event in the critical * region in xen_iret_direct, after we've reenabled @@ -1397,7 +1397,7 @@ ENTRY(xen_hypervisor_callback) call xen_maybe_preempt_hcall #endif jmp ret_from_intr -ENDPROC(xen_hypervisor_callback) +SYM_FUNC_END(xen_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -1411,7 +1411,7 @@ ENDPROC(xen_hypervisor_callback) * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by maintaining a status value in EAX. 
*/ -ENTRY(xen_failsafe_callback) +SYM_FUNC_START(xen_failsafe_callback) pushl %eax movl $1, %eax 1: mov 4(%esp), %ds @@ -1448,7 +1448,7 @@ ENTRY(xen_failsafe_callback) _ASM_EXTABLE(2b, 7b) _ASM_EXTABLE(3b, 8b) _ASM_EXTABLE(4b, 9b) -ENDPROC(xen_failsafe_callback) +SYM_FUNC_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ #ifdef CONFIG_XEN_PVHVM @@ -1470,13 +1470,13 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, #endif /* CONFIG_HYPERV */ -ENTRY(page_fault) +SYM_CODE_START(page_fault) ASM_CLAC pushl $do_page_fault jmp common_exception_read_cr2 -END(page_fault) +SYM_CODE_END(page_fault) -common_exception_read_cr2: +SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2) /* the function address is in %gs's slot on the stack */ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 @@ -1498,9 +1498,9 @@ common_exception_read_cr2: movl %esp, %eax # pt_regs pointer CALL_NOSPEC %edi jmp ret_from_exception -END(common_exception_read_cr2) +SYM_CODE_END(common_exception_read_cr2) -common_exception: +SYM_CODE_START_LOCAL_NOALIGN(common_exception) /* the function address is in %gs's slot on the stack */ SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 ENCODE_FRAME_POINTER @@ -1519,9 +1519,9 @@ common_exception: movl %esp, %eax # pt_regs pointer CALL_NOSPEC %edi jmp ret_from_exception -END(common_exception) +SYM_CODE_END(common_exception) -ENTRY(debug) +SYM_CODE_START(debug) /* * Entry from sysenter is now handled in common_exception */ @@ -1529,7 +1529,7 @@ ENTRY(debug) pushl $-1 # mark this as an int pushl $do_debug jmp common_exception -END(debug) +SYM_CODE_END(debug) /* * NMI is doubly nasty. It can happen on the first instruction of @@ -1538,7 +1538,7 @@ END(debug) * switched stacks. We handle both conditions by simply checking whether we * interrupted kernel code running on the SYSENTER stack. */ -ENTRY(nmi) +SYM_CODE_START(nmi) ASM_CLAC #ifdef CONFIG_X86_ESPFIX32 @@ -1631,9 +1631,9 @@ ENTRY(nmi) lss (1+5+6)*4(%esp), %esp # back to espfix stack jmp .Lirq_return #endif -END(nmi) +SYM_CODE_END(nmi) -ENTRY(int3) +SYM_CODE_START(int3) ASM_CLAC pushl $-1 # mark this as an int @@ -1644,22 +1644,22 @@ ENTRY(int3) movl %esp, %eax # pt_regs pointer call do_int3 jmp ret_from_exception -END(int3) +SYM_CODE_END(int3) -ENTRY(general_protection) +SYM_CODE_START(general_protection) pushl $do_general_protection jmp common_exception -END(general_protection) +SYM_CODE_END(general_protection) #ifdef CONFIG_KVM_GUEST -ENTRY(async_page_fault) +SYM_CODE_START(async_page_fault) ASM_CLAC pushl $do_async_page_fault jmp common_exception_read_cr2 -END(async_page_fault) +SYM_CODE_END(async_page_fault) #endif -ENTRY(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_do_exit) /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1668,4 +1668,4 @@ ENTRY(rewind_stack_do_exit) call do_exit 1: jmp 1b -END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_do_exit) diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index b7c3ea4cb19d..76942cbd95a1 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -15,7 +15,7 @@ * at the top of the kernel process stack. * * Some macro usage: - * - ENTRY/END: Define functions in the symbol table. + * - SYM_FUNC_START/END:Define functions in the symbol table. * - TRACE_IRQ_*: Trace hardirq state for lock debugging. * - idtentry: Define exception entry points. 
*/ @@ -46,11 +46,11 @@ .section .entry.text, "ax" #ifdef CONFIG_PARAVIRT -ENTRY(native_usergs_sysret64) +SYM_CODE_START(native_usergs_sysret64) UNWIND_HINT_EMPTY swapgs sysretq -END(native_usergs_sysret64) +SYM_CODE_END(native_usergs_sysret64) #endif /* CONFIG_PARAVIRT */ .macro TRACE_IRQS_FLAGS flags:req @@ -142,7 +142,7 @@ END(native_usergs_sysret64) * with them due to bugs in both AMD and Intel CPUs. */ -ENTRY(entry_SYSCALL_64) +SYM_CODE_START(entry_SYSCALL_64) UNWIND_HINT_EMPTY /* * Interrupts are off on entry. @@ -162,7 +162,7 @@ ENTRY(entry_SYSCALL_64) pushq %r11 /* pt_regs->flags */ pushq $__USER_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ -GLOBAL(entry_SYSCALL_64_after_hwframe) +SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) pushq %rax /* pt_regs->orig_ax */ PUSH_AND_CLEAR_REGS rax=$-ENOSYS @@ -273,13 +273,13 @@ syscall_return_via_sysret: popq %rdi popq %rsp USERGS_SYSRET64 -END(entry_SYSCALL_64) +SYM_CODE_END(entry_SYSCALL_64) /* * %rdi: prev task * %rsi: next task */ -ENTRY(__switch_to_asm) +SYM_CODE_START(__switch_to_asm) UNWIND_HINT_FUNC /* * Save callee-saved registers @@ -321,7 +321,7 @@ ENTRY(__switch_to_asm) popq %rbp jmp __switch_to -END(__switch_to_asm) +SYM_CODE_END(__switch_to_asm) /* * A newly forked process directly context switches into this address. @@ -330,7 +330,7 @@ END(__switch_to_asm) * rbx: kernel thread func (NULL for user thread) * r12: kernel thread arg */ -ENTRY(ret_from_fork) +SYM_CODE_START(ret_from_fork) UNWIND_HINT_EMPTY movq %rax, %rdi call schedule_tail /* rdi: 'prev' task parameter */ @@ -357,14 +357,14 @@ ENTRY(ret_from_fork) */ movq $0, RAX(%rsp) jmp 2b -END(ret_from_fork) +SYM_CODE_END(ret_from_fork) /* * Build the entry stubs with some assembler magic. * We pack 1 stub into every 8-byte block. */ .align 8 -ENTRY(irq_entries_start) +SYM_CODE_START(irq_entries_start) vector=FIRST_EXTERNAL_VECTOR .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) UNWIND_HINT_IRET_REGS @@ -373,10 +373,10 @@ ENTRY(irq_entries_start) .align 8 vector=vector+1 .endr -END(irq_entries_start) +SYM_CODE_END(irq_entries_start) .align 8 -ENTRY(spurious_entries_start) +SYM_CODE_START(spurious_entries_start) vector=FIRST_SYSTEM_VECTOR .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) UNWIND_HINT_IRET_REGS @@ -385,7 +385,7 @@ ENTRY(spurious_entries_start) .align 8 vector=vector+1 .endr -END(spurious_entries_start) +SYM_CODE_END(spurious_entries_start) .macro DEBUG_ENTRY_ASSERT_IRQS_OFF #ifdef CONFIG_DEBUG_ENTRY @@ -511,7 +511,7 @@ END(spurious_entries_start) * | return address | * +----------------------------------------------------+ */ -ENTRY(interrupt_entry) +SYM_CODE_START(interrupt_entry) UNWIND_HINT_FUNC ASM_CLAC cld @@ -579,7 +579,7 @@ ENTRY(interrupt_entry) TRACE_IRQS_OFF ret -END(interrupt_entry) +SYM_CODE_END(interrupt_entry) _ASM_NOKPROBE(interrupt_entry) @@ -589,18 +589,18 @@ _ASM_NOKPROBE(interrupt_entry) * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_spurious/interrupt. */ -common_spurious: +SYM_CODE_START_LOCAL(common_spurious) addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ call interrupt_entry UNWIND_HINT_REGS indirect=1 call smp_spurious_interrupt /* rdi points to pt_regs */ jmp ret_from_intr -END(common_spurious) +SYM_CODE_END(common_spurious) _ASM_NOKPROBE(common_spurious) /* common_interrupt is a hotpath. 
Align it */ .p2align CONFIG_X86_L1_CACHE_SHIFT -common_interrupt: +SYM_CODE_START_LOCAL(common_interrupt) addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ call interrupt_entry UNWIND_HINT_REGS indirect=1 @@ -616,12 +616,12 @@ ret_from_intr: jz retint_kernel /* Interrupt came from user space */ -GLOBAL(retint_user) +.Lretint_user: mov %rsp,%rdi call prepare_exit_to_usermode TRACE_IRQS_IRETQ -GLOBAL(swapgs_restore_regs_and_return_to_usermode) +SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates user mode. */ testb $3, CS(%rsp) @@ -679,7 +679,7 @@ retint_kernel: */ TRACE_IRQS_IRETQ -GLOBAL(restore_regs_and_return_to_kernel) +SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL) #ifdef CONFIG_DEBUG_ENTRY /* Assert that pt_regs indicates kernel mode. */ testb $3, CS(%rsp) @@ -695,7 +695,7 @@ GLOBAL(restore_regs_and_return_to_kernel) */ INTERRUPT_RETURN -ENTRY(native_iret) +SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL) UNWIND_HINT_IRET_REGS /* * Are we returning to a stack segment from the LDT? Note: in @@ -706,8 +706,7 @@ ENTRY(native_iret) jnz native_irq_return_ldt #endif -.global native_irq_return_iret -native_irq_return_iret: +SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL) /* * This may fault. Non-paranoid faults on return to userspace are * handled by fixup_bad_iret. These include #SS, #GP, and #NP. @@ -789,14 +788,14 @@ native_irq_return_ldt: */ jmp native_irq_return_iret #endif -END(common_interrupt) +SYM_CODE_END(common_interrupt) _ASM_NOKPROBE(common_interrupt) /* * APIC interrupts. */ .macro apicinterrupt3 num sym do_sym -ENTRY(\sym) +SYM_CODE_START(\sym) UNWIND_HINT_IRET_REGS pushq $~(\num) .Lcommon_\sym: @@ -804,7 +803,7 @@ ENTRY(\sym) UNWIND_HINT_REGS indirect=1 call \do_sym /* rdi points to pt_regs */ jmp ret_from_intr -END(\sym) +SYM_CODE_END(\sym) _ASM_NOKPROBE(\sym) .endm @@ -969,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS. */ .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0 -ENTRY(\sym) +SYM_CODE_START(\sym) UNWIND_HINT_IRET_REGS offset=\has_error_code*8 /* Sanity check */ @@ -1019,7 +1018,7 @@ ENTRY(\sym) .endif _ASM_NOKPROBE(\sym) -END(\sym) +SYM_CODE_END(\sym) .endm idtentry divide_error do_divide_error has_error_code=0 @@ -1041,7 +1040,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 * Reload gs selector with exception handling * edi: new selector */ -ENTRY(native_load_gs_index) +SYM_FUNC_START(native_load_gs_index) FRAME_BEGIN pushfq DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) @@ -1055,13 +1054,13 @@ ENTRY(native_load_gs_index) popfq FRAME_END ret -ENDPROC(native_load_gs_index) +SYM_FUNC_END(native_load_gs_index) EXPORT_SYMBOL(native_load_gs_index) _ASM_EXTABLE(.Lgs_change, .Lbad_gs) .section .fixup, "ax" /* running with kernelgs */ -.Lbad_gs: +SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs) SWAPGS /* switch back to user gs */ .macro ZAP_GS /* This can't be a string because the preprocessor needs to see it. */ @@ -1072,10 +1071,11 @@ EXPORT_SYMBOL(native_load_gs_index) xorl %eax, %eax movl %eax, %gs jmp 2b +SYM_CODE_END(.Lbad_gs) .previous /* Call softirq on interrupt stack. Interrupts are off. 
*/ -ENTRY(do_softirq_own_stack) +SYM_FUNC_START(do_softirq_own_stack) pushq %rbp mov %rsp, %rbp ENTER_IRQ_STACK regs=0 old_rsp=%r11 @@ -1083,7 +1083,7 @@ ENTRY(do_softirq_own_stack) LEAVE_IRQ_STACK regs=0 leaveq ret -ENDPROC(do_softirq_own_stack) +SYM_FUNC_END(do_softirq_own_stack) #ifdef CONFIG_XEN_PV idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 @@ -1101,7 +1101,8 @@ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 * existing activation in its critical region -- if so, we pop the current * activation and restart the handler using the previous one. */ -ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ +/* do_hypervisor_callback(struct *pt_regs) */ +SYM_CODE_START_LOCAL(xen_do_hypervisor_callback) /* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will @@ -1119,7 +1120,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ call xen_maybe_preempt_hcall #endif jmp error_exit -END(xen_do_hypervisor_callback) +SYM_CODE_END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. @@ -1134,7 +1135,7 @@ END(xen_do_hypervisor_callback) * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ -ENTRY(xen_failsafe_callback) +SYM_CODE_START(xen_failsafe_callback) UNWIND_HINT_EMPTY movl %ds, %ecx cmpw %cx, 0x10(%rsp) @@ -1164,7 +1165,7 @@ ENTRY(xen_failsafe_callback) PUSH_AND_CLEAR_REGS ENCODE_FRAME_POINTER jmp error_exit -END(xen_failsafe_callback) +SYM_CODE_END(xen_failsafe_callback) #endif /* CONFIG_XEN_PV */ #ifdef CONFIG_XEN_PVHVM @@ -1214,7 +1215,7 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1 * Use slow, but surefire "are we in kernel?" check. * Return: ebx=0: need swapgs on exit, ebx=1: otherwise */ -ENTRY(paranoid_entry) +SYM_CODE_START_LOCAL(paranoid_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 @@ -1248,7 +1249,7 @@ ENTRY(paranoid_entry) FENCE_SWAPGS_KERNEL_ENTRY ret -END(paranoid_entry) +SYM_CODE_END(paranoid_entry) /* * "Paranoid" exit path from exception stack. This is invoked @@ -1262,7 +1263,7 @@ END(paranoid_entry) * * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */ -ENTRY(paranoid_exit) +SYM_CODE_START_LOCAL(paranoid_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF_DEBUG @@ -1272,19 +1273,18 @@ ENTRY(paranoid_exit) /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK - jmp .Lparanoid_exit_restore + jmp restore_regs_and_return_to_kernel .Lparanoid_exit_no_swapgs: TRACE_IRQS_IRETQ_DEBUG /* Always restore stashed CR3 value (see paranoid_entry) */ RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 -.Lparanoid_exit_restore: jmp restore_regs_and_return_to_kernel -END(paranoid_exit) +SYM_CODE_END(paranoid_exit) /* * Save all registers in pt_regs, and switch GS if needed. */ -ENTRY(error_entry) +SYM_CODE_START_LOCAL(error_entry) UNWIND_HINT_FUNC cld PUSH_AND_CLEAR_REGS save_ret=1 @@ -1364,16 +1364,16 @@ ENTRY(error_entry) call fixup_bad_iret mov %rax, %rsp jmp .Lerror_entry_from_usermode_after_swapgs -END(error_entry) +SYM_CODE_END(error_entry) -ENTRY(error_exit) +SYM_CODE_START_LOCAL(error_exit) UNWIND_HINT_REGS DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF testb $3, CS(%rsp) jz retint_kernel - jmp retint_user -END(error_exit) + jmp .Lretint_user +SYM_CODE_END(error_exit) /* * Runs on exception stack. 
Xen PV does not go through this path at all, @@ -1383,7 +1383,7 @@ END(error_exit) * %r14: Used to save/restore the CR3 of the interrupted context * when PAGE_TABLE_ISOLATION is in use. Do not clobber. */ -ENTRY(nmi) +SYM_CODE_START(nmi) UNWIND_HINT_IRET_REGS /* @@ -1718,21 +1718,21 @@ nmi_restore: * about espfix64 on the way back to kernel mode. */ iretq -END(nmi) +SYM_CODE_END(nmi) #ifndef CONFIG_IA32_EMULATION /* * This handles SYSCALL from 32-bit code. There is no way to program * MSRs to fully disable 32-bit SYSCALL. */ -ENTRY(ignore_sysret) +SYM_CODE_START(ignore_sysret) UNWIND_HINT_EMPTY mov $-ENOSYS, %eax sysret -END(ignore_sysret) +SYM_CODE_END(ignore_sysret) #endif -ENTRY(rewind_stack_do_exit) +SYM_CODE_START(rewind_stack_do_exit) UNWIND_HINT_FUNC /* Prevent any naive code from trying to unwind to our caller. */ xorl %ebp, %ebp @@ -1742,4 +1742,4 @@ ENTRY(rewind_stack_do_exit) UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE call do_exit -END(rewind_stack_do_exit) +SYM_CODE_END(rewind_stack_do_exit) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 39913770a44d..f1d3ccae5dd5 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -46,7 +46,7 @@ * ebp user stack * 0(%ebp) arg6 */ -ENTRY(entry_SYSENTER_compat) +SYM_FUNC_START(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS @@ -146,8 +146,8 @@ ENTRY(entry_SYSENTER_compat) pushq $X86_EFLAGS_FIXED popfq jmp .Lsysenter_flags_fixed -GLOBAL(__end_entry_SYSENTER_compat) -ENDPROC(entry_SYSENTER_compat) +SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL) +SYM_FUNC_END(entry_SYSENTER_compat) /* * 32-bit SYSCALL entry. @@ -196,7 +196,7 @@ ENDPROC(entry_SYSENTER_compat) * esp user stack * 0(%esp) arg6 */ -ENTRY(entry_SYSCALL_compat) +SYM_CODE_START(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs @@ -215,7 +215,7 @@ ENTRY(entry_SYSCALL_compat) pushq %r11 /* pt_regs->flags */ pushq $__USER32_CS /* pt_regs->cs */ pushq %rcx /* pt_regs->ip */ -GLOBAL(entry_SYSCALL_compat_after_hwframe) +SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) movl %eax, %eax /* discard orig_ax high bits */ pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ @@ -311,7 +311,7 @@ sysret32_from_system_call: xorl %r10d, %r10d swapgs sysretl -END(entry_SYSCALL_compat) +SYM_CODE_END(entry_SYSCALL_compat) /* * 32-bit legacy system call entry. @@ -339,7 +339,7 @@ END(entry_SYSCALL_compat) * edi arg5 * ebp arg6 */ -ENTRY(entry_INT80_compat) +SYM_CODE_START(entry_INT80_compat) /* * Interrupts are off on entry. */ @@ -416,4 +416,4 @@ ENTRY(entry_INT80_compat) /* Go back to user mode. */ TRACE_IRQS_ON jmp swapgs_restore_regs_and_return_to_usermode -END(entry_INT80_compat) +SYM_CODE_END(entry_INT80_compat) diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S index 2713490611a3..e010d4ae11f1 100644 --- a/arch/x86/entry/thunk_32.S +++ b/arch/x86/entry/thunk_32.S @@ -10,8 +10,7 @@ /* put return address in eax (arg1) */ .macro THUNK name, func, put_ret_addr_in_eax=0 - .globl \name -\name: +SYM_CODE_START_NOALIGN(\name) pushl %eax pushl %ecx pushl %edx @@ -27,6 +26,7 @@ popl %eax ret _ASM_NOKPROBE(\name) +SYM_CODE_END(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S index ea5c4167086c..c5c3b6e86e62 100644 --- a/arch/x86/entry/thunk_64.S +++ b/arch/x86/entry/thunk_64.S @@ -12,7 +12,7 @@ /* rdi: arg1 ... normal C conventions. rax is saved/restored. 
*/ .macro THUNK name, func, put_ret_addr_in_rdi=0 - ENTRY(\name) +SYM_FUNC_START_NOALIGN(\name) pushq %rbp movq %rsp, %rbp @@ -33,7 +33,7 @@ call \func jmp .L_restore - ENDPROC(\name) +SYM_FUNC_END(\name) _ASM_NOKPROBE(\name) .endm @@ -56,7 +56,7 @@ #if defined(CONFIG_TRACE_IRQFLAGS) \ || defined(CONFIG_DEBUG_LOCK_ALLOC) \ || defined(CONFIG_PREEMPTION) -.L_restore: +SYM_CODE_START_LOCAL_NOALIGN(.L_restore) popq %r11 popq %r10 popq %r9 @@ -69,4 +69,5 @@ popq %rbp ret _ASM_NOKPROBE(.L_restore) +SYM_CODE_END(.L_restore) #endif diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 0f2154106d01..2b75e80f6b41 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -87,11 +87,9 @@ $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS # # vDSO code runs in userspace and -pg doesn't help with profiling anyway. # -CFLAGS_REMOVE_vdso-note.o = -pg CFLAGS_REMOVE_vclock_gettime.o = -pg CFLAGS_REMOVE_vdso32/vclock_gettime.o = -pg CFLAGS_REMOVE_vgetcpu.o = -pg -CFLAGS_REMOVE_vvar.o = -pg # # X32 processes use x32 vDSO to access 64bit kernel data. diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S index 263d7433dea8..de1fff7188aa 100644 --- a/arch/x86/entry/vdso/vdso32/system_call.S +++ b/arch/x86/entry/vdso/vdso32/system_call.S @@ -62,7 +62,7 @@ __kernel_vsyscall: /* Enter using int $0x80 */ int $0x80 -GLOBAL(int80_landing_pad) +SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL) /* * Restore EDX and ECX in case they were clobbered. EBP is not diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h index 14caa9d9fb7f..365111789cc6 100644 --- a/arch/x86/include/asm/linkage.h +++ b/arch/x86/include/asm/linkage.h @@ -13,10 +13,6 @@ #ifdef __ASSEMBLY__ -#define GLOBAL(name) \ - .globl name; \ - name: - #if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16) #define __ALIGN .p2align 4, 0x90 #define __ALIGN_STR __stringify(__ALIGN) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 450b7236253e..94227da69da1 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -966,7 +966,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) extern unsigned long arch_align_stack(unsigned long sp); void free_init_pages(const char *what, unsigned long begin, unsigned long end); -extern void free_kernel_image_pages(void *begin, void *end); +extern void free_kernel_image_pages(const char *what, void *begin, void *end); void default_idle(void); #ifdef CONFIG_XEN diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h index 71b32f2570ab..036c360910c5 100644 --- a/arch/x86/include/asm/sections.h +++ b/arch/x86/include/asm/sections.h @@ -6,7 +6,6 @@ #include <asm/extable.h> extern char __brk_base[], __brk_limit[]; -extern struct exception_table_entry __stop___ex_table[]; extern char __end_rodata_aligned[]; #if defined(CONFIG_X86_64) diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index e95e95960156..daf88f8143c5 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -9,8 +9,7 @@ .code32 ALIGN -ENTRY(wakeup_pmode_return) -wakeup_pmode_return: +SYM_CODE_START(wakeup_pmode_return) movw $__KERNEL_DS, %ax movw %ax, %ss movw %ax, %fs @@ -39,6 +38,7 @@ wakeup_pmode_return: # jump to place where we left off movl saved_eip, %eax jmp *%eax +SYM_CODE_END(wakeup_pmode_return) bogus_magic: jmp bogus_magic @@ -72,7 
+72,7 @@ restore_registers: popfl ret -ENTRY(do_suspend_lowlevel) +SYM_CODE_START(do_suspend_lowlevel) call save_processor_state call save_registers pushl $3 @@ -87,10 +87,11 @@ ret_point: call restore_registers call restore_processor_state ret +SYM_CODE_END(do_suspend_lowlevel) .data ALIGN -ENTRY(saved_magic) .long 0 +SYM_DATA(saved_magic, .long 0) saved_eip: .long 0 # saved registers diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 7f9ade13bbcf..c8daa92f38dc 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -14,7 +14,7 @@ /* * Hooray, we are in Long 64-bit mode (but still running in low memory) */ -ENTRY(wakeup_long64) +SYM_FUNC_START(wakeup_long64) movq saved_magic, %rax movq $0x123456789abcdef0, %rdx cmpq %rdx, %rax @@ -40,9 +40,9 @@ ENTRY(wakeup_long64) movq saved_rip, %rax jmp *%rax -ENDPROC(wakeup_long64) +SYM_FUNC_END(wakeup_long64) -ENTRY(do_suspend_lowlevel) +SYM_FUNC_START(do_suspend_lowlevel) FRAME_BEGIN subq $8, %rsp xorl %eax, %eax @@ -125,7 +125,7 @@ ENTRY(do_suspend_lowlevel) addq $8, %rsp FRAME_END jmp restore_processor_state -ENDPROC(do_suspend_lowlevel) +SYM_FUNC_END(do_suspend_lowlevel) .data saved_rbp: .quad 0 @@ -136,4 +136,4 @@ saved_rbx: .quad 0 saved_rip: .quad 0 saved_rsp: .quad 0 -ENTRY(saved_magic) .quad 0 +SYM_DATA(saved_magic, .quad 0) diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index 073aab525d80..e8a9f8370112 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S @@ -12,20 +12,18 @@ #include <asm/frame.h> #include <asm/asm-offsets.h> -# define function_hook __fentry__ -EXPORT_SYMBOL(__fentry__) - #ifdef CONFIG_FRAME_POINTER # define MCOUNT_FRAME 1 /* using frame = true */ #else # define MCOUNT_FRAME 0 /* using frame = false */ #endif -ENTRY(function_hook) +SYM_FUNC_START(__fentry__) ret -END(function_hook) +SYM_FUNC_END(__fentry__) +EXPORT_SYMBOL(__fentry__) -ENTRY(ftrace_caller) +SYM_CODE_START(ftrace_caller) #ifdef CONFIG_FRAME_POINTER /* @@ -85,11 +83,11 @@ ftrace_graph_call: #endif /* This is weak to keep gas from relaxing the jumps */ -WEAK(ftrace_stub) +SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) ret -END(ftrace_caller) +SYM_CODE_END(ftrace_caller) -ENTRY(ftrace_regs_caller) +SYM_CODE_START(ftrace_regs_caller) /* * We're here from an mcount/fentry CALL, and the stack frame looks like: * @@ -138,7 +136,7 @@ ENTRY(ftrace_regs_caller) movl function_trace_op, %ecx # 3rd argument: ftrace_pos pushl %esp # 4th argument: pt_regs -GLOBAL(ftrace_regs_call) +SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub addl $4, %esp # skip 4th argument @@ -163,9 +161,10 @@ GLOBAL(ftrace_regs_call) popl %eax jmp .Lftrace_ret +SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) +SYM_CODE_START(ftrace_graph_caller) pushl %eax pushl %ecx pushl %edx @@ -179,7 +178,7 @@ ENTRY(ftrace_graph_caller) popl %ecx popl %eax ret -END(ftrace_graph_caller) +SYM_CODE_END(ftrace_graph_caller) .globl return_to_handler return_to_handler: diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 809d54397dba..6e8961ca3605 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -14,9 +14,6 @@ .code64 .section .entry.text, "ax" -# define function_hook __fentry__ -EXPORT_SYMBOL(__fentry__) - #ifdef CONFIG_FRAME_POINTER /* Save parent and function stack frames (rip and rbp) */ # define MCOUNT_FRAME_SIZE (8+16*2) @@ -132,22 +129,23 @@ EXPORT_SYMBOL(__fentry__) #ifdef 
CONFIG_DYNAMIC_FTRACE -ENTRY(function_hook) +SYM_FUNC_START(__fentry__) retq -ENDPROC(function_hook) +SYM_FUNC_END(__fentry__) +EXPORT_SYMBOL(__fentry__) -ENTRY(ftrace_caller) +SYM_FUNC_START(ftrace_caller) /* save_mcount_regs fills in first two parameters */ save_mcount_regs -GLOBAL(ftrace_caller_op_ptr) +SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx /* regs go into 4th parameter (but make it NULL) */ movq $0, %rcx -GLOBAL(ftrace_call) +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) call ftrace_stub restore_mcount_regs @@ -157,10 +155,10 @@ GLOBAL(ftrace_call) * think twice before adding any new code or changing the * layout here. */ -GLOBAL(ftrace_epilogue) +SYM_INNER_LABEL(ftrace_epilogue, SYM_L_GLOBAL) #ifdef CONFIG_FUNCTION_GRAPH_TRACER -GLOBAL(ftrace_graph_call) +SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) jmp ftrace_stub #endif @@ -168,11 +166,11 @@ GLOBAL(ftrace_graph_call) * This is weak to keep gas from relaxing the jumps. * It is also used to copy the retq for trampolines. */ -WEAK(ftrace_stub) +SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK) retq -ENDPROC(ftrace_caller) +SYM_FUNC_END(ftrace_caller) -ENTRY(ftrace_regs_caller) +SYM_FUNC_START(ftrace_regs_caller) /* Save the current flags before any operations that can change them */ pushfq @@ -180,7 +178,7 @@ ENTRY(ftrace_regs_caller) save_mcount_regs 8 /* save_mcount_regs fills in first two parameters */ -GLOBAL(ftrace_regs_caller_op_ptr) +SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL) /* Load the ftrace_ops into the 3rd parameter */ movq function_trace_op(%rip), %rdx @@ -209,7 +207,7 @@ GLOBAL(ftrace_regs_caller_op_ptr) /* regs go into 4th parameter */ leaq (%rsp), %rcx -GLOBAL(ftrace_regs_call) +SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub /* Copy flags back to SS, to restore them */ @@ -239,16 +237,16 @@ GLOBAL(ftrace_regs_call) * The trampoline will add the code to jump * to the return. */ -GLOBAL(ftrace_regs_caller_end) +SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL) jmp ftrace_epilogue -ENDPROC(ftrace_regs_caller) +SYM_FUNC_END(ftrace_regs_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ -ENTRY(function_hook) +SYM_FUNC_START(__fentry__) cmpq $ftrace_stub, ftrace_trace_function jnz trace @@ -261,7 +259,7 @@ fgraph_trace: jnz ftrace_graph_caller #endif -GLOBAL(ftrace_stub) +SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL) retq trace: @@ -279,11 +277,12 @@ trace: restore_mcount_regs jmp fgraph_trace -ENDPROC(function_hook) +SYM_FUNC_END(__fentry__) +EXPORT_SYMBOL(__fentry__) #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER -ENTRY(ftrace_graph_caller) +SYM_FUNC_START(ftrace_graph_caller) /* Saves rbp into %rdx and fills first parameter */ save_mcount_regs @@ -294,9 +293,9 @@ ENTRY(ftrace_graph_caller) restore_mcount_regs retq -ENDPROC(ftrace_graph_caller) +SYM_FUNC_END(ftrace_graph_caller) -ENTRY(return_to_handler) +SYM_CODE_START(return_to_handler) UNWIND_HINT_EMPTY subq $24, %rsp @@ -312,5 +311,5 @@ ENTRY(return_to_handler) movq (%rsp), %rax addq $24, %rsp JMP_NOSPEC %rdi -END(return_to_handler) +SYM_CODE_END(return_to_handler) #endif diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 2e6a0676c1f4..3923ab4630d7 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) * can. 
*/ __HEAD -ENTRY(startup_32) +SYM_CODE_START(startup_32) movl pa(initial_stack),%ecx /* test KEEP_SEGMENTS flag to see if the bootloader is asking @@ -156,7 +156,7 @@ ENTRY(startup_32) jmp *%eax .Lbad_subarch: -WEAK(xen_entry) +SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK) /* Unknown implementation; there's really nothing we can do at this point. */ ud2a @@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4 #else jmp .Ldefault_entry #endif /* CONFIG_PARAVIRT */ +SYM_CODE_END(startup_32) #ifdef CONFIG_HOTPLUG_CPU /* @@ -179,12 +180,12 @@ num_subarch_entries = (. - subarch_entries) / 4 * up already except stack. We just set up stack here. Then call * start_secondary(). */ -ENTRY(start_cpu0) +SYM_FUNC_START(start_cpu0) movl initial_stack, %ecx movl %ecx, %esp call *(initial_code) 1: jmp 1b -ENDPROC(start_cpu0) +SYM_FUNC_END(start_cpu0) #endif /* @@ -195,7 +196,7 @@ ENDPROC(start_cpu0) * If cpu hotplug is not supported then this code can go in init section * which will be freed later */ -ENTRY(startup_32_smp) +SYM_FUNC_START(startup_32_smp) cld movl $(__BOOT_DS),%eax movl %eax,%ds @@ -362,7 +363,7 @@ ENTRY(startup_32_smp) call *(initial_code) 1: jmp 1b -ENDPROC(startup_32_smp) +SYM_FUNC_END(startup_32_smp) #include "verify_cpu.S" @@ -392,7 +393,7 @@ setup_once: andl $0,setup_once_ref /* Once is enough, thanks */ ret -ENTRY(early_idt_handler_array) +SYM_FUNC_START(early_idt_handler_array) # 36(%esp) %eflags # 32(%esp) %cs # 28(%esp) %eip @@ -407,9 +408,9 @@ ENTRY(early_idt_handler_array) i = i + 1 .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr -ENDPROC(early_idt_handler_array) +SYM_FUNC_END(early_idt_handler_array) -early_idt_handler_common: +SYM_CODE_START_LOCAL(early_idt_handler_common) /* * The stack is the hardware frame, an error code or zero, and the * vector number. @@ -460,10 +461,10 @@ early_idt_handler_common: decl %ss:early_recursion_flag addl $4, %esp /* pop pt_regs->orig_ax */ iret -ENDPROC(early_idt_handler_common) +SYM_CODE_END(early_idt_handler_common) /* This is the default interrupt "handler" :-) */ -ENTRY(early_ignore_irq) +SYM_FUNC_START(early_ignore_irq) cld #ifdef CONFIG_PRINTK pushl %eax @@ -498,19 +499,16 @@ ENTRY(early_ignore_irq) hlt_loop: hlt jmp hlt_loop -ENDPROC(early_ignore_irq) +SYM_FUNC_END(early_ignore_irq) __INITDATA .align 4 -GLOBAL(early_recursion_flag) - .long 0 +SYM_DATA(early_recursion_flag, .long 0) __REFDATA .align 4 -ENTRY(initial_code) - .long i386_start_kernel -ENTRY(setup_once_ref) - .long setup_once +SYM_DATA(initial_code, .long i386_start_kernel) +SYM_DATA(setup_once_ref, .long setup_once) #ifdef CONFIG_PAGE_TABLE_ISOLATION #define PGD_ALIGN (2 * PAGE_SIZE) @@ -553,7 +551,7 @@ EXPORT_SYMBOL(empty_zero_page) __PAGE_ALIGNED_DATA /* Page-aligned for the benefit of paravirt? */ .align PGD_ALIGN -ENTRY(initial_page_table) +SYM_DATA_START(initial_page_table) .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ # if KPMDS == 3 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 @@ -581,17 +579,18 @@ ENTRY(initial_page_table) .fill 1024, 4, 0 #endif +SYM_DATA_END(initial_page_table) #endif .data .balign 4 -ENTRY(initial_stack) - /* - * The SIZEOF_PTREGS gap is a convention which helps the in-kernel - * unwinder reliably detect the end of the stack. - */ - .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \ - TOP_OF_KERNEL_STACK_PADDING; +/* + * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder + * reliably detect the end of the stack. 
+ */ +SYM_DATA(initial_stack, + .long init_thread_union + THREAD_SIZE - + SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING) __INITRODATA int_msg: @@ -607,27 +606,28 @@ int_msg: */ .data -.globl boot_gdt_descr - ALIGN # early boot GDT descriptor (must use 1:1 address mapping) .word 0 # 32 bit align gdt_desc.address -boot_gdt_descr: +SYM_DATA_START_LOCAL(boot_gdt_descr) .word __BOOT_DS+7 .long boot_gdt - __PAGE_OFFSET +SYM_DATA_END(boot_gdt_descr) # boot GDT descriptor (later on used by CPU#0): .word 0 # 32 bit align gdt_desc.address -ENTRY(early_gdt_descr) +SYM_DATA_START(early_gdt_descr) .word GDT_ENTRIES*8-1 .long gdt_page /* Overwritten for secondary CPUs */ +SYM_DATA_END(early_gdt_descr) /* * The boot_gdt must mirror the equivalent in setup.S and is * used only for booting. */ .align L1_CACHE_BYTES -ENTRY(boot_gdt) +SYM_DATA_START(boot_gdt) .fill GDT_ENTRY_BOOT_CS,8,0 .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ +SYM_DATA_END(boot_gdt) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index f3d3e9646a99..4bbc770af632 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -49,8 +49,7 @@ L3_START_KERNEL = pud_index(__START_KERNEL_map) .text __HEAD .code64 - .globl startup_64 -startup_64: +SYM_CODE_START_NOALIGN(startup_64) UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, @@ -90,7 +89,9 @@ startup_64: /* Form the CR3 value being sure to include the CR3 modifier */ addq $(early_top_pgt - __START_KERNEL_map), %rax jmp 1f -ENTRY(secondary_startup_64) +SYM_CODE_END(startup_64) + +SYM_CODE_START(secondary_startup_64) UNWIND_HINT_EMPTY /* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, @@ -240,7 +241,7 @@ ENTRY(secondary_startup_64) pushq %rax # target address in negative space lretq .Lafter_lret: -END(secondary_startup_64) +SYM_CODE_END(secondary_startup_64) #include "verify_cpu.S" @@ -250,30 +251,28 @@ END(secondary_startup_64) * up already except stack. We just set up stack here. Then call * start_secondary() via .Ljump_to_C_code. */ -ENTRY(start_cpu0) +SYM_CODE_START(start_cpu0) UNWIND_HINT_EMPTY movq initial_stack(%rip), %rsp jmp .Ljump_to_C_code -END(start_cpu0) +SYM_CODE_END(start_cpu0) #endif /* Both SMP bootup and ACPI suspend change these variables */ __REFDATA .balign 8 - GLOBAL(initial_code) - .quad x86_64_start_kernel - GLOBAL(initial_gs) - .quad INIT_PER_CPU_VAR(fixed_percpu_data) - GLOBAL(initial_stack) - /* - * The SIZEOF_PTREGS gap is a convention which helps the in-kernel - * unwinder reliably detect the end of the stack. - */ - .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS +SYM_DATA(initial_code, .quad x86_64_start_kernel) +SYM_DATA(initial_gs, .quad INIT_PER_CPU_VAR(fixed_percpu_data)) + +/* + * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder + * reliably detect the end of the stack. 
+ */ +SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS) __FINITDATA __INIT -ENTRY(early_idt_handler_array) +SYM_CODE_START(early_idt_handler_array) i = 0 .rept NUM_EXCEPTION_VECTORS .if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0 @@ -289,9 +288,9 @@ ENTRY(early_idt_handler_array) .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc .endr UNWIND_HINT_IRET_REGS offset=16 -END(early_idt_handler_array) +SYM_CODE_END(early_idt_handler_array) -early_idt_handler_common: +SYM_CODE_START_LOCAL(early_idt_handler_common) /* * The stack is the hardware frame, an error code or zero, and the * vector number. @@ -333,17 +332,11 @@ early_idt_handler_common: 20: decl early_recursion_flag(%rip) jmp restore_regs_and_return_to_kernel -END(early_idt_handler_common) +SYM_CODE_END(early_idt_handler_common) - __INITDATA - .balign 4 -GLOBAL(early_recursion_flag) - .long 0 - -#define NEXT_PAGE(name) \ - .balign PAGE_SIZE; \ -GLOBAL(name) +#define SYM_DATA_START_PAGE_ALIGNED(name) \ + SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE) #ifdef CONFIG_PAGE_TABLE_ISOLATION /* @@ -358,11 +351,11 @@ GLOBAL(name) */ #define PTI_USER_PGD_FILL 512 /* This ensures they are 8k-aligned: */ -#define NEXT_PGD_PAGE(name) \ - .balign 2 * PAGE_SIZE; \ -GLOBAL(name) +#define SYM_DATA_START_PTI_ALIGNED(name) \ + SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE) #else -#define NEXT_PGD_PAGE(name) NEXT_PAGE(name) +#define SYM_DATA_START_PTI_ALIGNED(name) \ + SYM_DATA_START_PAGE_ALIGNED(name) #define PTI_USER_PGD_FILL 0 #endif @@ -375,17 +368,23 @@ GLOBAL(name) .endr __INITDATA -NEXT_PGD_PAGE(early_top_pgt) + .balign 4 + +SYM_DATA_START_PTI_ALIGNED(early_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 +SYM_DATA_END(early_top_pgt) -NEXT_PAGE(early_dynamic_pgts) +SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts) .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0 +SYM_DATA_END(early_dynamic_pgts) + +SYM_DATA(early_recursion_flag, .long 0) .data #if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH) -NEXT_PGD_PAGE(init_top_pgt) +SYM_DATA_START_PTI_ALIGNED(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .org init_top_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC @@ -393,11 +392,13 @@ NEXT_PGD_PAGE(init_top_pgt) /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 +SYM_DATA_END(init_top_pgt) -NEXT_PAGE(level3_ident_pgt) +SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .fill 511, 8, 0 -NEXT_PAGE(level2_ident_pgt) +SYM_DATA_END(level3_ident_pgt) +SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt) /* * Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. @@ -407,25 +408,29 @@ NEXT_PAGE(level2_ident_pgt) * the CPU should ignore the bit. 
*/ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) +SYM_DATA_END(level2_ident_pgt) #else -NEXT_PGD_PAGE(init_top_pgt) +SYM_DATA_START_PTI_ALIGNED(init_top_pgt) .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 +SYM_DATA_END(init_top_pgt) #endif #ifdef CONFIG_X86_5LEVEL -NEXT_PAGE(level4_kernel_pgt) +SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt) .fill 511,8,0 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC +SYM_DATA_END(level4_kernel_pgt) #endif -NEXT_PAGE(level3_kernel_pgt) +SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt) .fill L3_START_KERNEL,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC +SYM_DATA_END(level3_kernel_pgt) -NEXT_PAGE(level2_kernel_pgt) +SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt) /* * 512 MB kernel mapping. We spend a full page on this pagetable * anyway. @@ -442,8 +447,9 @@ NEXT_PAGE(level2_kernel_pgt) */ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) +SYM_DATA_END(level2_kernel_pgt) -NEXT_PAGE(level2_fixmap_pgt) +SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt) .fill (512 - 4 - FIXMAP_PMD_NUM),8,0 pgtno = 0 .rept (FIXMAP_PMD_NUM) @@ -453,31 +459,32 @@ NEXT_PAGE(level2_fixmap_pgt) .endr /* 6 MB reserved space + a 2MB hole */ .fill 4,8,0 +SYM_DATA_END(level2_fixmap_pgt) -NEXT_PAGE(level1_fixmap_pgt) +SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt) .rept (FIXMAP_PMD_NUM) .fill 512,8,0 .endr +SYM_DATA_END(level1_fixmap_pgt) #undef PMDS .data .align 16 - .globl early_gdt_descr -early_gdt_descr: - .word GDT_ENTRIES*8-1 -early_gdt_descr_base: - .quad INIT_PER_CPU_VAR(gdt_page) - -ENTRY(phys_base) - /* This must match the first entry in level2_kernel_pgt */ - .quad 0x0000000000000000 + +SYM_DATA(early_gdt_descr, .word GDT_ENTRIES*8-1) +SYM_DATA_LOCAL(early_gdt_descr_base, .quad INIT_PER_CPU_VAR(gdt_page)) + + .align 16 +/* This must match the first entry in level2_kernel_pgt */ +SYM_DATA(phys_base, .quad 0x0) EXPORT_SYMBOL(phys_base) #include "../../x86/xen/xen-head.S" __PAGE_ALIGNED_BSS -NEXT_PAGE(empty_zero_page) +SYM_DATA_START_PAGE_ALIGNED(empty_zero_page) .skip PAGE_SIZE +SYM_DATA_END(empty_zero_page) EXPORT_SYMBOL(empty_zero_page) diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S index ddeeaac8adda..0db0375235b4 100644 --- a/arch/x86/kernel/irqflags.S +++ b/arch/x86/kernel/irqflags.S @@ -7,20 +7,20 @@ /* * unsigned long native_save_fl(void) */ -ENTRY(native_save_fl) +SYM_FUNC_START(native_save_fl) pushf pop %_ASM_AX ret -ENDPROC(native_save_fl) +SYM_FUNC_END(native_save_fl) EXPORT_SYMBOL(native_save_fl) /* * void native_restore_fl(unsigned long flags) * %eax/%rdi: flags */ -ENTRY(native_restore_fl) +SYM_FUNC_START(native_restore_fl) push %_ASM_ARG1 popf ret -ENDPROC(native_restore_fl) +SYM_FUNC_END(native_restore_fl) EXPORT_SYMBOL(native_restore_fl) diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S index ee26df08002e..94b33885f8d2 100644 --- a/arch/x86/kernel/relocate_kernel_32.S +++ b/arch/x86/kernel/relocate_kernel_32.S @@ -35,8 +35,7 @@ #define CP_PA_BACKUP_PAGES_MAP DATA(0x1c) .text - .globl relocate_kernel -relocate_kernel: +SYM_CODE_START_NOALIGN(relocate_kernel) /* Save the CPU context, used for jumping back */ pushl %ebx @@ -93,8 +92,9 @@ relocate_kernel: addl $(identity_mapped - relocate_kernel), %eax pushl %eax ret +SYM_CODE_END(relocate_kernel) -identity_mapped: +SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) /* set return address to 
0 if not preserving context */ pushl $0 /* store the start address on the stack */ @@ -191,8 +191,9 @@ identity_mapped: addl $(virtual_mapped - relocate_kernel), %eax pushl %eax ret +SYM_CODE_END(identity_mapped) -virtual_mapped: +SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) movl CR4(%edi), %eax movl %eax, %cr4 movl CR3(%edi), %eax @@ -208,9 +209,10 @@ virtual_mapped: popl %esi popl %ebx ret +SYM_CODE_END(virtual_mapped) /* Do the copies */ -swap_pages: +SYM_CODE_START_LOCAL_NOALIGN(swap_pages) movl 8(%esp), %edx movl 4(%esp), %ecx pushl %ebp @@ -270,6 +272,7 @@ swap_pages: popl %ebx popl %ebp ret +SYM_CODE_END(swap_pages) .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index c51ccff5cd01..ef3ba99068d3 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -38,8 +38,7 @@ .text .align PAGE_SIZE .code64 - .globl relocate_kernel -relocate_kernel: +SYM_CODE_START_NOALIGN(relocate_kernel) /* * %rdi indirection_page * %rsi page_list @@ -103,8 +102,9 @@ relocate_kernel: addq $(identity_mapped - relocate_kernel), %r8 pushq %r8 ret +SYM_CODE_END(relocate_kernel) -identity_mapped: +SYM_CODE_START_LOCAL_NOALIGN(identity_mapped) /* set return address to 0 if not preserving context */ pushq $0 /* store the start address on the stack */ @@ -209,8 +209,9 @@ identity_mapped: movq $virtual_mapped, %rax pushq %rax ret +SYM_CODE_END(identity_mapped) -virtual_mapped: +SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped) movq RSP(%r8), %rsp movq CR4(%r8), %rax movq %rax, %cr4 @@ -228,9 +229,10 @@ virtual_mapped: popq %rbp popq %rbx ret +SYM_CODE_END(virtual_mapped) /* Do the copies */ -swap_pages: +SYM_CODE_START_LOCAL_NOALIGN(swap_pages) movq %rdi, %rcx /* Put the page_list in %rcx */ xorl %edi, %edi xorl %esi, %esi @@ -283,6 +285,7 @@ swap_pages: jmp 0b 3: ret +SYM_CODE_END(swap_pages) .globl kexec_control_code_size .set kexec_control_code_size, . - relocate_kernel diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 7f4dae49cc49..d398afd206b8 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -143,6 +143,13 @@ struct boot_params boot_params; /* * Machine setup.. 
*/ +static struct resource rodata_resource = { + .name = "Kernel rodata", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + static struct resource data_resource = { .name = "Kernel data", .start = 0, @@ -957,7 +964,9 @@ void __init setup_arch(char **cmdline_p) code_resource.start = __pa_symbol(_text); code_resource.end = __pa_symbol(_etext)-1; - data_resource.start = __pa_symbol(_etext); + rodata_resource.start = __pa_symbol(__start_rodata); + rodata_resource.end = __pa_symbol(__end_rodata)-1; + data_resource.start = __pa_symbol(_sdata); data_resource.end = __pa_symbol(_edata)-1; bss_resource.start = __pa_symbol(__bss_start); bss_resource.end = __pa_symbol(__bss_stop)-1; @@ -1046,6 +1055,7 @@ void __init setup_arch(char **cmdline_p) /* after parse_early_param, so could debug it */ insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &rodata_resource); insert_resource(&iomem_resource, &data_resource); insert_resource(&iomem_resource, &bss_resource); diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S index a024c4f7ba56..641f0fe1e5b4 100644 --- a/arch/x86/kernel/verify_cpu.S +++ b/arch/x86/kernel/verify_cpu.S @@ -31,7 +31,7 @@ #include <asm/cpufeatures.h> #include <asm/msr-index.h> -ENTRY(verify_cpu) +SYM_FUNC_START_LOCAL(verify_cpu) pushf # Save caller passed flags push $0 # Kill any dangerous flags popf @@ -137,4 +137,4 @@ ENTRY(verify_cpu) popf # Restore caller passed flags xorl %eax, %eax ret -ENDPROC(verify_cpu) +SYM_FUNC_END(verify_cpu) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index e2feacf921a0..3a1a819da137 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -21,6 +21,9 @@ #define LOAD_OFFSET __START_KERNEL_map #endif +#define EMITS_PT_NOTE +#define RO_EXCEPTION_TABLE_ALIGN 16 + #include <asm-generic/vmlinux.lds.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> @@ -141,17 +144,12 @@ SECTIONS *(.text.__x86.indirect_thunk) __indirect_thunk_end = .; #endif + } :text =0xcccc - /* End of text section */ - _etext = .; - } :text = 0x9090 - - NOTES :text :note - - EXCEPTION_TABLE(16) :text = 0x9090 - - /* .text should occupy whole number of pages */ + /* End of text section, which should occupy whole number of pages */ + _etext = .; . = ALIGN(PAGE_SIZE); + X86_ALIGN_RODATA_BEGIN RO_DATA(PAGE_SIZE) X86_ALIGN_RODATA_END diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 751a384c2eb0..81ada2ce99e7 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -43,7 +43,7 @@ * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump * to vmx_vmexit. */ -ENTRY(vmx_vmenter) +SYM_FUNC_START(vmx_vmenter) /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */ je 2f @@ -65,7 +65,7 @@ ENTRY(vmx_vmenter) _ASM_EXTABLE(1b, 5b) _ASM_EXTABLE(2b, 5b) -ENDPROC(vmx_vmenter) +SYM_FUNC_END(vmx_vmenter) /** * vmx_vmexit - Handle a VMX VM-Exit @@ -77,7 +77,7 @@ ENDPROC(vmx_vmenter) * here after hardware loads the host's state, i.e. this is the destination * referred to by VMCS.HOST_RIP. */ -ENTRY(vmx_vmexit) +SYM_FUNC_START(vmx_vmexit) #ifdef CONFIG_RETPOLINE ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE /* Preserve guest's RAX, it's used to stuff the RSB. 
*/ @@ -90,7 +90,7 @@ ENTRY(vmx_vmexit) .Lvmexit_skip_rsb: #endif ret -ENDPROC(vmx_vmexit) +SYM_FUNC_END(vmx_vmexit) /** * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode @@ -101,7 +101,7 @@ ENDPROC(vmx_vmexit) * Returns: * 0 on VM-Exit, 1 on VM-Fail */ -ENTRY(__vmx_vcpu_run) +SYM_FUNC_START(__vmx_vcpu_run) push %_ASM_BP mov %_ASM_SP, %_ASM_BP #ifdef CONFIG_X86_64 @@ -233,4 +233,4 @@ ENTRY(__vmx_vcpu_run) /* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */ 2: mov $1, %eax jmp 1b -ENDPROC(__vmx_vcpu_run) +SYM_FUNC_END(__vmx_vcpu_run) diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index e0788bade5ab..3b6544111ac9 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -20,10 +20,10 @@ #define BEGIN(op) \ .macro endp; \ -ENDPROC(atomic64_##op##_386); \ +SYM_FUNC_END(atomic64_##op##_386); \ .purgem endp; \ .endm; \ -ENTRY(atomic64_##op##_386); \ +SYM_FUNC_START(atomic64_##op##_386); \ LOCK v; #define ENDP endp diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S index 843d978ee341..1c5c81c16b06 100644 --- a/arch/x86/lib/atomic64_cx8_32.S +++ b/arch/x86/lib/atomic64_cx8_32.S @@ -16,12 +16,12 @@ cmpxchg8b (\reg) .endm -ENTRY(atomic64_read_cx8) +SYM_FUNC_START(atomic64_read_cx8) read64 %ecx ret -ENDPROC(atomic64_read_cx8) +SYM_FUNC_END(atomic64_read_cx8) -ENTRY(atomic64_set_cx8) +SYM_FUNC_START(atomic64_set_cx8) 1: /* we don't need LOCK_PREFIX since aligned 64-bit writes * are atomic on 586 and newer */ @@ -29,19 +29,19 @@ ENTRY(atomic64_set_cx8) jne 1b ret -ENDPROC(atomic64_set_cx8) +SYM_FUNC_END(atomic64_set_cx8) -ENTRY(atomic64_xchg_cx8) +SYM_FUNC_START(atomic64_xchg_cx8) 1: LOCK_PREFIX cmpxchg8b (%esi) jne 1b ret -ENDPROC(atomic64_xchg_cx8) +SYM_FUNC_END(atomic64_xchg_cx8) .macro addsub_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebp pushl %ebx pushl %esi @@ -69,14 +69,14 @@ ENTRY(atomic64_\func\()_return_cx8) popl %ebx popl %ebp ret -ENDPROC(atomic64_\func\()_return_cx8) +SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm addsub_return add add adc addsub_return sub sub sbb .macro incdec_return func ins insc -ENTRY(atomic64_\func\()_return_cx8) +SYM_FUNC_START(atomic64_\func\()_return_cx8) pushl %ebx read64 %esi @@ -94,13 +94,13 @@ ENTRY(atomic64_\func\()_return_cx8) movl %ecx, %edx popl %ebx ret -ENDPROC(atomic64_\func\()_return_cx8) +SYM_FUNC_END(atomic64_\func\()_return_cx8) .endm incdec_return inc add adc incdec_return dec sub sbb -ENTRY(atomic64_dec_if_positive_cx8) +SYM_FUNC_START(atomic64_dec_if_positive_cx8) pushl %ebx read64 %esi @@ -119,9 +119,9 @@ ENTRY(atomic64_dec_if_positive_cx8) movl %ecx, %edx popl %ebx ret -ENDPROC(atomic64_dec_if_positive_cx8) +SYM_FUNC_END(atomic64_dec_if_positive_cx8) -ENTRY(atomic64_add_unless_cx8) +SYM_FUNC_START(atomic64_add_unless_cx8) pushl %ebp pushl %ebx /* these just push these two parameters on the stack */ @@ -155,9 +155,9 @@ ENTRY(atomic64_add_unless_cx8) jne 2b xorl %eax, %eax jmp 3b -ENDPROC(atomic64_add_unless_cx8) +SYM_FUNC_END(atomic64_add_unless_cx8) -ENTRY(atomic64_inc_not_zero_cx8) +SYM_FUNC_START(atomic64_inc_not_zero_cx8) pushl %ebx read64 %esi @@ -177,4 +177,4 @@ ENTRY(atomic64_inc_not_zero_cx8) 3: popl %ebx ret -ENDPROC(atomic64_inc_not_zero_cx8) +SYM_FUNC_END(atomic64_inc_not_zero_cx8) diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4df90c9ea383..4742e8fa7ee7 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S @@ 
-46,7 +46,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) * Fortunately, it is easy to convert 2-byte alignment to 4-byte * alignment for the unrolled loop. */ -ENTRY(csum_partial) +SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum @@ -128,13 +128,13 @@ ENTRY(csum_partial) popl %ebx popl %esi ret -ENDPROC(csum_partial) +SYM_FUNC_END(csum_partial) #else /* Version for PentiumII/PPro */ -ENTRY(csum_partial) +SYM_FUNC_START(csum_partial) pushl %esi pushl %ebx movl 20(%esp),%eax # Function arg: unsigned int sum @@ -246,7 +246,7 @@ ENTRY(csum_partial) popl %ebx popl %esi ret -ENDPROC(csum_partial) +SYM_FUNC_END(csum_partial) #endif EXPORT_SYMBOL(csum_partial) @@ -280,7 +280,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, #define ARGBASE 16 #define FP 12 -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) subl $4,%esp pushl %edi pushl %esi @@ -398,7 +398,7 @@ DST( movb %cl, (%edi) ) popl %edi popl %ecx # equivalent to addl $4,%esp ret -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) #else @@ -416,7 +416,7 @@ ENDPROC(csum_partial_copy_generic) #define ARGBASE 12 -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) pushl %ebx pushl %edi pushl %esi @@ -483,7 +483,7 @@ DST( movb %dl, (%edi) ) popl %edi popl %ebx ret -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) #undef ROUND #undef ROUND1 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index 75a5a4515fa7..c4c7dd115953 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -13,15 +13,15 @@ * Zero a page. * %rdi - page */ -ENTRY(clear_page_rep) +SYM_FUNC_START(clear_page_rep) movl $4096/8,%ecx xorl %eax,%eax rep stosq ret -ENDPROC(clear_page_rep) +SYM_FUNC_END(clear_page_rep) EXPORT_SYMBOL_GPL(clear_page_rep) -ENTRY(clear_page_orig) +SYM_FUNC_START(clear_page_orig) xorl %eax,%eax movl $4096/64,%ecx .p2align 4 @@ -40,13 +40,13 @@ ENTRY(clear_page_orig) jnz .Lloop nop ret -ENDPROC(clear_page_orig) +SYM_FUNC_END(clear_page_orig) EXPORT_SYMBOL_GPL(clear_page_orig) -ENTRY(clear_page_erms) +SYM_FUNC_START(clear_page_erms) movl $4096,%ecx xorl %eax,%eax rep stosb ret -ENDPROC(clear_page_erms) +SYM_FUNC_END(clear_page_erms) EXPORT_SYMBOL_GPL(clear_page_erms) diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S index d63185698a23..3542502faa3b 100644 --- a/arch/x86/lib/cmpxchg16b_emu.S +++ b/arch/x86/lib/cmpxchg16b_emu.S @@ -13,7 +13,7 @@ * %rcx : high 64 bits of new value * %al : Operation successful */ -ENTRY(this_cpu_cmpxchg16b_emu) +SYM_FUNC_START(this_cpu_cmpxchg16b_emu) # # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not @@ -44,4 +44,4 @@ ENTRY(this_cpu_cmpxchg16b_emu) xor %al,%al ret -ENDPROC(this_cpu_cmpxchg16b_emu) +SYM_FUNC_END(this_cpu_cmpxchg16b_emu) diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S index 691d80e97488..ca01ed6029f4 100644 --- a/arch/x86/lib/cmpxchg8b_emu.S +++ b/arch/x86/lib/cmpxchg8b_emu.S @@ -13,7 +13,7 @@ * %ebx : low 32 bits of new value * %ecx : high 32 bits of new value */ -ENTRY(cmpxchg8b_emu) +SYM_FUNC_START(cmpxchg8b_emu) # # Emulate 'cmpxchg8b (%esi)' on UP except we don't @@ -42,5 +42,5 @@ ENTRY(cmpxchg8b_emu) popfl ret -ENDPROC(cmpxchg8b_emu) +SYM_FUNC_END(cmpxchg8b_emu) EXPORT_SYMBOL(cmpxchg8b_emu) diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S index 
fd2d09afa097..2402d4c489d2 100644 --- a/arch/x86/lib/copy_page_64.S +++ b/arch/x86/lib/copy_page_64.S @@ -13,15 +13,15 @@ * prefetch distance based on SMP/UP. */ ALIGN -ENTRY(copy_page) +SYM_FUNC_START(copy_page) ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD movl $4096/8, %ecx rep movsq ret -ENDPROC(copy_page) +SYM_FUNC_END(copy_page) EXPORT_SYMBOL(copy_page) -ENTRY(copy_page_regs) +SYM_FUNC_START_LOCAL(copy_page_regs) subq $2*8, %rsp movq %rbx, (%rsp) movq %r12, 1*8(%rsp) @@ -86,4 +86,4 @@ ENTRY(copy_page_regs) movq 1*8(%rsp), %r12 addq $2*8, %rsp ret -ENDPROC(copy_page_regs) +SYM_FUNC_END(copy_page_regs) diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 86976b55ae74..816f128a6d52 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -53,7 +53,7 @@ * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_generic_unrolled) +SYM_FUNC_START(copy_user_generic_unrolled) ASM_STAC cmpl $8,%edx jb 20f /* less then 8 bytes, go to byte copy loop */ @@ -136,7 +136,7 @@ ENTRY(copy_user_generic_unrolled) _ASM_EXTABLE_UA(19b, 40b) _ASM_EXTABLE_UA(21b, 50b) _ASM_EXTABLE_UA(22b, 50b) -ENDPROC(copy_user_generic_unrolled) +SYM_FUNC_END(copy_user_generic_unrolled) EXPORT_SYMBOL(copy_user_generic_unrolled) /* Some CPUs run faster using the string copy instructions. @@ -157,7 +157,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled) * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_generic_string) +SYM_FUNC_START(copy_user_generic_string) ASM_STAC cmpl $8,%edx jb 2f /* less than 8 bytes, go to byte copy loop */ @@ -182,7 +182,7 @@ ENTRY(copy_user_generic_string) _ASM_EXTABLE_UA(1b, 11b) _ASM_EXTABLE_UA(3b, 12b) -ENDPROC(copy_user_generic_string) +SYM_FUNC_END(copy_user_generic_string) EXPORT_SYMBOL(copy_user_generic_string) /* @@ -197,7 +197,7 @@ EXPORT_SYMBOL(copy_user_generic_string) * Output: * eax uncopied bytes or 0 if successful. */ -ENTRY(copy_user_enhanced_fast_string) +SYM_FUNC_START(copy_user_enhanced_fast_string) ASM_STAC cmpl $64,%edx jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ @@ -214,7 +214,7 @@ ENTRY(copy_user_enhanced_fast_string) .previous _ASM_EXTABLE_UA(1b, 12b) -ENDPROC(copy_user_enhanced_fast_string) +SYM_FUNC_END(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) /* @@ -230,8 +230,7 @@ EXPORT_SYMBOL(copy_user_enhanced_fast_string) * Output: * eax uncopied bytes or 0 if successful. */ -ALIGN; -.Lcopy_user_handle_tail: +SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail) movl %edx,%ecx 1: rep movsb 2: mov %ecx,%eax @@ -239,7 +238,7 @@ ALIGN; ret _ASM_EXTABLE_UA(1b, 2b) -END(.Lcopy_user_handle_tail) +SYM_CODE_END(.Lcopy_user_handle_tail) /* * copy_user_nocache - Uncached memory copy with exception handling @@ -250,7 +249,7 @@ END(.Lcopy_user_handle_tail) * - Require 8-byte alignment when size is 8 bytes or larger. * - Require 4-byte alignment when size is 4 bytes. 
*/ -ENTRY(__copy_user_nocache) +SYM_FUNC_START(__copy_user_nocache) ASM_STAC /* If size is less than 8 bytes, go to 4-byte copy */ @@ -389,5 +388,5 @@ ENTRY(__copy_user_nocache) _ASM_EXTABLE_UA(31b, .L_fixup_4b_copy) _ASM_EXTABLE_UA(40b, .L_fixup_1b_copy) _ASM_EXTABLE_UA(41b, .L_fixup_1b_copy) -ENDPROC(__copy_user_nocache) +SYM_FUNC_END(__copy_user_nocache) EXPORT_SYMBOL(__copy_user_nocache) diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index a4a379e79259..3394a8ff7fd0 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -49,7 +49,7 @@ .endm -ENTRY(csum_partial_copy_generic) +SYM_FUNC_START(csum_partial_copy_generic) cmpl $3*64, %edx jle .Lignore @@ -225,4 +225,4 @@ ENTRY(csum_partial_copy_generic) jz .Lende movl $-EFAULT, (%rax) jmp .Lende -ENDPROC(csum_partial_copy_generic) +SYM_FUNC_END(csum_partial_copy_generic) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 9578eb88fc87..c8a85b512796 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -36,7 +36,7 @@ #include <asm/export.h> .text -ENTRY(__get_user_1) +SYM_FUNC_START(__get_user_1) mov PER_CPU_VAR(current_task), %_ASM_DX cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX jae bad_get_user @@ -47,10 +47,10 @@ ENTRY(__get_user_1) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_1) +SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) -ENTRY(__get_user_2) +SYM_FUNC_START(__get_user_2) add $1,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX @@ -63,10 +63,10 @@ ENTRY(__get_user_2) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_2) +SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) -ENTRY(__get_user_4) +SYM_FUNC_START(__get_user_4) add $3,%_ASM_AX jc bad_get_user mov PER_CPU_VAR(current_task), %_ASM_DX @@ -79,10 +79,10 @@ ENTRY(__get_user_4) xor %eax,%eax ASM_CLAC ret -ENDPROC(__get_user_4) +SYM_FUNC_END(__get_user_4) EXPORT_SYMBOL(__get_user_4) -ENTRY(__get_user_8) +SYM_FUNC_START(__get_user_8) #ifdef CONFIG_X86_64 add $7,%_ASM_AX jc bad_get_user @@ -111,25 +111,27 @@ ENTRY(__get_user_8) ASM_CLAC ret #endif -ENDPROC(__get_user_8) +SYM_FUNC_END(__get_user_8) EXPORT_SYMBOL(__get_user_8) -.Lbad_get_user_clac: +SYM_CODE_START_LOCAL(.Lbad_get_user_clac) ASM_CLAC bad_get_user: xor %edx,%edx mov $(-EFAULT),%_ASM_AX ret +SYM_CODE_END(.Lbad_get_user_clac) #ifdef CONFIG_X86_32 -.Lbad_get_user_8_clac: +SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac) ASM_CLAC bad_get_user_8: xor %edx,%edx xor %ecx,%ecx mov $(-EFAULT),%_ASM_AX ret +SYM_CODE_END(.Lbad_get_user_8_clac) #endif _ASM_EXTABLE_UA(1b, .Lbad_get_user_clac) diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S index a14f9939c365..dbf8cc97b7f5 100644 --- a/arch/x86/lib/hweight.S +++ b/arch/x86/lib/hweight.S @@ -8,7 +8,7 @@ * unsigned int __sw_hweight32(unsigned int w) * %rdi: w */ -ENTRY(__sw_hweight32) +SYM_FUNC_START(__sw_hweight32) #ifdef CONFIG_X86_64 movl %edi, %eax # w @@ -33,10 +33,10 @@ ENTRY(__sw_hweight32) shrl $24, %eax # w = w_tmp >> 24 __ASM_SIZE(pop,) %__ASM_REG(dx) ret -ENDPROC(__sw_hweight32) +SYM_FUNC_END(__sw_hweight32) EXPORT_SYMBOL(__sw_hweight32) -ENTRY(__sw_hweight64) +SYM_FUNC_START(__sw_hweight64) #ifdef CONFIG_X86_64 pushq %rdi pushq %rdx @@ -79,5 +79,5 @@ ENTRY(__sw_hweight64) popl %ecx ret #endif -ENDPROC(__sw_hweight64) +SYM_FUNC_END(__sw_hweight64) EXPORT_SYMBOL(__sw_hweight64) diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S index a9bdf0805be0..cb5a1964506b 100644 --- a/arch/x86/lib/iomap_copy_64.S +++ b/arch/x86/lib/iomap_copy_64.S @@ -8,8 +8,8 @@ /* 
* override generic version in lib/iomap_copy.c */ -ENTRY(__iowrite32_copy) +SYM_FUNC_START(__iowrite32_copy) movl %edx,%ecx rep movsd ret -ENDPROC(__iowrite32_copy) +SYM_FUNC_END(__iowrite32_copy) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 92748660ba51..56b243b14c3a 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -28,8 +28,8 @@ * Output: * rax original destination */ -ENTRY(__memcpy) -ENTRY(memcpy) +SYM_FUNC_START_ALIAS(__memcpy) +SYM_FUNC_START_LOCAL(memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS @@ -41,8 +41,8 @@ ENTRY(memcpy) movl %edx, %ecx rep movsb ret -ENDPROC(memcpy) -ENDPROC(__memcpy) +SYM_FUNC_END(memcpy) +SYM_FUNC_END_ALIAS(__memcpy) EXPORT_SYMBOL(memcpy) EXPORT_SYMBOL(__memcpy) @@ -50,14 +50,14 @@ EXPORT_SYMBOL(__memcpy) * memcpy_erms() - enhanced fast string memcpy. This is faster and * simpler than memcpy. Use memcpy_erms when possible. */ -ENTRY(memcpy_erms) +SYM_FUNC_START_LOCAL(memcpy_erms) movq %rdi, %rax movq %rdx, %rcx rep movsb ret -ENDPROC(memcpy_erms) +SYM_FUNC_END(memcpy_erms) -ENTRY(memcpy_orig) +SYM_FUNC_START_LOCAL(memcpy_orig) movq %rdi, %rax cmpq $0x20, %rdx @@ -182,7 +182,7 @@ ENTRY(memcpy_orig) .Lend: retq -ENDPROC(memcpy_orig) +SYM_FUNC_END(memcpy_orig) #ifndef CONFIG_UML @@ -193,7 +193,7 @@ MCSAFE_TEST_CTL * Note that we only catch machine checks when reading the source addresses. * Writes to target are posted and don't generate machine checks. */ -ENTRY(__memcpy_mcsafe) +SYM_FUNC_START(__memcpy_mcsafe) cmpl $8, %edx /* Less than 8 bytes? Go to byte copy loop */ jb .L_no_whole_words @@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe) xorl %eax, %eax .L_done: ret -ENDPROC(__memcpy_mcsafe) +SYM_FUNC_END(__memcpy_mcsafe) EXPORT_SYMBOL_GPL(__memcpy_mcsafe) .section .fixup, "ax" diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index bbec69d8223b..337830d7a59c 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -26,8 +26,8 @@ */ .weak memmove -ENTRY(memmove) -ENTRY(__memmove) +SYM_FUNC_START_ALIAS(memmove) +SYM_FUNC_START(__memmove) /* Handle more 32 bytes in loop */ mov %rdi, %rax @@ -207,7 +207,7 @@ ENTRY(__memmove) movb %r11b, (%rdi) 13: retq -ENDPROC(__memmove) -ENDPROC(memmove) +SYM_FUNC_END(__memmove) +SYM_FUNC_END_ALIAS(memmove) EXPORT_SYMBOL(__memmove) EXPORT_SYMBOL(memmove) diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 9bc861c71e75..9ff15ee404a4 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -19,8 +19,8 @@ * * rax original destination */ -ENTRY(memset) -ENTRY(__memset) +SYM_FUNC_START_ALIAS(memset) +SYM_FUNC_START(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended * to use it when possible. If not available, use fast string instructions. 
@@ -43,8 +43,8 @@ ENTRY(__memset) rep stosb movq %r9,%rax ret -ENDPROC(memset) -ENDPROC(__memset) +SYM_FUNC_END(__memset) +SYM_FUNC_END_ALIAS(memset) EXPORT_SYMBOL(memset) EXPORT_SYMBOL(__memset) @@ -59,16 +59,16 @@ EXPORT_SYMBOL(__memset) * * rax original destination */ -ENTRY(memset_erms) +SYM_FUNC_START_LOCAL(memset_erms) movq %rdi,%r9 movb %sil,%al movq %rdx,%rcx rep stosb movq %r9,%rax ret -ENDPROC(memset_erms) +SYM_FUNC_END(memset_erms) -ENTRY(memset_orig) +SYM_FUNC_START_LOCAL(memset_orig) movq %rdi,%r10 /* expand byte value */ @@ -139,4 +139,4 @@ ENTRY(memset_orig) subq %r8,%rdx jmp .Lafter_bad_alignment .Lfinal: -ENDPROC(memset_orig) +SYM_FUNC_END(memset_orig) diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S index ed33cbab3958..a2b9caa5274c 100644 --- a/arch/x86/lib/msr-reg.S +++ b/arch/x86/lib/msr-reg.S @@ -12,7 +12,7 @@ * */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +SYM_FUNC_START(\op\()_safe_regs) pushq %rbx pushq %r12 movq %rdi, %r10 /* Save pointer */ @@ -41,13 +41,13 @@ ENTRY(\op\()_safe_regs) jmp 2b _ASM_EXTABLE(1b, 3b) -ENDPROC(\op\()_safe_regs) +SYM_FUNC_END(\op\()_safe_regs) .endm #else /* X86_32 */ .macro op_safe_regs op -ENTRY(\op\()_safe_regs) +SYM_FUNC_START(\op\()_safe_regs) pushl %ebx pushl %ebp pushl %esi @@ -83,7 +83,7 @@ ENTRY(\op\()_safe_regs) jmp 2b _ASM_EXTABLE(1b, 3b) -ENDPROC(\op\()_safe_regs) +SYM_FUNC_END(\op\()_safe_regs) .endm #endif diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S index 126dd6a9ec9b..7c7c92db8497 100644 --- a/arch/x86/lib/putuser.S +++ b/arch/x86/lib/putuser.S @@ -34,7 +34,7 @@ #define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX .text -ENTRY(__put_user_1) +SYM_FUNC_START(__put_user_1) ENTER cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX jae .Lbad_put_user @@ -43,10 +43,10 @@ ENTRY(__put_user_1) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_1) +SYM_FUNC_END(__put_user_1) EXPORT_SYMBOL(__put_user_1) -ENTRY(__put_user_2) +SYM_FUNC_START(__put_user_2) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $1,%_ASM_BX @@ -57,10 +57,10 @@ ENTRY(__put_user_2) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_2) +SYM_FUNC_END(__put_user_2) EXPORT_SYMBOL(__put_user_2) -ENTRY(__put_user_4) +SYM_FUNC_START(__put_user_4) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $3,%_ASM_BX @@ -71,10 +71,10 @@ ENTRY(__put_user_4) xor %eax,%eax ASM_CLAC ret -ENDPROC(__put_user_4) +SYM_FUNC_END(__put_user_4) EXPORT_SYMBOL(__put_user_4) -ENTRY(__put_user_8) +SYM_FUNC_START(__put_user_8) ENTER mov TASK_addr_limit(%_ASM_BX),%_ASM_BX sub $7,%_ASM_BX @@ -88,14 +88,15 @@ ENTRY(__put_user_8) xor %eax,%eax ASM_CLAC RET -ENDPROC(__put_user_8) +SYM_FUNC_END(__put_user_8) EXPORT_SYMBOL(__put_user_8) -.Lbad_put_user_clac: +SYM_CODE_START_LOCAL(.Lbad_put_user_clac) ASM_CLAC .Lbad_put_user: movl $-EFAULT,%eax RET +SYM_CODE_END(.Lbad_put_user_clac) _ASM_EXTABLE_UA(1b, .Lbad_put_user_clac) _ASM_EXTABLE_UA(2b, .Lbad_put_user_clac) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index c909961e678a..363ec132df7e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,11 +11,11 @@ .macro THUNK reg .section .text.__x86.indirect_thunk -ENTRY(__x86_indirect_thunk_\reg) +SYM_FUNC_START(__x86_indirect_thunk_\reg) CFI_STARTPROC JMP_NOSPEC %\reg CFI_ENDPROC -ENDPROC(__x86_indirect_thunk_\reg) +SYM_FUNC_END(__x86_indirect_thunk_\reg) .endm /* diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S index ee08449d20fd..951da2ad54bb 100644 --- a/arch/x86/math-emu/div_Xsig.S +++ b/arch/x86/math-emu/div_Xsig.S @@ -75,7 
+75,7 @@ FPU_result_1: .text -ENTRY(div_Xsig) +SYM_FUNC_START(div_Xsig) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -364,4 +364,4 @@ L_bugged_2: pop %ebx jmp L_exit #endif /* PARANOID */ -ENDPROC(div_Xsig) +SYM_FUNC_END(div_Xsig) diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S index 8f5025c80ee0..d047d1816abe 100644 --- a/arch/x86/math-emu/div_small.S +++ b/arch/x86/math-emu/div_small.S @@ -19,7 +19,7 @@ #include "fpu_emu.h" .text -ENTRY(FPU_div_small) +SYM_FUNC_START(FPU_div_small) pushl %ebp movl %esp,%ebp @@ -45,4 +45,4 @@ ENTRY(FPU_div_small) leave ret -ENDPROC(FPU_div_small) +SYM_FUNC_END(FPU_div_small) diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S index 3e489122a2b0..4afc7b1fa6e9 100644 --- a/arch/x86/math-emu/mul_Xsig.S +++ b/arch/x86/math-emu/mul_Xsig.S @@ -25,7 +25,7 @@ #include "fpu_emu.h" .text -ENTRY(mul32_Xsig) +SYM_FUNC_START(mul32_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -63,10 +63,10 @@ ENTRY(mul32_Xsig) popl %esi leave ret -ENDPROC(mul32_Xsig) +SYM_FUNC_END(mul32_Xsig) -ENTRY(mul64_Xsig) +SYM_FUNC_START(mul64_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -116,11 +116,11 @@ ENTRY(mul64_Xsig) popl %esi leave ret -ENDPROC(mul64_Xsig) +SYM_FUNC_END(mul64_Xsig) -ENTRY(mul_Xsig_Xsig) +SYM_FUNC_START(mul_Xsig_Xsig) pushl %ebp movl %esp,%ebp subl $16,%esp @@ -176,4 +176,4 @@ ENTRY(mul_Xsig_Xsig) popl %esi leave ret -ENDPROC(mul_Xsig_Xsig) +SYM_FUNC_END(mul_Xsig_Xsig) diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S index 604f0b2d17e8..702315eecb86 100644 --- a/arch/x86/math-emu/polynom_Xsig.S +++ b/arch/x86/math-emu/polynom_Xsig.S @@ -37,7 +37,7 @@ #define OVERFLOWED -16(%ebp) /* addition overflow flag */ .text -ENTRY(polynomial_Xsig) +SYM_FUNC_START(polynomial_Xsig) pushl %ebp movl %esp,%ebp subl $32,%esp @@ -134,4 +134,4 @@ L_accum_done: popl %esi leave ret -ENDPROC(polynomial_Xsig) +SYM_FUNC_END(polynomial_Xsig) diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S index 7f6b4392a15d..cad1d60b1e84 100644 --- a/arch/x86/math-emu/reg_norm.S +++ b/arch/x86/math-emu/reg_norm.S @@ -22,7 +22,7 @@ .text -ENTRY(FPU_normalize) +SYM_FUNC_START(FPU_normalize) pushl %ebp movl %esp,%ebp pushl %ebx @@ -95,12 +95,12 @@ L_overflow: call arith_overflow pop %ebx jmp L_exit -ENDPROC(FPU_normalize) +SYM_FUNC_END(FPU_normalize) /* Normalise without reporting underflow or overflow */ -ENTRY(FPU_normalize_nuo) +SYM_FUNC_START(FPU_normalize_nuo) pushl %ebp movl %esp,%ebp pushl %ebx @@ -147,4 +147,4 @@ L_exit_nuo_zero: popl %ebx leave ret -ENDPROC(FPU_normalize_nuo) +SYM_FUNC_END(FPU_normalize_nuo) diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S index 04563421ee7d..11a1f798451b 100644 --- a/arch/x86/math-emu/reg_round.S +++ b/arch/x86/math-emu/reg_round.S @@ -109,7 +109,7 @@ FPU_denormal: .globl fpu_Arith_exit /* Entry point when called from C */ -ENTRY(FPU_round) +SYM_FUNC_START(FPU_round) pushl %ebp movl %esp,%ebp pushl %esi @@ -708,4 +708,4 @@ L_exception_exit: jmp fpu_reg_round_special_exit #endif /* PARANOID */ -ENDPROC(FPU_round) +SYM_FUNC_END(FPU_round) diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S index 50fe9f8c893c..9c9e2c810afe 100644 --- a/arch/x86/math-emu/reg_u_add.S +++ b/arch/x86/math-emu/reg_u_add.S @@ -32,7 +32,7 @@ #include "control_w.h" .text -ENTRY(FPU_u_add) +SYM_FUNC_START(FPU_u_add) pushl %ebp movl %esp,%ebp pushl %esi @@ -166,4 +166,4 @@ L_exit: leave ret #endif /* PARANOID */ -ENDPROC(FPU_u_add) 
+SYM_FUNC_END(FPU_u_add) diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S index 94d545e118e4..e2fb5c2644c5 100644 --- a/arch/x86/math-emu/reg_u_div.S +++ b/arch/x86/math-emu/reg_u_div.S @@ -75,7 +75,7 @@ FPU_ovfl_flag: #define DEST PARAM3 .text -ENTRY(FPU_u_div) +SYM_FUNC_START(FPU_u_div) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -471,4 +471,4 @@ L_exit: ret #endif /* PARANOID */ -ENDPROC(FPU_u_div) +SYM_FUNC_END(FPU_u_div) diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S index 21cde47fb3e5..0c779c87ac5b 100644 --- a/arch/x86/math-emu/reg_u_mul.S +++ b/arch/x86/math-emu/reg_u_mul.S @@ -45,7 +45,7 @@ FPU_accum_1: .text -ENTRY(FPU_u_mul) +SYM_FUNC_START(FPU_u_mul) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -147,4 +147,4 @@ L_exit: ret #endif /* PARANOID */ -ENDPROC(FPU_u_mul) +SYM_FUNC_END(FPU_u_mul) diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S index f05dea7dec38..e9bb7c248649 100644 --- a/arch/x86/math-emu/reg_u_sub.S +++ b/arch/x86/math-emu/reg_u_sub.S @@ -33,7 +33,7 @@ #include "control_w.h" .text -ENTRY(FPU_u_sub) +SYM_FUNC_START(FPU_u_sub) pushl %ebp movl %esp,%ebp pushl %esi @@ -271,4 +271,4 @@ L_exit: popl %esi leave ret -ENDPROC(FPU_u_sub) +SYM_FUNC_END(FPU_u_sub) diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S index 226a51e991f1..d9d7de8dbd7b 100644 --- a/arch/x86/math-emu/round_Xsig.S +++ b/arch/x86/math-emu/round_Xsig.S @@ -23,7 +23,7 @@ .text -ENTRY(round_Xsig) +SYM_FUNC_START(round_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ @@ -79,11 +79,11 @@ L_exit: popl %ebx leave ret -ENDPROC(round_Xsig) +SYM_FUNC_END(round_Xsig) -ENTRY(norm_Xsig) +SYM_FUNC_START(norm_Xsig) pushl %ebp movl %esp,%ebp pushl %ebx /* Reserve some space */ @@ -139,4 +139,4 @@ L_n_exit: popl %ebx leave ret -ENDPROC(norm_Xsig) +SYM_FUNC_END(norm_Xsig) diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S index 96f4779aa9c1..726af985f758 100644 --- a/arch/x86/math-emu/shr_Xsig.S +++ b/arch/x86/math-emu/shr_Xsig.S @@ -22,7 +22,7 @@ #include "fpu_emu.h" .text -ENTRY(shr_Xsig) +SYM_FUNC_START(shr_Xsig) push %ebp movl %esp,%ebp pushl %esi @@ -86,4 +86,4 @@ L_more_than_95: popl %esi leave ret -ENDPROC(shr_Xsig) +SYM_FUNC_END(shr_Xsig) diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S index d588874eb6fb..4fc89174caf0 100644 --- a/arch/x86/math-emu/wm_shrx.S +++ b/arch/x86/math-emu/wm_shrx.S @@ -33,7 +33,7 @@ | Results returned in the 64 bit arg and eax. | +---------------------------------------------------------------------------*/ -ENTRY(FPU_shrx) +SYM_FUNC_START(FPU_shrx) push %ebp movl %esp,%ebp pushl %esi @@ -93,7 +93,7 @@ L_more_than_95: popl %esi leave ret -ENDPROC(FPU_shrx) +SYM_FUNC_END(FPU_shrx) /*---------------------------------------------------------------------------+ @@ -112,7 +112,7 @@ ENDPROC(FPU_shrx) | part which has been shifted out of the arg. | | Results returned in the 64 bit arg and eax. 
| +---------------------------------------------------------------------------*/ -ENTRY(FPU_shrxs) +SYM_FUNC_START(FPU_shrxs) push %ebp movl %esp,%ebp pushl %esi @@ -204,4 +204,4 @@ Ls_more_than_95: popl %esi leave ret -ENDPROC(FPU_shrxs) +SYM_FUNC_END(FPU_shrxs) diff --git a/arch/x86/math-emu/wm_sqrt.S b/arch/x86/math-emu/wm_sqrt.S index f031c0e19356..3b2b58164ec1 100644 --- a/arch/x86/math-emu/wm_sqrt.S +++ b/arch/x86/math-emu/wm_sqrt.S @@ -75,7 +75,7 @@ FPU_fsqrt_arg_0: .text -ENTRY(wm_sqrt) +SYM_FUNC_START(wm_sqrt) pushl %ebp movl %esp,%ebp #ifndef NON_REENTRANT_FPU @@ -469,4 +469,4 @@ sqrt_more_prec_large: /* Our estimate is too large */ movl $0x7fffff00,%eax jmp sqrt_round_result -ENDPROC(wm_sqrt) +SYM_FUNC_END(wm_sqrt) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fd10d91a6115..e7bb483557c9 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -829,14 +829,13 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end) * used for the kernel image only. free_init_pages() will do the * right thing for either kind of address. */ -void free_kernel_image_pages(void *begin, void *end) +void free_kernel_image_pages(const char *what, void *begin, void *end) { unsigned long begin_ul = (unsigned long)begin; unsigned long end_ul = (unsigned long)end; unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT; - - free_init_pages("unused kernel image", begin_ul, end_ul); + free_init_pages(what, begin_ul, end_ul); /* * PTI maps some of the kernel into userspace. For performance, @@ -865,7 +864,8 @@ void __ref free_initmem(void) mem_encrypt_free_decrypted_mem(); - free_kernel_image_pages(&__init_begin, &__init_end); + free_kernel_image_pages("unused kernel image (initmem)", + &__init_begin, &__init_end); } #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index a6b5c653727b..dcb9bc961b39 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1263,7 +1263,7 @@ int kernel_set_to_readonly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); - unsigned long end = PFN_ALIGN(__stop___ex_table); + unsigned long end = PFN_ALIGN(_etext); if (!kernel_set_to_readonly) return; @@ -1282,7 +1282,7 @@ void set_kernel_text_rw(void) void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); - unsigned long end = PFN_ALIGN(__stop___ex_table); + unsigned long end = PFN_ALIGN(_etext); if (!kernel_set_to_readonly) return; @@ -1300,9 +1300,9 @@ void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long rodata_start = PFN_ALIGN(__start_rodata); - unsigned long end = (unsigned long) &__end_rodata_hpage_align; - unsigned long text_end = PFN_ALIGN(&__stop___ex_table); - unsigned long rodata_end = PFN_ALIGN(&__end_rodata); + unsigned long end = (unsigned long)__end_rodata_hpage_align; + unsigned long text_end = PFN_ALIGN(_etext); + unsigned long rodata_end = PFN_ALIGN(__end_rodata); unsigned long all_end; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", @@ -1334,8 +1334,10 @@ void mark_rodata_ro(void) set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif - free_kernel_image_pages((void *)text_end, (void *)rodata_start); - free_kernel_image_pages((void *)rodata_end, (void *)_sdata); + free_kernel_image_pages("unused kernel image (text/rodata gap)", + (void *)text_end, (void *)rodata_start); + free_kernel_image_pages("unused kernel image (rodata/data gap)", + (void *)rodata_end, (void *)_sdata); debug_checkwx(); } diff --git a/arch/x86/mm/mem_encrypt_boot.S 
b/arch/x86/mm/mem_encrypt_boot.S index 6d71481a1e70..106ead05bbe3 100644 --- a/arch/x86/mm/mem_encrypt_boot.S +++ b/arch/x86/mm/mem_encrypt_boot.S @@ -16,7 +16,7 @@ .text .code64 -ENTRY(sme_encrypt_execute) +SYM_FUNC_START(sme_encrypt_execute) /* * Entry parameters: @@ -66,9 +66,9 @@ ENTRY(sme_encrypt_execute) pop %rbp ret -ENDPROC(sme_encrypt_execute) +SYM_FUNC_END(sme_encrypt_execute) -ENTRY(__enc_copy) +SYM_FUNC_START(__enc_copy) /* * Routine used to encrypt memory in place. * This routine must be run outside of the kernel proper since @@ -153,4 +153,4 @@ ENTRY(__enc_copy) ret .L__enc_copy_end: -ENDPROC(__enc_copy) +SYM_FUNC_END(__enc_copy) diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 7f2140414440..44a9f068eee0 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -574,7 +574,7 @@ static void pti_clone_kernel_text(void) */ unsigned long start = PFN_ALIGN(_text); unsigned long end_clone = (unsigned long)__end_rodata_aligned; - unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table); + unsigned long end_global = PFN_ALIGN((unsigned long)_etext); if (!pti_kernel_image_global_ok()) return; diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S index ab2e91e76894..eed8b5b441f8 100644 --- a/arch/x86/platform/efi/efi_stub_32.S +++ b/arch/x86/platform/efi/efi_stub_32.S @@ -22,7 +22,7 @@ */ .text -ENTRY(efi_call_phys) +SYM_FUNC_START(efi_call_phys) /* * 0. The function can only be called in Linux kernel. So CS has been * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found @@ -114,7 +114,7 @@ ENTRY(efi_call_phys) movl (%edx), %ecx pushl %ecx ret -ENDPROC(efi_call_phys) +SYM_FUNC_END(efi_call_phys) .previous .data diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 74628ec78f29..b1d2313fe3bf 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -39,7 +39,7 @@ mov %rsi, %cr0; \ mov (%rsp), %rsp -ENTRY(efi_call) +SYM_FUNC_START(efi_call) pushq %rbp movq %rsp, %rbp SAVE_XMM @@ -55,4 +55,4 @@ ENTRY(efi_call) RESTORE_XMM popq %rbp ret -ENDPROC(efi_call) +SYM_FUNC_END(efi_call) diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 46c58b08739c..3189f1394701 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -25,7 +25,7 @@ .text .code64 -ENTRY(efi64_thunk) +SYM_FUNC_START(efi64_thunk) push %rbp push %rbx @@ -60,14 +60,14 @@ ENTRY(efi64_thunk) pop %rbx pop %rbp retq -ENDPROC(efi64_thunk) +SYM_FUNC_END(efi64_thunk) /* * We run this function from the 1:1 mapping. * * This function must be invoked with a 1:1 mapped stack. */ -ENTRY(__efi64_thunk) +SYM_FUNC_START_LOCAL(__efi64_thunk) movl %ds, %eax push %rax movl %es, %eax @@ -114,14 +114,14 @@ ENTRY(__efi64_thunk) or %rcx, %rax 1: ret -ENDPROC(__efi64_thunk) +SYM_FUNC_END(__efi64_thunk) -ENTRY(efi_exit32) +SYM_FUNC_START_LOCAL(efi_exit32) movq func_rt_ptr(%rip), %rax push %rax mov %rdi, %rax ret -ENDPROC(efi_exit32) +SYM_FUNC_END(efi_exit32) .code32 /* @@ -129,7 +129,7 @@ ENDPROC(efi_exit32) * * The stack should represent the 32-bit calling convention. 
*/ -ENTRY(efi_enter32) +SYM_FUNC_START_LOCAL(efi_enter32) movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %es @@ -145,7 +145,7 @@ ENTRY(efi_enter32) pushl %eax lret -ENDPROC(efi_enter32) +SYM_FUNC_END(efi_enter32) .data .balign 8 diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S index 5fee3a2c2fd4..75f4faff8468 100644 --- a/arch/x86/platform/olpc/xo1-wakeup.S +++ b/arch/x86/platform/olpc/xo1-wakeup.S @@ -90,7 +90,7 @@ restore_registers: ret -ENTRY(do_olpc_suspend_lowlevel) +SYM_CODE_START(do_olpc_suspend_lowlevel) call save_processor_state call save_registers @@ -110,6 +110,7 @@ ret_point: call restore_registers call restore_processor_state ret +SYM_CODE_END(do_olpc_suspend_lowlevel) .data saved_gdt: .long 0,0 diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index 1f8825bbaffb..43b4d864817e 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -50,7 +50,7 @@ #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) #define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) -ENTRY(pvh_start_xen) +SYM_CODE_START_LOCAL(pvh_start_xen) cld lgdt (_pa(gdt)) @@ -146,15 +146,16 @@ ENTRY(pvh_start_xen) ljmp $PVH_CS_SEL, $_pa(startup_32) #endif -END(pvh_start_xen) +SYM_CODE_END(pvh_start_xen) .section ".init.data","aw" .balign 8 -gdt: +SYM_DATA_START_LOCAL(gdt) .word gdt_end - gdt_start .long _pa(gdt_start) .word 0 -gdt_start: +SYM_DATA_END(gdt) +SYM_DATA_START_LOCAL(gdt_start) .quad 0x0000000000000000 /* NULL descriptor */ #ifdef CONFIG_X86_64 .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */ @@ -163,15 +164,14 @@ gdt_start: #endif .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ -gdt_end: +SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end) .balign 16 -canary: - .fill 48, 1, 0 +SYM_DATA_LOCAL(canary, .fill 48, 1, 0) -early_stack: +SYM_DATA_START_LOCAL(early_stack) .fill BOOT_STACK_SIZE, 1, 0 -early_stack_end: +SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end) ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, _ASM_PTR (pvh_start_xen - __START_KERNEL_map)) diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index 6fe383002125..8786653ad3c0 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S @@ -16,7 +16,7 @@ .text -ENTRY(swsusp_arch_suspend) +SYM_FUNC_START(swsusp_arch_suspend) movl %esp, saved_context_esp movl %ebx, saved_context_ebx movl %ebp, saved_context_ebp @@ -33,9 +33,9 @@ ENTRY(swsusp_arch_suspend) call swsusp_save FRAME_END ret -ENDPROC(swsusp_arch_suspend) +SYM_FUNC_END(swsusp_arch_suspend) -ENTRY(restore_image) +SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ movl restore_jump_address, %ebx movl restore_cr3, %ebp @@ -45,9 +45,10 @@ ENTRY(restore_image) /* jump to relocated restore code */ movl relocated_restore_code, %eax jmpl *%eax +SYM_CODE_END(restore_image) /* code below has been relocated to a safe page */ -ENTRY(core_restore_code) +SYM_CODE_START(core_restore_code) movl temp_pgt, %eax movl %eax, %cr3 @@ -77,10 +78,11 @@ copy_loop: done: jmpl *%ebx +SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE -ENTRY(restore_registers) +SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movl %ebp, %cr3 movl mmu_cr4_features, %ecx @@ -107,4 +109,4 @@ ENTRY(restore_registers) movl %eax, in_suspend ret -ENDPROC(restore_registers) +SYM_FUNC_END(restore_registers) diff --git a/arch/x86/power/hibernate_asm_64.S 
b/arch/x86/power/hibernate_asm_64.S index a4d5eb0a7ece..7918b8415f13 100644 --- a/arch/x86/power/hibernate_asm_64.S +++ b/arch/x86/power/hibernate_asm_64.S @@ -22,7 +22,7 @@ #include <asm/processor-flags.h> #include <asm/frame.h> -ENTRY(swsusp_arch_suspend) +SYM_FUNC_START(swsusp_arch_suspend) movq $saved_context, %rax movq %rsp, pt_regs_sp(%rax) movq %rbp, pt_regs_bp(%rax) @@ -50,9 +50,9 @@ ENTRY(swsusp_arch_suspend) call swsusp_save FRAME_END ret -ENDPROC(swsusp_arch_suspend) +SYM_FUNC_END(swsusp_arch_suspend) -ENTRY(restore_image) +SYM_CODE_START(restore_image) /* prepare to jump to the image kernel */ movq restore_jump_address(%rip), %r8 movq restore_cr3(%rip), %r9 @@ -67,9 +67,10 @@ ENTRY(restore_image) /* jump to relocated restore code */ movq relocated_restore_code(%rip), %rcx jmpq *%rcx +SYM_CODE_END(restore_image) /* code below has been relocated to a safe page */ -ENTRY(core_restore_code) +SYM_CODE_START(core_restore_code) /* switch to temporary page tables */ movq %rax, %cr3 /* flush TLB */ @@ -97,10 +98,11 @@ ENTRY(core_restore_code) .Ldone: /* jump to the restore_registers address from the image header */ jmpq *%r8 +SYM_CODE_END(core_restore_code) /* code below belongs to the image kernel */ .align PAGE_SIZE -ENTRY(restore_registers) +SYM_FUNC_START(restore_registers) /* go back to the original page tables */ movq %r9, %cr3 @@ -142,4 +144,4 @@ ENTRY(restore_registers) movq %rax, in_suspend(%rip) ret -ENDPROC(restore_registers) +SYM_FUNC_END(restore_registers) diff --git a/arch/x86/purgatory/entry64.S b/arch/x86/purgatory/entry64.S index 275a646d1048..0b4390ce586b 100644 --- a/arch/x86/purgatory/entry64.S +++ b/arch/x86/purgatory/entry64.S @@ -8,13 +8,13 @@ * This code has been taken from kexec-tools. */ +#include <linux/linkage.h> + .text .balign 16 .code64 - .globl entry64, entry64_regs - -entry64: +SYM_CODE_START(entry64) /* Setup a gdt that should be preserved */ lgdt gdt(%rip) @@ -54,10 +54,11 @@ new_cs_exit: /* Jump to the new code... */ jmpq *rip(%rip) +SYM_CODE_END(entry64) .section ".rodata" .balign 4 -entry64_regs: +SYM_DATA_START(entry64_regs) rax: .quad 0x0 rcx: .quad 0x0 rdx: .quad 0x0 @@ -75,13 +76,14 @@ r13: .quad 0x0 r14: .quad 0x0 r15: .quad 0x0 rip: .quad 0x0 - .size entry64_regs, . - entry64_regs +SYM_DATA_END(entry64_regs) /* GDT */ .section ".rodata" .balign 16 -gdt: - /* 0x00 unusable segment +SYM_DATA_START_LOCAL(gdt) + /* + * 0x00 unusable segment * 0x08 unused * so use them as gdt ptr */ @@ -94,6 +96,8 @@ gdt: /* 0x18 4GB flat data segment */ .word 0xFFFF, 0x0000, 0x9200, 0x00CF -gdt_end: -stack: .quad 0, 0 -stack_init: +SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end) + +SYM_DATA_START_LOCAL(stack) + .quad 0, 0 +SYM_DATA_END_LABEL(stack, SYM_L_LOCAL, stack_init) diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S index 321146be741d..89d9e9e53fcd 100644 --- a/arch/x86/purgatory/setup-x86_64.S +++ b/arch/x86/purgatory/setup-x86_64.S @@ -7,14 +7,14 @@ * * This code has been taken from kexec-tools. 
 */
+#include <linux/linkage.h>
 #include <asm/purgatory.h>
 
 	.text
-	.globl purgatory_start
 	.balign 16
-purgatory_start:
 	.code64
+SYM_CODE_START(purgatory_start)
 
 	/* Load a gdt so I know what the segment registers are */
 	lgdt	gdt(%rip)
@@ -32,10 +32,12 @@ purgatory_start:
 	/* Call the C code */
 	call purgatory
 	jmp	entry64
+SYM_CODE_END(purgatory_start)
 
 	.section ".rodata"
 	.balign 16
-gdt:	/* 0x00 unusable segment
+SYM_DATA_START_LOCAL(gdt)
+	/* 0x00 unusable segment
 	 * 0x08 unused
 	 * so use them as the gdt ptr
 	 */
@@ -48,10 +50,10 @@ gdt:	/* 0x00 unusable segment
 
 	/* 0x18 4GB flat data segment */
 	.word 0xFFFF, 0x0000, 0x9200, 0x00CF
-gdt_end:
+SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
 
 	.bss
 	.balign 4096
-lstack:
+SYM_DATA_START_LOCAL(lstack)
 	.skip 4096
-lstack_end:
+SYM_DATA_END_LABEL(lstack, SYM_L_LOCAL, lstack_end)
diff --git a/arch/x86/purgatory/stack.S b/arch/x86/purgatory/stack.S
index 8b1427422dfc..1ef507ca50a5 100644
--- a/arch/x86/purgatory/stack.S
+++ b/arch/x86/purgatory/stack.S
@@ -5,13 +5,14 @@
  * Copyright (C) 2014 Red Hat Inc.
  */
 
+#include <linux/linkage.h>
+
 /* A stack for the loaded kernel.
  * Separate and in the data section so it can be prepopulated.
  */
 
 	.data
 	.balign 4096
-	.globl stack, stack_end
-stack:
+SYM_DATA_START(stack)
 	.skip 4096
-stack_end:
+SYM_DATA_END_LABEL(stack, SYM_L_GLOBAL, stack_end)
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
index 6363761cc74c..af04512c02d9 100644
--- a/arch/x86/realmode/rm/header.S
+++ b/arch/x86/realmode/rm/header.S
@@ -14,7 +14,7 @@
 	.section ".header", "a"
 
 	.balign	16
-GLOBAL(real_mode_header)
+SYM_DATA_START(real_mode_header)
 	.long	pa_text_start
 	.long	pa_ro_end
 	/* SMP trampoline */
@@ -33,11 +33,9 @@ GLOBAL(real_mode_header)
 #ifdef CONFIG_X86_64
 	.long	__KERNEL32_CS
 #endif
-END(real_mode_header)
+SYM_DATA_END(real_mode_header)
 
 	/* End signature, used to verify integrity */
 	.section ".signature","a"
 	.balign 4
-GLOBAL(end_signature)
-	.long	REALMODE_END_SIGNATURE
-END(end_signature)
+SYM_DATA(end_signature, .long REALMODE_END_SIGNATURE)
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index cd2f97b9623b..f10515b10e0a 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -19,7 +19,7 @@
  */
 	.section ".text32", "ax"
 	.code32
-ENTRY(machine_real_restart_asm)
+SYM_CODE_START(machine_real_restart_asm)
 
 #ifdef CONFIG_X86_64
 	/* Switch to trampoline GDT as it is guaranteed < 4 GiB */
@@ -33,7 +33,7 @@ ENTRY(machine_real_restart_asm)
 	movl	%eax, %cr0
 	ljmpl	$__KERNEL32_CS, $pa_machine_real_restart_paging_off
 
-GLOBAL(machine_real_restart_paging_off)
+SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
 	xorl	%eax, %eax
 	xorl	%edx, %edx
 	movl	$MSR_EFER, %ecx
@@ -63,6 +63,7 @@ GLOBAL(machine_real_restart_paging_off)
 	movl	%ecx, %gs
 	movl	%ecx, %ss
 	ljmpw	$8, $1f
+SYM_CODE_END(machine_real_restart_asm)
 
 /*
  * This is 16-bit protected mode code to disable paging and the cache,
@@ -127,13 +128,13 @@ bios:
 	.section ".rodata", "a"
 
 	.balign	16
-GLOBAL(machine_real_restart_idt)
+SYM_DATA_START(machine_real_restart_idt)
 	.word	0xffff		/* Length - real mode default value */
 	.long	0		/* Base - real mode default value */
-END(machine_real_restart_idt)
+SYM_DATA_END(machine_real_restart_idt)
 
 	.balign	16
-GLOBAL(machine_real_restart_gdt)
+SYM_DATA_START(machine_real_restart_gdt)
 	/* Self-pointer */
 	.word	0xffff		/* Length - real mode default value */
 	.long	pa_machine_real_restart_gdt
@@ -153,4 +154,4 @@ GLOBAL(machine_real_restart_gdt)
 	 * semantics we don't have to reload the segments once CR0.PE = 0.
 	 */
 	.quad	GDT_ENTRY(0x0093, 0x100, 0xffff)
-END(machine_real_restart_gdt)
+SYM_DATA_END(machine_real_restart_gdt)
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
index 8d4cb64799ea..0fca64061ad2 100644
--- a/arch/x86/realmode/rm/stack.S
+++ b/arch/x86/realmode/rm/stack.S
@@ -6,15 +6,13 @@
 #include <linux/linkage.h>
 
 	.data
-GLOBAL(HEAP)
-	.long	rm_heap
-GLOBAL(heap_end)
-	.long	rm_stack
+SYM_DATA(HEAP, .long rm_heap)
+SYM_DATA(heap_end, .long rm_stack)
 
 	.bss
 	.balign	16
-GLOBAL(rm_heap)
-	.space	2048
-GLOBAL(rm_stack)
+SYM_DATA(rm_heap, .space 2048)
+
+SYM_DATA_START(rm_stack)
 	.space	2048
-GLOBAL(rm_stack_end)
+SYM_DATA_END_LABEL(rm_stack, SYM_L_GLOBAL, rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index 1868b158480d..3fad907a179f 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -29,7 +29,7 @@
 	.code16
 
 	.balign	PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
 	wbinvd			# Needed for NUMA-Q should be harmless for others
 
 	LJMPW_RM(1f)
@@ -54,18 +54,20 @@ ENTRY(trampoline_start)
 	lmsw	%dx			# into protected mode
 
 	ljmpl	$__BOOT_CS, $pa_startup_32
+SYM_CODE_END(trampoline_start)
 
 	.section ".text32","ax"
 	.code32
-ENTRY(startup_32)			# note: also used from wakeup_asm.S
+SYM_CODE_START(startup_32)		# note: also used from wakeup_asm.S
 	jmp	*%eax
+SYM_CODE_END(startup_32)
 
 	.bss
 	.balign 8
-GLOBAL(trampoline_header)
-	tr_start:		.space	4
-	tr_gdt_pad:		.space	2
-	tr_gdt:			.space	6
-END(trampoline_header)
+SYM_DATA_START(trampoline_header)
+	SYM_DATA_LOCAL(tr_start,	.space 4)
+	SYM_DATA_LOCAL(tr_gdt_pad,	.space 2)
+	SYM_DATA_LOCAL(tr_gdt,		.space 6)
+SYM_DATA_END(trampoline_header)
 
 #include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index aee2b45d83b8..251758ed7443 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -38,7 +38,7 @@
 	.code16
 
 	.balign	PAGE_SIZE
-ENTRY(trampoline_start)
+SYM_CODE_START(trampoline_start)
 	cli			# We should be safe anyway
 	wbinvd
 
@@ -78,12 +78,14 @@ ENTRY(trampoline_start)
 no_longmode:
 	hlt
 	jmp no_longmode
+SYM_CODE_END(trampoline_start)
+
 #include "../kernel/verify_cpu.S"
 
 	.section ".text32","ax"
 	.code32
 	.balign 4
-ENTRY(startup_32)
+SYM_CODE_START(startup_32)
 	movl	%edx, %ss
 	addl	$pa_real_mode_base, %esp
 	movl	%edx, %ds
@@ -137,38 +139,39 @@ ENTRY(startup_32)
 	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
 	 */
 	ljmpl	$__KERNEL_CS, $pa_startup_64
+SYM_CODE_END(startup_32)
 
 	.section ".text64","ax"
 	.code64
 	.balign 4
-ENTRY(startup_64)
+SYM_CODE_START(startup_64)
 	# Now jump into the kernel using virtual addresses
 	jmpq	*tr_start(%rip)
+SYM_CODE_END(startup_64)
 
 	.section ".rodata","a"
 	# Duplicate the global descriptor table
 	# so the kernel can live anywhere
 	.balign	16
-	.globl tr_gdt
-tr_gdt:
+SYM_DATA_START(tr_gdt)
 	.short	tr_gdt_end - tr_gdt - 1	# gdt limit
 	.long	pa_tr_gdt
 	.short	0
 	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
 	.quad	0x00af9b000000ffff	# __KERNEL_CS
 	.quad	0x00cf93000000ffff	# __KERNEL_DS
-tr_gdt_end:
+SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
 
 	.bss
 	.balign	PAGE_SIZE
-GLOBAL(trampoline_pgd)		.space	PAGE_SIZE
+SYM_DATA(trampoline_pgd, .space PAGE_SIZE)
 
 	.balign	8
-GLOBAL(trampoline_header)
-	tr_start:		.space	8
-	GLOBAL(tr_efer)		.space	8
-	GLOBAL(tr_cr4)		.space	4
-	GLOBAL(tr_flags)	.space	4
-END(trampoline_header)
+SYM_DATA_START(trampoline_header)
+	SYM_DATA_LOCAL(tr_start,	.space 8)
+	SYM_DATA(tr_efer,		.space 8)
+	SYM_DATA(tr_cr4,		.space 4)
+	SYM_DATA(tr_flags,		.space 4)
+SYM_DATA_END(trampoline_header)
 
 #include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
index 8d8208dcca24..5033e640f957 100644
--- a/arch/x86/realmode/rm/trampoline_common.S
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -1,4 +1,4 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 	.section ".rodata","a"
 	.balign	16
-tr_idt: .fill 1, 6, 0
+SYM_DATA_LOCAL(tr_idt, .fill 1, 6, 0)
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
index 05ac9c17c811..02d0ba16ae33 100644
--- a/arch/x86/realmode/rm/wakeup_asm.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -17,7 +17,7 @@
 	.section ".data", "aw"
 
 	.balign	16
-GLOBAL(wakeup_header)
+SYM_DATA_START(wakeup_header)
 	video_mode:	.short	0	/* Video mode number */
 	pmode_entry:	.long	0
 	pmode_cs:	.short	__KERNEL_CS
@@ -31,13 +31,13 @@ GLOBAL(wakeup_header)
 	realmode_flags:	.long	0
 	real_magic:	.long	0
 	signature:	.long	WAKEUP_HEADER_SIGNATURE
-END(wakeup_header)
+SYM_DATA_END(wakeup_header)
 
 	.text
 	.code16
 
 	.balign	16
-ENTRY(wakeup_start)
+SYM_CODE_START(wakeup_start)
 	cli
 	cld
 
@@ -73,7 +73,7 @@ ENTRY(wakeup_start)
 	movw	%ax, %fs
 	movw	%ax, %gs
 
-	lidtl	wakeup_idt
+	lidtl	.Lwakeup_idt
 
 	/* Clear the EFLAGS */
 	pushl	$0
@@ -135,6 +135,7 @@ ENTRY(wakeup_start)
 #else
 	jmp	trampoline_start
 #endif
+SYM_CODE_END(wakeup_start)
 
 bogus_real_magic:
 1:
@@ -152,7 +153,7 @@ bogus_real_magic:
 	 */
 
 	.balign	16
-GLOBAL(wakeup_gdt)
+SYM_DATA_START(wakeup_gdt)
 	.word	3*8-1		/* Self-descriptor */
 	.long	pa_wakeup_gdt
 	.word	0
@@ -164,15 +165,15 @@ GLOBAL(wakeup_gdt)
 	.word	0xffff		/* 16-bit data segment @ real_mode_base */
 	.long	0x93000000 + pa_real_mode_base
 	.word	0x008f		/* big real mode */
-END(wakeup_gdt)
+SYM_DATA_END(wakeup_gdt)
 
 	.section ".rodata","a"
 	.balign	8
 
 	/* This is the standard real-mode IDT */
 	.balign	16
-GLOBAL(wakeup_idt)
+SYM_DATA_START_LOCAL(.Lwakeup_idt)
 	.word	0xffff		/* limit */
 	.long	0		/* address */
 	.word	0
-END(wakeup_idt)
+SYM_DATA_END(.Lwakeup_idt)
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
index c078dba40cef..c8fef76743f6 100644
--- a/arch/x86/realmode/rmpiggy.S
+++ b/arch/x86/realmode/rmpiggy.S
@@ -10,12 +10,10 @@
 
 	.balign PAGE_SIZE
-GLOBAL(real_mode_blob)
+SYM_DATA_START(real_mode_blob)
 	.incbin	"arch/x86/realmode/rm/realmode.bin"
-END(real_mode_blob)
+SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
 
-GLOBAL(real_mode_blob_end);
-
-GLOBAL(real_mode_relocs)
+SYM_DATA_START(real_mode_relocs)
 	.incbin	"arch/x86/realmode/rm/realmode.relocs"
-END(real_mode_relocs)
+SYM_DATA_END(real_mode_relocs)
diff --git a/arch/x86/um/vdso/vdso.S b/arch/x86/um/vdso/vdso.S
index a4a3870dc059..a6eaf293a73b 100644
--- a/arch/x86/um/vdso/vdso.S
+++ b/arch/x86/um/vdso/vdso.S
@@ -1,11 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/init.h>
+#include <linux/linkage.h>
 
 __INITDATA
 
-	.globl vdso_start, vdso_end
-vdso_start:
+SYM_DATA_START(vdso_start)
 	.incbin "arch/x86/um/vdso/vdso.so"
-vdso_end:
+SYM_DATA_END_LABEL(vdso_start, SYM_L_GLOBAL, vdso_end)
 
 __FINIT
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index be104eef80be..508fe204520b 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -19,7 +19,7 @@
  * event status with one and operation.  If there are pending events,
  * then enter the hypervisor to get them handled.
  */
-ENTRY(xen_irq_enable_direct)
+SYM_FUNC_START(xen_irq_enable_direct)
 	FRAME_BEGIN
 	/* Unmask events */
 	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
@@ -38,17 +38,17 @@ ENTRY(xen_irq_enable_direct)
 1:
 	FRAME_END
 	ret
-	ENDPROC(xen_irq_enable_direct)
+SYM_FUNC_END(xen_irq_enable_direct)
 
 
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
  */
-ENTRY(xen_irq_disable_direct)
+SYM_FUNC_START(xen_irq_disable_direct)
 	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	ret
-ENDPROC(xen_irq_disable_direct)
+SYM_FUNC_END(xen_irq_disable_direct)
 
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
@@ -59,12 +59,12 @@ ENDPROC(xen_irq_disable_direct)
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
-ENTRY(xen_save_fl_direct)
+SYM_FUNC_START(xen_save_fl_direct)
 	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
 	setz %ah
 	addb %ah, %ah
 	ret
-	ENDPROC(xen_save_fl_direct)
+SYM_FUNC_END(xen_save_fl_direct)
 
 
 /*
@@ -74,7 +74,7 @@ ENTRY(xen_save_fl_direct)
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
-ENTRY(xen_restore_fl_direct)
+SYM_FUNC_START(xen_restore_fl_direct)
 	FRAME_BEGIN
 #ifdef CONFIG_X86_64
 	testw $X86_EFLAGS_IF, %di
@@ -95,14 +95,14 @@ ENTRY(xen_restore_fl_direct)
 1:
 	FRAME_END
 	ret
-	ENDPROC(xen_restore_fl_direct)
+SYM_FUNC_END(xen_restore_fl_direct)
 
 
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
 */
-ENTRY(check_events)
+SYM_FUNC_START(check_events)
 	FRAME_BEGIN
 #ifdef CONFIG_X86_32
 	push %eax
@@ -135,19 +135,19 @@ ENTRY(check_events)
 #endif
 	FRAME_END
 	ret
-ENDPROC(check_events)
+SYM_FUNC_END(check_events)
 
-ENTRY(xen_read_cr2)
+SYM_FUNC_START(xen_read_cr2)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
 	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
 	FRAME_END
 	ret
-	ENDPROC(xen_read_cr2);
+SYM_FUNC_END(xen_read_cr2);
 
-ENTRY(xen_read_cr2_direct)
+SYM_FUNC_START(xen_read_cr2_direct)
 	FRAME_BEGIN
 	_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
 	FRAME_END
 	ret
-	ENDPROC(xen_read_cr2_direct);
+SYM_FUNC_END(xen_read_cr2_direct);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index cd177772fe4d..2712e9155306 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -56,7 +56,7 @@
 	_ASM_EXTABLE(1b,2b)
 .endm
 
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
 	/* test eflags for special cases */
 	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
 	jnz hyper_iret
@@ -122,6 +122,7 @@ xen_iret_end_crit:
 hyper_iret:
 	/* put this out of line since its very rarely used */
 	jmp hypercall_page + __HYPERVISOR_iret * 32
+SYM_CODE_END(xen_iret)
 
 	.globl xen_iret_start_crit, xen_iret_end_crit
 
@@ -152,7 +153,7 @@ hyper_iret:
 * The only caveat is that if the outer eax hasn't been restored yet (i.e.
 * it's still on stack), we need to restore its value here.
 */
-ENTRY(xen_iret_crit_fixup)
+SYM_CODE_START(xen_iret_crit_fixup)
 	/*
 	 * Paranoia: Make sure we're really coming from kernel space.
 	 * One could imagine a case where userspace jumps into the
@@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup)
 
 2:
 	ret
-END(xen_iret_crit_fixup)
+SYM_CODE_END(xen_iret_crit_fixup)
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index ebf610b49c06..0a0fd168683a 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -20,11 +20,11 @@
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
-ENTRY(xen_\name)
+SYM_CODE_START(xen_\name)
 	pop %rcx
 	pop %r11
 	jmp  \name
-END(xen_\name)
+SYM_CODE_END(xen_\name)
 _ASM_NOKPROBE(xen_\name)
 .endm
 
@@ -57,7 +57,7 @@ xen_pv_trap entry_INT80_compat
 xen_pv_trap hypervisor_callback
 
 	__INIT
-ENTRY(xen_early_idt_handler_array)
+SYM_CODE_START(xen_early_idt_handler_array)
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
 	pop %rcx
@@ -66,7 +66,7 @@ ENTRY(xen_early_idt_handler_array)
 	i = i + 1
 	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
-END(xen_early_idt_handler_array)
+SYM_CODE_END(xen_early_idt_handler_array)
 	__FINIT
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
@@ -85,11 +85,12 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 *	r11	}<-- pushed by hypercall page
 * rsp->rax	}
 */
-ENTRY(xen_iret)
+SYM_CODE_START(xen_iret)
 	pushq $0
 	jmp hypercall_iret
+SYM_CODE_END(xen_iret)
 
-ENTRY(xen_sysret64)
+SYM_CODE_START(xen_sysret64)
 	/*
 	 * We're already on the usermode stack at this point, but
 	 * still with the kernel gs, so we can easily switch back.
@@ -107,6 +108,7 @@ ENTRY(xen_sysret64)
 	pushq $VGCF_in_syscall
 	jmp hypercall_iret
+SYM_CODE_END(xen_sysret64)
 
 /*
  * Xen handles syscall callbacks much like ordinary exceptions, which
@@ -124,7 +126,7 @@ ENTRY(xen_sysret64)
 */
 
 /* Normal 64-bit system call target */
-ENTRY(xen_syscall_target)
+SYM_FUNC_START(xen_syscall_target)
 	popq %rcx
 	popq %r11
 
@@ -137,12 +139,12 @@ ENTRY(xen_syscall_target)
 	movq $__USER_CS, 1*8(%rsp)
 
 	jmp entry_SYSCALL_64_after_hwframe
-ENDPROC(xen_syscall_target)
+SYM_FUNC_END(xen_syscall_target)
 
 #ifdef CONFIG_IA32_EMULATION
 
 /* 32-bit compat syscall target */
-ENTRY(xen_syscall32_target)
+SYM_FUNC_START(xen_syscall32_target)
 	popq %rcx
 	popq %r11
 
@@ -155,25 +157,25 @@ ENTRY(xen_syscall32_target)
 	movq $__USER32_CS, 1*8(%rsp)
 
 	jmp entry_SYSCALL_compat_after_hwframe
-ENDPROC(xen_syscall32_target)
+SYM_FUNC_END(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
-ENTRY(xen_sysenter_target)
+SYM_FUNC_START(xen_sysenter_target)
 	mov 0*8(%rsp), %rcx
 	mov 1*8(%rsp), %r11
 	mov 5*8(%rsp), %rsp
 	jmp entry_SYSENTER_compat
-ENDPROC(xen_sysenter_target)
+SYM_FUNC_END(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
 
-ENTRY(xen_syscall32_target)
-ENTRY(xen_sysenter_target)
+SYM_FUNC_START_ALIAS(xen_syscall32_target)
+SYM_FUNC_START(xen_sysenter_target)
 	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
 	mov $-ENOSYS, %rax
 	pushq $0
 	jmp hypercall_iret
-ENDPROC(xen_syscall32_target)
-ENDPROC(xen_sysenter_target)
+SYM_FUNC_END(xen_sysenter_target)
+SYM_FUNC_END_ALIAS(xen_syscall32_target)
 
 #endif	/* CONFIG_IA32_EMULATION */
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index c1d8b90aa4e2..1d0cee3163e4 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -22,7 +22,7 @@
 
 #ifdef CONFIG_XEN_PV
 	__INIT
-ENTRY(startup_xen)
+SYM_CODE_START(startup_xen)
 	UNWIND_HINT_EMPTY
 	cld
 
@@ -52,13 +52,13 @@ ENTRY(startup_xen)
 #endif
 
 	jmp xen_start_kernel
-END(startup_xen)
+SYM_CODE_END(startup_xen)
 	__FINIT
 #endif
 
 .pushsection .text
 	.balign PAGE_SIZE
-ENTRY(hypercall_page)
+SYM_CODE_START(hypercall_page)
 	.rept (PAGE_SIZE / 32)
 		UNWIND_HINT_EMPTY
 		.skip 32
@@ -69,7 +69,7 @@ ENTRY(hypercall_page)
 	.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
 #include <asm/xen-hypercalls.h>
 #undef HYPERCALL
-END(hypercall_page)
+SYM_CODE_END(hypercall_page)
 .popsection
 
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS,       .asciz "linux")