author     Catalin Marinas <catalin.marinas@arm.com>  2020-03-25 14:10:46 +0300
committer  Catalin Marinas <catalin.marinas@arm.com>  2020-03-25 14:10:46 +0300
commit     0829a076958ddd203cf4824dd330c93ba4662815 (patch)
tree       c5d66e31e41b71b9ac1610cf82db0d413c226726 /arch/arm64/kernel
parent     da12d2739fb69531bf6bb6eb7e46d73d1dabc814 (diff)
parent     d4abd29d6775d4f3d0f01ca9a6de2d66dac74764 (diff)
download   linux-0829a076958ddd203cf4824dd330c93ba4662815.tar.xz
Merge branch 'for-next/asm-annotations' into for-next/core
* for-next/asm-annotations:
  : Modernise arm64 assembly annotations
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Mark call_smc_arch_workaround_1 as __maybe_unused
  arm64: entry-ftrace.S: Fix missing argument for CONFIG_FUNCTION_GRAPH_TRACER=y
  arm64: vdso32: Convert to modern assembler annotations
  arm64: vdso: Convert to modern assembler annotations
  arm64: sdei: Annotate SDEI entry points using new style annotations
  arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
  arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs
  arm64: kvm: Annotate assembly using modern annoations
  arm64: kernel: Convert to modern annotations for assembly data
  arm64: head: Annotate stext and preserve_boot_args as code
  arm64: head.S: Convert to modern annotations for assembly functions
  arm64: ftrace: Modernise annotation of return_to_handler
  arm64: ftrace: Correct annotation of ftrace_caller assembly
  arm64: entry-ftrace.S: Convert to modern annotations for assembly functions
  arm64: entry: Additional annotation conversions for entry.S
  arm64: entry: Annotate ret_from_fork as code
  arm64: entry: Annotate vector table and handlers as code
  arm64: crypto: Modernize names for AES function macros
  arm64: crypto: Modernize some extra assembly annotations
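In short, the series replaces the legacy ENTRY()/ENDPROC()/END() assembler markers with the SYM_*() annotations from <linux/linkage.h>: SYM_FUNC_START/SYM_FUNC_END for callable functions, SYM_CODE_START/SYM_CODE_END for non-callable code such as exception vectors and entry trampolines, SYM_DATA_START/SYM_DATA_END for data, and SYM_INNER_LABEL for labels inside an annotated block. A minimal before/after sketch of the conversion pattern (my_func, my_entry and my_label are made-up names for illustration, not symbols touched by this series):

    // Old style (illustrative only)
    ENTRY(my_func)
        mov     x0, #0
        ret
    ENDPROC(my_func)

    // New style: a callable function
    SYM_FUNC_START(my_func)
        mov     x0, #0
        ret
    SYM_FUNC_END(my_func)

    // New style: non-callable entry code with a globally visible inner label
    SYM_CODE_START(my_entry)
    SYM_INNER_LABEL(my_label, SYM_L_GLOBAL)
        b       .
    SYM_CODE_END(my_entry)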
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/cpu_errata.c         18
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S       48
-rw-r--r--  arch/arm64/kernel/entry.S             115
-rw-r--r--  arch/arm64/kernel/head.S               75
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S      4
-rw-r--r--  arch/arm64/kernel/vdso32/sigreturn.S   23
6 files changed, 139 insertions(+), 144 deletions(-)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 703ad0a84f99..df56d2295d16 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -11,6 +11,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>
static bool __maybe_unused
@@ -113,13 +114,10 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
-
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
- void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
int i;
for (i = 0; i < SZ_2K; i += 0x80)
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
raw_spin_unlock(&bp_lock);
}
#else
-#define __smccc_workaround_1_smc_start NULL
-#define __smccc_workaround_1_smc_end NULL
-
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -176,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
#include <linux/arm-smccc.h>
-static void call_smc_arch_workaround_1(void)
+static void __maybe_unused call_smc_arch_workaround_1(void)
{
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
smccc_end = NULL;
break;
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
case SMCCC_CONDUIT_SMC:
cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
+ smccc_start = __smccc_workaround_1_smc;
+ smccc_end = __smccc_workaround_1_smc +
+ __SMCCC_WORKAROUND_1_SMC_SZ;
break;
+#endif
default:
return -1;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 7d02f9966d34..833d48c9acb5 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -75,27 +75,27 @@
add x29, sp, #S_STACKFRAME
.endm
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
ftrace_regs_entry 1
b ftrace_common
-ENDPROC(ftrace_regs_caller)
+SYM_CODE_END(ftrace_regs_caller)
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
ftrace_regs_entry 0
b ftrace_common
-ENDPROC(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
-ENTRY(ftrace_common)
+SYM_CODE_START(ftrace_common)
sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
mov x1, x9 // parent_ip (callsite's LR)
ldr_l x2, function_trace_op // op
mov x3, sp // regs
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -122,17 +122,17 @@ ftrace_common_return:
add sp, sp, #S_FRAME_SIZE + 16
ret x9
-ENDPROC(ftrace_common)
+SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
-ENDPROC(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
#endif
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
mcount_enter
ldr_l x2, ftrace_trace_function
@@ -242,7 +242,7 @@ skip_ftrace_call: // }
b.ne ftrace_graph_caller // ftrace_graph_caller();
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -253,9 +253,9 @@ NOKPROBE(_mcount)
* and later on, NOP to branch to ftrace_caller() when enabled or branch to
* NOP when disabled per-function base.
*/
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)
@@ -268,24 +268,24 @@ NOKPROBE(_mcount)
* - tracer function to probe instrumented function's entry,
* - ftrace_graph_caller to set up an exit hook
*/
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
mcount_enter
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
-GLOBAL(ftrace_call) // tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -298,20 +298,20 @@ ENDPROC(ftrace_caller)
* the call stack in order to intercept instrumented function's return path
* and run return_to_handler() later on its exit.
*/
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
mcount_get_pc x0 // function's pc
mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
@@ -320,7 +320,7 @@ ENDPROC(ftrace_stub)
* Run ftrace_return_to_handler() before going back to parent.
* @fp is checked against the value passed by ftrace_graph_caller().
*/
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
/* save return value regs */
sub sp, sp, #64
stp x0, x1, [sp]
@@ -340,5 +340,5 @@ ENTRY(return_to_handler)
add sp, sp, #64
ret
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9461d812ae27..e5d4e30ee242 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -465,7 +465,7 @@ alternative_endif
.pushsection ".entry.text", "ax"
.align 11
-ENTRY(vectors)
+SYM_CODE_START(vectors)
kernel_ventry 1, sync_invalid // Synchronous EL1t
kernel_ventry 1, irq_invalid // IRQ EL1t
kernel_ventry 1, fiq_invalid // FIQ EL1t
@@ -492,7 +492,7 @@ ENTRY(vectors)
kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
-END(vectors)
+SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
/*
@@ -534,57 +534,57 @@ __bad_stack:
ASM_BUG()
.endm
-el0_sync_invalid:
+SYM_CODE_START_LOCAL(el0_sync_invalid)
inv_entry 0, BAD_SYNC
-ENDPROC(el0_sync_invalid)
+SYM_CODE_END(el0_sync_invalid)
-el0_irq_invalid:
+SYM_CODE_START_LOCAL(el0_irq_invalid)
inv_entry 0, BAD_IRQ
-ENDPROC(el0_irq_invalid)
+SYM_CODE_END(el0_irq_invalid)
-el0_fiq_invalid:
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
inv_entry 0, BAD_FIQ
-ENDPROC(el0_fiq_invalid)
+SYM_CODE_END(el0_fiq_invalid)
-el0_error_invalid:
+SYM_CODE_START_LOCAL(el0_error_invalid)
inv_entry 0, BAD_ERROR
-ENDPROC(el0_error_invalid)
+SYM_CODE_END(el0_error_invalid)
#ifdef CONFIG_COMPAT
-el0_fiq_invalid_compat:
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
inv_entry 0, BAD_FIQ, 32
-ENDPROC(el0_fiq_invalid_compat)
+SYM_CODE_END(el0_fiq_invalid_compat)
#endif
-el1_sync_invalid:
+SYM_CODE_START_LOCAL(el1_sync_invalid)
inv_entry 1, BAD_SYNC
-ENDPROC(el1_sync_invalid)
+SYM_CODE_END(el1_sync_invalid)
-el1_irq_invalid:
+SYM_CODE_START_LOCAL(el1_irq_invalid)
inv_entry 1, BAD_IRQ
-ENDPROC(el1_irq_invalid)
+SYM_CODE_END(el1_irq_invalid)
-el1_fiq_invalid:
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
inv_entry 1, BAD_FIQ
-ENDPROC(el1_fiq_invalid)
+SYM_CODE_END(el1_fiq_invalid)
-el1_error_invalid:
+SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
-ENDPROC(el1_error_invalid)
+SYM_CODE_END(el1_error_invalid)
/*
* EL1 mode handlers.
*/
.align 6
-el1_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
kernel_entry 1
mov x0, sp
bl el1_sync_handler
kernel_exit 1
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
.align 6
-el1_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
kernel_entry 1
gic_prio_irq_setup pmr=x20, tmp=x1
enable_da_f
@@ -639,42 +639,42 @@ alternative_else_nop_endif
#endif
kernel_exit 1
-ENDPROC(el1_irq)
+SYM_CODE_END(el1_irq)
/*
* EL0 mode handlers.
*/
.align 6
-el0_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
kernel_entry 0
mov x0, sp
bl el0_sync_handler
b ret_to_user
-ENDPROC(el0_sync)
+SYM_CODE_END(el0_sync)
#ifdef CONFIG_COMPAT
.align 6
-el0_sync_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
kernel_entry 0, 32
mov x0, sp
bl el0_sync_compat_handler
b ret_to_user
-ENDPROC(el0_sync_compat)
+SYM_CODE_END(el0_sync_compat)
.align 6
-el0_irq_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
kernel_entry 0, 32
b el0_irq_naked
-ENDPROC(el0_irq_compat)
+SYM_CODE_END(el0_irq_compat)
-el0_error_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
kernel_entry 0, 32
b el0_error_naked
-ENDPROC(el0_error_compat)
+SYM_CODE_END(el0_error_compat)
#endif
.align 6
-el0_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
kernel_entry 0
el0_irq_naked:
gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +696,9 @@ el0_irq_naked:
bl trace_hardirqs_on
#endif
b ret_to_user
-ENDPROC(el0_irq)
+SYM_CODE_END(el0_irq)
-el1_error:
+SYM_CODE_START_LOCAL(el1_error)
kernel_entry 1
mrs x1, esr_el1
gic_prio_kentry_setup tmp=x2
@@ -706,9 +706,9 @@ el1_error:
mov x0, sp
bl do_serror
kernel_exit 1
-ENDPROC(el1_error)
+SYM_CODE_END(el1_error)
-el0_error:
+SYM_CODE_START_LOCAL(el0_error)
kernel_entry 0
el0_error_naked:
mrs x25, esr_el1
@@ -720,7 +720,7 @@ el0_error_naked:
bl do_serror
enable_da_f
b ret_to_user
-ENDPROC(el0_error)
+SYM_CODE_END(el0_error)
/*
* Ok, we need to do extra processing, enter the slow path.
@@ -832,7 +832,7 @@ alternative_else_nop_endif
.endm
.align 11
-ENTRY(tramp_vectors)
+SYM_CODE_START_NOALIGN(tramp_vectors)
.space 0x400
tramp_ventry
@@ -844,24 +844,24 @@ ENTRY(tramp_vectors)
tramp_ventry 32
tramp_ventry 32
tramp_ventry 32
-END(tramp_vectors)
+SYM_CODE_END(tramp_vectors)
-ENTRY(tramp_exit_native)
+SYM_CODE_START(tramp_exit_native)
tramp_exit
-END(tramp_exit_native)
+SYM_CODE_END(tramp_exit_native)
-ENTRY(tramp_exit_compat)
+SYM_CODE_START(tramp_exit_compat)
tramp_exit 32
-END(tramp_exit_compat)
+SYM_CODE_END(tramp_exit_compat)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
.align PAGE_SHIFT
- .globl __entry_tramp_data_start
-__entry_tramp_data_start:
+SYM_DATA_START(__entry_tramp_data_start)
.quad vectors
+SYM_DATA_END(__entry_tramp_data_start)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -874,7 +874,7 @@ __entry_tramp_data_start:
* Previous and next are guaranteed not to be the same.
*
*/
-ENTRY(cpu_switch_to)
+SYM_FUNC_START(cpu_switch_to)
mov x10, #THREAD_CPU_CONTEXT
add x8, x0, x10
mov x9, sp
@@ -896,20 +896,20 @@ ENTRY(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ret
-ENDPROC(cpu_switch_to)
+SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
/*
* This is how we return from a fork.
*/
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
blr x19
1: get_current_task tsk
b ret_to_user
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
#ifdef CONFIG_ARM_SDE_INTERFACE
@@ -938,7 +938,7 @@ NOKPROBE(ret_from_fork)
*/
.ltorg
.pushsection ".entry.tramp.text", "ax"
-ENTRY(__sdei_asm_entry_trampoline)
+SYM_CODE_START(__sdei_asm_entry_trampoline)
mrs x4, ttbr1_el1
tbz x4, #USER_ASID_BIT, 1f
@@ -960,7 +960,7 @@ ENTRY(__sdei_asm_entry_trampoline)
ldr x4, =__sdei_asm_handler
#endif
br x4
-ENDPROC(__sdei_asm_entry_trampoline)
+SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)
/*
@@ -970,21 +970,22 @@ NOKPROBE(__sdei_asm_entry_trampoline)
* x2: exit_mode
* x4: struct sdei_registered_event argument from registration time.
*/
-ENTRY(__sdei_asm_exit_trampoline)
+SYM_CODE_START(__sdei_asm_exit_trampoline)
ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
cbnz x4, 1f
tramp_unmap_kernel tmp=x4
1: sdei_handler_exit exit_mode=x2
-ENDPROC(__sdei_asm_exit_trampoline)
+SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
-__sdei_asm_trampoline_next_handler:
+SYM_DATA_START(__sdei_asm_trampoline_next_handler)
.quad __sdei_asm_handler
+SYM_DATA_END(__sdei_asm_trampoline_next_handler)
.popsection // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -1002,7 +1003,7 @@ __sdei_asm_trampoline_next_handler:
* follow SMC-CC. We save (or retrieve) all the registers as the handler may
* want them.
*/
-ENTRY(__sdei_asm_handler)
+SYM_CODE_START(__sdei_asm_handler)
stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
@@ -1085,6 +1086,6 @@ alternative_else_nop_endif
tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
br x5
#endif
-ENDPROC(__sdei_asm_handler)
+SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index f79023c9b374..5fa9daa1d227 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -105,7 +105,7 @@ pe_header:
* x24 __primary_switch() .. relocate_kernel()
* current RELR displacement
*/
-ENTRY(stext)
+SYM_CODE_START(stext)
bl preserve_boot_args
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
adrp x23, __PHYS_OFFSET
@@ -120,12 +120,12 @@ ENTRY(stext)
*/
bl __cpu_setup // initialise processor
b __primary_switch
-ENDPROC(stext)
+SYM_CODE_END(stext)
/*
* Preserve the arguments passed by the bootloader in x0 .. x3
*/
-preserve_boot_args:
+SYM_CODE_START_LOCAL(preserve_boot_args)
mov x21, x0 // x21=FDT
adr_l x0, boot_args // record the contents of
@@ -137,7 +137,7 @@ preserve_boot_args:
mov x1, #0x20 // 4 x 8 bytes
b __inval_dcache_area // tail call
-ENDPROC(preserve_boot_args)
+SYM_CODE_END(preserve_boot_args)
/*
* Macro to create a table entry to the next page.
@@ -275,7 +275,7 @@ ENDPROC(preserve_boot_args)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled
*/
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
mov x28, lr
/*
@@ -403,14 +403,14 @@ __create_page_tables:
bl __inval_dcache_area
ret x28
-ENDPROC(__create_page_tables)
+SYM_FUNC_END(__create_page_tables)
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
adrp x4, init_thread_union
add sp, x4, #THREAD_SIZE
adr_l x5, init_task
@@ -455,7 +455,7 @@ __primary_switched:
mov x29, #0
mov x30, #0
b start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
/*
* end early head section, begin head code that is also used for
@@ -463,8 +463,9 @@ ENDPROC(__primary_switched)
*/
.section ".idmap.text","awx"
-ENTRY(kimage_vaddr)
+SYM_DATA_START(kimage_vaddr)
.quad _text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
EXPORT_SYMBOL(kimage_vaddr)
/*
@@ -474,7 +475,7 @@ EXPORT_SYMBOL(kimage_vaddr)
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively.
*/
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
msr SPsel, #1 // We want to use SP_EL{1,2}
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
@@ -598,7 +599,7 @@ set_hcr:
isb
ret
-install_el2_stub:
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
/*
* When VHE is not in use, early init of EL2 and EL1 needs to be
* done here.
@@ -635,13 +636,13 @@ install_el2_stub:
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
/*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
* in w0. See arch/arm64/include/asm/virt.h for more info.
*/
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
adr_l x1, __boot_cpu_mode
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
@@ -650,7 +651,7 @@ set_cpu_boot_mode_flag:
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
/*
* These values are written with the MMU off, but read with the MMU on.
@@ -666,15 +667,17 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
-ENTRY(__boot_cpu_mode)
+SYM_DATA_START(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2
.long BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
*/
-ENTRY(__early_cpu_boot_status)
+SYM_DATA_START(__early_cpu_boot_status)
.quad 0
+SYM_DATA_END(__early_cpu_boot_status)
.popsection
@@ -682,7 +685,7 @@ ENTRY(__early_cpu_boot_status)
* This provides a "holding pen" for platforms to hold all secondary
* cores are held until we're ready for them to initialise.
*/
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
bl el2_setup // Drop to EL1, w0=cpu_boot_mode
bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
@@ -694,19 +697,19 @@ pen: ldr x4, [x3]
b.eq secondary_startup
wfe
b pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
/*
* Secondary entry point that jumps straight into the kernel. Only to
* be used where CPUs are brought online dynamically by the kernel.
*/
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
bl el2_setup // Drop to EL1
bl set_cpu_boot_mode_flag
b secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
/*
* Common entry point for secondary CPUs.
*/
@@ -716,9 +719,9 @@ secondary_startup:
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
adr_l x5, vectors
msr vbar_el1, x5
isb
@@ -733,13 +736,13 @@ __secondary_switched:
mov x29, #0
mov x30, #0
b secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
wfe
wfi
b __secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -771,7 +774,7 @@ ENDPROC(__secondary_too_slow)
* Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU
*/
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -795,9 +798,9 @@ ENTRY(__enable_mmu)
dsb nsh
isb
ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
ldr_l x0, vabits_actual
cmp x0, #52
@@ -815,9 +818,9 @@ ENTRY(__cpu_secondary_check52bitva)
#endif
2: ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
/* Indicate that this CPU can't boot and is stuck in the kernel */
update_early_cpu_boot_status \
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -825,10 +828,10 @@ __no_granule_support:
wfe
wfi
b 1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
#ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
/*
* Iterate over each entry in the relocation table, and apply the
* relocations in place.
@@ -930,10 +933,10 @@ __relocate_kernel:
#endif
ret
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
#endif
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
#ifdef CONFIG_RANDOMIZE_BASE
mov x19, x0 // preserve new SCTLR_EL1 value
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
@@ -976,4 +979,4 @@ __primary_switch:
ldr x8, =__primary_switched
adrp x0, __PHYS_OFFSET
br x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
index 0723aa398d6e..12324863d5c2 100644
--- a/arch/arm64/kernel/vdso/sigreturn.S
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -14,7 +14,7 @@
.text
nop
-ENTRY(__kernel_rt_sigreturn)
+SYM_FUNC_START(__kernel_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
.cfi_def_cfa x29, 0
@@ -23,4 +23,4 @@ ENTRY(__kernel_rt_sigreturn)
mov x8, #__NR_rt_sigreturn
svc #0
.cfi_endproc
-ENDPROC(__kernel_rt_sigreturn)
+SYM_FUNC_END(__kernel_rt_sigreturn)
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
index 1a81277c2d09..620524969696 100644
--- a/arch/arm64/kernel/vdso32/sigreturn.S
+++ b/arch/arm64/kernel/vdso32/sigreturn.S
@@ -10,13 +10,6 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#define ARM_ENTRY(name) \
- ENTRY(name)
-
-#define ARM_ENDPROC(name) \
- .type name, %function; \
- END(name)
-
.text
.arm
@@ -24,39 +17,39 @@
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_arm)
+SYM_FUNC_START(__kernel_sigreturn_arm)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_arm)
+SYM_FUNC_END(__kernel_sigreturn_arm)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_arm)
+SYM_FUNC_START(__kernel_rt_sigreturn_arm)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_arm)
+SYM_FUNC_END(__kernel_rt_sigreturn_arm)
.thumb
.fnstart
.save {r0-r15}
.pad #COMPAT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_sigreturn_thumb)
+SYM_FUNC_START(__kernel_sigreturn_thumb)
mov r7, #__NR_compat_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_sigreturn_thumb)
+SYM_FUNC_END(__kernel_sigreturn_thumb)
.fnstart
.save {r0-r15}
.pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
nop
-ARM_ENTRY(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_START(__kernel_rt_sigreturn_thumb)
mov r7, #__NR_compat_rt_sigreturn
svc #0
.fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_END(__kernel_rt_sigreturn_thumb)