Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/armksyms.c     |  6
-rw-r--r--  arch/arm/kernel/entry-armv.S   |  2
-rw-r--r--  arch/arm/kernel/entry-common.S | 62
-rw-r--r--  arch/arm/kernel/head.S         |  3
-rw-r--r--  arch/arm/kernel/irq.c          |  1
-rw-r--r--  arch/arm/kernel/perf_event.c   |  7
-rw-r--r--  arch/arm/kernel/process.c      |  7
-rw-r--r--  arch/arm/kernel/reboot.c       |  2
-rw-r--r--  arch/arm/kernel/setup.c        |  6
-rw-r--r--  arch/arm/kernel/signal.c       |  6
-rw-r--r--  arch/arm/kernel/smp.c          | 21
-rw-r--r--  arch/arm/kernel/vdso.c         |  7
12 files changed, 94 insertions(+), 36 deletions(-)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a35d72d30b56..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
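
These prototypes and exports back ARM's memset_io()/memcpy_toio()/
memcpy_fromio() wrappers, which are implemented on top of mmioset()/mmiocpy();
without the exports, modular drivers using those accessors fail to link. A
minimal sketch of such a caller: my_dev_push() and MY_FIFO_OFF are
hypothetical, only memset_io()/memcpy_toio() are real kernel APIs.

    /* Hypothetical module code: on ARM, memset_io()/memcpy_toio() expand
     * to mmioset()/mmiocpy(), which is why the exports above are needed.
     * MY_FIFO_OFF is a made-up register offset. */
    #include <linux/io.h>

    #define MY_FIFO_OFF 0x40

    static void my_dev_push(void __iomem *base, const void *buf, size_t len)
    {
            memset_io(base + MY_FIFO_OFF, 0, len);      /* -> mmioset() */
            memcpy_toio(base + MY_FIFO_OFF, buf, len);  /* -> mmiocpy() */
    }
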
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 61f00a3f3047..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -419,7 +419,7 @@ ENDPROC(__fiq_abt)
zero_fp
.if \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
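
This guard changes because trace_hardirqs_off() is available whenever
CONFIG_TRACE_IRQFLAGS is enabled; CONFIG_IRQSOFF_TRACER is only one of the
options that selects it, so testing the tracer symbol skipped the call on,
e.g., lockdep-only configurations. A sketch of the relevant stubs, roughly
as <linux/irqflags.h> arranges them:

    /* Rough shape of include/linux/irqflags.h: the declarations hang off
     * CONFIG_TRACE_IRQFLAGS, not off CONFIG_IRQSOFF_TRACER. */
    #ifdef CONFIG_TRACE_IRQFLAGS
      extern void trace_hardirqs_on(void);
      extern void trace_hardirqs_off(void);
    #else
    # define trace_hardirqs_on()  do { } while (0)
    # define trace_hardirqs_off() do { } while (0)
    #endif
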
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 189154980703..30a7228eaceb 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
.align 5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
- * This is the fast syscall return path. We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path. We do as little as possible here,
+ * such as avoiding writing r0 to the stack. We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
*/
ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK
- bne __sys_trace_return
- tst r1, #_TIF_WORK_MASK
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
bne fast_work_pending
- asm_trace_hardirqs_on
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
- ct_user_enter
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+ /* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
-work_pending:
+ /* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled. As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind )
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+ tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ beq no_work_pending
+ UNWIND(.fnend )
+ENDPROC(ret_fast_syscall)
+
+ /* Slower path - fall through to work_pending */
+#endif
+
+ tst r1, #_TIF_SYSCALL_WORK
+ bne __sys_trace_return_nosave
+slow_work_pending:
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
bl do_work_pending
@@ -61,19 +81,23 @@ work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them. Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
*/
ENTRY(ret_to_user)
ret_slow_syscall:
- disable_irq @ disable interrupts
+ disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
- bne work_pending
+ bne slow_work_pending
no_work_pending:
- asm_trace_hardirqs_on
+ asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
@@ -253,6 +277,12 @@ __sys_trace_return:
bl syscall_trace_exit
b ret_slow_syscall
+__sys_trace_return_nosave:
+ enable_irq_notrace
+ mov r0, sp
+ bl syscall_trace_exit
+ b ret_slow_syscall
+
.align 5
#ifdef CONFIG_ALIGNMENT_TRAP
.type __cr_alignment, #object
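
A detail shared by both versions of the fast path above: the old code tested
_TIF_SYSCALL_WORK and _TIF_WORK_MASK with two separate tst/branch pairs, while
the new code ORs the two masks into a single tst, since all the fast path needs
to know is whether any work is pending at all. A standalone C illustration of
the merged test (the mask values are illustrative, not the kernel's):

    #include <assert.h>

    #define TIF_SYSCALL_WORK 0x0f00u   /* illustrative values only */
    #define TIF_WORK_MASK    0x000fu

    static int any_work(unsigned int flags)
    {
            /* one AND against the OR of both masks replaces two tests */
            return (flags & (TIF_SYSCALL_WORK | TIF_WORK_MASK)) != 0;
    }

    int main(void)
    {
            assert(any_work(0x0100));   /* syscall-trace work pending */
            assert(any_work(0x0002));   /* signal/resched work pending */
            assert(!any_work(0));       /* nothing pending: fast return */
            return 0;
    }
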
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index d56e5e9a9e1e..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
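
The three ARM_BE8 eor instructions are the classic XOR swap: ldrd loads the
64-bit secondary_data.pgdir into the r4/r5 pair, and on a big-endian (BE8)
kernel the two 32-bit halves come out transposed, so they are exchanged in
place without a scratch register. The same trick in standalone C, with the
usual caveat that the two operands must not alias:

    #include <assert.h>

    static void xor_swap(unsigned long *a, unsigned long *b)
    {
            /* if a and b point to the same object, it ends up zeroed */
            *a ^= *b;   /* a = a^b              */
            *b ^= *a;   /* b = b^(a^b) = old a  */
            *a ^= *b;   /* a = (a^b)^a = old b  */
    }

    int main(void)
    {
            unsigned long r4 = 0x1000, r5 = 0x2000;
            xor_swap(&r4, &r5);
            assert(r4 == 0x2000 && r5 == 0x1000);
            return 0;
    }
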
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 350f188c92d2..b96c8ed1723a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
#include <linux/export.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57ea83f4..7d5379c1c443 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq))
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ cpumask_setall(&pmu->supported_cpus);
return 0;
+ }
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
@@ -818,12 +820,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
break;
- of_node_put(dn);
if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n",
dn->name);
+ of_node_put(dn);
break;
}
+ of_node_put(dn);
irqs[i] = cpu;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
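
The of_node_put() reordering fixes a use-after-put: the reference on dn was
dropped before dn->name was printed in the warning, and the put now sits on
both the error path and the fall-through path exactly once. A standalone model
of the get/use/put ordering rule (struct node and its helper are made up for
illustration; the real API is of_node_get()/of_node_put()):

    #include <assert.h>
    #include <stdio.h>

    struct node { int refs; const char *name; };

    static void node_put(struct node *n)
    {
            assert(n->refs > 0);    /* putting a dead node is a bug */
            n->refs--;
    }

    int main(void)
    {
            struct node dn = { .refs = 1, .name = "pmu" };

            /* Buggy order (pre-patch): node_put(&dn) here, then dn.name
             * below would be used after the reference was dropped. */
            printf("Failed to find logical CPU for %s\n", dn.name);
            node_put(&dn);          /* fixed: drop after the last use */
            assert(dn.refs == 0);
            return 0;
    }
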
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3f18098dfd08..a3089bacb8d8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
ledtrig_cpu(CPU_LED_IDLE_END);
}
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
- cpu_die();
-}
-#endif
-
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232796be..38269358fd25 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
flush_cache_all();
/* Switch to the identity mapping. */
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
phys_reset((unsigned long)addr);
/* Should never get here. */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b73c1f4..e2ecee6b70ca 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
+#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
@@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p)
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
+ if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+ early_fixmap_init();
+
parse_early_param();
#ifdef CONFIG_MMU
@@ -1015,7 +1019,7 @@ static int __init topology_init(void)
for_each_possible_cpu(cpu) {
struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = 1;
+ cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
register_cpu(&cpuinfo->cpu, cpu);
}
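
The early_fixmap_init() call added above gives an MMIO earlycon a mapping for
its registers before paging_init() runs, so a console requested on the kernel
command line can start printing this early. An illustrative command-line
fragment (the MMIO address is made up):

    earlycon=uart8250,mmio32,0x1c090000
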
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..b6cda06b455f 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
+ /*
+ * The assembly code enters us with IRQs off, but it hasn't
+ * informed the tracing code of that for efficiency reasons.
+ * Update the trace code with the current status.
+ */
+ trace_hardirqs_off();
do {
if (likely(thread_flags & _TIF_NEED_RESCHED)) {
schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8..ba0063c539c3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
if (smp_ops.cpu_disable)
return smp_ops.cpu_disable(cpu);
+ return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ /* cpu_die must be specified to support hotplug */
+ if (!smp_ops.cpu_die)
+ return 0;
+
+ if (smp_ops.cpu_can_disable)
+ return smp_ops.cpu_can_disable(cpu);
+
/*
* By default, allow disabling all CPUs except the first one,
* since this is special on a lot of platforms, e.g. because
* of clock tick interrupts.
*/
- return cpu == 0 ? -EPERM : 0;
+ return cpu != 0;
}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
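
platform_can_hotplug_cpu() is the policy hook behind the setup.c hotpluggable
change above: no cpu_die operation means no hotplug at all, a platform-supplied
cpu_can_disable() is authoritative when present, and otherwise every CPU but
CPU 0 is allowed. A sketch of how a platform might opt in; cpu_die and
cpu_can_disable are real struct smp_operations members, while the my_soc_*
names are hypothetical:

    #include <asm/barrier.h>
    #include <asm/smp.h>

    static void my_soc_cpu_die(unsigned int cpu)
    {
            for (;;)
                    wfi();  /* park until the power domain is cut */
    }

    static int my_soc_cpu_can_disable(unsigned int cpu)
    {
            return 1;   /* every CPU on this SoC may be unplugged */
    }

    static const struct smp_operations my_soc_smp_ops __initconst = {
            .cpu_die         = my_soc_cpu_die,
            .cpu_can_disable = my_soc_cpu_can_disable,
    };
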
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
{
unsigned int cpu = smp_processor_id();
@@ -578,7 +591,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry(ipi_types[ipinr]);
+ trace_ipi_entry_rcuidle(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
}
@@ -637,7 +650,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit(ipi_types[ipinr]);
+ trace_ipi_exit_rcuidle(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
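
Deriving the coarse fields directly from the timekeeper handed to
update_vsyscall(), rather than via __current_kernel_time(), keeps them
consistent with the rest of the snapshot written under vdso_write_begin().
Note that tkr_mono.xtime_nsec is stored pre-shifted by tkr_mono.shift to
retain sub-nanosecond precision, which is why the nanosecond value is
recovered with a right shift. A standalone illustration of that round trip
with made-up numbers:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t shift = 24;                         /* typical clocksource shift */
            uint64_t xtime_nsec = 123456789ULL << shift; /* stored pre-shifted */
            uint32_t coarse_nsec = (uint32_t)(xtime_nsec >> shift);

            assert(coarse_nsec == 123456789u);
            return 0;
    }
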