Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--   arch/arm/kernel/debug.S            |   2
-rw-r--r--   arch/arm/kernel/entry-armv.S       |  12
-rw-r--r--   arch/arm/kernel/entry-common.S     |   2
-rw-r--r--   arch/arm/kernel/entry-header.S     |  11
-rw-r--r--   arch/arm/kernel/entry-v7m.S        |   4
-rw-r--r--   arch/arm/kernel/head-nommu.S       |   4
-rw-r--r--   arch/arm/kernel/hyp-stub.S         |   4
-rw-r--r--   arch/arm/kernel/irq.c              |  62
-rw-r--r--   arch/arm/kernel/machine_kexec.c    |   5
-rw-r--r--   arch/arm/kernel/patch.c            |   6
-rw-r--r--   arch/arm/kernel/process.c          |   2
-rw-r--r--   arch/arm/kernel/setup.c            |   6
-rw-r--r--   arch/arm/kernel/sleep.S            |  12
-rw-r--r--   arch/arm/kernel/smp.c              |  12
-rw-r--r--   arch/arm/kernel/smp_twd.c          |  66
-rw-r--r--   arch/arm/kernel/sys_oabi-compat.c  |   8
-rw-r--r--   arch/arm/kernel/unwind.c           |  14
17 files changed, 63 insertions, 169 deletions
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index b795dc2408c0..b9f94e03d916 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -86,7 +86,7 @@ hexbuf_rel:	.long	hexbuf_addr - .
 ENTRY(printascii)
 		addruart_current r3, r1, r2
 1:		teq	r0, #0
-		ldrneb	r1, [r0], #1
+		ldrbne	r1, [r0], #1
 		teqne	r1, #0
 		reteq	lr
 2:		teq	r1, #'\n'
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e85a3af9ddeb..ce4aea57130a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -636,7 +636,7 @@ call_fpe:
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
 	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
-	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
 ARM(	add	pc, pc, r8, lsr #6	)
@@ -872,7 +872,7 @@ __kuser_cmpxchg64:				@ 0xffff0f60
 	smp_dmb	arm
 1:	ldrexd	r0, r1, [r2]			@ load current val
 	eors	r3, r0, r4			@ compare with oldval (1)
-	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	eorseq	r3, r1, r5			@ compare with oldval (2)
 	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 	teqeq	r3, #1				@ success?
 	beq	1b				@ if no then retry
@@ -896,8 +896,8 @@ __kuser_cmpxchg64:				@ 0xffff0f60
 	ldmia	r1, {r6, lr}			@ load new val
 1:	ldmia	r2, {r0, r1}			@ load current val
 	eors	r3, r0, r4			@ compare with oldval (1)
-	eoreqs	r3, r1, r5			@ compare with oldval (2)
-2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	eorseq	r3, r1, r5			@ compare with oldval (2)
+2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
 	rsbs	r0, r3, #0			@ set return val and C flag
 	ldmfd	sp!, {r4, r5, r6, pc}
 
@@ -911,7 +911,7 @@ kuser_cmpxchg64_fixup:
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 	subs	r8, r4, r7
-	rsbcss	r8, r8, #(2b - 1b)
+	rsbscs	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 #if __LINUX_ARM_ARCH__ < 6
 	bcc	kuser_cmpxchg32_fixup
@@ -969,7 +969,7 @@ kuser_cmpxchg32_fixup:
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 	subs	r8, r4, r7
-	rsbcss	r8, r8, #(2b - 1b)
+	rsbscs	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 	ret	lr
 .previous
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 0465d65d23de..f7649adef505 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -373,7 +373,7 @@ sys_syscall:
 		movhs	scno, #0
 		csdb
 #endif
-		stmloia	sp, {r5, r6}		@ shuffle args
+		stmialo	sp, {r5, r6}		@ shuffle args
 		movlo	r0, r1
 		movlo	r1, r2
 		movlo	r2, r3
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 773424843d6e..32051ec5b33f 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -127,7 +127,8 @@
 	 */
 	.macro	v7m_exception_slow_exit ret_r0
 	cpsid	i
-	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+	ldr	lr, =exc_ret
+	ldr	lr, [lr]
 
 	@ read original r12, sp, lr, pc and xPSR
 	add	r12, sp, #S_IP
@@ -387,8 +388,8 @@
 	badr	lr, \ret			@ return address
 	.if	\reload
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
-	ldmccia	r1, {r0 - r6}			@ reload r0-r6
-	stmccia	sp, {r4, r5}			@ update stack arguments
+	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
+	stmiacc	sp, {r4, r5}			@ update stack arguments
 	.endif
 	ldrcc	pc, [\table, \tmp, lsl #2]	@ call sys_* routine
 #else
@@ -396,8 +397,8 @@
 	badr	lr, \ret			@ return address
 	.if	\reload
 	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
-	ldmccia	r1, {r0 - r6}			@ reload r0-r6
-	stmccia	sp, {r4, r5}			@ update stack arguments
+	ldmiacc	r1, {r0 - r6}			@ reload r0-r6
+	stmiacc	sp, {r4, r5}			@ update stack arguments
 	.endif
 	ldrcc	pc, [\table, \nr, lsl #2]	@ call sys_* routine
 #endif
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index abcf47848525..19d2dcd6530d 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -146,3 +146,7 @@ ENTRY(vector_table)
 	.rept	CONFIG_CPU_V7M_NUM_IRQ
 	.long	__irq_entry		@ External Interrupts
 	.endr
+	.align	2
+	.globl	exc_ret
+exc_ret:
+	.space	4
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index ec29de250076..c08d2d890f7b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -439,8 +439,8 @@ M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])
 	str	r5, [r12, #PMSAv8_RBAR_A(0)]
 	str	r6, [r12, #PMSAv8_RLAR_A(0)]
 #else
-	mcr	p15, 0, r5, c6, c10, 1			@ PRBAR4
-	mcr	p15, 0, r6, c6, c10, 2			@ PRLAR4
+	mcr	p15, 0, r5, c6, c10, 0			@ PRBAR4
+	mcr	p15, 0, r6, c6, c10, 1			@ PRLAR4
 #endif
 #endif
 	ret	lr
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 60146e32619a..82a942894fc0 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -180,8 +180,8 @@ ARM_BE8(orr	r7, r7, #(1 << 25))	@ HSCTLR.EE
 	@ Check whether GICv3 system registers are available
 	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
 	ubfx	r7, r7, #28, #4
-	cmp	r7, #1
-	bne	2f
+	teq	r7, #0
+	beq	2f
 
 	@ Enable system register accesses
 	mrc	p15, 4, r7, c12, c9, 5	@ ICC_HSRE
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9908dacf9229..844861368cd5 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/kallsyms.h>
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void)
 	return nr_irqs;
 }
 #endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-				i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index dd2eb5f76b9f..76300f3813e8 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
 	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
-	while (1)
+
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 void crash_smp_send_stop(void)
 {
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index a50dc00d79a2..d0a05a3bdb96 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -16,7 +16,7 @@ struct patch {
 	unsigned int insn;
 };
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 	__acquires(&patch_lock)
@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 		return addr;
 
 	if (flags)
-		spin_lock_irqsave(&patch_lock, *flags);
+		raw_spin_lock_irqsave(&patch_lock, *flags);
 	else
 		__acquire(&patch_lock);
 
@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 	clear_fixmap(fixmap);
 
 	if (flags)
-		spin_unlock_irqrestore(&patch_lock, *flags);
+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
 	else
 		__release(&patch_lock);
 }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 16601d1442d1..72cc0862a30e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -150,7 +150,7 @@ void __show_regs(struct pt_regs *regs)
 	if ((domain & domain_mask(DOMAIN_USER)) ==
 	    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 		segment = "none";
-	else if (fs == get_ds())
+	else if (fs == KERNEL_DS)
 		segment = "kernel";
 	else
 		segment = "user";
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 375b13f7e780..5d78b6ac0429 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		boot_alias_start = phys_to_idmap(start);
 		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
 			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+			if (!res)
+				panic("%s: Failed to allocate %zu bytes\n",
+				      __func__, sizeof(*res));
 			res->name = "System RAM (boot alias)";
 			res->start = boot_alias_start;
 			res->end = phys_to_idmap(end);
@@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 		}
 
 		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
+		if (!res)
+			panic("%s: Failed to allocate %zu bytes\n", __func__,
+			      sizeof(*res));
 		res->name  = "System RAM";
 		res->start = start;
 		res->end = end;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index a8257fc9cf2a..5dc8b80bb693 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -120,6 +120,14 @@ ENDPROC(cpu_resume_after_mmu)
 
 	.text
 	.align
+#ifdef CONFIG_MCPM
+	.arm
+THUMB(	.thumb	)
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be)			@ ensure we are in BE mode
+	b	no_hyp
+#endif
+
 #ifdef CONFIG_MMU
 	.arm
 ENTRY(cpu_resume_arm)
@@ -135,6 +143,7 @@ ARM_BE8(setend be)			@ ensure we are in BE mode
 	bl	__hyp_stub_install_secondary
 #endif
 	safe_svcmode_maskall r1
+no_hyp:
 	mov	r1, #0
 	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
 	ALT_UP_B(1f)
@@ -164,6 +173,9 @@ ENDPROC(cpu_resume)
 #ifdef CONFIG_MMU
 ENDPROC(cpu_resume_arm)
 #endif
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
+#endif
 
 	.align 2
 _sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3bf82232b1be..facd4240ca02 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -62,12 +62,6 @@
  */
 struct secondary_data secondary_data;
 
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;
-
 enum ipi_msg_type {
 	IPI_WAKEUP,
 	IPI_TIMER,
@@ -254,7 +248,7 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -604,8 +598,10 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-	while (1)
+	while (1) {
 		cpu_relax();
+		wfe();
+	}
 }
 
 static DEFINE_PER_CPU(struct completion *, cpu_completion);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b30eafeef096..3cdc399b9fc3 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -100,8 +100,6 @@ static void twd_timer_stop(void)
 	disable_percpu_irq(clk->irq);
 }
 
-#ifdef CONFIG_COMMON_CLK
-
 /*
  * Updates clockevent frequency when the cpu frequency changes.
  * Called on the cpu that is changing frequency with interrupts disabled.
@@ -143,54 +141,6 @@ static int twd_clk_init(void)
 }
 core_initcall(twd_clk_init);
 
-#elif defined (CONFIG_CPU_FREQ)
-
-#include <linux/cpufreq.h>
-
-/*
- * Updates clockevent frequency when the cpu frequency changes.
- * Called on the cpu that is changing frequency with interrupts disabled.
- */
-static void twd_update_frequency(void *data)
-{
-	twd_timer_rate = clk_get_rate(twd_clk);
-
-	clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
-}
-
-static int twd_cpufreq_transition(struct notifier_block *nb,
-	unsigned long state, void *data)
-{
-	struct cpufreq_freqs *freqs = data;
-
-	/*
-	 * The twd clock events must be reprogrammed to account for the new
-	 * frequency.  The timer is local to a cpu, so cross-call to the
-	 * changing cpu.
-	 */
-	if (state == CPUFREQ_POSTCHANGE)
-		smp_call_function_single(freqs->cpu, twd_update_frequency,
-			NULL, 1);
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block twd_cpufreq_nb = {
-	.notifier_call = twd_cpufreq_transition,
-};
-
-static int twd_cpufreq_init(void)
-{
-	if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
-		return cpufreq_register_notifier(&twd_cpufreq_nb,
-			CPUFREQ_TRANSITION_NOTIFIER);
-
-	return 0;
-}
-core_initcall(twd_cpufreq_init);
-
-#endif
-
 static void twd_calibrate_rate(void)
 {
@@ -366,21 +316,6 @@ out_free:
 	return err;
 }
 
-int __init twd_local_timer_register(struct twd_local_timer *tlt)
-{
-	if (twd_base || twd_evt)
-		return -EBUSY;
-
-	twd_ppi	= tlt->res[1].start;
-
-	twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
-	if (!twd_base)
-		return -ENOMEM;
-
-	return twd_local_timer_common_register(NULL);
-}
-
-#ifdef CONFIG_OF
 static int __init twd_local_timer_of_register(struct device_node *np)
 {
 	int err;
@@ -406,4 +341,3 @@ out:
 TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
 TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
 TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
-#endif
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index 92ab36f38795..acd054a42ba2 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -317,10 +317,10 @@ struct oabi_sembuf {
 asmlinkage long sys_oabi_semtimedop(int semid,
 				    struct oabi_sembuf __user *tsops,
 				    unsigned nsops,
-				    const struct timespec __user *timeout)
+				    const struct old_timespec32 __user *timeout)
 {
 	struct sembuf *sops;
-	struct timespec local_timeout;
+	struct old_timespec32 local_timeout;
 	long err;
 	int i;
 
@@ -350,7 +350,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
 	} else {
 		mm_segment_t fs = get_fs();
 		set_fs(KERNEL_DS);
-		err = sys_semtimedop(semid, sops, nsops, timeout);
+		err = sys_semtimedop_time32(semid, sops, nsops, timeout);
 		set_fs(fs);
 	}
 	kfree(sops);
@@ -375,7 +375,7 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
 		return sys_oabi_semtimedop(first,
 					   (struct oabi_sembuf __user *)ptr,
 					   second,
-					   (const struct timespec __user *)fifth);
+					   (const struct old_timespec32 __user *)fifth);
 	default:
 		return sys_ipc(call, first, second, third, ptr, fifth);
 	}
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 0bee233fef9a..314cfb232a63 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
 static const struct unwind_idx *__origin_unwind_idx;
 extern const struct unwind_idx __stop_unwind_idx[];
 
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 
 /* Convert a prel31 symbol to an absolute address */
@@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 		/* module unwind tables */
 		struct unwind_table *table;
 
-		spin_lock_irqsave(&unwind_lock, flags);
+		raw_spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
@@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&unwind_lock, flags);
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
 	}
 
 	pr_debug("%s: idx = %p\n", __func__, idx);
@@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	return tab;
 }
@@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
 	if (!tab)
 		return;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_del(&tab->list);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	kfree(tab);
 }
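Background note on the locking hunks above: patch.c and unwind.c switch from DEFINE_SPINLOCK to DEFINE_RAW_SPINLOCK, keeping the lock a true spinning lock even on PREEMPT_RT, since these paths can run in contexts that must not sleep. A minimal sketch of the resulting pattern follows; the lock, list, and function names here are illustrative only and are not taken from this diff.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Illustrative only: a raw lock guarding a short, non-sleeping critical section. */
static DEFINE_RAW_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_add(struct list_head *node)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() disables local interrupts and never sleeps. */
	raw_spin_lock_irqsave(&example_lock, flags);
	list_add_tail(node, &example_list);
	raw_spin_unlock_irqrestore(&example_lock, flags);
}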