From 5c5b06c3c09510a2e90f6453266823dc6f940c70 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 16 Aug 2012 07:49:31 +0000 Subject: ARM: kprobes: make more tests conditional The mls instruction is not available in ARMv6K or below, so we should make the test conditional on at least ARMv7. ldrexd/strexd are available in ARMv6K or ARMv7, which we can test by checking the CONFIG_CPU_32v6K symbol. /tmp/ccuMTZ8D.s: Assembler messages: /tmp/ccuMTZ8D.s:22188: Error: selected processor does not support ARM mode `mls r0,r1,r2,r3' /tmp/ccuMTZ8D.s:22222: Error: selected processor does not support ARM mode `mlshi r7,r8,r9,r10' /tmp/ccuMTZ8D.s:22252: Error: selected processor does not support ARM mode `mls lr,r1,r2,r13' Signed-off-by: Arnd Bergmann Acked-by: Jon Medhurst Acked-by: Nicolas Pitre Cc: Russell King Cc: Leif Lindholm --- arch/arm/kernel/kprobes-test-arm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c index 38c1a3b103a0..839312905067 100644 --- a/arch/arm/kernel/kprobes-test-arm.c +++ b/arch/arm/kernel/kprobes-test-arm.c @@ -366,7 +366,9 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3") TEST_UNSUPPORTED(".word 0xe0500090 @ undef") TEST_UNSUPPORTED(".word 0xe05fff9f @ undef") +#endif +#if __LINUX_ARM_ARCH__ >= 7 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"") TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"") TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13") @@ -456,6 +458,8 @@ void kprobe_arm_test_cases(void) TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */ #if __LINUX_ARM_ARCH__ >= 6 TEST_UNSUPPORTED("ldrex r2, [sp]") +#endif +#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K) TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]") TEST_UNSUPPORTED("ldrexd r2, r3, [sp]") TEST_UNSUPPORTED("strexb r0, r2, [sp]") -- cgit v1.2.3 From 05c769823cd0648fc56c8f0289c5f14d465389a8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 16 Aug 2012 07:49:26 +0000 Subject: ARM: export set_irq_flags The recently added Emma Mobile GPIO driver calls set_irq_flags and irq_set_chip_and_handler for the interrupts it exports and it can be built as a module, which currently fails with ERROR: "set_irq_flags" [drivers/gpio/gpio-em.ko] undefined! We either need to replace the call to set_irq_flags with something else or export that function. This patch does the latter. Signed-off-by: Arnd Bergmann Acked-by: Thomas Gleixner Cc: Magnus Damm Cc: Linus Walleij Cc: Rafael J. 
Wysocki Cc: Russell King --- arch/arm/kernel/irq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 16cedb42c0c3..896165096d6a 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -109,6 +110,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) /* Order is clear bits in "clr" then set bits in "set" */ irq_modify_status(irq, clr, set & ~clr); } +EXPORT_SYMBOL_GPL(set_irq_flags); void __init init_IRQ(void) { -- cgit v1.2.3 From 68687c842caefd5386d47a571fb4725df3556891 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 15 Oct 2012 00:16:49 +0100 Subject: ARM: fix oops on initial entry to userspace with Thumb2 kernels Daniel Mack reports an oops at boot with the latest kernels: Internal error: Oops - undefined instruction: 0 [#1] SMP THUMB2 Modules linked in: CPU: 0 Not tainted (3.6.0-11057-g584df1d #145) PC is at cpsw_probe+0x45a/0x9ac LR is at trace_hardirqs_on_caller+0x8f/0xfc pc : [] lr : [] psr: 60000113 sp : cf055fb0 ip : 00000000 fp : 00000000 r10: 00000000 r9 : 00000000 r8 : 00000000 r7 : 00000000 r6 : 00000000 r5 : c0344555 r4 : 00000000 r3 : cf057a40 r2 : 00000000 r1 : 00000001 r0 : 00000000 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user Control: 50c5387d Table: 8f3f4019 DAC: 00000015 Process init (pid: 1, stack limit = 0xcf054240) Stack: (0xcf055fb0 to 0xcf056000) 5fa0: 00000001 00000000 00000000 00000000 5fc0: cf055fb0 c000d1a8 00000000 00000000 00000000 00000000 00000000 00000000 5fe0: 00000000 be9b3f10 00000000 b6f6add0 00000010 00000000 aaaabfaf a8babbaa The analysis of this is as follows. In init/main.c, we issue: kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); This creates a new thread, which falls through to the ret_from_fork assembly, with r4 set NULL and r5 set to kernel_init. You can see this in your oops dump register set - r5 is 0xc0344555, which is the address of kernel_init plus 1 which marks the function as Thumb code. Now, let's look at this code a little closer - this is what the disassembly looks like: c000d180 : c000d180: f03a fe08 bl c0047d94 c000d184: 2d00 cmp r5, #0 c000d186: bf1e ittt ne c000d188: 4620 movne r0, r4 c000d18a: 46fe movne lr, pc <-- XXXXXXX c000d18c: 46af movne pc, r5 c000d18e: 46e9 mov r9, sp c000d190: ea4f 3959 mov.w r9, r9, lsr #13 c000d194: ea4f 3949 mov.w r9, r9, lsl #13 c000d198: e7c8 b.n c000d12c c000d19a: bf00 nop c000d19c: f3af 8000 nop.w This code was introduced in 9fff2fa0db911 (arm: switch to saner kernel_execve() semantics). I have marked one instruction, and it's the significant one - I'll come back to that later. Eventually, having had a successful call to kernel_execve(), kernel_init() returns zero. In returning, it uses the value in 'lr' which was set by the instruction I marked above. Unfortunately, this causes lr to contain 0xc000d18e - an even address. This switches the ISA to ARM on return but with a non word aligned PC value. So, what do we end up executing? Well, not the instructions above - yes the opcodes, but they don't mean the same thing in ARM mode. 
In ARM mode, it looks like this instead: c000d18c: 46e946af strbtmi r4, [r9], pc, lsr #13 c000d190: 3959ea4f ldmdbcc r9, {r0, r1, r2, r3, r6, r9, fp, sp, lr, pc}^ c000d194: 3949ea4f stmdbcc r9, {r0, r1, r2, r3, r6, r9, fp, sp, lr, pc}^ c000d198: bf00e7c8 svclt 0x0000e7c8 c000d19c: 8000f3af andhi pc, r0, pc, lsr #7 c000d1a0: e88db092 stm sp, {r1, r4, r7, ip, sp, pc} c000d1a4: 46e81fff ; instruction: 0x46e81fff c000d1a8: 8a00f3ef bhi 0xc004a16c c000d1ac: 0a0cf08a beq 0xc03493dc I have included more above, because it's relevant. The PSR flags which we can see in the oops dump are nZCv, so Z and C are set. All the above ARM instructions are not executed, except for two. c000d1a0, which has no writeback, and writes below the current stack pointer (and that data is lost when we take the next exception.) The other instruction which is executed is c000d1ac, which takes us to... 0xc03493dc. However, remember that bit 1 of the PC got set. So that makes the PC value 0xc03493de. And that value is the value we find in the oops dump for PC. What is the instruction here when interpreted in ARM mode? 0: f71e150c ; instruction: 0xf71e150c and there we have our undefined instruction (remember that the 'never' condition code, 0xf, has been deprecated and is now always executed as it is now being used for additional instructions.) This path also nicely explains the state of the stack we see in the oops dump too. The above is a consistent and sane story for how we got to the oops dump, which all stems from the instruction at 0xc000d18a being wrong. Reported-by: Daniel Mack Tested-by: Daniel Mack Signed-off-by: Russell King Signed-off-by: Linus Torvalds --- arch/arm/kernel/entry-common.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 417bac1846bd..34711757ba59 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -88,9 +88,9 @@ ENTRY(ret_from_fork) bl schedule_tail cmp r5, #0 movne r0, r4 - movne lr, pc + adrne lr, BSYM(1f) movne pc, r5 - get_thread_info tsk +1: get_thread_info tsk b ret_slow_syscall ENDPROC(ret_from_fork) -- cgit v1.2.3 From 2456f44dd7a9aaffc2cd21a13f78198b3d94da08 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Tue, 16 Oct 2012 19:35:14 +0100 Subject: ARM: 7555/1: kexec: fix segment memory addresses check Commit c564df4db85aac8d1d65a56176a0a25f46138064 (ARM: 7540/1: kexec: Check segment memory addresses) added a safety check with accidentally reversed condition, and broke kexec functionality on ARM. Fix this. 
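For illustration, a sketch (not code from the patch) of why the check was reversed: memblock_is_region_memory() is a boolean-style predicate that returns nonzero when the region lies entirely within memory, not a 0-on-success error code, so storing its result in "err" and bailing out on nonzero rejected exactly the valid segments:

	/* Broken: treats the nonzero "region is valid" result as an error. */
	err = memblock_is_region_memory(current_segment->mem,
					current_segment->memsz);
	if (err)	/* fires for GOOD segments */
		return -EINVAL;

	/* Fixed: fail only when the predicate says the region is not memory. */
	if (!memblock_is_region_memory(current_segment->mem,
				       current_segment->memsz))
		return -EINVAL;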
Acked-by: Will Deacon Signed-off-by: Aaro Koskinen Signed-off-by: Russell King --- arch/arm/kernel/machine_kexec.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index e29c3337ca81..8ef8c9337809 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -45,10 +45,9 @@ int machine_kexec_prepare(struct kimage *image) for (i = 0; i < image->nr_segments; i++) { current_segment = &image->segment[i]; - err = memblock_is_region_memory(current_segment->mem, - current_segment->memsz); - if (err) - return - EINVAL; + if (!memblock_is_region_memory(current_segment->mem, + current_segment->memsz)) + return -EINVAL; err = get_user(header, (__be32*)current_segment->buf); if (err) -- cgit v1.2.3 From 3581fe0ef37ce12ac7a4f74831168352ae848edc Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 17 Oct 2012 12:01:34 +0100 Subject: ARM: 7556/1: perf: fix updated event period in response to PERF_EVENT_IOC_PERIOD The PERF_EVENT_IOC_PERIOD ioctl command can be used to change the sample period of a running perf_event. Consequently, when calculating the next event period, the new period will only be considered after the previous one has overflowed. This patch changes the calculation of the remaining event ticks so that they are offset if the period has changed. Cc: Peter Zijlstra Reported-by: Andreas Sandberg Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/perf_event.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 93971b1a4f0b..53c0304b734a 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -96,6 +96,10 @@ armpmu_event_set_period(struct perf_event *event, s64 period = hwc->sample_period; int ret = 0; + /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ + if (unlikely(period != hwc->last_period)) + left = period - (hwc->last_period - left); + if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); -- cgit v1.2.3 From 871df85a592396b36d4c40b3860e8d7373626552 Mon Sep 17 00:00:00 2001 From: fwu Date: Sat, 29 Sep 2012 04:14:03 +0100 Subject: ARM: 7544/1: Add BUG_ON when hlt counter is wrongly used 1. On ARM platforms, "nohlt" can be used to keep cores out of the idle process, making the idle call return immediately. 2. Two interfaces exported for other modules, "disable_hlt" and "enable_hlt", disable/enable the cpuidle mechanism by incrementing/decrementing "hlt_counter". They are paired operations: calling disable_hlt first and enable_hlt afterwards gives the correct semantics. 3. Nothing prevents user (driver/module) code from calling enable_hlt ahead of disable_hlt, which corrupts the kernel's idle state, and current kernel code gives no WARNING or other notification when this happens. This patch reports a BUG when the counter is misused in this way, just as the kernel does when enable_irq is called ahead of disable_irq.
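As a usage sketch (illustrative, not from the patch), the counter must stay balanced; with this change an unbalanced enable_hlt() is caught immediately instead of silently re-arming idle too early:

	disable_hlt();	/* hlt_counter: 0 -> 1, WFI/idle suppressed */
	/* ... section that must not enter the idle loop ... */
	enable_hlt();	/* hlt_counter: 1 -> 0, idle allowed again */

	enable_hlt();	/* unpaired call: hlt_counter -> -1, BUG_ON fires */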
Link: https://patchwork.kernel.org/patch/1527881/ Signed-off-by: fwu Signed-off-by: YiLu Mao Signed-off-by: Ning Jiang Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/process.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 90084a6de35a..45fd05186a39 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -70,6 +70,7 @@ EXPORT_SYMBOL(disable_hlt); void enable_hlt(void) { hlt_counter--; + BUG_ON(hlt_counter < 0); } EXPORT_SYMBOL(enable_hlt); -- cgit v1.2.3 From 5f40b909728ad784eb43aa309d3c4e9bdf050781 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 19 Oct 2012 17:53:01 +0100 Subject: ARM: 7559/1: smp: switch away from the idmap before updating init_mm.mm_count When booting a secondary CPU, the primary CPU hands two sets of page tables via the secondary_data struct: (1) swapper_pg_dir: a normal, cacheable, shared (if SMP) mapping of the kernel image (i.e. the tables used by init_mm). (2) idmap_pgd: an uncached mapping of the .idmap.text ELF section. The idmap is generally used when enabling and disabling the MMU, which includes early CPU boot. In this case, the secondary CPU switches to swapper as soon as it enters C code: struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); cpu_switch_mm(mm->pgd, mm); This causes a problem on ARMv7, where the identity mapping is treated as strongly-ordered leading to architecturally UNPREDICTABLE behaviour of exclusive accesses, such as those used by atomic_inc. This patch re-orders the secondary_start_kernel function so that we switch to swapper before performing any exclusive accesses. Cc: Cc: David McKay Reported-by: Gilles Chanteperdrix Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 8e20754dd31d..fbc8b2623d82 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -294,18 +294,24 @@ static void percpu_timer_setup(void); asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu = smp_processor_id(); + unsigned int cpu; + + /* + * The identity mapping is uncached (strongly ordered), so + * switch away from it before attempting any exclusive accesses. + */ + cpu_switch_mm(mm->pgd, mm); + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. */ + cpu = smp_processor_id(); atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); - cpu_switch_mm(mm->pgd, mm); - enter_lazy_tlb(mm, current); - local_flush_tlb_all(); printk("CPU%u: Booted secondary processor\n", cpu); -- cgit v1.2.3 From ad17a26e2273d18d27fcbb4a8d8a341ebb2d721f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 22 Oct 2012 10:17:24 +0100 Subject: ARM: 7560/1: SMP_TWD: use DIV_ROUND_CLOSEST() for periodic mode The periodic mode is currently calculated by a simple division but we should pay more attention to our integer arithmetics. Also delete a comment that does not make any sense. 
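A worked example (with made-up numbers) of the integer arithmetic involved: DIV_ROUND_CLOSEST(x, d) evaluates to ((x) + (d) / 2) / (d) for positive values, so for a hypothetical twd_timer_rate of 2999950 Hz and HZ = 100:

	twd_timer_rate / HZ                   /* == 29999: plain division always truncates */
	DIV_ROUND_CLOSEST(twd_timer_rate, HZ) /* == (2999950 + 50) / 100 == 30000 */

The second form keeps the programmed periodic tick within half a timer count of the ideal period instead of always erring low.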
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/smp_twd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index e1f906989bb8..b22d700fea27 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -42,10 +42,10 @@ static void twd_set_mode(enum clock_event_mode mode, switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - /* timer load already set up */ ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_PERIODIC; - __raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD); + __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), + twd_base + TWD_TIMER_LOAD); break; case CLOCK_EVT_MODE_ONESHOT: /* period set, and timer enabled in 'next_event' hook */ -- cgit v1.2.3 From 6a4dae5e138a32b45ca5218cc2b81802f9d378c3 Mon Sep 17 00:00:00 2001 From: Felipe Balbi 2 Date: Tue, 23 Oct 2012 19:00:03 +0100 Subject: ARM: 7565/1: sched: stop sched_clock() during suspend The scheduler imposes a requirement to sched_clock() which is to stop the clock during suspend, if we don't do that any RT thread will be rescheduled in the future which might cause any sort of problems. This became an issue on OMAP when we converted omap-i2c.c to use threaded IRQs, it turned out that depending on how much time we spent on suspend, the I2C IRQ thread would end up being rescheduled so far in the future that I2C transfers would timeout and, because omap_hsmmc depends on an I2C-connected device to detect if an MMC card is inserted in the slot, our rootfs would just vanish. arch/arm/kernel/sched_clock.c already had an optional implementation (sched_clock_needs_suspend()) which would handle scheduler's requirement properly, what this patch does is simply to make that implementation non-optional. Note that this has the side-effect that printk timings won't reflect the actual time spent on suspend so other methods to measure that will have to be used. This has been tested with beagleboard XM (OMAP3630) and pandaboard rev A3 (OMAP4430). Suspend to RAM is now working after this patch. Thanks to Kevin Hilman for helping out with debugging. 
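A hedged usage sketch (the OMAP read callback name here is hypothetical): platforms that previously had to opt in through the special registration call now get suspend-safe behaviour from the plain one, which is all that remains after this patch:

	/* Before: optional opt-in, removed by this patch. */
	setup_sched_clock_needs_suspend(omap_32k_read, 32, 32768);

	/* After: every sched_clock() source stops across suspend. */
	setup_sched_clock(omap_32k_read, 32, 32768);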
Acked-by: Kevin Hilman Acked-by: Linus Walleij Signed-off-by: Felipe Balbi Signed-off-by: Russell King --- arch/arm/include/asm/sched_clock.h | 2 -- arch/arm/kernel/sched_clock.c | 18 ++++-------------- 2 files changed, 4 insertions(+), 16 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h index 05b8e82ec9f5..e3f757263438 100644 --- a/arch/arm/include/asm/sched_clock.h +++ b/arch/arm/include/asm/sched_clock.h @@ -10,7 +10,5 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); -extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, - unsigned long rate); #endif diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index e21bac20d90d..fc6692e2b603 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -107,13 +107,6 @@ static void sched_clock_poll(unsigned long wrap_ticks) update_sched_clock(); } -void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, - unsigned long rate) -{ - setup_sched_clock(read, bits, rate); - cd.needs_suspend = true; -} - void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; @@ -189,18 +182,15 @@ void __init sched_clock_postinit(void) static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); - if (cd.needs_suspend) - cd.suspended = true; + cd.suspended = true; return 0; } static void sched_clock_resume(void) { - if (cd.needs_suspend) { - cd.epoch_cyc = read_sched_clock(); - cd.epoch_cyc_copy = cd.epoch_cyc; - cd.suspended = false; - } + cd.epoch_cyc = read_sched_clock(); + cd.epoch_cyc_copy = cd.epoch_cyc; + cd.suspended = false; } static struct syscore_ops sched_clock_ops = { -- cgit v1.2.3 From 2577cf246233b1e4e38576f28a5ec05c9c6a6c2a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 22 Oct 2012 10:18:06 +0100 Subject: ARM: 7561/1: SMP_TWD: use clk_prepare_enable() A minor code refactoring saving a few lines by merging prepare() and enable() calls. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/smp_twd.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index e1f906989bb8..780b05706364 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -248,17 +248,9 @@ static struct clk *twd_get_clock(void) return clk; } - err = clk_prepare(clk); + err = clk_prepare_enable(clk); if (err) { - pr_err("smp_twd: clock failed to prepare: %d\n", err); - clk_put(clk); - return ERR_PTR(err); - } - - err = clk_enable(clk); - if (err) { - pr_err("smp_twd: clock failed to enable: %d\n", err); - clk_unprepare(clk); + pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); clk_put(clk); return ERR_PTR(err); } -- cgit v1.2.3 From a68becd1dcda55b467dcabaff136cadc10abb761 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 23 Oct 2012 08:29:48 +0100 Subject: ARM: 7563/1: SMP_TWD: make setup()/stop() reentrant It has been brought to my knowledge that the .setup()/.stop() function pair in the SMP TWD is going to be called from atomic contexts for CPUs coming and going, and then the clk_prepare()/clk_unprepare() calls cannot be called on subsequent .setup()/.stop() iterations. This is however just the tip of an iceberg as the function pair is not designed to be reentrant at all. 
This change makes the SMP_TWD clock .setup()/.stop() pair reentrant by splitting the .setup() function in three parts: - One COMMON part that is executed the first time the first CPU in the TWD cluster is initialized. This will fetch the TWD clk for the cluster and prepare+enable it. If no clk is available it will calibrate the rate instead. - One part that is executed the FIRST TIME a certain CPU is brought on-line. This initializes and sets up the clock event for a certain CPU. - One part that is executed on every subsequent .setup() call. This will re-initialize the clock event. This is augmented to call the clk_enable()/clk_disable() pair properly. Cc: Shawn Guo Reported-by: Peter Chen Reviewed-by: Santosh Shilimkar Tested-by: Shawn Guo Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/smp_twd.c | 42 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 780b05706364..a2e74375945a 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -31,6 +31,8 @@ static void __iomem *twd_base; static struct clk *twd_clk; static unsigned long twd_timer_rate; +static bool common_setup_called; +static DEFINE_PER_CPU(bool, percpu_setup_called); static struct clock_event_device __percpu **twd_evt; static int twd_ppi; @@ -264,15 +266,45 @@ static struct clk *twd_get_clock(void) static int __cpuinit twd_timer_setup(struct clock_event_device *clk) { struct clock_event_device **this_cpu_clk; + int cpu = smp_processor_id(); - if (!twd_clk) + /* + * If the basic setup for this CPU has been done before don't + * bother with the below. + */ + if (per_cpu(percpu_setup_called, cpu)) { + __raw_writel(0, twd_base + TWD_TIMER_CONTROL); + clockevents_register_device(*__this_cpu_ptr(twd_evt)); + enable_percpu_irq(clk->irq, 0); + return 0; + } + per_cpu(percpu_setup_called, cpu) = true; + + /* + * This stuff only need to be done once for the entire TWD cluster + * during the runtime of the system. + */ + if (!common_setup_called) { twd_clk = twd_get_clock(); - if (!IS_ERR_OR_NULL(twd_clk)) - twd_timer_rate = clk_get_rate(twd_clk); - else - twd_calibrate_rate(); + /* + * We use IS_ERR_OR_NULL() here, because if the clock stubs + * are active we will get a valid clk reference which is + * however NULL and will return the rate 0. In that case we + * need to calibrate the rate instead. + */ + if (!IS_ERR_OR_NULL(twd_clk)) + twd_timer_rate = clk_get_rate(twd_clk); + else + twd_calibrate_rate(); + + common_setup_called = true; + } + /* + * The following is done once per CPU the first time .setup() is + * called. + */ __raw_writel(0, twd_base + TWD_TIMER_CONTROL); clk->name = "local_timer"; -- cgit v1.2.3 From ee951c630c5ce5108f8014ce1c9d738b5bbfea60 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 29 Oct 2012 19:19:34 +0100 Subject: ARM: 7568/1: Sort exception table at compile time Add the ARM machine identifier to sortextable and select the config option so that we can sort the exception table at compile time. sortextable relies on a section named __ex_table existing in the vmlinux, but ARM's linker script places the exception table in the data section. Give the exception table its own section so that sortextable can find it. This allows us to skip the sorting step during boot. 
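For background, a sketch (not code from this patch) of what a pre-sorted table buys: the fault path can binary-search __ex_table by faulting instruction address, rather than having to sort the table during boot first:

	/* Minimal sketch of a lookup over an already-sorted exception table. */
	static const struct exception_table_entry *
	search_sorted(const struct exception_table_entry *base,
		      unsigned int num, unsigned long addr)
	{
		unsigned int lo = 0, hi = num;

		while (lo < hi) {
			unsigned int mid = lo + (hi - lo) / 2;

			if (base[mid].insn < addr)
				lo = mid + 1;
			else
				hi = mid;
		}
		return (lo < num && base[lo].insn == addr) ? &base[lo] : NULL;
	}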
Cc: David Daney Signed-off-by: Stephen Boyd Tested-by: Will Deacon Signed-off-by: Russell King --- arch/arm/Kconfig | 1 + arch/arm/kernel/vmlinux.lds.S | 19 +++++++++---------- scripts/sortextable.c | 1 + 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 73067efd4845..208414c0506a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -5,6 +5,7 @@ config ARM select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_WANT_IPC_PARSE_VERSION + select BUILDTIME_EXTABLE_SORT if MMU select CPU_PM if (SUSPEND || CPU_IDLE) select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 36ff15bbfdd4..b9f38e388b43 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -114,6 +114,15 @@ SECTIONS RO_DATA(PAGE_SIZE) + . = ALIGN(4); + __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { + __start___ex_table = .; +#ifdef CONFIG_MMU + *(__ex_table) +#endif + __stop___ex_table = .; + } + #ifdef CONFIG_ARM_UNWIND /* * Stack unwinding tables @@ -219,16 +228,6 @@ SECTIONS CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) READ_MOSTLY_DATA(L1_CACHE_BYTES) - /* - * The exception fixup table (might need resorting at runtime) - */ - . = ALIGN(4); - __start___ex_table = .; -#ifdef CONFIG_MMU - *(__ex_table) -#endif - __stop___ex_table = .; - /* * and the usual data section */ diff --git a/scripts/sortextable.c b/scripts/sortextable.c index f19ddc47304c..1f10e89d15b4 100644 --- a/scripts/sortextable.c +++ b/scripts/sortextable.c @@ -248,6 +248,7 @@ do_file(char const *const fname) case EM_S390: custom_sort = sort_relative_table; break; + case EM_ARM: case EM_MIPS: break; } /* end switch */ -- cgit v1.2.3 From b62655f4c6f3e4d21934eee14ac2ac5cd479c97c Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Tue, 6 Nov 2012 03:48:40 +0100 Subject: ARM: 7571/1: SMP: add function arch_send_wakeup_ipi_mask() Add function arch_send_wakeup_ipi_mask(), so that platform code can use it as an easy way to wake up cores that are in WFI. 
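A hedged usage sketch (the platform helper is hypothetical; only arch_send_wakeup_ipi_mask() comes from this patch): platform code that parks secondary cores in WFI can now kick one awake without open-coding smp_cross_call():

	#include <asm/smp.h>	/* declares arch_send_wakeup_ipi_mask() */

	static void plat_wake_core(unsigned int cpu)	/* hypothetical helper */
	{
		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	}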
Signed-off-by: Shawn Guo Signed-off-by: Russell King --- arch/arm/include/asm/smp.h | 1 + arch/arm/kernel/smp.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h index 2e3be16c6766..d3a22bebe6ce 100644 --- a/arch/arm/include/asm/smp.h +++ b/arch/arm/include/asm/smp.h @@ -79,6 +79,7 @@ extern void cpu_die(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); struct smp_operations { #ifdef CONFIG_SMP diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 8e20754dd31d..dd5dd0248b88 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -415,6 +415,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) smp_cross_call(mask, IPI_CALL_FUNC); } +void arch_send_wakeup_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_WAKEUP); +} + void arch_send_call_function_single_ipi(int cpu) { smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); -- cgit v1.2.3 From 9ecb47de3490b8f2d4b818568935da9ca2c22398 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 8 Nov 2012 19:54:11 +0100 Subject: ARM: 7574/1: kernel/process.c: include idmap.h instead of redeclaring setup_mm_for_reboot() Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/process.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 45fd05186a39..44bc0b327e2b 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -56,8 +57,6 @@ static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE" }; -extern void setup_mm_for_reboot(void); - static volatile int hlt_counter; void disable_hlt(void) -- cgit v1.2.3 From 9b790d71d58be65f9508ab60920eb978af828412 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 15 Nov 2012 22:12:00 +0100 Subject: ARM: 7578/1: arch/move secure_computing into trace There is very little difference in the TIF_SECCOMP and TIF_SYSCALL_WORK path in entry-common.S, so merge TIF_SECCOMP into TIF_SYSCALL_WORK and move seccomp into the syscall_trace_enter() handler. Expanded some of the tracehook logic into the callers to make this code more readable. Since tracehook needs to do register changing, this portion is best left in its own function instead of copy/pasting into the callers. Additionally, the return value for secure_computing() is now checked and a -1 value will result in the system call being skipped. 
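The net effect on the fast path (a recap of the diff below, not new code): entry-common.S now needs a single flag test, because _TIF_SECCOMP is folded into the syscall-work mask and the dedicated CONFIG_SECCOMP block disappears:

	/* One mask covers trace, audit, tracepoints and seccomp: */
	#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

	tst	r10, #_TIF_SYSCALL_WORK	@ any tracing/seccomp work?
	bne	__sys_trace		@ C code runs secure_computing() first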
Signed-off-by: Kees Cook Acked-by: Will Drewry Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/include/asm/thread_info.h | 7 ++++--- arch/arm/kernel/entry-common.S | 10 ---------- arch/arm/kernel/ptrace.c | 29 ++++++++++++++++++++--------- 3 files changed, 24 insertions(+), 22 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 8477b4c1d39f..cddda1f41f0f 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -151,10 +151,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 #define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* seccomp syscall filtering active */ #define TIF_USING_IWMMXT 17 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 20 -#define TIF_SECCOMP 21 #define TIF_SWITCH_MM 22 /* deferred switch_mm */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -163,11 +163,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) -#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) /* Checks for any syscall work in entry-common.S */ -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 417bac1846bd..b621871dd277 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -417,16 +417,6 @@ local_restart: ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args -#ifdef CONFIG_SECCOMP - tst r10, #_TIF_SECCOMP - beq 1f - mov r0, scno - bl __secure_computing - add r0, sp, #S_R0 + S_OFF @ pointer to regs - ldmia r0, {r0 - r3} @ have to reload r0 - r3 -1: -#endif - tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls? bne __sys_trace diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 739db3a1b2d2..518536d93fba 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -916,16 +916,11 @@ enum ptrace_syscall_dir { PTRACE_SYSCALL_EXIT, }; -static int ptrace_syscall_trace(struct pt_regs *regs, int scno, - enum ptrace_syscall_dir dir) +static int tracehook_report_syscall(struct pt_regs *regs, + enum ptrace_syscall_dir dir) { unsigned long ip; - current_thread_info()->syscall = scno; - - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return scno; - /* * IP is used to denote syscall entry/exit: * IP = 0 -> entry, =1 -> exit @@ -944,19 +939,35 @@ static int ptrace_syscall_trace(struct pt_regs *regs, int scno, asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) { - scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); + current_thread_info()->syscall = scno; + + /* Do the secure computing check first; failures should be fast. 
*/ + if (secure_computing(scno) == -1) + return -1; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_enter(regs, scno); + audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); + return scno; } asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno) { - scno = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); + current_thread_info()->syscall = scno; + + if (test_thread_flag(TIF_SYSCALL_TRACE)) + scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); + if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) trace_sys_exit(regs, scno); + audit_syscall_exit(regs); + return scno; } -- cgit v1.2.3 From ad75b51459ae076a0d406391496f81b897bf6992 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 15 Nov 2012 22:12:17 +0100 Subject: ARM: 7579/1: arch/allow a scno of -1 to not cause a SIGILL On tracehook-friendly platforms, a system call number of -1 falls through without running much code or taking much action. ARM is different. This adds a short-circuit check in the trace path to avoid any additional work, as suggested by Russell King, to make sure that ARM behaves the same way as other platforms. Signed-off-by: Kees Cook Acked-by: Will Drewry Reviewed-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index b621871dd277..ee81dbc6fa10 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -448,7 +448,10 @@ __sys_trace: ldmccia r1, {r0 - r6} @ have to reload r0 - r6 stmccia sp, {r4, r5} @ and update the stack args ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine - b 2b + cmp scno, #-1 @ skip the syscall? + bne 2b + add sp, sp, #S_OFF @ restore stack + b ret_slow_syscall __sys_trace_return: str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 -- cgit v1.2.3 From 026b7c6bf0bf044aa03e2affbda73b6c6a302538 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 3 Dec 2012 21:13:03 +0100 Subject: ARM: 7590/1: /proc/interrupts: limit the display of IPIs to online CPUs only This is what is done for the regular interrupts in kernel/irqs/proc.c already, before calling arch_show_interrupts(). Not doing so for the IPIs causes the column headers not to match with the content whenever some CPUs are offline. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index fbc8b2623d82..fc4d526e2906 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -443,7 +443,7 @@ void show_ipi_list(struct seq_file *p, int prec) for (i = 0; i < NR_IPI; i++) { seq_printf(p, "%*s%u: ", prec - 1, "IPI", i); - for_each_present_cpu(cpu) + for_each_online_cpu(cpu) seq_printf(p, "%10u ", __get_irq_stat(cpu, ipi_irqs[i])); -- cgit v1.2.3 From 76e0920403d3de1a9ed39cffc3ec9fcb00fa4bc9 Mon Sep 17 00:00:00 2001 From: Armando Visconti Date: Tue, 4 Dec 2012 10:34:39 +0100 Subject: ARM: 7591/1: nommu: Enable the strict alignment (CR_A) bit only if ARCH < v6 This patch keeps disabled the strict alignment CP15 bit for all armv6 and armv7 processor without the mmu. This behaviour is now same as in the mmu case. 
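Read as plain logic (an annotated restatement of the one-line diff below): ARMv6 and later cores can perform most unaligned accesses in hardware, so strict alignment checking is requested only where the hardware actually needs it, matching the MMU case:

	/* Strict alignment checking only makes sense on pre-v6 cores. */
	#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
		orr	r0, r0, #CR_A	@ trap unaligned accesses
	#else
		bic	r0, r0, #CR_A	@ v6/v7: let hardware handle them
	#endif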
Signed-off-by: Armando Visconti Signed-off-by: Russell King --- arch/arm/kernel/head-nommu.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 278cfc144f44..2c228a07e58c 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -68,7 +68,7 @@ __after_proc_init: * CP15 system control register value returned in r0 from * the CPU init function. */ -#ifdef CONFIG_ALIGNMENT_TRAP +#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A -- cgit v1.2.3 From b10bca0bc699af201770989a88fa293155e9d8de Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 7 Dec 2012 17:34:37 +0100 Subject: ARM: 7595/1: syscall: rework ordering in syscall_trace_exit syscall_trace_exit is currently doing things back-to-front: invoking the audit hook *after* signalling the debugger, which presents an opportunity for the registers to be re-written by userspace in order to bypass auditing constraints. This patch fixes the ordering by moving the audit code first and the tracehook code last. On the face of it, it looks like current_thread_info()->syscall may be incorrect for the sys_exit tracepoint, but that's actually not an issue because it will have been set during syscall entry and cannot have changed since then. Reported-by: Andrew Gabbasov Tested-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 1 - arch/arm/kernel/ptrace.c | 24 +++++++++++++++--------- 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'arch/arm/kernel') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index ee81dbc6fa10..d863bbf0f1f5 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -455,7 +455,6 @@ __sys_trace: __sys_trace_return: str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 - mov r1, scno mov r0, sp bl syscall_trace_exit b ret_slow_syscall diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 518536d93fba..03deeffd9f6d 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -957,17 +957,23 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) return scno; } -asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno) +asmlinkage void syscall_trace_exit(struct pt_regs *regs) { - current_thread_info()->syscall = scno; - - if (test_thread_flag(TIF_SYSCALL_TRACE)) - scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); + /* + * Audit the syscall before anything else, as a debugger may + * come in and change the current registers. + */ + audit_syscall_exit(regs); + /* + * Note that we haven't updated the ->syscall field for the + * current thread. This isn't a problem because it will have + * been set on syscall entry and there hasn't been an opportunity + * for a PTRACE_SET_SYSCALL since then. + */ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_exit(regs, scno); - - audit_syscall_exit(regs); + trace_sys_exit(regs, regs_return_value(regs)); - return scno; + if (test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT); } -- cgit v1.2.3
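To see the window the old ordering left open, a hypothetical tracer-side sketch (names follow the standard ptrace API; "pid" is assumed to be an attached, stopped tracee; not code from the patch): a debugger can rewrite the tracee's registers while it is stopped at the syscall-exit report, so anything reading the registers after that stop sees tracer-controlled values:

	#include <sys/ptrace.h>
	#include <asm/ptrace.h>		/* struct pt_regs, ARM_r0 */

	struct pt_regs regs;

	ptrace(PTRACE_GETREGS, pid, NULL, &regs);	/* tracee stopped at syscall exit */
	regs.ARM_r0 = 0;				/* forge the syscall return value */
	ptrace(PTRACE_SETREGS, pid, NULL, &regs);
	ptrace(PTRACE_SYSCALL, pid, NULL, NULL);	/* resume the tracee */

With audit_syscall_exit() now called first, the audit record is cut from the registers as the kernel wrote them, before any such rewrite can take effect.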