| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-01 02:34:41 +0400 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-01 02:34:41 +0400 |
| commit | 655861e328cea83320190f4a57b3656ee952388c (patch) | |
| tree | 856801b431bdf1f65e4d57345bc79ca9768b6768 | |
| parent | e7a7c9ab415874f4ad78a0352ca0ec6711092017 (diff) | |
| parent | 5e7371ded05adfcfcee44a8bc070bfc37979b8f2 (diff) | |
| download | linux-655861e328cea83320190f4a57b3656ee952388c.tar.xz | |
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King.
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7406/1: hotplug: copy the affinity mask when forcefully migrating IRQs
ARM: 7405/1: kexec: call platform_cpu_kill on the killer rather than the victim
ARM: 7403/1: tls: remove covert channel via TPIDRURW
ARM: 7401/1: mm: Fix section mismatches
ARM: OMAP: fix DMA vs memory ordering
ARM: 7390/1: dts: versatile-pb/ab fix MMC IRQs
ARM: 7400/1: vfp: clear fpscr length and stride bits on entry to sig handler
ARM: 7399/1: vfp: move user vfp state save/restore code out of signal.c
ARM: 7398/1: l2x0: only write to debug registers on PL310
ARM: 7397/1: l2x0: only apply workaround for erratum #753970 on PL310
ARM: 7396/1: errata: only handle ARM erratum #326103 on affected cores
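
A note on the TPIDRURW change (ARM 7403/1 above): that register is readable and writable from unprivileged code on ARMv6K/ARMv7, and before this fix the kernel left whatever a task wrote there in place across a thread switch, which is what made it usable as a covert channel between tasks. The user-space sketch below is illustrative only and is not part of this pull; it assumes a CPU with the hardware TLS registers and simply pokes the register that the patched set_tls macros now clear.

/* Illustrative only: read/write TPIDRURW (CP15 c13, c0, 2) from user space. */
#include <stdio.h>

static unsigned long read_tpidrurw(void)
{
	unsigned long val;

	asm volatile("mrc p15, 0, %0, c13, c0, 2" : "=r" (val));
	return val;
}

static void write_tpidrurw(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c13, c0, 2" : : "r" (val));
}

int main(void)
{
	write_tpidrurw(0x12345678UL);
	/* With the fix applied, a task scheduled later on this CPU reads 0 here. */
	printf("TPIDRURW = %#lx\n", read_tpidrurw());
	return 0;
}

With the fix, the value written above no longer survives a thread switch, which is what closes the cross-task channel.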
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | arch/arm/Kconfig | 9 |
| -rw-r--r-- | arch/arm/boot/dts/versatile-ab.dts | 2 |
| -rw-r--r-- | arch/arm/boot/dts/versatile-pb.dts | 2 |
| -rw-r--r-- | arch/arm/include/asm/thread_info.h | 7 |
| -rw-r--r-- | arch/arm/include/asm/tls.h | 4 |
| -rw-r--r-- | arch/arm/kernel/irq.c | 6 |
| -rw-r--r-- | arch/arm/kernel/signal.c | 55 |
| -rw-r--r-- | arch/arm/kernel/smp.c | 28 |
| -rw-r--r-- | arch/arm/mm/abort-ev6.S | 17 |
| -rw-r--r-- | arch/arm/mm/cache-l2x0.c | 25 |
| -rw-r--r-- | arch/arm/mm/init.c | 4 |
| -rw-r--r-- | arch/arm/mm/mmu.c | 4 |
| -rw-r--r-- | arch/arm/plat-omap/dma.c | 14 |
| -rw-r--r-- | arch/arm/vfp/vfpmodule.c | 99 |
14 files changed, 188 insertions, 88 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index cf006d40342c..36586dba6fa6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1186,6 +1186,15 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config ARM_ERRATA_326103
+	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
+	depends on CPU_V6
+	help
+	  Executing a SWP instruction to read-only memory does not set bit 11
+	  of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
+	  treat the access as a read, preventing a COW from occurring and
+	  causing the faulting task to livelock.
+
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
 	depends on CPU_V6 || CPU_V6K
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 0b32925f2147..e2fe3195c0d1 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -173,7 +173,7 @@
 			mmc@5000 {
 				compatible = "arm,primecell";
 				reg = < 0x5000 0x1000>;
-				interrupts = <22>;
+				interrupts = <22 34>;
 			};
 			kmi@6000 {
 				compatible = "arm,pl050", "arm,primecell";
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 166461073b78..7e8175269064 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -41,7 +41,7 @@
 			mmc@b000 {
 				compatible = "arm,primecell";
 				reg = <0xb000 0x1000>;
-				interrupts = <23>;
+				interrupts = <23 34>;
 			};
 		};
 	};
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..0f04d84582e1 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb0f61c..73409e6c0251 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 71ccdbfed662..8349d4e97e2b 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	}
 
 	c = irq_data_get_irq_chip(d);
-	if (c->irq_set_affinity)
-		c->irq_set_affinity(d, affinity, true);
-	else
+	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+		cpumask_copy(d->affinity, affinity);
 
 	return ret;
 }
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7cb532fc8aa4..d68d1b694680 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
 
 static int preserve_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	const unsigned long magic = VFP_MAGIC;
 	const unsigned long size = VFP_STORAGE_SIZE;
 	int err = 0;
 
-	vfp_sync_hwstate(thread);
 	__put_user_error(magic, &frame->magic, err);
 	__put_user_error(size, &frame->size, err);
 
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
-			      sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Copy the exception registers.
-	 */
-	__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
-	__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
+	if (err)
+		return -EFAULT;
 
-	return err ? -EFAULT : 0;
+	return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 static int restore_vfp_context(struct vfp_sigframe __user *frame)
 {
-	struct thread_info *thread = current_thread_info();
-	struct vfp_hard_struct *h = &thread->vfpstate.hard;
 	unsigned long magic;
 	unsigned long size;
-	unsigned long fpexc;
 	int err = 0;
 
 	__get_user_error(magic, &frame->magic, err);
@@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
 		return -EINVAL;
 
-	vfp_flush_hwstate(thread);
-
-	/*
-	 * Copy the floating point registers. There can be unused
-	 * registers see asm/hwcap.h for details.
-	 */
-	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
-				sizeof(h->fpregs));
-	/*
-	 * Copy the status and control register.
-	 */
-	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
-
-	/*
-	 * Sanitise and restore the exception registers.
-	 */
-	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
-	/* Ensure the VFP is enabled. */
-	fpexc |= FPEXC_EN;
-	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
-	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
-	h->fpexc = fpexc;
-
-	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
-	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
-
-	return err ? -EFAULT : 0;
+	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
 }
 
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index addbbe8028c2..f6a4d32b0421 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
-#ifdef CONFIG_HOTPLUG_CPU
-	platform_cpu_kill(cpu);
-#endif
-
 	while (1)
 		cpu_relax();
 }
@@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_kill_cpus(cpumask_t *mask)
+{
+	unsigned int cpu;
+	for_each_cpu(cpu, mask)
+		platform_cpu_kill(cpu);
+}
+#else
+static void smp_kill_cpus(cpumask_t *mask) { }
+#endif
+
 void smp_send_stop(void)
 {
 	unsigned long timeout;
+	struct cpumask mask;
 
-	if (num_online_cpus() > 1) {
-		struct cpumask mask;
-		cpumask_copy(&mask, cpu_online_mask);
-		cpumask_clear_cpu(smp_processor_id(), &mask);
-
-		smp_cross_call(&mask, IPI_CPU_STOP);
-	}
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), &mask);
+	smp_cross_call(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -595,6 +599,8 @@ void smp_send_stop(void)
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
+
+	smp_kill_cpus(&mask);
 }
 
 /*
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index ff1f7cc11f87..80741992a9fc 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 /*
- * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103).
- * The test below covers all the write situations, including Java bytecodes
+ * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
  */
-	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+#ifdef CONFIG_ARM_ERRATA_326103
+	ldr	ip, =0x4107b36
+	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
+	teq	ip, r3, lsr #4			@ r0 ARM1136?
+	bne	do_DataAbort
 	tst	r5, #PSR_J_BIT			@ Java?
+	tsteq	r5, #PSR_T_BIT			@ Thumb?
 	bne	do_DataAbort
-	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
-	ldreq	r3, [r4]			@ read aborted ARM instruction
+	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
+	ldr	r3, [r4]			@ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
-	reveq	r3, r3
+	rev	r3, r3
 #endif
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
+#endif
 	b	do_DataAbort
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index a53fd2aaa2f4..2a8e380501e8 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
 static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
+static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
 
 struct l2x0_regs l2x0_saved_regs;
 
@@ -61,12 +62,7 @@ static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
 
-#ifdef CONFIG_PL310_ERRATA_753970
-	/* write to an unmmapped register */
-	writel_relaxed(0, base + L2X0_DUMMY_REG);
-#else
-	writel_relaxed(0, base + L2X0_CACHE_SYNC);
-#endif
+	writel_relaxed(0, base + sync_reg_offset);
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
 }
 
 #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
+static inline void debug_writel(unsigned long val)
+{
+	if (outer_cache.set_debug)
+		outer_cache.set_debug(val);
+}
 
-#define debug_writel(val)	outer_cache.set_debug(val)
-
-static void l2x0_set_debug(unsigned long val)
+static void pl310_set_debug(unsigned long val)
 {
 	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
@@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
 {
 }
 
-#define l2x0_set_debug	NULL
+#define pl310_set_debug	NULL
 #endif
 
 #ifdef CONFIG_PL310_ERRATA_588369
@@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		else
 			ways = 8;
 		type = "L310";
+#ifdef CONFIG_PL310_ERRATA_753970
+		/* Unmapped register. */
+		sync_reg_offset = L2X0_DUMMY_REG;
+#endif
+		outer_cache.set_debug = pl310_set_debug;
 		break;
 	case L2X0_CACHE_ID_PART_L210:
 		ways = (aux >> 13) & 0xf;
@@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
-	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 595079fa9d1d..8f5813bbffb5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
 #endif
 
 #ifndef CONFIG_SPARSEMEM
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(void)
+static void __init arm_memory_present(void)
 {
 	struct memblock_region *reg;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index b86f8933ff91..2c7cf2f9c837 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 	}
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-	unsigned long phys, const struct mem_type *type)
+static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
+	unsigned long end, unsigned long phys, const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ecdb3da0dea9..c58d896cd5c3 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -916,6 +916,13 @@ void omap_start_dma(int lch)
 		l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
 	l |= OMAP_DMA_CCR_EN;
 
+	/*
+	 * As dma_write() uses IO accessors which are weakly ordered, there
+	 * is no guarantee that data in coherent DMA memory will be visible
+	 * to the DMA device. Add a memory barrier here to ensure that any
+	 * such data is visible prior to enabling DMA.
+	 */
+	mb();
 	p->dma_write(l, CCR, lch);
 
 	dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
 		p->dma_write(l, CCR, lch);
 	}
 
+	/*
+	 * Ensure that data transferred by DMA is visible to any access
+	 * after DMA has been disabled. This is important for coherent
+	 * DMA regions.
+	 */
+	mb();
+
 	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
 		int next_lch, cur_lch = lch;
 		char dma_chan_link_map[dma_lch_count];
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 858748eaa144..bc683b8219b5 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -17,6 +17,8 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -529,6 +531,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
 }
 
 /*
+ * Save the current VFP state into the provided structures and prepare
+ * for entry into a new function (signal handler).
+ */
+int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
+				    struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	int err = 0;
+
+	/* Ensure that the saved hwstate is up-to-date. */
+	vfp_sync_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
+			      sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Copy the exception registers.
+	 */
+	__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
+	__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	if (err)
+		return -EFAULT;
+
+	/* Ensure that VFP is disabled. */
+	vfp_flush_hwstate(thread);
+
+	/*
+	 * As per the PCS, clear the length and stride bits for function
+	 * entry.
+	 */
+	hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
+
+	/*
+	 * Disable VFP in the hwstate so that we can detect if it gets
+	 * used.
+	 */
+	hwstate->fpexc &= ~FPEXC_EN;
+	return 0;
+}
+
+/* Sanitise and restore the current VFP state from the provided structures. */
+int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
+			     struct user_vfp_exc __user *ufp_exc)
+{
+	struct thread_info *thread = current_thread_info();
+	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
+	unsigned long fpexc;
+	int err = 0;
+
+	/*
+	 * If VFP has been used, then disable it to avoid corrupting
+	 * the new thread state.
+	 */
+	if (hwstate->fpexc & FPEXC_EN)
+		vfp_flush_hwstate(thread);
+
+	/*
+	 * Copy the floating point registers. There can be unused
+	 * registers see asm/hwcap.h for details.
+	 */
+	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
+				sizeof(hwstate->fpregs));
+	/*
+	 * Copy the status and control register.
+	 */
+	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
+
+	/*
+	 * Sanitise and restore the exception registers.
+	 */
+	__get_user_error(fpexc, &ufp_exc->fpexc, err);
+
+	/* Ensure the VFP is enabled. */
+	fpexc |= FPEXC_EN;
+
+	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
+	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
+	hwstate->fpexc = fpexc;
+
+	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
+	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
+
+	return err ? -EFAULT : 0;
+}
+
+/*
  * VFP hardware can lose all context when a CPU goes offline.
  * As we will be running in SMP mode with CPU hotplug, we will save the
  * hardware state at every thread switch. We clear our held state when
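
The two barrier comments added to plat-omap/dma.c above describe a general pattern: buffer or descriptor data written to coherent DMA memory with normal stores is not ordered against a relaxed MMIO write, so a barrier has to sit between filling the memory and the register write that starts the engine. The sketch below is a minimal illustration of that pattern, not OMAP code; the fake_dma_* names and register layout are made up, while mb() and writel_relaxed() are the ordinary kernel accessors used in the patch.

/*
 * Illustrative only: order coherent-memory writes before the MMIO write
 * that enables a (hypothetical) DMA channel.
 */
#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

#define FAKE_DMA_CCR		0x80		/* hypothetical control register */
#define FAKE_DMA_CCR_EN		(1 << 7)	/* hypothetical enable bit */

static void fake_dma_start(void __iomem *base, u32 ccr)
{
	/*
	 * Descriptors and buffers were filled with normal stores;
	 * writel_relaxed() is not ordered against them, so make that
	 * data visible to the device before setting the enable bit.
	 */
	mb();
	writel_relaxed(ccr | FAKE_DMA_CCR_EN, base + FAKE_DMA_CCR);
}

Using the non-relaxed writel() would provide the same ordering implicitly on ARM, at the cost of a barrier on every register write; a driver that sticks with relaxed accessors instead adds one explicit mb() at the point where ordering actually matters.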