Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/Kconfig | 2
-rw-r--r--  arch/alpha/include/asm/atomic.h | 4
-rw-r--r--  arch/alpha/include/asm/fpu.h | 2
-rw-r--r--  arch/alpha/include/asm/ptrace.h | 5
-rw-r--r--  arch/alpha/include/asm/socket.h | 2
-rw-r--r--  arch/alpha/include/asm/uaccess.h | 34
-rw-r--r--  arch/alpha/include/asm/unistd.h | 4
-rw-r--r--  arch/alpha/include/asm/word-at-a-time.h | 55
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c | 3
-rw-r--r--  arch/alpha/kernel/entry.S | 161
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 49
-rw-r--r--  arch/alpha/kernel/process.c | 19
-rw-r--r--  arch/alpha/kernel/systbls.S | 4
-rw-r--r--  arch/alpha/lib/Makefile | 2
-rw-r--r--  arch/alpha/lib/ev6-strncpy_from_user.S | 424
-rw-r--r--  arch/alpha/lib/ev67-strlen_user.S | 107
-rw-r--r--  arch/alpha/lib/strlen_user.S | 91
-rw-r--r--  arch/alpha/lib/strncpy_from_user.S | 339
-rw-r--r--  arch/alpha/mm/fault.c | 36
-rw-r--r--  arch/alpha/oprofile/common.c | 1
-rw-r--r--  arch/arm/Kconfig | 9
-rw-r--r--  arch/arm/Kconfig.debug | 6
-rw-r--r--  arch/arm/Makefile | 4
-rw-r--r--  arch/arm/boot/compressed/head.S | 5
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9260.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/at91sam9263.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9g25ek.dts | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9g45.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/at91sam9n12.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/imx51-babbage.dts | 4
-rw-r--r--  arch/arm/boot/dts/kirkwood-iconnect.dts | 6
-rw-r--r--  arch/arm/boot/dts/twl6030.dtsi | 3
-rw-r--r--  arch/arm/configs/armadillo800eva_defconfig | 2
-rw-r--r--  arch/arm/configs/u8500_defconfig | 1
-rw-r--r--  arch/arm/include/asm/assembler.h | 8
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arm/include/asm/memory.h | 3
-rw-r--r--  arch/arm/include/asm/pgtable.h | 40
-rw-r--r--  arch/arm/include/asm/sched_clock.h | 2
-rw-r--r--  arch/arm/include/asm/tlb.h | 4
-rw-r--r--  arch/arm/include/asm/uaccess.h | 58
-rw-r--r--  arch/arm/include/asm/unistd.h | 2
-rw-r--r--  arch/arm/kernel/calls.S | 1
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 62
-rw-r--r--  arch/arm/kernel/sched_clock.c | 24
-rw-r--r--  arch/arm/kernel/smp_twd.c | 48
-rw-r--r--  arch/arm/kernel/topology.c | 2
-rw-r--r--  arch/arm/kernel/traps.c | 11
-rw-r--r--  arch/arm/lib/Makefile | 23
-rw-r--r--  arch/arm/lib/delay.c | 1
-rw-r--r--  arch/arm/lib/getuser.S | 23
-rw-r--r--  arch/arm/lib/io-readsw-armv3.S | 106
-rw-r--r--  arch/arm/lib/io-writesw-armv3.S | 126
-rw-r--r--  arch/arm/lib/putuser.S | 6
-rw-r--r--  arch/arm/lib/uaccess.S | 564
-rw-r--r--  arch/arm/mach-at91/at91rm9200_time.c | 2
-rw-r--r--  arch/arm/mach-at91/at91sam9260_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9261_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9263_devices.c | 10
-rw-r--r--  arch/arm/mach-at91/at91sam9g45_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/at91sam9rl_devices.c | 6
-rw-r--r--  arch/arm/mach-at91/clock.c | 12
-rw-r--r--  arch/arm/mach-dove/common.c | 3
-rw-r--r--  arch/arm/mach-exynos/mach-origen.c | 7
-rw-r--r--  arch/arm/mach-exynos/mach-smdkv310.c | 7
-rw-r--r--  arch/arm/mach-gemini/irq.c | 1
-rw-r--r--  arch/arm/mach-imx/Makefile | 10
-rw-r--r--  arch/arm/mach-imx/clk-imx25.c | 8
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 6
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 8
-rw-r--r--  arch/arm/mach-imx/headsmp.S (renamed from arch/arm/mach-imx/head-v7.S) | 0
-rw-r--r--  arch/arm/mach-imx/hotplug.c | 23
-rw-r--r--  arch/arm/mach-imx/mach-armadillo5x0.c | 3
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c | 4
-rw-r--r--  arch/arm/mach-kirkwood/Makefile.boot | 3
-rw-r--r--  arch/arm/mach-kirkwood/common.c | 11
-rw-r--r--  arch/arm/mach-kirkwood/db88f6281-bp-setup.c | 1
-rw-r--r--  arch/arm/mach-mmp/sram.c | 2
-rw-r--r--  arch/arm/mach-mv78xx0/addr-map.c | 2
-rw-r--r--  arch/arm/mach-mv78xx0/common.c | 6
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 6
-rw-r--r--  arch/arm/mach-omap2/Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/board-igep0020.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-omap3evm.c | 1
-rw-r--r--  arch/arm/mach-omap2/clock33xx_data.c | 14
-rw-r--r--  arch/arm/mach-omap2/clockdomain2xxx_3xxx.c | 50
-rw-r--r--  arch/arm/mach-omap2/cm-regbits-34xx.h | 1
-rw-r--r--  arch/arm/mach-omap2/common-board-devices.c | 11
-rw-r--r--  arch/arm/mach-omap2/common-board-devices.h | 1
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 3
-rw-r--r--  arch/arm/mach-omap2/mux.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 1
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 15
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 12
-rw-r--r--  arch/arm/mach-omap2/opp4xxx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 19
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S | 8
-rw-r--r--  arch/arm/mach-omap2/timer.c | 7
-rw-r--r--  arch/arm/mach-omap2/twl-common.c | 1
-rw-r--r--  arch/arm/mach-orion5x/common.c | 3
-rw-r--r--  arch/arm/mach-s3c24xx/include/mach/dma.h | 3
-rw-r--r--  arch/arm/mach-shmobile/board-armadillo800eva.c | 13
-rw-r--r--  arch/arm/mach-shmobile/board-kzm9g.c | 4
-rw-r--r--  arch/arm/mach-shmobile/board-mackerel.c | 3
-rw-r--r--  arch/arm/mach-shmobile/board-marzen.c | 2
-rw-r--r--  arch/arm/mach-shmobile/intc-sh73a0.c | 4
-rw-r--r--  arch/arm/mach-ux500/Kconfig | 1
-rw-r--r--  arch/arm/mach-ux500/board-mop500-msp.c | 10
-rw-r--r--  arch/arm/mach-ux500/board-mop500.c | 4
-rw-r--r--  arch/arm/mm/context.c | 7
-rw-r--r--  arch/arm/mm/dma-mapping.c | 114
-rw-r--r--  arch/arm/mm/flush.c | 2
-rw-r--r--  arch/arm/mm/mm.h | 3
-rw-r--r--  arch/arm/mm/mmu.c | 8
-rw-r--r--  arch/arm/mm/tlb-v7.S | 6
-rw-r--r--  arch/arm/plat-mxc/include/mach/mx25.h | 1
-rw-r--r--  arch/arm/plat-omap/dmtimer.c | 6
-rw-r--r--  arch/arm/plat-omap/include/plat/cpu.h | 3
-rw-r--r--  arch/arm/plat-omap/include/plat/multi.h | 9
-rw-r--r--  arch/arm/plat-omap/include/plat/uncompress.h | 4
-rw-r--r--  arch/arm/plat-omap/sram.c | 11
-rw-r--r--  arch/arm/plat-orion/common.c | 8
-rw-r--r--  arch/arm/plat-orion/include/plat/common.h | 6
-rw-r--r--  arch/arm/plat-s3c24xx/dma.c | 2
-rw-r--r--  arch/arm/plat-samsung/clock.c | 10
-rw-r--r--  arch/arm/plat-samsung/devs.c | 29
-rw-r--r--  arch/arm/plat-samsung/include/plat/hdmi.h | 16
-rw-r--r--  arch/arm/plat-samsung/pm.c | 2
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 2
-rw-r--r--  arch/blackfin/Kconfig | 1
-rw-r--r--  arch/blackfin/Makefile | 1
-rw-r--r--  arch/blackfin/include/asm/smp.h | 2
-rw-r--r--  arch/blackfin/mach-common/smp.c | 223
-rw-r--r--  arch/c6x/Kconfig | 1
-rw-r--r--  arch/c6x/include/asm/cache.h | 16
-rw-r--r--  arch/ia64/configs/generic_defconfig | 1
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 1
-rw-r--r--  arch/m68k/Kconfig | 1
-rw-r--r--  arch/m68k/Kconfig.cpu | 5
-rw-r--r--  arch/m68k/platform/coldfire/clk.c | 6
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/alchemy/board-mtx1.c | 2
-rw-r--r--  arch/mips/ath79/dev-usb.c | 2
-rw-r--r--  arch/mips/ath79/gpio.c | 6
-rw-r--r--  arch/mips/bcm63xx/dev-spi.c | 4
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 89
-rw-r--r--  arch/mips/include/asm/mach-ath79/ar71xx_regs.h | 3
-rw-r--r--  arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h | 2
-rw-r--r--  arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 13
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/irq.h | 10
-rw-r--r--  arch/mips/include/asm/module.h | 1
-rw-r--r--  arch/mips/include/asm/r4k-timer.h | 8
-rw-r--r--  arch/mips/kernel/module.c | 43
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 2
-rw-r--r--  arch/mips/kernel/smp.c | 4
-rw-r--r--  arch/mips/kernel/sync-r4k.c | 26
-rw-r--r--  arch/mips/mm/gup.c | 2
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 9
-rw-r--r--  arch/mips/mti-malta/malta-pci.c | 13
-rw-r--r--  arch/mips/mti-malta/malta-platform.c | 5
-rw-r--r--  arch/mips/pci/pci-ar724x.c | 22
-rw-r--r--  arch/parisc/include/asm/atomic.h | 4
-rw-r--r--  arch/parisc/kernel/process.c | 2
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 8
-rw-r--r--  arch/powerpc/boot/dts/fsl/p4080si-post.dtsi | 7
-rw-r--r--  arch/powerpc/configs/85xx/p1023rds_defconfig | 31
-rw-r--r--  arch/powerpc/configs/corenet32_smp_defconfig | 29
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 103
-rw-r--r--  arch/powerpc/configs/mpc83xx_defconfig | 18
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 33
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 32
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 12
-rw-r--r--  arch/powerpc/include/asm/mpic_msgr.h | 1
-rw-r--r--  arch/powerpc/include/asm/processor.h | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r--  arch/powerpc/kernel/dbell.c | 2
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c | 9
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 23
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 3
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r--  arch/powerpc/kernel/kgdb.c | 27
-rw-r--r--  arch/powerpc/kernel/process.c | 12
-rw-r--r--  arch/powerpc/kernel/smp.c | 11
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 8
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 10
-rw-r--r--  arch/powerpc/kernel/time.c | 9
-rw-r--r--  arch/powerpc/kernel/traps.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c | 11
-rw-r--r--  arch/powerpc/lib/code-patching.c | 2
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S | 35
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S | 4
-rw-r--r--  arch/powerpc/mm/mem.c | 1
-rw-r--r--  arch/powerpc/mm/numa.c | 7
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 10
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 13
-rw-r--r--  arch/powerpc/sysdev/mpic_msgr.c | 3
-rw-r--r--  arch/powerpc/sysdev/xics/icp-hv.c | 6
-rw-r--r--  arch/powerpc/xmon/xmon.c | 84
-rw-r--r--  arch/s390/include/asm/elf.h | 3
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 24
-rw-r--r--  arch/s390/include/asm/posix_types.h | 3
-rw-r--r--  arch/s390/include/asm/smp.h | 1
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 2
-rw-r--r--  arch/s390/kernel/setup.c | 2
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 142
-rw-r--r--  arch/s390/oprofile/init.c | 10
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S | 2
-rw-r--r--  arch/sh/kernel/entry-common.S | 2
-rw-r--r--  arch/sparc/kernel/module.c | 13
-rw-r--r--  arch/um/os-Linux/time.c | 2
-rw-r--r--  arch/x86/Kconfig | 8
-rw-r--r--  arch/x86/Makefile | 6
-rw-r--r--  arch/x86/boot/Makefile | 2
-rw-r--r--  arch/x86/include/asm/spinlock.h | 3
-rw-r--r--  arch/x86/include/asm/xen/page.h | 3
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 10
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 35
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 259
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 46
-rw-r--r--  arch/x86/kernel/irq.c | 2
-rw-r--r--  arch/x86/kernel/microcode_amd.c | 7
-rw-r--r--  arch/x86/kernel/microcode_core.c | 3
-rw-r--r--  arch/x86/kvm/emulate.c | 30
-rw-r--r--  arch/x86/kvm/i8259.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 13
-rw-r--r--  arch/x86/kvm/vmx.c | 23
-rw-r--r--  arch/x86/kvm/x86.c | 18
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 21
-rw-r--r--  arch/x86/mm/init.c | 2
-rw-r--r--  arch/x86/mm/pageattr.c | 10
-rw-r--r--  arch/x86/platform/efi/efi.c | 30
-rw-r--r--  arch/x86/realmode/rm/Makefile | 2
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 6
-rw-r--r--  arch/x86/xen/enlighten.c | 122
-rw-r--r--  arch/x86/xen/mmu.c | 2
-rw-r--r--  arch/x86/xen/p2m.c | 122
-rw-r--r--  arch/x86/xen/setup.c | 9
-rw-r--r--  arch/x86/xen/suspend.c | 2
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
257 files changed, 2861 insertions, 2449 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d5b9b5e645cc..9944dedee5b1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,6 +18,8 @@ config ALPHA
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 3bb7ffeae3bc..c2cbe4fc391c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
*/
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index db00f7885faa..e477bcd5b94a 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,7 +1,9 @@
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
+#ifdef __KERNEL__
#include <asm/special_insns.h>
+#endif
/*
* Alpha floating-point control register defines:
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index fd698a174f26..b87755a19554 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -76,7 +76,10 @@ struct switch_stack {
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
-#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
+#define current_pt_regs() \
+ ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
#endif
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index dcb221a4b5be..7d2f75be932e 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -76,9 +76,11 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#ifdef __KERNEL__
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
#define SOCK_NONBLOCK 0x40000000
+#endif /* __KERNEL__ */
#endif /* _ASM_SOCKET_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b49ec2f8d6e3..766fdfde2b7a 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -433,36 +433,12 @@ clear_user(void __user *to, long len)
#undef __module_address
#undef __module_call
-/* Returns: -EFAULT if exception before terminator, N if the entire
- buffer filled, else strlen. */
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
-extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
-
-extern inline long
-strncpy_from_user(char *to, const char __user *from, long n)
-{
- long ret = -EFAULT;
- if (__access_ok((unsigned long)from, 0, get_fs()))
- ret = __strncpy_from_user(to, from, n);
- return ret;
-}
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern long __strlen_user(const char __user *);
-
-extern inline long strlen_user(const char __user *str)
-{
- return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
-}
-
-/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
- * a value greater than N if the limit would be exceeded, else strlen. */
-extern long __strnlen_user(const char __user *, long);
-
-extern inline long strnlen_user(const char __user *str, long n)
-{
- return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
/*
* About the exception table:
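[Editor's sketch, not part of the patch] The generic GENERIC_STRNCPY_FROM_USER/GENERIC_STRNLEN_USER code enabled above relies on the new user_addr_max() macro to bound how far it may scan: under USER_DS the limit is TASK_SIZE, under KERNEL_DS it is ~0UL. A minimal sketch of that clamping idea follows; strnlen_user_sketch() and do_strnlen_user() are hypothetical names standing in for the real lib/ helpers.

/* Sketch only: clamp a user-string scan to the current address limit. */
static long strnlen_user_sketch(const char __user *src, long count)
{
	unsigned long max_addr = user_addr_max();	/* TASK_SIZE or ~0UL, per the macro above */
	unsigned long src_addr = (unsigned long)src;
	unsigned long max;

	if (src_addr >= max_addr)
		return 0;				/* start is already past the limit */

	max = max_addr - src_addr;
	if (max > (unsigned long)count)
		max = count;

	/* hypothetical word-at-a-time walker, never reads past 'max' bytes */
	return do_strnlen_user(src, count, max);
}
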
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 633b23b0664a..a31a78eac9b9 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -465,10 +465,12 @@
#define __NR_setns 501
#define __NR_accept4 502
#define __NR_sendmmsg 503
+#define __NR_process_vm_readv 504
+#define __NR_process_vm_writev 505
#ifdef __KERNEL__
-#define NR_SYSCALLS 504
+#define NR_SYSCALLS 506
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6b340d0f1521
--- /dev/null
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/compiler.h>
+
+/*
+ * word-at-a-time interface for Alpha.
+ */
+
+/*
+ * We do not use the word_at_a_time struct on Alpha, but it needs to be
+ * implemented to humour the generic code.
+ */
+struct word_at_a_time {
+ const unsigned long unused;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { 0 }
+
+/* Return nonzero if val has a zero */
+static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long zero_locations = __kernel_cmpbge(0, val);
+ *bits = zero_locations;
+ return zero_locations;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long find_zero(unsigned long bits)
+{
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
+ /* Simple if have CIX instructions */
+ return __kernel_cttz(bits);
+#else
+ unsigned long t1, t2, t3;
+ /* Retain lowest set bit only */
+ bits &= -bits;
+ /* Binary search for lowest set bit */
+ t1 = bits & 0xf0;
+ t2 = bits & 0xcc;
+ t3 = bits & 0xaa;
+ if (t1) t1 = 4;
+ if (t2) t2 = 2;
+ if (t3) t3 = 1;
+ return t1 + t2 + t3;
+#endif
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
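[Editor's sketch, not part of the patch] The header above is consumed by the generic word-at-a-time string code: the caller loads an aligned word, asks has_zero() whether any byte is NUL (cmpbge yields a per-byte mask on Alpha), and turns that mask into a byte index via prep_zero_mask()/create_zero_mask()/find_zero(). A minimal usage sketch, assuming an 8-byte-aligned string (simplified, not the actual lib/string helpers):

/* Sketch only: strlen using the interface defined above. */
static inline long strlen_word_at_a_time(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	const unsigned long *p = (const unsigned long *)s;	/* assumes 's' is 8-byte aligned */
	unsigned long val, bits;
	long len = 0;

	for (;;) {
		val = *p++;
		if (has_zero(val, &bits, &constants)) {
			/* 'bits' is the cmpbge byte mask; find_zero() converts
			   it to the index of the first NUL byte in the word. */
			bits = prep_zero_mask(val, bits, &constants);
			return len + find_zero(create_zero_mask(bits));
		}
		len += sizeof(unsigned long);
	}
}
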
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index d96e742d4dc2..15fa821d09cd 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* entry.S */
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(kernel_execve);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul);
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
/*
* SMP-specific symbols.
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 6d159cee5f2f..ec0da0567ab5 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -663,58 +663,6 @@ kernel_thread:
br ret_to_kernel
.end kernel_thread
-/*
- * kernel_execve(path, argv, envp)
- */
- .align 4
- .globl kernel_execve
- .ent kernel_execve
-kernel_execve:
- /* We can be called from a module. */
- ldgp $gp, 0($27)
- lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
- .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
- stq $26, 0($sp)
- stq $16, 8($sp)
- stq $17, 16($sp)
- stq $18, 24($sp)
- .prologue 1
-
- lda $16, 32($sp)
- lda $17, 0
- lda $18, SIZEOF_PT_REGS
- bsr $26, memset !samegp
-
- /* Avoid the HAE being gratuitously wrong, which would cause us
- to do the whole turn off interrupts thing and restore it. */
- ldq $2, alpha_mv+HAE_CACHE
- stq $2, 152+32($sp)
-
- ldq $16, 8($sp)
- ldq $17, 16($sp)
- ldq $18, 24($sp)
- lda $19, 32($sp)
- bsr $26, do_execve !samegp
-
- ldq $26, 0($sp)
- bne $0, 1f /* error! */
-
- /* Move the temporary pt_regs struct from its current location
- to the top of the kernel stack frame. See copy_thread for
- details for a normal process. */
- lda $16, 0x4000 - SIZEOF_PT_REGS($8)
- lda $17, 32($sp)
- lda $18, SIZEOF_PT_REGS
- bsr $26, memmove !samegp
-
- /* Take that over as our new stack frame and visit userland! */
- lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
- br $31, ret_from_sys_call
-
-1: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
- ret
-.end kernel_execve
-
/*
* Special system calls. Most of these are special in that they either
@@ -797,115 +745,6 @@ sys_rt_sigreturn:
.end sys_rt_sigreturn
.align 4
- .globl sys_sethae
- .ent sys_sethae
-sys_sethae:
- .prologue 0
- stq $16, 152($sp)
- ret
-.end sys_sethae
-
- .align 4
- .globl osf_getpriority
- .ent osf_getpriority
-osf_getpriority:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- jsr $26, sys_getpriority
-
- ldq $26, 0($sp)
- blt $0, 1f
-
- /* Return value is the unbiased priority, i.e. 20 - prio.
- This does result in negative return values, so signal
- no error by writing into the R0 slot. */
- lda $1, 20
- stq $31, 16($sp)
- subl $1, $0, $0
- unop
-
-1: lda $sp, 16($sp)
- ret
-.end osf_getpriority
-
- .align 4
- .globl sys_getxuid
- .ent sys_getxuid
-sys_getxuid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_UID($3)
- ldl $1, CRED_EUID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxuid
-
- .align 4
- .globl sys_getxgid
- .ent sys_getxgid
-sys_getxgid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_GID($3)
- ldl $1, CRED_EGID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxgid
-
- .align 4
- .globl sys_getxpid
- .ent sys_getxpid
-sys_getxpid:
- .prologue 0
- ldq $2, TI_TASK($8)
-
- /* See linux/kernel/timer.c sys_getppid for discussion
- about this loop. */
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- ldl $0, TASK_TGID($2)
-1: ldl $1, TASK_TGID($4)
-#ifdef CONFIG_SMP
- mov $4, $5
- mb
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- cmpeq $4, $5, $5
- beq $5, 1b
-#endif
- stq $1, 80($sp)
- ret
-.end sys_getxpid
-
- .align 4
- .globl sys_alpha_pipe
- .ent sys_alpha_pipe
-sys_alpha_pipe:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- mov $31, $17
- lda $16, 8($sp)
- jsr $26, do_pipe_flags
-
- ldq $26, 0($sp)
- bne $0, 1f
-
- /* The return values are in $0 and $20. */
- ldl $1, 12($sp)
- ldl $0, 8($sp)
-
- stq $1, 80+16($sp)
-1: lda $sp, 16($sp)
- ret
-.end sys_alpha_pipe
-
- .align 4
.globl sys_execve
.ent sys_execve
sys_execve:
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 98a103621af6..bc1acdda7a5e 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
}
#endif
+
+SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
+{
+ int prio = sys_getpriority(which, who);
+ if (prio >= 0) {
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ This does result in negative return values, so signal
+ no error */
+ force_successful_syscall_return();
+ prio = 20 - prio;
+ }
+ return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+ current_pt_regs()->r20 = sys_geteuid();
+ return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+ current_pt_regs()->r20 = sys_getegid();
+ return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+ current_pt_regs()->r20 = sys_getppid();
+ return sys_getpid();
+}
+
+SYSCALL_DEFINE0(alpha_pipe)
+{
+ int fd[2];
+ int res = do_pipe_flags(fd, 0);
+ if (!res) {
+ /* The return values are in $0 and $20. */
+ current_pt_regs()->r20 = fd[1];
+ res = fd[0];
+ }
+ return res;
+}
+
+SYSCALL_DEFINE1(sethae, unsigned long, val)
+{
+ current_pt_regs()->hae = val;
+ return 0;
+}
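[Editor's note] These C helpers replace the hand-written stubs removed from entry.S above. The OSF/Alpha convention lets a few syscalls return a second value in register $20 alongside the primary value in $0; the C versions do this by writing current_pt_regs()->r20, as getxuid/getxgid/getxpid show. A hypothetical example of the same pattern, for illustration only:

SYSCALL_DEFINE0(example_pair)
{
	/* secondary result: lands in $20, like the euid/egid/ppid above */
	current_pt_regs()->r20 = 42;
	/* primary result: returned in $0 as usual */
	return 7;
}
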
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fce3e8e..d6fde98b74b3 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)
}
return pc;
}
+
+int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
+{
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ struct pt_regs regs = {.hae = alpha_mv.hae_cache};
+ int err = do_execve(path, argv, envp, &regs);
+ if (!err) {
+ struct pt_regs *p = current_pt_regs();
+ /* copy regs to normal position and off to userland we go... */
+ *p = regs;
+ __asm__ __volatile__ (
+ "mov %0, $sp;"
+ "br $31, ret_from_sys_call"
+ : : "r"(p));
+ }
+ return err;
+}
+EXPORT_SYMBOL(kernel_execve);
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 87835235f114..2ac6b45c3e00 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@ sys_call_table:
.quad sys_socket
.quad sys_connect
.quad sys_accept
- .quad osf_getpriority /* 100 */
+ .quad sys_osf_getpriority /* 100 */
.quad sys_send
.quad sys_recv
.quad sys_sigreturn
@@ -522,6 +522,8 @@ sys_call_table:
.quad sys_setns
.quad sys_accept4
.quad sys_sendmmsg
+ .quad sys_process_vm_readv
+ .quad sys_process_vm_writev /* 505 */
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index c0a83ab62b78..59660743237c 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
$(ev6-y)memchr.o \
$(ev6-y)copy_user.o \
$(ev6-y)clear_user.o \
- $(ev6-y)strncpy_from_user.o \
- $(ev67-y)strlen_user.o \
$(ev6-y)csum_ipv6_magic.o \
$(ev6-y)clear_page.o \
$(ev6-y)copy_page.o \
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e28178cacc..000000000000
--- a/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * arch/alpha/lib/ev6-strncpy_from_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * A bunch of instructions got moved and temp registers were changed
- * to aid in scheduling. Control flow was also re-arranged to eliminate
- * branches, and to provide longer code sequences to enable better scheduling.
- * A total rewrite (using byte load/stores for start & tail sequences)
- * is desirable, but very difficult to do without a from-scratch rewrite.
- * Save that for the future.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 4
-__strncpy_from_user:
- and a0, 7, t3 # E : find dest misalignment
- beq a2, $zerolength # U :
-
- /* Are source and destination co-aligned? */
- mov a0, v0 # E : save the string start
- xor a0, a1, t4 # E :
- EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword
- ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword
-
- addq a2, t3, a2 # E : bias count by dest misalignment
- subq a2, 1, a3 # E :
- addq zero, 1, t10 # E :
- and t4, 7, t4 # E : misalignment between the two
-
- and a3, 7, t6 # E : number of tail bytes
- sll t10, t6, t10 # E : t10 = bitmask of last count byte
- bne t4, $unaligned # U :
- lda t2, -1 # E : build a mask against false zero
-
- /*
- * We are co-aligned; take care of a partial first word.
- * On entry to this basic block:
- * t0 == the first destination word for masking back in
- * t1 == the first source word.
- */
-
- srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8
- addq a1, 8, a1 # E :
- mskqh t2, a1, t2 # U : detection in the src word
- nop
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- mskqh t1, a1, t3 # U :
- mskql t0, a1, t0 # U : assemble the first output word
- ornot t1, t2, t2 # E :
- nop
-
- cmpbge zero, t2, t8 # E : bits set iff null found
- or t0, t3, t0 # E :
- beq a2, $a_eoc # U :
- bne t8, $a_eos # U : 2nd branch in a quad. Bad.
-
- /* On entry to this basic block:
- * t0 == a source quad not containing a null.
- * a0 - current aligned destination address
- * a1 - current aligned source address
- * a2 - count of quadwords to move.
- * NOTE: Loop improvement - unrolling this is going to be
- * a huge win, since we're going to stall otherwise.
- * Fix this later. For _really_ large copies, look
- * at using wh64 on a look-ahead basis. See the code
- * in clear_user.S and copy_user.S.
- * Presumably, since (a0) and (a1) do not overlap (by C definition)
- * Lots of nops here:
- * - Separate loads from stores
- * - Keep it to 1 branch/quadpack so the branch predictor
- * can train.
- */
-$a_loop:
- stq_u t0, 0(a0) # L :
- addq a0, 8, a0 # E :
- nop
- subq a2, 1, a2 # E :
-
- EX( ldq_u t0, 0(a1) ) # L :
- addq a1, 8, a1 # E :
- cmpbge zero, t0, t8 # E : Stall 2 cycles on t0
- beq a2, $a_eoc # U :
-
- beq t8, $a_loop # U :
- nop
- nop
- nop
-
- /* Take care of the final (partial) word store. At this point
- * the end-of-count bit is set in t8 iff it applies.
- *
- * On entry to this basic block we have:
- * t0 == the source word containing the null
- * t8 == the cmpbge mask that found it.
- */
-$a_eos:
- negq t8, t12 # E : find low bit set
- and t8, t12, t12 # E :
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t12, t6, t8 # E :
- zapnot t0, t8, t0 # U : clear src bytes > null
- zap t1, t8, t1 # U : clear dst bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # L :
- br $finish_up # L0 :
- nop
- nop
-
- /* Add the end-of-count bit to the eos detection bitmask. */
- .align 4
-$a_eoc:
- or t10, t8, t8
- br $a_eos
- nop
- nop
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 4
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # L : load second src word
- addq a1, 8, a1 # E :
- mskql t0, a0, t0 # U : mask trailing garbage in dst
- extqh t2, a1, t4 # U :
-
- or t1, t4, t1 # E : first aligned src word complete
- mskqh t1, a0, t1 # U : mask leading garbage in src
- or t0, t1, t0 # E : first output word complete
- or t0, t6, t6 # E : mask original data for zero test
-
- cmpbge zero, t6, t8 # E :
- beq a2, $u_eocfin # U :
- bne t8, $u_final # U : bad news - 2nd branch in a quad
- lda t6, -1 # E : mask out the bits we have
-
- mskql t6, a1, t6 # U : already seen
- stq_u t0, 0(a0) # L : store first output word
- or t6, t2, t2 # E :
- cmpbge zero, t2, t8 # E : find nulls in second partial
-
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
- bne t8, $u_late_head_exit # U :
- nop
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # U : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # L : read next high-order source word
- addq a1, 8, a1 # E :
- cmpbge zero, t2, t8 # E :
-
- beq a2, $u_eoc # U :
- bne t8, $u_eos # U :
- nop
- nop
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- /*
- * Extra nops here:
- * separate load quads from store quads
- * only one branch/quad to permit predictor training
- */
-
- .align 4
-$u_loop:
- extqh t2, a1, t0 # U : extract high bits for current word
- addq a1, 8, a1 # E :
- extql t2, a1, t3 # U : extract low bits for next time
- addq a0, 8, a0 # E :
-
- or t0, t1, t0 # E : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # L : load high word for next time
- subq a2, 1, a2 # E :
- nop
-
- stq_u t0, -8(a0) # L : save the current word
- mov t3, t1 # E :
- cmpbge zero, t2, t8 # E : test new word for eos
- beq a2, $u_eoc # U :
-
- beq t8, $u_loop # U :
- nop
- nop
- nop
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
- .align 4
-$u_eos:
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E : first (partial) source word complete
- cmpbge zero, t0, t8 # E : is the null in this first bit?
- nop
-
- bne t8, $u_final # U :
- stq_u t0, 0(a0) # L : the null was in the high-order bits
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
-
- .align 4
-$u_late_head_exit:
- extql t2, a1, t0 # U :
- cmpbge zero, t0, t8 # E :
- or t8, t10, t6 # E :
- cmoveq a2, t6, t8 # E :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
- .align 4
-$u_final:
- negq t8, t6 # E : isolate low bit set
- and t6, t8, t12 # E :
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t6, t12, t8 # E :
- zapnot t0, t8, t0 # U : kill source bytes > null
- zap t1, t8, t1 # U : kill dest bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # E :
- br $finish_up # U :
- nop
- nop
-
- .align 4
-$u_eoc: # end-of-count
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E :
- cmpbge zero, t0, t8 # E :
- nop
-
- .align 4
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8 # E :
- br $u_final # U :
- nop
- nop
-
- /* Unaligned copy entry point. */
- .align 4
-$unaligned:
-
- srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8
- and a0, 7, t4 # E : find dest misalignment
- and a1, 7, t5 # E : find src misalignment
- mov zero, t0 # E :
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t6 # E :
- beq t4, 1f # U :
- ldq_u t0, 0(a0) # L :
- lda t6, -1 # E :
-
- mskql t6, a0, t6 # E :
- nop
- nop
- nop
-
- .align 4
-1:
- subq a1, t4, a1 # E : sub dest misalignment from src addr
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
- cmplt t4, t5, t12 # E :
- extql t1, a1, t1 # U : shift src into place
- lda t2, -1 # E : for creating masks later
-
- beq t12, $u_head # U :
- mskqh t2, t5, t2 # U : begin src byte validity mask
- cmpbge zero, t1, t8 # E : is there a zero?
- nop
-
- extql t2, a1, t2 # U :
- or t8, t10, t5 # E : test for end-of-count too
- cmpbge zero, t2, t3 # E :
- cmoveq a2, t5, t8 # E : Latency=2, extra map slot
-
- nop # E : goes with cmov
- andnot t8, t3, t8 # E :
- beq t8, $u_head # U :
- nop
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # L :
- negq t8, t6 # E : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # U :
- and t6, t8, t12 # E :
-
- subq t12, 1, t6 # E :
- or t6, t12, t8 # E :
- zapnot t2, t8, t2 # U : prepare source word; mirror changes
- zapnot t1, t8, t1 # U : to source validity mask
-
- andnot t0, t2, t0 # E : zero place for source to reside
- or t0, t1, t0 # E : and put it there
- stq_u t0, 0(a0) # L :
- nop
-
- .align 4
-$finish_up:
- zapnot t0, t12, t4 # U : was last byte written null?
- and t12, 0xf0, t3 # E : binary search for the address of the
- cmovne t4, 1, t4 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- and t12, 0xcc, t2 # E : last byte written
- and t12, 0xaa, t1 # E :
- cmovne t3, 4, t3 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- bic a0, 7, t0
- cmovne t2, 2, t2 # E : Latency=2, extra map slot
- nop # E : with cmovne
- nop
-
- cmovne t1, 1, t1 # E : Latency=2, extra map slot
- nop # E : with cmovne
- addq t0, t3, t0 # E :
- addq t1, t2, t1 # E :
-
- addq t0, t1, t0 # E :
- addq t0, t4, t0 # add one if we filled the buffer
- subq t0, v0, v0 # find string length
- ret # L0 :
-
- .align 4
-$zerolength:
- nop
- nop
- nop
- clr v0
-
-$exception:
- nop
- nop
- nop
- ret
-
- .end __strncpy_from_user
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
deleted file mode 100644
index 57e0d77b81a6..000000000000
--- a/arch/alpha/lib/ev67-strlen_user.S
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * arch/alpha/lib/ev67-strlen_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
- *
- * Return the length of the string including the NULL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * Try not to change the actual algorithm if possible for consistency.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 4
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
- nop
- nop
- nop
-
- .globl __strnlen_user
-
- .align 4
-__strnlen_user:
- .prologue 0
- EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned)
- lda t1, -1(zero) # E :
-
- insqh t1, a0, t1 # U :
- andnot a0, 7, v0 # E :
- or t1, t0, t0 # E :
- subq a0, 1, a0 # E : get our +1 for the return
-
- cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2 # E :
- subq a0, v0, t0 # E :
- bne t1, $found # U :
-
- addq t2, t0, t2 # E :
- addq a1, 1, a1 # E :
- nop # E :
- nop # E :
-
- .align 4
-$loop: ble t2, $limit # U :
- EX( ldq t0, 8(v0) ) # L :
- nop # E :
- nop # E :
-
- cmpbge zero, t0, t1 # E :
- subq t2, 8, t2 # E :
- addq v0, 8, v0 # E : addr += 8
- beq t1, $loop # U :
-
-$found: cttz t1, t2 # U0 :
- addq v0, t2, v0 # E :
- subq v0, a0, v0 # E :
- ret # L0 :
-
-$exception:
- nop
- nop
- nop
- ret
-
- .align 4 # currently redundant
-$limit:
- nop
- nop
- subq a1, t2, v0
- ret
-
- .end __strlen_user
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
deleted file mode 100644
index 508a18e96479..000000000000
--- a/arch/alpha/lib/strlen_user.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * arch/alpha/lib/strlen_user.S
- *
- * Return the length of the string including the NUL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 3
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
-
- .globl __strnlen_user
-
- .align 3
-__strnlen_user:
- .prologue 0
-
- EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned)
- lda t1, -1(zero)
- insqh t1, a0, t1
- andnot a0, 7, v0
- or t1, t0, t0
- subq a0, 1, a0 # get our +1 for the return
- cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2
- subq a0, v0, t0
- bne t1, $found
-
- addq t2, t0, t2
- addq a1, 1, a1
-
- .align 3
-$loop: ble t2, $limit
- EX( ldq t0, 8(v0) )
- subq t2, 8, t2
- addq v0, 8, v0 # addr += 8
- cmpbge zero, t0, t1
- beq t1, $loop
-
-$found: negq t1, t2 # clear all but least set bit
- and t1, t2, t1
-
- and t1, 0xf0, t2 # binary search for that set bit
- and t1, 0xcc, t3
- and t1, 0xaa, t4
- cmovne t2, 4, t2
- cmovne t3, 2, t3
- cmovne t4, 1, t4
- addq t2, t3, t2
- addq v0, t4, v0
- addq v0, t2, v0
- nop # dual issue next two on ev4 and ev5
- subq v0, a0, v0
-$exception:
- ret
-
- .align 3 # currently redundant
-$limit:
- subq a1, t2, v0
- ret
-
- .end __strlen_user
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
deleted file mode 100644
index 73ee21160ff7..000000000000
--- a/arch/alpha/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * arch/alpha/lib/strncpy_from_user.S
- * Contributed by Richard Henderson (rth@tamu.edu)
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 3
-$aligned:
- /* On entry to this basic block:
- t0 == the first destination word for masking back in
- t1 == the first source word. */
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- lda t2, -1 # e1 : build a mask against false zero
- mskqh t2, a1, t2 # e0 : detection in the src word
- mskqh t1, a1, t3 # e0 :
- ornot t1, t2, t2 # .. e1 :
- mskql t0, a1, t0 # e0 : assemble the first output word
- cmpbge zero, t2, t8 # .. e1 : bits set iff null found
- or t0, t3, t0 # e0 :
- beq a2, $a_eoc # .. e1 :
- bne t8, $a_eos # .. e1 :
-
- /* On entry to this basic block:
- t0 == a source word not containing a null. */
-
-$a_loop:
- stq_u t0, 0(a0) # e0 :
- addq a0, 8, a0 # .. e1 :
- EX( ldq_u t0, 0(a1) ) # e0 :
- addq a1, 8, a1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t0, t8 # .. e1 (stall)
- beq a2, $a_eoc # e1 :
- beq t8, $a_loop # e1 :
-
- /* Take care of the final (partial) word store. At this point
- the end-of-count bit is set in t8 iff it applies.
-
- On entry to this basic block we have:
- t0 == the source word containing the null
- t8 == the cmpbge mask that found it. */
-
-$a_eos:
- negq t8, t12 # e0 : find low bit set
- and t8, t12, t12 # e1 (stall)
-
- /* For the sake of the cache, don't read a destination word
- if we're not going to need it. */
- and t12, 0x80, t6 # e0 :
- bne t6, 1f # .. e1 (zdb)
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t12, t6, t8 # e0 :
- unop #
- zapnot t0, t8, t0 # e0 : clear src bytes > null
- zap t1, t8, t1 # .. e1 : clear dst bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0)
- br $finish_up
-
- /* Add the end-of-count bit to the eos detection bitmask. */
-$a_eoc:
- or t10, t8, t8
- br $a_eos
-
- /*** The Function Entry Point ***/
- .align 3
-__strncpy_from_user:
- mov a0, v0 # save the string start
- beq a2, $zerolength
-
- /* Are source and destination co-aligned? */
- xor a0, a1, t1 # e0 :
- and a0, 7, t0 # .. e1 : find dest misalignment
- and t1, 7, t1 # e0 :
- addq a2, t0, a2 # .. e1 : bias count by dest misalignment
- subq a2, 1, a2 # e0 :
- and a2, 7, t2 # e1 :
- srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
- addq zero, 1, t10 # .. e1 :
- sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
- bne t1, $unaligned # .. e1 :
-
- /* We are co-aligned; take care of a partial first word. */
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first src word
- addq a1, 8, a1 # .. e1 :
-
- beq t0, $aligned # avoid loading dest word if not needed
- ldq_u t0, 0(a0) # e0 :
- br $aligned # .. e1 :
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 3
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # e0 : load second src word
- addq a1, 8, a1 # .. e1 :
- mskql t0, a0, t0 # e0 : mask trailing garbage in dst
- extqh t2, a1, t4 # e0 :
- or t1, t4, t1 # e1 : first aligned src word complete
- mskqh t1, a0, t1 # e0 : mask leading garbage in src
- or t0, t1, t0 # e0 : first output word complete
- or t0, t6, t6 # e1 : mask original data for zero test
- cmpbge zero, t6, t8 # e0 :
- beq a2, $u_eocfin # .. e1 :
- bne t8, $u_final # e1 :
-
- lda t6, -1 # e1 : mask out the bits we have
- mskql t6, a1, t6 # e0 : already seen
- stq_u t0, 0(a0) # e0 : store first output word
- or t6, t2, t2 # .. e1 :
- cmpbge zero, t2, t8 # e0 : find nulls in second partial
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e0 :
- bne t8, $u_late_head_exit # .. e1 :
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # e0 : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word
- addq a1, 8, a1 # e0 :
- cmpbge zero, t2, t8 # e1 (stall)
- beq a2, $u_eoc # e1 :
- bne t8, $u_eos # e1 :
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- .align 3
-$u_loop:
- extqh t2, a1, t0 # e0 : extract high bits for current word
- addq a1, 8, a1 # .. e1 :
- extql t2, a1, t3 # e0 : extract low bits for next time
- addq a0, 8, a0 # .. e1 :
- or t0, t1, t0 # e0 : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time
- stq_u t0, -8(a0) # e0 : save the current word
- mov t3, t1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t2, t8 # .. e1 : test new word for eos
- beq a2, $u_eoc # e1 :
- beq t8, $u_loop # e1 :
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
-$u_eos:
- extqh t2, a1, t0 # e0 :
- or t0, t1, t0 # e1 : first (partial) source word complete
-
- cmpbge zero, t0, t8 # e0 : is the null in this first bit?
- bne t8, $u_final # .. e1 (zdb)
-
- stq_u t0, 0(a0) # e0 : the null was in the high-order bits
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e1 :
-
-$u_late_head_exit:
- extql t2, a1, t0 # .. e0 :
- cmpbge zero, t0, t8 # e0 :
- or t8, t10, t6 # e1 :
- cmoveq a2, t6, t8 # e0 :
- nop # .. e1 :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
-$u_final:
- negq t8, t6 # e0 : isolate low bit set
- and t6, t8, t12 # e1 :
-
- and t12, 0x80, t6 # e0 : avoid dest word load if we can
- bne t6, 1f # .. e1 (zdb)
-
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t6, t12, t8 # e0 :
- zapnot t0, t8, t0 # .. e1 : kill source bytes > null
- zap t1, t8, t1 # e0 : kill dest bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0) # e0 :
- br $finish_up
-
-$u_eoc: # end-of-count
- extqh t2, a1, t0
- or t0, t1, t0
- cmpbge zero, t0, t8
-
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8
- br $u_final
-
- /* Unaligned copy entry point. */
- .align 3
-$unaligned:
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first source word
-
- and a0, 7, t4 # .. e1 : find dest misalignment
- and a1, 7, t5 # e0 : find src misalignment
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t0 # .. e1 :
- mov zero, t6 # e0 :
- beq t4, 1f # .. e1 :
- ldq_u t0, 0(a0) # e0 :
- lda t6, -1 # .. e1 :
- mskql t6, a0, t6 # e0 :
-1:
- subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
-
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
-
- cmplt t4, t5, t12 # e1 :
- extql t1, a1, t1 # .. e0 : shift src into place
- lda t2, -1 # e0 : for creating masks later
- beq t12, $u_head # e1 :
-
- mskqh t2, t5, t2 # e0 : begin src byte validity mask
- cmpbge zero, t1, t8 # .. e1 : is there a zero?
- extql t2, a1, t2 # e0 :
- or t8, t10, t5 # .. e1 : test for end-of-count too
- cmpbge zero, t2, t3 # e0 :
- cmoveq a2, t5, t8 # .. e1 :
- andnot t8, t3, t8 # e0 :
- beq t8, $u_head # .. e1 (zdb)
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # e0 :
- negq t8, t6 # .. e1 : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # e0 :
- and t6, t8, t12 # .. e1 :
- subq t12, 1, t6 # e0 :
- or t6, t12, t8 # e1 :
-
- zapnot t2, t8, t2 # e0 : prepare source word; mirror changes
- zapnot t1, t8, t1 # .. e1 : to source validity mask
-
- andnot t0, t2, t0 # e0 : zero place for source to reside
- or t0, t1, t0 # e1 : and put it there
- stq_u t0, 0(a0) # e0 :
-
-$finish_up:
- zapnot t0, t12, t4 # was last byte written null?
- cmovne t4, 1, t4
-
- and t12, 0xf0, t3 # binary search for the address of the
- and t12, 0xcc, t2 # last byte written
- and t12, 0xaa, t1
- bic a0, 7, t0
- cmovne t3, 4, t3
- cmovne t2, 2, t2
- cmovne t1, 1, t1
- addq t0, t3, t0
- addq t1, t2, t1
- addq t0, t1, t0
- addq t0, t4, t0 # add one if we filled the buffer
-
- subq t0, v0, v0 # find string length
- ret
-
-$zerolength:
- clr v0
-$exception:
- ret
-
- .end __strncpy_from_user
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1a84ef..0c4132dd3507 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
const struct exception_table_entry *fixup;
int fault, si_code = SEGV_MAPERR;
siginfo_t info;
+ unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (cause > 0 ? FAULT_FLAG_WRITE : 0));
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
(or is suppressed by the PALcode). Support that for older CPUs
@@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto vmalloc_fault;
#endif
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
- up_read(&mm->mmap_sem);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
return;
/* Something tried to access memory that isn't in our memory map.
@@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
+ up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
+ up_read(&mm->mmap_sem);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
info.si_signo = SIGBUS;
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index a0a5d27aa215..b8ce18f485d3 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,6 +12,7 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
+#include <asm/special_insns.h>
#include "op_impl.h"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e91c7cdc6fe5..2f88d8d97701 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
- select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -38,7 +38,6 @@ config ARM
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_IRQ_PROBE
select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
-config GENERIC_LOCKBREAK
- bool
- default y
- depends on SMP && PREEMPT
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
@@ -2150,6 +2144,7 @@ source "drivers/cpufreq/Kconfig"
config CPU_FREQ_IMX
tristate "CPUfreq driver for i.MX CPUs"
depends on ARCH_MXC && CPU_FREQ
+ select CPU_FREQ_TABLE
help
This enables the CPUfreq driver for i.MX CPUs.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a50..e968a52e4881 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
is nothing connected to read from the DCC.
config DEBUG_SEMIHOSTING
- bool "Kernel low-level debug output via semihosting I"
+ bool "Kernel low-level debug output via semihosting I/O"
help
Semihosting enables code running on an ARM target to use
the I/O facilities on a host debugger/emulator through a
- simple SVC calls. The host debugger or emulator must have
+ simple SVC call. The host debugger or emulator must have
semihosting enabled for the special svc call to be trapped
otherwise the kernel will crash.
- This is known to work with OpenOCD, as wellas
+ This is known to work with OpenOCD, as well as
ARM's Fast Models, or any other controlling environment
that implements semihosting.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87ead6d..a051dfbdd7db 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-dtbs:
+dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80bafc..bc67cbff3944 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -653,16 +653,21 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 59509c48d7e5..bd0cff3f808c 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -154,5 +154,10 @@
#size-cells = <0>;
ti,hwmods = "i2c3";
};
+
+ wdt2: wdt@44e35000 {
+ compatible = "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+ };
};
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 66389c1c6f62..7c95f76398de 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -104,6 +104,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -113,6 +114,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -122,6 +124,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index b460d6ce9eb5..195019b7ca0e 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -95,6 +95,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -104,6 +105,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -113,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -122,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -131,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb22..96514c134e54 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
chosen {
- bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
};
ahb {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index bafa8806fc17..63751b1e744b 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -113,6 +113,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -122,6 +123,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -131,6 +133,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -140,6 +143,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -149,6 +153,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index bfac0dfc332c..ef9336ae9614 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -107,6 +107,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -116,6 +117,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -125,6 +127,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -134,6 +137,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 4a18c393b136..8a387a8d61b7 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -115,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -124,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -133,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -142,6 +145,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index cd86177a3ea2..59d9789e5508 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
aips@70000000 { /* aips-1 */
spba@70000000 {
esdhc@70004000 { /* ESDHC1 */
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
status = "okay";
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 52d947045106..f8ca6fa88192 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -41,9 +41,13 @@
};
power-blue {
label = "power:blue";
- gpios = <&gpio1 11 0>;
+ gpios = <&gpio1 10 0>;
linux,default-trigger = "timer";
};
+ power-red {
+ label = "power:red";
+ gpios = <&gpio1 11 0>;
+ };
usb1 {
label = "usb1:blue";
gpios = <&gpio1 12 0>;
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 3b2f3510d7eb..d351b27d7213 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -66,6 +66,7 @@
vcxio: regulator@8 {
compatible = "ti,twl6030-vcxio";
+ regulator-always-on;
};
vusb: regulator@9 {
@@ -74,10 +75,12 @@
v1v8: regulator@10 {
compatible = "ti,twl6030-v1v8";
+ regulator-always-on;
};
v2v1: regulator@11 {
compatible = "ti,twl6030-v2v1";
+ regulator-always-on;
};
clk32kg: regulator@12 {
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index 7d8718468e0d..90610c7030f7 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
CONFIG_CMDLINE_FORCE=y
CONFIG_KEXEC=y
CONFIG_VFP=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d4f661d1cf6..da6845493caa 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LM3530=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AB8500=y
CONFIG_RTC_DRV_PL031=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0d..5c8b3bf4d825 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
.size \name , . - \name
.endm
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
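The check_uaccess macro added above is compiled in only when CONFIG_CPU_USE_DOMAINS is not set; it rejects an access whose last byte either wraps past the end of the address space or lies beyond the limit argument (the get_user/put_user callers later in this patch pass addr_limit - 1 there). Roughly the same test written in C, purely as an illustrative sketch with a made-up name:

#include <linux/errno.h>

/* Illustrative equivalent of check_uaccess: fail if addr + size - 1
 * wraps or exceeds limit, where limit is addr_limit - 1. */
static inline int example_range_ok(unsigned long addr, unsigned long size,
				   unsigned long limit)
{
	unsigned long last = addr + size - 1;

	if (last < addr || last > limit)
		return -EFAULT;
	return 0;
}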
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df4551..5c44dcb0987b 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
}
/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
* This can be called during boot to increase the size of the consistent
* DMA region above it's default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
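A minimal usage sketch for the new hook, following the comment above: a hypothetical board's early init code enlarges the atomic coherent pool before postcore_initcall runs (the board name and the 1 MiB figure are assumptions, not part of this patch):

#include <linux/init.h>
#include <linux/dma-mapping.h>

/* Hypothetical board hook: grow the atomic coherent DMA pool from the
 * default 256 KiB to 1 MiB; must run before postcore_initcall. */
static void __init example_board_init_early(void)
{
	init_dma_coherent_pool_size(1024 * 1024);
}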
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f1..5f6ddcc56452 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
+#endif /* __ASSEMBLY__ */
#ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
#endif
+#ifndef __ASSEMBLY__
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7d..41dc31f834c3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte) (0)
+
+#define pte_present_user(pte) \
+ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+ (L_PTE_PRESENT | L_PTE_USER))
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- if (addr >= TASK_SIZE)
- set_pte_ext(ptep, pteval, 0);
- else {
+ unsigned long ext = 0;
+
+ if (addr < TASK_SIZE && pte_present_user(pteval)) {
__sync_icache_dcache(pteval);
- set_pte_ext(ptep, pteval, PTE_EXT_NG);
+ ext |= PTE_EXT_NG;
}
-}
-#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte) (0)
-
-#define pte_present_user(pte) \
- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
- (L_PTE_PRESENT | L_PTE_USER))
+ set_pte_ext(ptep, pteval, ext);
+}
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset --------------------> <- type --> 0 0 0
+ * <--------------- offset ----------------------> < type -> 0 0 0
*
- * This gives us up to 63 swap files and 32GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 3
-#define __SWP_TYPE_BITS 6
+#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
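For reference, the corrected figures follow from the field widths: with __SWP_TYPE_SHIFT = 3, the 5-bit type field accounts for the "up to 31 swap files" quoted above, and the offset now spans the remaining 32 - 5 - 3 = 24 bits, i.e. 2^24 pages of 4 KiB = 64 GiB per swap file.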
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f757263438..05b8e82ec9f5 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate);
#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae7..99a19512ee26 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+#else
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
+#endif
tlb_remove_page(tlb, pte);
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b5..77bd79f2ffdb 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
- : "0" (__p) \
- : __i, "cc")
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register unsigned long __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, 1, "lr"); \
- break; \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
case 2: \
- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, 4, "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 4); \
break; \
default: __e = __get_user_bad(); break; \
} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
__e; \
})
+#define get_user(x,p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x,p); \
+ })
+
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __put_user_x(__r2, __p, __e, 1); \
+ __put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __put_user_x(__r2, __p, __e, 2); \
+ __put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __put_user_x(__r2, __p, __e, 4); \
+ __put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
- __put_user_x(__r2, __p, __e, 8); \
+ __put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
__e; \
})
+#define put_user(x,p) \
+ ({ \
+ might_fault(); \
+ __put_user_check(x,p); \
+ })
+
#else /* CONFIG_MMU */
/*
@@ -219,6 +245,7 @@ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
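With the reworked macros, get_user() and put_user() now hand the thread's address limit to the out-of-line helpers in r1 and may fault, and callers are still expected to check the return value (as the traps.c hunk later in this patch now does). A minimal, hypothetical usage sketch:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical helper: read a user-supplied counter and write back its
 * increment; either access returns -EFAULT on a bad pointer. */
static int example_bump_user_counter(unsigned int __user *uptr)
{
	unsigned int val;

	if (get_user(val, uptr))
		return -EFAULT;

	return put_user(val + 1, uptr);
}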
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0cab47d4a83f..2fde5fd1acce 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -404,6 +404,7 @@
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
+ /* 378 for kcmp */
/*
* The following SWIs are ARM private.
@@ -483,6 +484,7 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
+#define __IGNORE_kcmp
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 463ff4a0ec8a..e337879595e5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,6 +387,7 @@
/* 375 */ CALL(sys_setns)
CALL(sys_process_vm_readv)
CALL(sys_process_vm_writev)
+ CALL(sys_ni_syscall) /* reserved for sys_kcmp */
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd94107..281bf3301241 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
arch >= ARM_DEBUG_ARCH_V7_1;
}
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+ return 0;
+}
+
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
ret = -EINVAL;
goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address &= ~alignment_mask;
info->ctrl.len <<= offset;
- /*
- * Currently we rely on an overflow handler to take
- * care of single-stepping the breakpoint when it fires.
- * In the case of userspace breakpoints on a core with V7 debug,
- * we can use the mismatch feature as a poor-man's hardware
- * single-step, but this only works for per-task breakpoints.
- */
- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
- pr_warning("overflow handler required but none found\n");
- ret = -EINVAL;
+ if (!bp->overflow_handler) {
+ /*
+ * Mismatch breakpoints are required for single-stepping
+ * breakpoints.
+ */
+ if (!core_has_mismatch_brps())
+ return -EINVAL;
+
+ /* We don't allow mismatch breakpoints in kernel space. */
+ if (arch_check_bp_in_kernelspace(bp))
+ return -EPERM;
+
+ /*
+ * Per-cpu breakpoints are not supported by our stepping
+ * mechanism.
+ */
+ if (!bp->hw.bp_target)
+ return -EINVAL;
+
+ /*
+ * We only support specific access types if the fsr
+ * reports them.
+ */
+ if (!debug_exception_updates_fsr() &&
+ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ info->ctrl.type == ARM_BREAKPOINT_STORE))
+ return -EINVAL;
}
+
out:
return ret;
}
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
goto unlock;
/* Check that the access type matches. */
- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
- HW_BREAKPOINT_R;
- if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ if (debug_exception_updates_fsr()) {
+ access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ goto unlock;
+ }
/* We have a winner. */
info->trigger = addr;
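The stricter checks above apply to breakpoints registered without an overflow handler: those must be per-task, target user space, and, since the FSR does not report the access type, may not be load-only or store-only. For illustration, a hypothetical per-task watchpoint request that supplies its own handler and uses a plain read/write type (names, address and API usage are assumptions, not part of this patch):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Placeholder overflow handler: just log the trigger address. */
static void example_wp_handler(struct perf_event *bp,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	pr_info("watchpoint hit at 0x%llx\n", bp->attr.bp_addr);
}

static struct perf_event *example_install_wp(struct task_struct *tsk,
					     unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_2;	/* halfword watchpoint */
	attr.bp_type = HW_BREAKPOINT_RW;	/* not load- or store-only */

	return register_user_hw_breakpoint(&attr, example_wp_handler,
					   NULL, tsk);
}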
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186abbc06..f4515393248d 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
u32 epoch_cyc_copy;
u32 mult;
u32 shift;
+ bool suspended;
+ bool needs_suspend;
};
static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
u64 epoch_ns;
u32 epoch_cyc;
+ if (cd.suspended)
+ return cd.epoch_ns;
+
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate)
+{
+ setup_sched_clock(read, bits, rate);
+ cd.needs_suspend = true;
+}
+
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
+ if (cd.needs_suspend)
+ cd.suspended = true;
return 0;
}
+static void sched_clock_resume(void)
+{
+ if (cd.needs_suspend) {
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
+ }
+}
+
static struct syscore_ops sched_clock_ops = {
.suspend = sched_clock_suspend,
+ .resume = sched_clock_resume,
};
static int __init sched_clock_syscore_init(void)
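A sketch of how a platform might opt in to the new behaviour: if the counter backing sched_clock stops while the system is suspended, registering it through setup_sched_clock_needs_suspend() makes sched_clock return a frozen epoch until resume re-reads the counter. Base address, register offset and rate below are assumptions:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *example_counter_base;	/* assumed ioremap()ed elsewhere */

static u32 example_read_sched_clock(void)
{
	return readl_relaxed(example_counter_base + 0x04);	/* hypothetical counter register */
}

static void __init example_sched_clock_init(void)
{
	/* 32-bit counter at 24 MHz that is powered down across suspend */
	setup_sched_clock_needs_suspend(example_read_sched_clock, 32, 24000000);
}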
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fef42b21cecb..e1f906989bb8 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(clk->irq);
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_COMMON_CLK
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *new_rate)
+{
+ twd_timer_rate = *((unsigned long *) new_rate);
+
+ clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (flags == POST_RATE_CHANGE)
+ smp_call_function(twd_update_frequency,
+ (void *)&cnd->new_rate, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_clk_nb = {
+ .notifier_call = twd_rate_change,
+};
+
+static int twd_clk_init(void)
+{
+ if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ return clk_notifier_register(twd_clk, &twd_clk_nb);
+
+ return 0;
+}
+core_initcall(twd_clk_init);
+
+#elif defined (CONFIG_CPU_FREQ)
+
+#include <linux/cpufreq.h>
/*
* Updates clockevent frequency when the cpu frequency changes.
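The same common-clk notifier pattern, reduced to its essentials for a generic consumer; the clock lookup name and the printed message are placeholders:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/notifier.h>

static int example_rate_change(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (event == POST_RATE_CHANGE)
		pr_info("clock now runs at %lu Hz\n", cnd->new_rate);

	return NOTIFY_OK;
}

static struct notifier_block example_clk_nb = {
	.notifier_call = example_rate_change,
};

static int __init example_clk_notifier_init(void)
{
	struct clk *clk = clk_get(NULL, "example");	/* placeholder lookup */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_notifier_register(clk, &example_clk_nb);
}
core_initcall(example_clk_notifier_init);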
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 198b08456e90..26c12c6440fc 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid)
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
*/
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
{
unsigned int cpu;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c6..b0179b89a04c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
#endif
instr = *(u32 *) pc;
} else if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ if (get_user(instr, (u16 __user *)pc))
+ goto die_sig;
if (is_wide_instruction(instr)) {
unsigned int instr2;
- get_user(instr2, (u16 __user *)pc+1);
+ if (get_user(instr2, (u16 __user *)pc+1))
+ goto die_sig;
instr <<= 16;
instr |= instr2;
}
- } else {
- get_user(instr, (u32 __user *)pc);
+ } else if (get_user(instr, (u32 __user *)pc)) {
+ goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
return;
+die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 2473fd1fd51c..af72969820b4 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
-mmu-y += copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ mmu-y += uaccess.o
+endif
+endif
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
-lib-$(CONFIG_MMU) += $(mmu-y)
-lib-y += io-readsw-armv4.o io-writesw-armv4.o
+lib-$(CONFIG_MMU) += $(mmu-y)
+
+ifeq ($(CONFIG_CPU_32v3),y)
+ lib-y += io-readsw-armv3.o io-writesw-armv3.o
+else
+ lib-y += io-readsw-armv4.o io-writesw-armv4.o
+endif
+
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
lib-$(CONFIG_ARCH_SHARK) += io-shark.o
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254e..395d5fbb8fa2 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
{
pr_info("Switching to timer-based delay loop\n");
lpj_fine = freq / HZ;
+ loops_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e32..9b06bb41fca6 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
* __get_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* Outputs: r0 is the error code
- * r2, r3 contains the zero-extended value
+ * r2 contains the zero-extended value
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
* Note also that it is intended that __get_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__get_user_1)
+ check_uaccess r0, 1, r1, r2, __get_user_bad
1: TUSER(ldrb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+ check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb .req ip
+2: ldrbt r2, [r0], #1
+3: ldrbt rb, [r0], #0
#else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb .req r0
+2: ldrb r2, [r0]
+3: ldrb rb, [r0, #1]
#endif
#ifndef __ARMEB__
- orr r2, r2, r3, lsl #8
+ orr r2, r2, rb, lsl #8
#else
- orr r2, r3, r2, lsl #8
+ orr r2, rb, r2, lsl #8
#endif
mov r0, #0
mov pc, lr
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
+ check_uaccess r0, 4, r1, r2, __get_user_bad
4: TUSER(ldr) r2, [r0]
mov r0, #0
mov pc, lr
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
new file mode 100644
index 000000000000..88487c8c4f23
--- /dev/null
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/lib/io-readsw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Linsw_bad_alignment:
+ adr r0, .Linsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Linsw_bad_align_msg:
+ .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Linsw_align: tst r1, #1
+ bne .Linsw_bad_alignment
+
+ ldr r3, [r0]
+ strb r3, [r1], #1
+ mov r3, r3, lsr #8
+ strb r3, [r1], #1
+
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_readsw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Linsw_align
+
+.Linsw_aligned: mov ip, #0xff
+ orr ip, ip, ip, lsl #8
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_insw_8
+
+.Linsw_8_lp: ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ ldr r5, [r0]
+ and r5, r5, ip
+ ldr r6, [r0]
+ orr r5, r5, r6, lsl #16
+
+ ldr r6, [r0]
+ and r6, r6, ip
+ ldr lr, [r0]
+ orr r6, r6, lr, lsl #16
+
+ stmia r1!, {r3 - r6}
+
+ subs r2, r2, #8
+ bpl .Linsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_insw_8: tst r2, #4
+ beq .Lno_insw_4
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ stmia r1!, {r3, r4}
+
+.Lno_insw_4: tst r2, #2
+ beq .Lno_insw_2
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ str r3, [r1], #4
+
+.Lno_insw_2: tst r2, #1
+ ldrne r3, [r0]
+ strneb r3, [r1], #1
+ movne r3, r3, lsr #8
+ strneb r3, [r1]
+
+ ldmfd sp!, {r4, r5, r6, pc}
+
+
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
new file mode 100644
index 000000000000..49b800419e32
--- /dev/null
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/arm/lib/io-writesw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Loutsw_bad_alignment:
+ adr r0, .Loutsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Loutsw_bad_align_msg:
+ .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Loutsw_align: tst r1, #1
+ bne .Loutsw_bad_alignment
+
+ add r1, r1, #2
+
+ ldr r3, [r1, #-4]
+ mov r3, r3, lsr #16
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_writesw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Loutsw_align
+
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_outsw_8
+
+.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r5, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r5, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r6, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r6, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ subs r2, r2, #8
+ bpl .Loutsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_outsw_8: tst r2, #4
+ beq .Lno_outsw_4
+
+ ldmia r1!, {r3, r4}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_4: tst r2, #2
+ beq .Lno_outsw_2
+
+ ldr r3, [r1], #4
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_2: tst r2, #1
+
+ ldrne r3, [r1]
+
+ movne ip, r3, lsl #16
+ orrne ip, ip, ip, lsr #16
+ strne ip, [r0]
+
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c589..3d73dcb959b0 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
* __put_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* r2, r3 contains the value
* Outputs: r0 is the error code
* lr corrupted
@@ -27,16 +28,19 @@
* Note also that it is intended that __put_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__put_user_1)
+ check_uaccess r0, 1, r1, ip, __put_user_bad
1: TUSER(strb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_1)
ENTRY(__put_user_2)
+ check_uaccess r0, 2, r1, ip, __put_user_bad
mov ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
+ check_uaccess r0, 4, r1, ip, __put_user_bad
4: TUSER(str) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
+ check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(str) r2, [r0]
6: TUSER(str) r3, [r0, #4]
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 000000000000..5c908b1cb8ed
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,564 @@
+/*
+ * linux/arch/arm/lib/uaccess.S
+ *
+ * Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Routines to block copy data to/from user memory
+ * These are highly optimised both for the 4k page size
+ * and for various alignments.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/domain.h>
+
+ .text
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.Lc2u_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+ENTRY(__copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+ ands ip, r0, #3
+ bne .Lc2u_dest_not_aligned
+.Lc2u_dest_aligned:
+
+ ands ip, r1, #3
+ bne .Lc2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lc2u_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_0nowords
+ ldr r3, [r1], #4
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lc2u_0rem8lp
+
+.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6}
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ ldmia r1!, {r3 - r6}
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_0cpy8lp
+
+.Lc2u_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6} @ Shouldnt fault
+ tst ip, #8
+ ldmneia r1!, {r3 - r4}
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_0fupi
+.Lc2u_0nowords: teq ip, #0
+ beq .Lc2u_finished
+.Lc2u_nowords: cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_not_enough:
+ movs ip, r2
+ bne .Lc2u_nowords
+.Lc2u_finished: mov r0, #0
+ ldmfd sp!, {r2, r4 - r7, pc}
+
+.Lc2u_src_not_aligned:
+ bic r1, r1, #3
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt .Lc2u_3fupi
+ beq .Lc2u_2fupi
+.Lc2u_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_1nowords
+ mov r3, r7, pull #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #24
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_1rem8lp
+
+.Lc2u_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_1cpy8lp
+
+.Lc2u_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #24
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_1fupi
+.Lc2u_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_2
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ movgt r3, r7, get_byte_3
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_2nowords
+ mov r3, r7, pull #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #16
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_2rem8lp
+
+.Lc2u_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_2cpy8lp
+
+.Lc2u_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #16
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_2fupi
+.Lc2u_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_3
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_3nowords
+ mov r3, r7, pull #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #8
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_3rem8lp
+
+.Lc2u_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_3cpy8lp
+
+.Lc2u_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #8
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_3fupi
+.Lc2u_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+ENDPROC(__copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+.Lcfu_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+ENTRY(__copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+ ands ip, r0, #3
+ bne .Lcfu_dest_not_aligned
+.Lcfu_dest_aligned:
+ ands ip, r1, #3
+ bne .Lcfu_src_not_aligned
+
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lcfu_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_0nowords
+USER( TUSER( ldr) r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lcfu_0rem8lp
+
+.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault
+ stmia r0!, {r3 - r6}
+ ldmia r1!, {r3 - r6} @ Shouldnt fault
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_0cpy8lp
+
+.Lcfu_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6} @ Shouldnt fault
+ stmgeia r0!, {r3 - r6}
+ tst ip, #8
+ ldmneia r1!, {r3 - r4} @ Shouldnt fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_0fupi
+.Lcfu_0nowords: teq ip, #0
+ beq .Lcfu_finished
+.Lcfu_nowords: cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_not_enough:
+ movs ip, r2
+ bne .Lcfu_nowords
+.Lcfu_finished: mov r0, #0
+ add sp, sp, #8
+ ldmfd sp!, {r4 - r7, pc}
+
+.Lcfu_src_not_aligned:
+ bic r1, r1, #3
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ cmp ip, #2
+ bgt .Lcfu_3fupi
+ beq .Lcfu_2fupi
+.Lcfu_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_1nowords
+ mov r3, r7, pull #8
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_1rem8lp
+
+.Lcfu_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_1cpy8lp
+
+.Lcfu_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #8
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_1fupi
+.Lcfu_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_2
+ strgeb r3, [r0], #1
+ movgt r3, r7, get_byte_3
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_2nowords
+ mov r3, r7, pull #16
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_2rem8lp
+
+
+.Lcfu_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_2cpy8lp
+
+.Lcfu_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #16
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_2fupi
+.Lcfu_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_3
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_3nowords
+ mov r3, r7, pull #24
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_3rem8lp
+
+.Lcfu_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .Lcfu_3cpy8lp
+
+.Lcfu_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #24
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_3fupi
+.Lcfu_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+ENDPROC(__copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ /*
+ * We took an exception. r0 contains a pointer to
+ * the byte not copied.
+ */
+9001: ldr r2, [sp], #4 @ void *to
+ sub r2, r0, r2 @ bytes copied
+ ldr r1, [sp], #4 @ unsigned long count
+ subs r4, r1, r2 @ bytes left to copy
+ movne r1, r4
+ blne __memzero
+ mov r0, r4
+ ldmfd sp!, {r4 - r7, pc}
+ .popsection
+
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..aaa443b48c91 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
at91_st_read(AT91_ST_SR);
/* Make IRQs happen for the system timer */
- setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396ed..bce572a530ef 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
},
};
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9260_rtt_device.num_resources = 2;
+ at91sam9260_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff92..bc2590d712d0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9261_rtt_device.num_resources = 2;
+ at91sam9261_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9f..9b6ca734f1a9 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed only for the chosen RTT:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9263_rtt0_device.num_resources = 2;
+ at91sam9263_rtt0_device.num_resources = 3;
at91sam9263_rtt1_device.num_resources = 1;
pdev = &at91sam9263_rtt0_device;
r = rtt0_resources;
break;
case 1:
at91sam9263_rtt0_device.num_resources = 1;
- at91sam9263_rtt1_device.num_resources = 2;
+ at91sam9263_rtt1_device.num_resources = 3;
pdev = &at91sam9263_rtt1_device;
r = rtt1_resources;
break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
pdev->name = "rtc-at91sam9";
r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
r[1].end = r[1].start + 3;
+ r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a382..1b47319ca00b 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9g45_rtt_device.num_resources = 2;
+ at91sam9g45_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff932172..b3d365dadef5 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9rl_rtt_device.num_resources = 2;
+ at91sam9rl_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea7..188c82971ebd 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
#define cpu_has_300M_plla() (cpu_is_at91sam9g10())
+#define cpu_has_240M_plla() (cpu_is_at91sam9261() \
+ || cpu_is_at91sam9263() \
+ || cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla() (cpu_is_at91sam9260())
+
#define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
|| cpu_is_at91sam9g45() \
|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
} else if (cpu_has_800M_plla()) {
if (plla.rate_hz > 800000000)
pll_overclock = true;
+ } else if (cpu_has_240M_plla()) {
+ if (plla.rate_hz > 240000000)
+ pll_overclock = true;
+ } else if (cpu_has_210M_plla()) {
+ if (plla.rate_hz > 210000000)
+ pll_overclock = true;
} else {
if (plla.rate_hz > 209000000)
pll_overclock = true;
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 4db5de54b6a7..6321567d8eaa 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -102,7 +102,8 @@ void __init dove_ehci1_init(void)
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
- IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR);
+ IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
+ 1600);
}
/*****************************************************************************
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 5ca80307d6d7..4e574c24581c 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -42,6 +42,7 @@
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/mfc.h>
+#include <plat/hdmi.h>
#include <mach/ohci.h>
#include <mach/map.h>
@@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)
s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
}
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* Direct HPD to HDMI chip */
@@ -781,6 +787,7 @@ static void __init origen_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
#ifdef CONFIG_DRM_EXYNOS
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 3cfa688d274a..73f2bce097e1 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -40,6 +40,7 @@
#include <plat/mfc.h>
#include <plat/ehci.h>
#include <plat/clock.h>
+#include <plat/hdmi.h>
#include <mach/map.h>
#include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
.pwm_period_ns = 1000,
};
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
samsung_keypad_set_platdata(&smdkv310_keypad_data);
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7ac..020852d3bdd8 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include <mach/hardware.h>
#define IRQ_SOURCE(base_addr) (base_addr + 0x00)
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 07f7c226e4cf..d004d37ad9d8 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+imx5-pm-$(CONFIG_PM) += pm-imx5.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
-obj-$(CONFIG_CPU_V7) += head-v7.o
-AFLAGS_head-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_SMP) += platsmp.o
+AFLAGS_headsmp.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
endif
# i.MX5 based machines
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9fe..d20d4795f4ea 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
@@ -243,6 +241,6 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
clk_register_clkdev(clk[iim_ipg], "iim", NULL);
- mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10bae..65fb8bcd86cb 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
/* i.mx35 has the i.mx21 type uart */
clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e22..4233d9e3531d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
clk_max
};
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
- clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S
index 7e49deb128a4..7e49deb128a4 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/headsmp.S
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1af..f8f7437c83b8 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
: "cc");
}
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-
/*
* platform-specific code to shutdown a CPU
*
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
imx_enable_cpu(cpu, false);
- cpu_do_idle();
- cpu_leave_lowpower();
- /* We should never return from idle */
- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+ /* spin here until hardware takes it down */
+ while (1)
+ ;
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index 2c6ab3273f9e..5985ed1b8c98 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void)
imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
/* set NAND page size to 2k if not configured via boot mode pins */
- __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
+ __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
+ (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
/* RTC */
/* Get RTC IRQ and register the chip */
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 5ec0608f2a76..045b3f6a387d 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -71,7 +71,7 @@ soft:
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
- if (IS_ENABLED(CONFIG_PHYLIB)) {
+ if (IS_BUILTIN(CONFIG_PHYLIB)) {
/* min rx data delay */
phy_write(phydev, 0x0b, 0x8105);
phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
static void __init imx6q_sabrelite_init(void)
{
- if (IS_ENABLED(CONFIG_PHYLIB))
+ if (IS_BUILTIN(CONFIG_PHYLIB))
phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
ksz9021rn_phy_fixup);
imx6q_sabrelite_cko1_setup();
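
The two hunks above switch from IS_ENABLED() to IS_BUILTIN() so the PHY fixup is only registered when phylib is built into the kernel, not when it is a module. A minimal sketch of the distinction, assuming the usual <linux/kconfig.h> semantics (IS_BUILTIN() is true only for =y, IS_ENABLED() for =y or =m); the helper function is illustrative.

#include <linux/kconfig.h>
#include <linux/printk.h>

/* Illustrative only: shows how the two macros differ for a tristate option. */
static void report_phylib_state(void)
{
	if (IS_BUILTIN(CONFIG_PHYLIB))
		pr_info("phylib is built in: safe to register PHY fixups here\n");
	else if (IS_ENABLED(CONFIG_PHYLIB))
		pr_info("phylib is a module: built-in board code must not call it\n");
	else
		pr_info("phylib is disabled\n");
}
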
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index a5717558ee89..a13299d758e1 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
-dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb
dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c4b64adcbfce..1201191d7f1b 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
- IRQ_KIRKWOOD_GE00_ERR);
+ IRQ_KIRKWOOD_GE00_ERR, 1600);
/* The interface forgets the MAC address assigned by u-boot if
the clock is turned off, so claim the clk now. */
clk_prepare_enable(ge0);
@@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
- IRQ_KIRKWOOD_GE01_ERR);
+ IRQ_KIRKWOOD_GE01_ERR, 1600);
clk_prepare_enable(ge1);
}
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
void __init kirkwood_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
+
+ /*
+ * Some Kirkwood devices allocate their coherent buffers from atomic
+ * context. Increase size of atomic coherent pool to make sure such
+ * the allocations won't fail.
+ */
+ init_dma_coherent_pool_size(SZ_1M);
}
int kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d93359379598..be90b7d0e10b 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index 4304f9519372..7e8a5a2e1ec7 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
- if (!pdata && !pdata->pool_name)
+ if (!pdata || !pdata->pool_name)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 62b53d710efd..a9bc84180d21 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -37,7 +37,7 @@
#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
-static void __init __iomem *win_cfg_base(int win)
+static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
{
/*
* Find the control register base address for this window.
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index b4c53b846c9c..3057f7d4329a 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
- IRQ_MV78XX0_GE_ERR);
+ IRQ_MV78XX0_GE_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ);
+ NO_IRQ,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd2db025f778..346fd26f3aa6 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,13 +62,14 @@ config ARCH_OMAP4
select PM_OPP if PM
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select ARM_CPU_SUSPEND if PM
- select ARCH_NEEDS_CPU_IDLE_COUPLED
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
config SOC_OMAP5
bool "TI OMAP5"
select CPU_V7
select ARM_GIC
select HAVE_SMP
+ select ARM_CPU_SUSPEND if PM
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -231,10 +232,11 @@ config MACH_OMAP3_PANDORA
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
bool "OMAP3 Touch Book"
depends on ARCH_OMAP3
default y
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index f6a24b3f9c4f..34c2c7f59f0a 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 74915295482e..28214483aaba 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
+ /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
+ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5eb..0d362e9f9cb9 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
#include "hsmmc.h"
#include "common-board-devices.h"
+#define OMAP3_EVM_TS_GPIO 175
#define OMAP3_EVM_EHCI_VBUS 22
#define OMAP3_EVM_EHCI_SELECT 61
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
index 25bbcc7ca4dc..ae27de8899a6 100644
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
- CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
- CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
- CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
- CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
- CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
- CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
- CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa3..f99e65cfb862 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
_clkdm_del_autodeps(clkdm);
}
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_add_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ omap3_clkdm_wakeup(clkdm);
+ }
+
+ return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_del_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+ omap3_clkdm_sleep(clkdm);
+ }
+
+ return 0;
+}
+
struct clkdm_ops omap2_clkdm_operations = {
.clkdm_add_wkdep = omap2_clkdm_add_wkdep,
.clkdm_del_wkdep = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
.clkdm_wakeup = omap3_clkdm_wakeup,
.clkdm_allow_idle = omap3_clkdm_allow_idle,
.clkdm_deny_idle = omap3_clkdm_deny_idle,
- .clkdm_clk_enable = omap2_clkdm_clk_enable,
- .clkdm_clk_disable = omap2_clkdm_clk_disable,
+ .clkdm_clk_enable = omap3xxx_clkdm_clk_enable,
+ .clkdm_clk_disable = omap3xxx_clkdm_clk_disable,
};
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d34..975f6bda0e0b 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT 0
#define OMAP3430_ST_IVA2_MASK (1 << 0)
/* CM_IDLEST_PLL_IVA2 */
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 14734746457c..c1875862679f 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
};
-/*
- * ADS7846 driver maybe request a gpio according to the value
- * of pdata->get_pendown_state, but we have done this. So set
- * get_pendown_state to avoid twice gpio requesting.
- */
-static int omap3_get_pendown_state(void)
-{
- return !gpio_get_value(OMAP3_EVM_TS_GPIO);
-}
-
static struct ads7846_platform_data ads7846_config = {
.x_max = 0x0fff,
.y_max = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
.debounce_rep = 1,
.gpio_pendown = -EINVAL,
.keep_vref_on = 1,
- .get_pendown_state = &omap3_get_pendown_state,
};
static struct spi_board_info ads7846_spi_board_info __initdata = {
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 4c4ef6a6166b..a0b4a42836ab 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,7 +4,6 @@
#include "twl-common.h"
#define NAND_BLOCK_SIZE SZ_128K
-#define OMAP3_EVM_TS_GPIO 175
struct mtd_partition;
struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index ee05e193fc61..288bee6cbb76 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -238,8 +238,9 @@ int __init omap4_idle_init(void)
for_each_cpu(cpu_id, cpu_online_mask) {
dev = &per_cpu(omap4_idle_dev, cpu_id);
dev->cpu = cpu_id;
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
dev->coupled_cpus = *cpu_online_mask;
-
+#endif
cpuidle_register_driver(&omap4_idle_driver);
if (cpuidle_register_device(dev)) {
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 471e62a74a16..76f9b3c2f586 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -127,7 +127,6 @@ struct omap_mux_partition {
* @gpio: GPIO number
* @muxnames: available signal modes for a ball
* @balls: available balls on the package
- * @partition: mux partition
*/
struct omap_mux {
u16 reg_offset;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 05fdebfaa195..330d4c6e746b 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -46,7 +46,7 @@
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
static unsigned int omap_secure_apis;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6ca8e519968d..37afbd173c2c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
_enable_sysc(oh);
}
} else {
+ _omap4_disable_module(oh);
_disable_clocks(oh);
pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
oh->name, r);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index c9e38200216b..ce7e6068768f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
/* IVA2 (IVA2) */
static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
- { .name = "logic", .rst_shift = 0 },
- { .name = "seq0", .rst_shift = 1 },
- { .name = "seq1", .rst_shift = 2 },
+ { .name = "logic", .rst_shift = 0, .st_shift = 8 },
+ { .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+ { .name = "seq1", .rst_shift = 2, .st_shift = 10 },
};
static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
.rst_lines = omap3xxx_iva_resets,
.rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets),
.main_clk = "iva2_ck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430_IVA2_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+ }
+ },
};
/* timer class */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 242aee498ceb..afb60917a948 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
};
/* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
.master = &omap44xx_dsp_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
};
/* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
.master = &omap44xx_iva_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
};
/* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
.master = &omap44xx_l3_main_2_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_abe__dmic,
&omap44xx_l4_abe__dmic_dma,
&omap44xx_dsp__iva,
- &omap44xx_dsp__sl2if,
+ /* &omap44xx_dsp__sl2if, */
&omap44xx_l4_cfg__dsp,
&omap44xx_l3_main_2__dss,
&omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__i2c4,
&omap44xx_l3_main_2__ipu,
&omap44xx_l3_main_2__iss,
- &omap44xx_iva__sl2if,
+ /* &omap44xx_iva__sl2if, */
&omap44xx_l3_main_2__iva,
&omap44xx_l4_wkup__kbd,
&omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__cm_core,
&omap44xx_l4_wkup__prm,
&omap44xx_l4_wkup__scrm,
- &omap44xx_l3_main_2__sl2if,
+ /* &omap44xx_l3_main_2__sl2if, */
&omap44xx_l4_abe__slimbus1,
&omap44xx_l4_abe__slimbus1_dma,
&omap44xx_l4_per__slimbus2,
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba27101b..c95415da23c2 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
{
int r = -ENODEV;
- if (!cpu_is_omap44xx())
+ if (!cpu_is_omap443x())
return r;
r = omap_init_opp_table(omap44xx_opp_def_list,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e4fc88c65dbd..05bd8f02723f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(mpu_pwrdm);
- pwrdm_pre_transition(neon_pwrdm);
- }
+ pwrdm_pre_transition(NULL);
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(per_pwrdm);
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
omap2_gpio_prepare_for_idle(per_going_off);
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(core_pwrdm);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
- pwrdm_post_transition(core_pwrdm);
}
omap3_intc_resume_idle();
+ pwrdm_post_transition(NULL);
+
/* PER */
- if (per_next_state < PWRDM_POWER_ON) {
+ if (per_next_state < PWRDM_POWER_ON)
omap2_gpio_resume_after_idle();
- pwrdm_post_transition(per_pwrdm);
- }
-
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_post_transition(mpu_pwrdm);
- pwrdm_post_transition(neon_pwrdm);
- }
}
static void omap3_pm_idle(void)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9f6b83d1b193..91e71d8f46f0 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -56,9 +56,13 @@ ppa_por_params:
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
* It returns to the caller for CPU INACTIVE and ON power states or in case
* CPU failed to transition to targeted OFF/DORMANT state.
+ *
+ * omap4_finish_suspend() calls v7_flush_dcache_all(), which does not save
+ * the stack frame and expects the caller to take care of it. Hence the
+ * entire stack frame is saved here to avoid possible stack corruption.
*/
ENTRY(omap4_finish_suspend)
- stmfd sp!, {lr}
+ stmfd sp!, {r4-r12, lr}
cmp r0, #0x0
beq do_WFI @ No lowpower state, jump to WFI
@@ -226,7 +230,7 @@ scu_gp_clear:
skip_scu_gp_clear:
isb
dsb
- ldmfd sp!, {pc}
+ ldmfd sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2ff6d41ec6c6..2ba4f57dda86 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
return 0;
}
+#ifdef CONFIG_OMAP_32K_TIMER
/* Setup free-running counter for clocksource */
static int __init omap2_sync32k_clocksource_init(void)
{
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)
return ret;
}
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+ return -ENODEV;
+}
+#endif
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
const char *fck_source)
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index de47f170ba50..db5ff6642375 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data)
{
+ omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(pmic_i2c_board_info.type, pmic_type,
sizeof(pmic_i2c_board_info.type));
pmic_i2c_board_info.irq = pmic_irq;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9148b229d0de..410291c67666 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
- IRQ_ORION5X_ETH_ERR);
+ IRQ_ORION5X_ETH_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index 454831b66037..ee99fd56c043 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -24,7 +24,8 @@
*/
enum dma_ch {
- DMACH_XD0,
+ DMACH_DT_PROP = -1, /* not yet supported, do not use */
+ DMACH_XD0 = 0,
DMACH_XD1,
DMACH_SDI,
DMACH_SPI0,
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index cf10f92856dc..453a6e50db8b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
};
/* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+ { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"),
- GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"),
- GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"),
- GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"),
+ GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1),
+ GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"),
+ GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"),
+ GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"),
};
static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
&camera_device,
&ceu0_device,
&fsi_device,
- &fsi_hdmi_device,
&fsi_wm8978_device,
+ &fsi_hdmi_device,
};
static void __init eva_clock_init(void)
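
The GPIO_KEY() macro above gains a __VA_ARGS__ tail so individual buttons can append extra designated initializers such as .wakeup = 1. A minimal sketch of the expansion outside of any board file; the GPIO numbers are illustrative stand-ins for GPIO_PORT99/GPIO_PORT100.

#include <linux/gpio_keys.h>
#include <linux/input.h>

#define GPIO_KEY(c, g, d, ...) \
	{ .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }

/* GPIO_KEY(KEY_POWER, 99, "SW3", .wakeup = 1) expands to:            */
/*   { .code = KEY_POWER, .gpio = 99, .desc = "SW3", .active_low = 1, */
/*     .wakeup = 1 }                                                  */
static struct gpio_keys_button example_buttons[] = {
	GPIO_KEY(KEY_POWER, 99, "SW3", .wakeup = 1),
	GPIO_KEY(KEY_BACK, 100, "SW4"),
};
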
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 53b7ea92c32c..3b8a0171c3cb 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(141),
+ .start = gic_spi(140),
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = gic_spi(140),
+ .start = gic_spi(141),
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7ea2b31e3199..c129542f6aed 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
* - J30 "open"
* - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
* - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
*/
#define IRQ8 evt2irq(0x0300)
#define USB_PHY_MODE (1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
- &usbhs1_device,
&usbhs0_device,
+ &usbhs1_device,
&leds_device,
&fsi_device,
&fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3a528cf4366c..fcf5a47f4772 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
static struct platform_device eth_device = {
.name = "smsc911x",
- .id = 0,
+ .id = -1,
.dev = {
.platform_data = &smsc911x_platdata,
},
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c857..588555a67d9c 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
return 0; /* always allow wakeup */
}
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
#define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c013bbf79cac..53d3d46dec12 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,7 +41,6 @@ config MACH_HREFV60
config MACH_SNOWBALL
bool "U8500 Snowball platform"
select MACH_MOP500
- select LEDS_GPIO
help
Include support for the snowball development platform.
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
index 996048038743..df15646036aa 100644
--- a/arch/arm/mach-ux500/board-mop500-msp.c
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
return pdev;
}
-/* Platform device for ASoC U8500 machine */
-static struct platform_device snd_soc_u8500 = {
- .name = "snd-soc-u8500",
+/* Platform device for ASoC MOP500 machine */
+static struct platform_device snd_soc_mop500 = {
+ .name = "snd-soc-mop500",
.id = 0,
.dev = {
.platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
{
struct platform_device *msp1;
- pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__);
- platform_device_register(&snd_soc_u8500);
+ pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
+ platform_device_register(&snd_soc_mop500);
pr_info("Initialize MSP I2S-devices.\n");
db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 8674a890fd1c..a534d8880de1 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
mop500_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
i2c_register_board_info(2, mop500_i2c2_devices,
@@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)
mop500_uib_init();
+ } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
+ mop500_msp_init(parent);
} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
/*
* The HREFv60 board removed a GPIO expander and routed
@@ -815,6 +818,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
hrefv60_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93e..4e07eec1270d 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
pid = task_pid_nr(thread->task) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
- " bfi %1, %0, #0, %2\n"
- " mcr p15, 0, %1, c13, c0, 1\n"
+ " and %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
- : "I" (ASID_BITS));
+ : "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
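
The inline assembly change above drops bfi in favour of an explicit mask-and-or. Read as C, and assuming ASID_MASK covers everything above the low ASID_BITS (so ~ASID_MASK selects the hardware ASID field), the sequence is equivalent to the sketch below; read_contextidr()/write_contextidr() are illustrative stand-ins for the mrc/mcr accessors.

/* Kernel-context sketch only; the accessor helpers are hypothetical. */
static inline void update_contextidr_procid(u32 pid)
{
	u32 contextidr = read_contextidr();   /* mrc p15, 0, ..., c13, c0, 1 */

	contextidr &= ~ASID_MASK;             /* keep the current hardware ASID  */
	contextidr |= pid;                    /* pid is already << ASID_BITS     */
	write_contextidr(contextidr);         /* mcr p15, 0, ..., c13, c0, 1 */
}
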
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4e7d1182e8a3..e59c4ab71bcb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
vunmap(cpu_addr);
}
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+
struct dma_pool {
size_t size;
spinlock_t lock;
unsigned long *bitmap;
unsigned long nr_pages;
void *vaddr;
- struct page *page;
+ struct page **pages;
};
static struct dma_pool atomic_pool = {
- .size = SZ_256K,
+ .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};
static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+ /*
+ * Catch any attempt to set the pool size too late.
+ */
+ BUG_ON(atomic_pool.vaddr);
+
+ /*
+ * Set the architecture-specific coherent pool size only if it has not
+ * already been changed via the coherent_pool= kernel command line parameter.
+ */
+ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+ atomic_pool.size = size;
+}
+
/*
* Initialise the coherent pool for atomic allocations.
*/
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
+ struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
if (!bitmap)
goto no_bitmap;
+ pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto no_pages;
+
if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
- pool->page = page;
+ pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
+no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
- *ret_page = pool->page + pageno;
+ *ret_page = pool->pages[pageno];
+ } else {
+ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+ "Please increase it with coherent_pool= kernel parameter!\n",
+ (unsigned)pool->size / 1024);
}
spin_unlock_irqrestore(&pool->lock, flags);
return ptr;
}
+static bool __in_atomic_pool(void *start, size_t size)
+{
+ struct dma_pool *pool = &atomic_pool;
+ void *end = start + size;
+ void *pool_start = pool->vaddr;
+ void *pool_end = pool->vaddr + pool->size;
+
+ if (start < pool_start || start >= pool_end)
+ return false;
+
+ if (end <= pool_end)
+ return true;
+
+ WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+ start, end - 1, pool_start, pool_end - 1);
+
+ return false;
+}
+
static int __free_from_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
unsigned long pageno, count;
unsigned long flags;
- if (start < pool->vaddr || start > pool->vaddr + pool->size)
+ if (!__in_atomic_pool(start, size))
return 0;
- if (start + size > pool->vaddr + pool->size) {
- WARN(1, "freeing wrong coherent size from pool\n");
- return 0;
- }
-
pageno = (start - pool->vaddr) >> PAGE_SHIFT;
count = size >> PAGE_SHIFT;
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct dma_pool *pool = &atomic_pool;
+ struct page **pages = pool->pages;
+ int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+ return pages + offs;
+}
+
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;
@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(page_address(pages[0]), size);
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
+ if (gfp & GFP_ATOMIC)
+ return __iommu_alloc_atomic(dev, size, handle);
+
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, pages, handle, size);
+ return;
+ }
+
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
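
__in_atomic_pool() centralises the range check that __free_from_pool() used to open-code: an address counts as pool memory only when it starts inside the pool, and a buffer that starts inside but runs past the pool end triggers a one-off warning and is rejected. A stand-alone user-space model of that classification; names and the warning text are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-alone model of the __in_atomic_pool() range check above. */
static bool in_pool(const char *pool, size_t pool_size,
		    const char *start, size_t size)
{
	const char *pool_end = pool + pool_size;
	const char *end = start + size;

	if (start < pool || start >= pool_end)
		return false;		/* entirely outside the pool */
	if (end <= pool_end)
		return true;		/* fully contained           */

	fprintf(stderr, "wrong coherent size (%p-%p) from pool (%p-%p)\n",
		(void *)start, (void *)(end - 1),
		(void *)pool, (void *)(pool_end - 1));
	return false;			/* partial overlap: reject   */
}
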
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 77458548e031..40ca11ed6e5f 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)
struct page *page;
struct address_space *mapping;
- if (!pte_present_user(pteval))
- return;
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
/* only flush non-aliasing VIPT caches for exec mappings */
return;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618ef..a8ee92da3544 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..c2fa21d0103e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
- if (__va(bank->start + bank->size) > vmalloc_min ||
- __va(bank->start + bank->size) < __va(bank->start)) {
+ if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+ __va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index c2021139cb56..ea94765acf9a 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
dsb
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
-#ifdef CONFIG_ARM_ERRATA_720789
- mov r3, #0
-#else
asid r3, r3 @ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(W(mov) r3, #0 )
+ ALT_UP(W(nop) )
#endif
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 627d94f1b010..ec466400a200 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -98,6 +98,7 @@
#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54)
#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)
#define MX25_DMA_REQ_SSI2_RX1 22
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 626ad8cad7a9..938b50a33439 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer->reserved = 1;
break;
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
break;
}
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
- pm_runtime_put(&timer->pdev->dev);
+ pm_runtime_put_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
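
Both dmtimer hunks above move the spin_unlock_irqrestore() so that only the list walk that reserves a timer runs under dm_timer_lock; omap_dm_timer_prepare() is then called with the lock dropped, presumably because the prepare path (runtime PM, clock lookups) may sleep. The resulting shape is the usual "reserve under the lock, initialise outside it" pattern, sketched below; find_free_and_mark_reserved() is a hypothetical stand-in for the list walk.

/* Sketch only: the point is the lock scope, not the lookup itself. */
static struct omap_dm_timer *request_timer_sketch(void)
{
	struct omap_dm_timer *timer;
	unsigned long flags;

	spin_lock_irqsave(&dm_timer_lock, flags);
	timer = find_free_and_mark_reserved();	/* short, non-sleeping work */
	spin_unlock_irqrestore(&dm_timer_lock, flags);

	if (timer && omap_dm_timer_prepare(timer)) {	/* may sleep; lock dropped */
		timer->reserved = 0;
		timer = NULL;
	}
	return timer;
}
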
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 68b180edcfff..bb5d08a70dbc 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
cpu_is_omap16xx())
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
- cpu_is_omap44xx() || soc_is_omap54xx())
+ cpu_is_omap44xx() || soc_is_omap54xx() || \
+ soc_is_am33xx())
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 045e320f1067..324d31b14852 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -108,4 +108,13 @@
# endif
#endif
+#ifdef CONFIG_SOC_AM33XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME am33xx
+# endif
+#endif
+
#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index b8d19a136781..7f7b112acccb 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -110,7 +110,7 @@ static inline void flush(void)
_DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
AM33XXUART##p)
-static inline void __arch_decomp_setup(unsigned long arch_id)
+static inline void arch_decomp_setup(void)
{
int port = 0;
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
} while (0);
}
-#define arch_decomp_setup() __arch_decomp_setup(arch_id)
-
/*
* nothing to do
*/
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 766181cb5c95..024f3b08db29 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -68,6 +68,7 @@
static unsigned long omap_sram_start;
static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
static unsigned long omap_sram_size;
static void __iomem *omap_sram_ceil;
@@ -106,6 +107,7 @@ static int is_sram_locked(void)
*/
static void __init omap_detect_sram(void)
{
+ omap_sram_skip = SRAM_BOOTLOADER_SZ;
if (cpu_class_is_omap2()) {
if (is_sram_locked()) {
if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
(omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
omap_sram_size = 0x7000; /* 28K */
+ omap_sram_skip += SZ_16K;
} else {
omap_sram_size = 0x8000; /* 32K */
}
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
return;
#ifdef CONFIG_OMAP4_ERRATA_I688
+ if (cpu_is_omap44xx()) {
omap_sram_start += PAGE_SIZE;
omap_sram_size -= SZ_16K;
+ }
#endif
if (cpu_is_omap34xx()) {
/*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
*/
- memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
- omap_sram_size - SRAM_BOOTLOADER_SZ);
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
}
/*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
- available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+ available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
if (size > available) {
pr_err("Not enough space in SRAM\n");
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index d245a87dc014..b8b747a9d360 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
@@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e00fdb213609..ae2377ef63e5 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
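
The prototypes above grow a tx_csum_limit argument, and the callers updated earlier in this series pass either MV643XX_TX_CSUM_DEFAULT_LIMIT or the 1600-byte limit used by the Dove/Kirkwood platforms. A hedged sketch of what a caller now looks like; the board function name and platform data are illustrative, and the GE00 base/IRQ constants are the Kirkwood ones already shown above.

#include <linux/mv643xx_eth.h>
#include <plat/common.h>
/* board/SoC headers providing GE00_PHYS_BASE and the IRQ defines assumed */

static struct mv643xx_eth_platform_data example_ge00_data = {
	.phy_addr = MV643XX_ETH_PHY_ADDR(8),	/* illustrative PHY address */
};

static void __init example_board_eth_init(void)
{
	/* Fifth argument: largest frame size the controller may checksum
	 * in hardware; Dove/Kirkwood boards in this series pass 1600.
	 */
	orion_ge00_init(&example_ge00_data,
			GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
			IRQ_KIRKWOOD_GE00_ERR, 1600);
}
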
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 28f898f75380..db98e7021f0d 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
* when necessary.
*/
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 65c5eca475e7..d1116e2dfbea 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
int clk_set_rate(struct clk *clk, unsigned long rate)
{
+ unsigned long flags;
int ret;
if (IS_ERR(clk))
@@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
ret = (clk->ops->set_rate)(clk, rate);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
@@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk)
int clk_set_parent(struct clk *clk, struct clk *parent)
{
+ unsigned long flags;
int ret = 0;
if (IS_ERR(clk))
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
if (clk->ops && clk->ops->set_parent)
ret = (clk->ops->set_parent)(clk, parent);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 74e31ce35538..fc49f3dabd76 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,6 +32,8 @@
#include <linux/platform_data/s3c-hsudc.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <media/s5p_hdmi.h>
+
#include <asm/irq.h>
#include <asm/pmu.h>
#include <asm/mach/arch.h>
@@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
if (!pd) {
pd = &default_i2c_data;
- if (soc_is_exynos4210())
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
pd->bus_num = 8;
else if (soc_is_s5pv210())
pd->bus_num = 3;
@@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
&s5p_device_i2c_hdmiphy);
}
+
+struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
+
+void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus)
+{
+ struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
+
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
+ pd->hdmiphy_bus = 8;
+ else if (soc_is_s5pv210())
+ pd->hdmiphy_bus = 3;
+ else
+ pd->hdmiphy_bus = 0;
+
+ pd->hdmiphy_info = hdmiphy_info;
+ pd->mhl_info = mhl_info;
+ pd->mhl_bus = mhl_bus;
+
+ s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
+ &s5p_device_hdmi);
+}
+
#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
/* I2S */
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644
index 000000000000..331d046ac2c5
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/hdmi.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PLAT_SAMSUNG_HDMI_H
+#define __PLAT_SAMSUNG_HDMI_H __FILE__
+
+extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus);
+
+#endif /* __PLAT_SAMSUNG_HDMI_H */
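
The new helper registers the HDMIPHY (and optionally an MHL bridge) with the HDMI device; the Exynos board hunks earlier in this series call it as s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0). A sketch of a board that also carries an MHL chip might look like the following, where the MHL device name, address, and bus number are purely hypothetical and the remaining board includes are assumed.

#include <linux/i2c.h>
#include <linux/init.h>
#include <plat/hdmi.h>

static struct i2c_board_info hdmiphy_info = {
	I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
};

/* Hypothetical MHL bridge entry: name and address are placeholders. */
static struct i2c_board_info mhl_info = {
	I2C_BOARD_INFO("example-mhl-bridge", 0x39),
};

static void __init example_board_tv_init(void)
{
	s5p_i2c_hdmiphy_set_platdata(NULL);
	/* Third argument selects the I2C bus the MHL chip sits on. */
	s5p_hdmi_set_platdata(&hdmiphy_info, &mhl_info, 1);
}
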
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 64ab65f0fdbc..15070284343e 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
#ifdef CONFIG_SAMSUNG_PM_DEBUG
-struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index fb849d044bde..c834b32af275 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -719,8 +719,10 @@ static int __init vfp_init(void)
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
#endif
+#ifdef CONFIG_VFPv3
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
+#endif
}
}
return 0;
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634..c7092e6057c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
+ select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64ca96d..66cf00095b84 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_CFLAGS_MODULE += -mlong-calls
LDFLAGS += -m elf32bfin
-KALLSYMS += --symbol-prefix=_
KBUILD_DEFCONFIG := BF537-STAMP_defconfig
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5..9631598dcc5d 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
#define raw_smp_processor_id() blackfin_core_id()
extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b3..a40151306b77 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+ BFIN_IPI_TIMER,
+ BFIN_IPI_RESCHEDULE,
+ BFIN_IPI_CALL_FUNC,
+ BFIN_IPI_CALL_FUNC_SINGLE,
+ BFIN_IPI_CPU_STOP,
+};
struct blackfin_flush_data {
unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
void *secondary_stack;
-
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t *waitmask;
-};
-
static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
-struct ipi_message {
- unsigned long type;
- struct smp_call_struct call_struct;
-};
-
/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
- spinlock_t lock;
+struct ipi_data {
unsigned long count;
- unsigned long head; /* head of the queue */
- struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+ unsigned long bits;
};
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
static void ipi_cpu_stop(unsigned int cpu)
{
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
blackfin_icache_flush_range(fdata->start, fdata->end);
}
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
- int wait;
- void (*func)(void *info);
- void *info;
- func = msg->call_struct.func;
- info = msg->call_struct.info;
- wait = msg->call_struct.wait;
- func(info);
- if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * 'wait' usually means synchronization between CPUs.
- * Invalidate D cache in case shared data was changed
- * by func() to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
- }
-}
-
/* Use IRQ_SUPPLE_0 to request reschedule.
* When returning from interrupt to user space,
* there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
- struct ipi_message *msg;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
unsigned int cpu = smp_processor_id();
- unsigned long flags;
+ unsigned long pending;
+ unsigned long msg;
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
- msg_queue = &__get_cpu_var(ipi_msg_queue);
-
- spin_lock_irqsave(&msg_queue->lock, flags);
-
- while (msg_queue->count) {
- msg = &msg_queue->ipi_message[msg_queue->head];
- switch (msg->type) {
- case BFIN_IPI_TIMER:
- ipi_timer();
- break;
- case BFIN_IPI_RESCHEDULE:
- scheduler_ipi();
- break;
- case BFIN_IPI_CALL_FUNC:
- ipi_call_function(cpu, msg);
- break;
- case BFIN_IPI_CPU_STOP:
- ipi_cpu_stop(cpu);
- break;
- default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
- cpu, msg->type);
- break;
- }
- msg_queue->head++;
- msg_queue->head %= BFIN_IPI_MSGQ_LEN;
- msg_queue->count--;
+ bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+ while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+ msg = 0;
+ do {
+ msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+ switch (msg) {
+ case BFIN_IPI_TIMER:
+ ipi_timer();
+ break;
+ case BFIN_IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case BFIN_IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case BFIN_IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case BFIN_IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+ smp_mb();
}
- spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_init(&msg_queue->lock);
- msg_queue->count = 0;
- msg_queue->head = 0;
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ bfin_ipi_data->bits = 0;
+ bfin_ipi_data->count = 0;
}
}
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
- void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
- unsigned long flags, next_msg;
- cpumask_t waitmask; /* waitmask is shared by all cpus */
-
- cpumask_copy(&waitmask, &callmap);
- for_each_cpu(cpu, &callmap) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
- next_msg = (msg_queue->head + msg_queue->count)
- % BFIN_IPI_MSGQ_LEN;
- msg = &msg_queue->ipi_message[next_msg];
- msg->type = type;
- if (type == BFIN_IPI_CALL_FUNC) {
- msg->call_struct.func = func;
- msg->call_struct.info = info;
- msg->call_struct.wait = wait;
- msg->call_struct.waitmask = &waitmask;
- }
- msg_queue->count++;
- } else
- panic("IPI message queue overflow\n");
- spin_unlock_irqrestore(&msg_queue->lock, flags);
+ struct ipi_data *bfin_ipi_data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, cpumask) {
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ smp_mb();
+ set_bit(msg, &bfin_ipi_data->bits);
+ bfin_ipi_data->count++;
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}
- if (wait) {
- while (!cpumask_empty(&waitmask))
- blackfin_dcache_invalidate_range(
- (unsigned long)(&waitmask),
- (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * Invalidate D cache in case shared data was changed by
- * other processors to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- }
+ local_irq_restore(flags);
}
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- cpumask_t callmap;
-
- preempt_disable();
- cpumask_copy(&callmap, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &callmap);
- if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- preempt_enable();
-
- return 0;
+ send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL_GPL(smp_call_function);
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- unsigned int cpu = cpuid;
- cpumask_t callmap;
-
- if (cpu_is_offline(cpu))
- return 0;
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- return 0;
+ send_ipi(mask, BFIN_IPI_CALL_FUNC);
}
-EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
- cpumask_t callmap;
- /* simply trigger an ipi */
-
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+ send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
return;
}
void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
- smp_send_message(*mask, type, NULL, NULL, 0);
+ send_ipi(mask, type);
}
void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+ send_ipi(&callmap, BFIN_IPI_CPU_STOP);
preempt_enable();
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
- ipi_queue_init();
+ bfin_ipi_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
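
The smp.c rework above replaces the locked message queue with a per-CPU bitmask that the sender sets and the handler drains with xchg(). A simplified single-word model of that pattern, not taken from the patch (the dispatch body and the CPU kick are left as placeholders):

#include <linux/atomic.h>
#include <linux/bitops.h>

static unsigned long example_pending;		/* hypothetical pending-IPI mask */

static void example_post(int msg)
{
	set_bit(msg, &example_pending);		/* atomically publish the request */
	/* ...platform-specific code would kick the target CPU here... */
}

static void example_drain(void)
{
	unsigned long pending;
	int msg;

	/* Take ownership of everything posted so far in one atomic step. */
	while ((pending = xchg(&example_pending, 0)) != 0) {
		for_each_set_bit(msg, &pending, BITS_PER_LONG) {
			/* dispatch on 'msg' here, e.g. scheduler_ipi() */
		}
	}
}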
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 052f81a76239..983c859e40b7 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,6 +6,7 @@
config C6X
def_bool y
select CLKDEV_LOOKUP
+ select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 6d521d96d941..09c5a0f5f4d1 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -1,7 +1,7 @@
/*
* Port on Texas Instruments TMS320C6x architecture
*
- * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated
* Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -16,9 +16,14 @@
/*
* Cache line size
*/
-#define L1D_CACHE_BYTES 64
-#define L1P_CACHE_BYTES 32
-#define L2_CACHE_BYTES 128
+#define L1D_CACHE_SHIFT 6
+#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT)
+
+#define L1P_CACHE_SHIFT 5
+#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT)
+
+#define L2_CACHE_SHIFT 7
+#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
/*
* L2 used as cache
@@ -29,7 +34,8 @@
* For practical reasons the L1_CACHE_BYTES defines should not be smaller than
* the L2 line size
*/
-#define L1_CACHE_BYTES L2_CACHE_BYTES
+#define L1_CACHE_SHIFT L2_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L2_CACHE_ALIGN_LOW(x) \
(((x) & ~(L2_CACHE_BYTES - 1)))
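
A small illustrative companion to the shift-based definitions above (not from the patch): once both the shift and the byte count are available, round-up alignment can be written alongside the round-down macro shown in context:

/* Illustrative only: round an address up to the next cache-line boundary. */
#define EXAMPLE_CACHE_SHIFT	7
#define EXAMPLE_CACHE_BYTES	(1 << EXAMPLE_CACHE_SHIFT)
#define EXAMPLE_CACHE_ALIGN_UP(x) \
	(((x) + EXAMPLE_CACHE_BYTES - 1) & ~(EXAMPLE_CACHE_BYTES - 1))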
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 954d81e2e837..7913695b2fcb 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y
-CONFIG_MISC_DEVICES=y
CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 91c41ecfa6d9..f8e913365423 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y
-CONFIG_MISC_DEVICES=y
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 4a469907f04a..b22df9410dce 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select GENERIC_ATOMIC64
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 82068349a2bb..c4eb79edecec 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,6 +28,7 @@ config COLDFIRE
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
+ select HAVE_CLK
endchoice
@@ -58,7 +59,6 @@ config MCPU32
config M68020
bool "68020 support"
depends on MMU
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68020
@@ -69,7 +69,6 @@ config M68020
config M68030
bool "68030 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68030
@@ -79,7 +78,6 @@ config M68030
config M68040
bool "68040 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68LC040
@@ -90,7 +88,6 @@ config M68040
config M68060
bool "68060 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68060
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 75f9ee967ea7..9cd13b4ce42b 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -146,9 +146,3 @@ struct clk_ops clk_ops1 = {
};
#endif /* MCFPM_PPMCR1 */
#endif /* MCFPM_PPMCR0 */
-
-struct clk *devm_clk_get(struct device *dev, const char *id)
-{
- return NULL;
-}
-EXPORT_SYMBOL(devm_clk_get);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 331d574df99c..faf65286574e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -89,6 +89,7 @@ config ATH79
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
+ select HAVE_CLK
select IRQ_CPU
select MIPS_MACHINE
select SYS_HAS_CPU_MIPS32_R2
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 99969484c475..a124c251c0c9 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
* adapter on the mtx-1 "singleboard" variant. It triggers a custom
* logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
*/
+ udelay(1);
+
if (assert && devsel != 0)
/* Suppress signal to Cardbus */
alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index 36e9570e7bc4..b2a2311ec85b 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
+ ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
+ ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
platform_device_register(&ath79_ohci_device);
}
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 29054f211832..48fe762d2526 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
if (soc_is_ar71xx())
ath79_gpio_count = AR71XX_GPIO_COUNT;
- else if (soc_is_ar724x())
- ath79_gpio_count = AR724X_GPIO_COUNT;
+ else if (soc_is_ar7240())
+ ath79_gpio_count = AR7240_GPIO_COUNT;
+ else if (soc_is_ar7241() || soc_is_ar7242())
+ ath79_gpio_count = AR7241_GPIO_COUNT;
else if (soc_is_ar913x())
ath79_gpio_count = AR913X_GPIO_COUNT;
else if (soc_is_ar933x())
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e39f73048d4f..f1c9c3e2f678 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
}
if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
}
bcm63xx_spi_regs_init();
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7fb1f222b8a5..274cd4fad30c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
octeon_irq_ciu_to_irq[line][bit] = irq;
}
+static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+ int irq, int line, int bit)
+{
+ irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
@@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)
mutex_init(&cd->core_irq_mutex);
irq = OCTEON_IRQ_SW0 + i;
- switch (irq) {
- case OCTEON_IRQ_TIMER:
- case OCTEON_IRQ_SW0:
- case OCTEON_IRQ_SW1:
- case OCTEON_IRQ_5:
- case OCTEON_IRQ_PERF:
- irq_set_chip_data(irq, cd);
- irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
- handle_percpu_irq);
- break;
- default:
- break;
- }
+ irq_set_chip_data(irq, cd);
+ irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
}
}
@@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
unsigned int type;
unsigned int pin;
unsigned int trigger;
- struct octeon_irq_gpio_domain_data *gpiod;
if (d->of_node != node)
return -EINVAL;
@@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
break;
}
*out_type = type;
- gpiod = d->host_data;
- *out_hwirq = gpiod->base_hwirq + pin;
+ *out_hwirq = pin;
return 0;
}
@@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
static int octeon_irq_gpio_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
- unsigned int line = hw >> 6;
- unsigned int bit = hw & 63;
+ struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+ unsigned int line, bit;
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
+ hw += gpiod->base_hwirq;
+ line = hw >> 6;
+ bit = hw & 63;
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
octeon_irq_set_ciu_mapping(virq, line, bit,
octeon_irq_gpio_chip,
octeon_irq_handle_gpio);
-
return 0;
}
@@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)
struct irq_chip *chip_wd;
struct device_node *gpio_node;
struct device_node *ciu_node;
+ struct irq_domain *ciu_domain = NULL;
octeon_irq_init_ciu_percpu();
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)
/* Mips internal */
octeon_irq_init_core();
- /* CIU_0 */
- for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
-
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
-
- /* CIU_1 */
- for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
-
gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
if (gpio_node) {
struct octeon_irq_gpio_domain_data *gpiod;
@@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)
ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
if (ciu_node) {
- irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
+ ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
of_node_put(ciu_node);
} else
- pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n");
+ panic("Cannot find device node for cavium,octeon-3860-ciu.");
+
+ /* CIU_0 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
+
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
+
+ /* CIU_1 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 1caa78ad06d5..dde504477fac 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -393,7 +393,8 @@
#define AR71XX_GPIO_REG_FUNC 0x28
#define AR71XX_GPIO_COUNT 16
-#define AR724X_GPIO_COUNT 18
+#define AR7240_GPIO_COUNT 18
+#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 4476fa03bf36..6ddae926bf79 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -42,7 +42,6 @@
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
-#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
#define cpu_has_64bits 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index 7d98dbe5d4b5..c9bae1362606 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);
struct bcm63xx_spi_pdata {
unsigned int fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
int bus_num;
int num_chipselect;
u32 speed_hz;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 4ccc2a748aff..61f2a2a5099d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -1054,7 +1054,8 @@
#define SPI_6338_FILL_BYTE 0x07
#define SPI_6338_MSG_TAIL 0x09
#define SPI_6338_RX_TAIL 0x0b
-#define SPI_6338_MSG_CTL 0x40
+#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */
+#define SPI_6338_MSG_CTL_WIDTH 8
#define SPI_6338_MSG_DATA 0x41
#define SPI_6338_MSG_DATA_SIZE 0x3f
#define SPI_6338_RX_DATA 0x80
@@ -1070,7 +1071,8 @@
#define SPI_6348_FILL_BYTE 0x07
#define SPI_6348_MSG_TAIL 0x09
#define SPI_6348_RX_TAIL 0x0b
-#define SPI_6348_MSG_CTL 0x40
+#define SPI_6348_MSG_CTL 0x40 /* 8-bits register */
+#define SPI_6348_MSG_CTL_WIDTH 8
#define SPI_6348_MSG_DATA 0x41
#define SPI_6348_MSG_DATA_SIZE 0x3f
#define SPI_6348_RX_DATA 0x80
@@ -1078,6 +1080,7 @@
/* BCM 6358 SPI core */
#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
+#define SPI_6358_MSG_CTL_WIDTH 16
#define SPI_6358_MSG_DATA 0x02
#define SPI_6358_MSG_DATA_SIZE 0x21e
#define SPI_6358_RX_DATA 0x400
@@ -1094,6 +1097,7 @@
/* BCM 6358 SPI core */
#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
+#define SPI_6368_MSG_CTL_WIDTH 16
#define SPI_6368_MSG_DATA 0x02
#define SPI_6368_MSG_DATA_SIZE 0x21e
#define SPI_6368_RX_DATA 0x400
@@ -1115,7 +1119,10 @@
#define SPI_HD_W 0x01
#define SPI_HD_R 0x02
#define SPI_BYTE_CNT_SHIFT 0
-#define SPI_MSG_TYPE_SHIFT 14
+#define SPI_6338_MSG_TYPE_SHIFT 6
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+#define SPI_6368_MSG_TYPE_SHIFT 14
/* Command */
#define SPI_CMD_NOOP 0x00
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 418992042f6f..c22a3078bf11 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -21,14 +21,10 @@ enum octeon_irq {
OCTEON_IRQ_TIMER,
/* sources in CIU_INTX_EN0 */
OCTEON_IRQ_WORKQ0,
- OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
- OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
+ OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
OCTEON_IRQ_MBOX1,
- OCTEON_IRQ_UART0,
- OCTEON_IRQ_UART1,
- OCTEON_IRQ_UART2,
OCTEON_IRQ_PCI_INT0,
OCTEON_IRQ_PCI_INT1,
OCTEON_IRQ_PCI_INT2,
@@ -38,8 +34,6 @@ enum octeon_irq {
OCTEON_IRQ_PCI_MSI2,
OCTEON_IRQ_PCI_MSI3,
- OCTEON_IRQ_TWSI,
- OCTEON_IRQ_TWSI2,
OCTEON_IRQ_RML,
OCTEON_IRQ_TIMER0,
OCTEON_IRQ_TIMER1,
@@ -47,8 +41,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER3,
OCTEON_IRQ_USB0,
OCTEON_IRQ_USB1,
- OCTEON_IRQ_MII0,
- OCTEON_IRQ_MII1,
OCTEON_IRQ_BOOTDMA,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7531ecd654d6..dca8bce8c7ab 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -10,6 +10,7 @@ struct mod_arch_specific {
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
index a37d12b3b61c..afe9e0e03fe9 100644
--- a/arch/mips/include/asm/r4k-timer.h
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -12,16 +12,16 @@
#ifdef CONFIG_SYNC_R4K
-extern void synchronise_count_master(void);
-extern void synchronise_count_slave(void);
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
#else
-static inline void synchronise_count_master(void)
+static inline void synchronise_count_master(int cpu)
{
}
-static inline void synchronise_count_slave(void)
+static inline void synchronise_count_slave(int cpu)
{
}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de3..4f8c3cba8c0c 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
Elf_Addr value;
};
-static struct mips_hi16 *mips_hi16_list;
-
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
n->addr = (Elf_Addr *)location;
n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
return 0;
}
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+ struct mips_hi16 *next;
+
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+}
+
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
+ struct mips_hi16 *l;
Elf_Addr val, vallo;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
l = next;
}
- mips_hi16_list = NULL;
+ me->arch.r_mips_hi16_list = NULL;
}
/*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
out_danger:
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
+
pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return res;
}
+ /*
+ * Normally the hi16 list should be deallocated at this point. A
+ * malformed binary however could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation. In that
+ * case, free up the list and return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
+
+ return -ENOEXEC;
+ }
+
return 0;
}
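
For background on the HI16/LO16 pairing the module loader resolves above, a standalone sketch (not from the patch) of how the two 16-bit halves of a 32-bit MIPS address are derived; the +0x8000 term compensates for the sign extension the LO16 half receives at run time:

#include <stdint.h>

/* Illustrative only: split a 32-bit target into HI16/LO16 instruction fields. */
static uint16_t example_hi16(uint32_t target)
{
	return (target + 0x8000) >> 16;
}

static uint16_t example_lo16(uint32_t target)
{
	return target & 0xffff;
}

/* e.g. target 0x12348000: hi16 = 0x1235, lo16 = 0x8000, and the CPU computes
 * (0x1235 << 16) + (int16_t)0x8000 = 0x12348000 as expected. */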
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index e7e03ecf5495..afc379ca3753 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -102,7 +102,7 @@ static void cmp_init_secondary(void)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
- c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+ c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 31637d8c8738..9005bf9fb859 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_set(cpu, cpu_callin_map);
- synchronise_count_slave();
+ synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
- synchronise_count_master();
}
/* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
+ synchronise_count_master(cpu);
return 0;
}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411fd..7f1eca3858de 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
unsigned int initcount;
- int nslaves;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
return;
#endif
- printk(KERN_INFO "Synchronize counters across %u CPUs: ",
- num_online_cpus());
+ printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
* Notify the slaves that it's time to start
*/
atomic_set(&count_reference, read_c0_count());
- atomic_set(&count_start_flag, 1);
+ atomic_set(&count_start_flag, cpu);
smp_wmb();
/* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
* two CPUs.
*/
- nslaves = num_online_cpus()-1;
for (i = 0; i < NR_LOOPS; i++) {
- /* slaves loop on '!= ncpus' */
- while (atomic_read(&count_count_start) != nslaves)
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
/*
* Wait for all slaves to leave the synchronization point:
*/
- while (atomic_read(&count_count_stop) != nslaves)
+ while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
+ atomic_set(&count_start_flag, 0);
local_irq_restore(flags);
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
printk("done.\n");
}
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
{
int i;
unsigned int initcount;
- int ncpus;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
* so we first wait for the master to say everyone is ready
*/
- while (!atomic_read(&count_start_flag))
+ while (atomic_read(&count_start_flag) != cpu)
mb();
/* Count will be initialised to next expire for all CPU's */
initcount = atomic_read(&count_reference);
- ncpus = num_online_cpus();
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
- while (atomic_read(&count_count_start) != ncpus)
+ while (atomic_read(&count_count_start) != 2)
mb();
/*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
- while (atomic_read(&count_count_stop) != ncpus)
+ while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
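
The loops above implement a two-party rendezvous on atomic counters between the boot CPU and the CPU being brought up. A minimal standalone sketch of that barrier idea (not from the patch; resetting the counter for reuse is omitted):

#include <linux/atomic.h>

static atomic_t example_arrived = ATOMIC_INIT(0);

/* Each of the two CPUs calls this; neither returns until both have arrived. */
static void example_rendezvous(void)
{
	atomic_inc(&example_arrived);
	while (atomic_read(&example_arrived) != 2)
		mb();		/* spin, forcing the counter to be re-read */
}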
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 33aadbcf170b..dcfd573871c1 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 7b13a4caeea4..fea823f18479 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -273,16 +273,19 @@ asmlinkage void plat_irq_dispatch(void)
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
int irq;
+ if (unlikely(!pending)) {
+ spurious_interrupt();
+ return;
+ }
+
irq = irq_ffs(pending);
if (irq == MIPSCPU_INT_I8259A)
malta_hw0_irqdispatch();
else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
malta_ipi_irqdispatch();
- else if (irq >= 0)
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
else
- spurious_interrupt();
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
}
#ifdef CONFIG_MIPS_MT_SMP
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 284dea54faf5..2147cb34e705 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
register_pci_controller(controller);
}
-
-/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __devinit quirk_dlcsetup(struct pci_dev *dev)
-{
- u8 odlc, ndlc;
- (void) pci_read_config_byte(dev, 0x82, &odlc);
- /* Enable passive releases and delayed transaction */
- ndlc = odlc | 7;
- (void) pci_write_config_byte(dev, 0x82, ndlc);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
- quirk_dlcsetup);
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 4c35301720e7..80562b81f0f2 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -138,11 +138,6 @@ static int __init malta_add_devices(void)
if (err)
return err;
- /*
- * Set RTC to BCD mode to support current alarm code.
- */
- CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL);
-
return 0;
}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 414a7459858d..86d77a666458 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -23,9 +23,12 @@
#define AR724X_PCI_MEM_BASE 0x10000000
#define AR724X_PCI_MEM_SIZE 0x08000000
+#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_RESET_LINK_UP BIT(0)
+
#define AR724X_PCI_INT_DEV0 BIT(14)
#define AR724X_PCI_IRQ_COUNT 1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
static u32 ar724x_pci_bar0_value;
static bool ar724x_pci_bar0_is_cached;
+static bool ar724x_pci_link_up;
+
+static inline bool ar724x_pci_check_link(void)
+{
+ u32 reset;
+
+ reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+ return reset & AR724X_PCI_RESET_LINK_UP;
+}
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
void __iomem *base;
u32 data;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
u32 data;
int s;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
if (ar724x_pci_ctrl_base == NULL)
goto err_unmap_devcfg;
+ ar724x_pci_link_up = ar724x_pci_check_link();
+ if (!ar724x_pci_link_up)
+ pr_warn("ar724x: PCIe link is down\n");
+
ar724x_pci_irq_init(irq);
register_pci_controller(&ar724x_pci_controller);
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
- if (p->personality == PER_HPUX) {
+ if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
long err;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
- if (err == PER_LINUX32)
- err = PER_LINUX;
+ if (personality(err) == PER_LINUX32)
+ err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
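
A hedged sketch of the masking idiom the hunk above relies on (not from the patch): personality() extracts only the low PER_MASK bits, so swapping the personality type must preserve the flag bits in the rest of the word:

#include <linux/personality.h>

/* Illustrative only: replace the personality type, keep the option flags. */
static unsigned long example_force_linux32(unsigned long persona)
{
	if (personality(persona) == PER_LINUX)
		persona = (persona & ~PER_MASK) | PER_LINUX32;
	return persona;
}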
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 8d35d2c1f694..4f9c9f682ecf 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -345,6 +345,13 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
+ usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ port0;
+ };
/include/ "qoriq-usb2-dr-0.dtsi"
+ usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ };
/include/ "qoriq-sec4.0-0.dtsi"
};
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index f4337bacd0e7..26e541c4662b 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_EMBEDDED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P1023_RDS=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_CPM2=y
-CONFIG_GPIO_MPC8XXX=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_E1000E=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
+CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_FRAME_WARN=8092
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index cbb98c1234fd..8b3d57c1ebe8 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P2041_RDB=y
CONFIG_P3041_DS=y
CONFIG_P4080_DS=y
CONFIG_P5020_DS=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_FSL_LBC=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_HID=m
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_OF=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_RCU_TRACE=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index dd89de8b0b7f..0516e22ca3de 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 15130066e5e2..07b7f2af2dca 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,8 +1,10 @@
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
@@ -13,15 +15,16 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_KEXEC=y
-# CONFIG_RELOCATABLE is not set
+# CONFIG_PPC_PSERIES is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
+# CONFIG_MIGRATION is not set
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDE_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
-CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_IEEE1394=y
+CONFIG_IEEE1394_OHCI1394=y
+CONFIG_IEEE1394_SBP2=m
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_RAWIO=y
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_DV1394=m
+CONFIG_ADB_PMU=y
+CONFIG_PMAC_SMU=y
CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_THERM_PM72=y
+CONFIG_WINDFARM=y
+CONFIG_WINDFARM_PM81=y
+CONFIG_WINDFARM_PM91=y
+CONFIG_WINDFARM_PM112=y
+CONFIG_WINDFARM_PM121=y
CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
CONFIG_DUMMY=m
-CONFIG_MII=y
+CONFIG_BONDING=m
CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SUNGEM=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_TIGON3=y
CONFIG_E1000=y
-CONFIG_SUNGEM=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
+CONFIG_TIGON3=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
# CONFIG_HWMON is not set
-CONFIG_AGP=y
-CONFIG_DRM=y
-CONFIG_DRM_NOUVEAU=y
+CONFIG_AGP=m
+CONFIG_AGP_UNINORTH=m
CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_OF=y
+CONFIG_FB_NVIDIA=y
+CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
@@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_POWERMAC=m
+CONFIG_SND_AOA=m
+CONFIG_SND_AOA_FABRIC_LAYOUT=m
+CONFIG_SND_AOA_ONYX=m
+CONFIG_SND_AOA_TAS=m
+CONFIG_SND_AOA_TOONIE=m
CONFIG_SND_USB_AUDIO=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_LOGITECH_FF=y
CONFIG_HID_PANTHERLORD=y
@@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
@@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
+CONFIG_INOTIFY=y
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -221,12 +259,14 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_1250=y
CONFIG_NLS_CODEPAGE_1251=y
@@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_STRICT_DEVMEM=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
@@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-# CONFIG_VIRTUALIZATION is not set
-CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=m
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5aac9a8bc53b..9352e4430c3b 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 03ee911c4577..8b5bda27d248 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_FSL=y
CONFIG_PATA_ALI=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fdfa84dc908f..b0974e7e98ae 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 50d82c8a037f..b3c083de17ad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
& feature);
}
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
#define HBP_NUM 1
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf5..a8bf5c673a3c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/cacheflush.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b9..e006f0bdea95 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+ /* Clear i-cache for new pages */
+ struct page *page;
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+}
+
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 326d33ca55cd..d4f471fb1031 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
+#include <asm/io.h>
struct mpic_msgr {
u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 53b6dfa83344..54b73a28c205 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fae..e8995727b1c1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd6..a892680668d8 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
void doorbell_cause_ipi(int cpu, unsigned long data)
{
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2d7bb8ced136..e4897523de41 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
- dev_info(dev, "Warning: IOMMU window too big for device mask\n");
- dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
- mask, (tbl->it_offset + tbl->it_size) <<
- IOMMU_PAGE_SHIFT);
+ if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
+ dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
return 0;
} else
return 1;
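
The hunk above relaxes dma_iommu_dma_supported(): the device mask only has to reach the start of the IOMMU window, not its end. A minimal userspace sketch of the relaxed check, assuming a 4 KiB IOMMU page size and made-up window values:

#include <stdio.h>

#define IOMMU_PAGE_SHIFT 12  /* assumed 4 KiB IOMMU pages, for illustration */

/*
 * Sketch of the relaxed supported-mask test: the device only has to be
 * able to reach the start of the IOMMU window; a window that extends
 * beyond the mask no longer disqualifies the device.
 */
static int window_usable(unsigned long long mask,
                         unsigned long it_offset, unsigned long it_size)
{
	(void)it_size;               /* size no longer matters for the check */
	return it_offset <= (mask >> IOMMU_PAGE_SHIFT);
}

int main(void)
{
	/* Hypothetical 32-bit DMA mask, window starting at page 0, 8 GiB long. */
	unsigned long long mask = 0xffffffffULL;
	printf("usable: %d\n", window_usable(mask, 0, 2UL << 20));
	return 0;
}
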
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29ef..b40e0b4815b3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
+ ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
- cmpd r0,r25
- beq 1f
+ cmpwi r6,0
+ bne 1f
+ ld r0,0(r7)
+1: cmpd r0,r25
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77bb..39aa97d3ff88 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+ STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61db..956a4c496de9 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
- bp->ctx->task->thread.last_hit_ubp = bp;
+ current->thread.last_hit_ubp = bp;
regs->msr |= MSR_SE;
goto out;
}
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339e..e11863f4e595 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+ /* fall through */
+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
* stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f0..c470a40b29f5 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
+#include <linux/slab.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
return SIGHUP; /* default for things we don't know about */
}
+/**
+ *
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return kgdb_isremovedbreak(regs->nip);
+}
+
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
+ struct thread_info *backup_current_thread_info = \
+ (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
if (user_mode(regs))
return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
- if (thread_info != exception_thread_info)
+ if (thread_info != exception_thread_info) {
+ /* Save the original current_thread_info. */
+ memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+ }
kgdb_handle_exception(0, SIGTRAP, 0, regs);
if (thread_info != exception_thread_info)
- memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+ /* Finally, restore the original current_thread_info. */
+ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
return 1;
}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#else
linux_regs->msr |= MSR_SE;
#endif
- kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
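
The kgdb_singlestep() change above saves the clobbered thread_info and puts the original back after kgdb_handle_exception() returns, instead of copying the borrowed copy back. A small standalone sketch of that save/borrow/restore pattern, with hypothetical structures:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thread_info { int cpu; int flags; };

/* Keep a backup of the structure we are about to clobber, run the debugger
 * with the borrowed contents, then restore the original afterwards. */
static void with_borrowed_info(struct thread_info *current_ti,
                               const struct thread_info *borrowed_ti,
                               void (*debugger)(void))
{
	struct thread_info *backup = malloc(sizeof(*backup));
	if (!backup)
		return;

	memcpy(backup, current_ti, sizeof(*current_ti));   /* save original */
	memcpy(current_ti, borrowed_ti, sizeof(*current_ti));
	debugger();
	memcpy(current_ti, backup, sizeof(*current_ti));    /* restore */
	free(backup);
}

static void fake_debugger(void) { puts("in debugger"); }

int main(void)
{
	struct thread_info cur = { 0, 1 }, other = { 2, 0 };
	with_borrowed_info(&cur, &other, fake_debugger);
	printf("cpu=%d flags=%d\n", cur.cpu, cur.flags);   /* restored: 0 1 */
	return 0;
}
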
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476de..1a1f2ddfb581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
- if (current->thread.dscr_inherit) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = current->thread.dscr;
- } else if (0 != dscr_default) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = dscr_default;
- } else {
- p->thread.dscr_inherit = 0;
- p->thread.dscr = 0;
- }
+ p->thread.dscr_inherit = current->thread.dscr_inherit;
+ p->thread.dscr = current->thread.dscr;
}
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f7..8d4214afc21d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
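
Both the doorbell and smp_muxed_ipi_message_pass() changes above are about ordering: the message store must be globally visible before whatever causes the IPI. A rough userspace analogue using C11 atomics in place of smp_mb()/xchg(), with a plain counter standing in for the IPI itself:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uchar messages[4];      /* per-CPU message flags (sketch) */
static atomic_int   doorbell;         /* stands in for the IPI */

/* Sender: publish the message, full fence, then "cause the IPI". */
static void send_msg(int msg)
{
	atomic_store_explicit(&messages[msg], 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);   /* analogue of smp_mb() */
	atomic_fetch_add(&doorbell, 1);              /* analogue of cause_ipi() */
}

/* Receiver: after seeing the doorbell, fence, then consume all messages. */
static unsigned int demux(void)
{
	unsigned int all = 0;
	atomic_thread_fence(memory_order_seq_cst);
	for (int i = 0; i < 4; i++)
		all |= atomic_exchange(&messages[i], 0) << i;
	return all;
}

int main(void)
{
	send_msg(2);
	printf("pending mask: 0x%x\n", demux());
	return 0;
}
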
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faecc..4e3cc47f26b9 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
long ret;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
#endif
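
The ppc64_personality() fix above keeps the personality flag bits and only swaps the base personality, using the personality() mask. A small sketch of that masking, reusing the standard PER_* values (ADDR_NO_RANDOMIZE is just an example flag living above the mask):

#include <stdio.h>

/* Values mirror the Linux <linux/personality.h> encoding. */
#define PER_MASK          0x00ff
#define PER_LINUX         0x0000
#define PER_LINUX32       0x0008
#define ADDR_NO_RANDOMIZE 0x0040000

#define personality(p) ((p) & PER_MASK)

/* Swap the base personality while keeping the flag bits intact. */
static unsigned long to_linux32(unsigned long p)
{
	if (personality(p) == PER_LINUX)
		p = (p & ~PER_MASK) | PER_LINUX32;
	return p;
}

int main(void)
{
	unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;
	printf("0x%lx -> 0x%lx\n", p, to_linux32(p));  /* flags survive */
	return 0;
}
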
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2abd..8302af649219 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
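
store_dscr_default() now pushes the new default to every CPU, and update_dscr() skips threads that set their own DSCR (dscr_inherit). A stripped-down sketch of that inherit logic, with a plain struct standing in for the real thread_struct:

#include <stdio.h>

/* A thread tracks whether it ever set its own DSCR; only threads that did
 * not keep following the global default. */
struct thread { int dscr_inherit; unsigned long dscr; };

static unsigned long dscr_default;

static void update_dscr(struct thread *t)
{
	if (!t->dscr_inherit)
		t->dscr = dscr_default;   /* the kernel also does mtspr here */
}

int main(void)
{
	struct thread a = { .dscr_inherit = 0 };            /* follows default */
	struct thread b = { .dscr_inherit = 1, .dscr = 7 }; /* opted out */

	dscr_default = 3;
	update_dscr(&a);
	update_dscr(&b);
	printf("a=%lu b=%lu\n", a.dscr, b.dscr);  /* a=3 b=7 */
	return 0;
}
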
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf8..e49e93191b69 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
trace_timer_interrupt_exit(regs);
}
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2d..ae0843fa7a61 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb234..837f13e7b6bf 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
pteg1 |= PP_RWRX;
}
+ if (orig_pte->may_execute)
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
local_irq_disable();
if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a8..0688b6b39585 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
+ else
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d040..44b72feaff7d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
sync /* order setting ceded vs. testing prodded */
lbz r5,VCPU_PRODDED(r3)
cmpwi r5,0
- bne 1f
+ bne kvm_cede_prodded
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
- b 2f /* just send it up to host on 970 */
+ b kvm_cede_exit /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
or r4,r4,r0
PPC_POPCNTW(R7,R4)
cmpw r7,r8
- bge 2f
+ bge kvm_cede_exit
stwcx. r4,0,r6
bne 31b
li r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
b hcall_real_fallback
/* cede when already previously prodded case */
-1: li r0,0
+kvm_cede_prodded:
+ li r0,0
stb r0,VCPU_PRODDED(r3)
sync /* order testing prodded vs. clearing ceded */
stb r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
blr
/* we've ceded but we want to give control to the host */
-2: li r3,H_TOO_HARD
+kvm_cede_exit:
+ li r3,H_TOO_HARD
blr
secondary_too_late:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..a2b66717813d 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
if (vcpu_e500->g2h_tlb1_map)
- memset(vcpu_e500->g2h_tlb1_map,
- sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
if (vcpu_e500->h2g_tlb1_rmap)
- memset(vcpu_e500->h2g_tlb1_rmap,
- sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
+
+ /* Clear i-cache for new pages */
+ kvmppc_mmu_flush_icache(pfn);
}
/* XXX only map the one-one case, for now use TLB0 */
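
The clear_tlb1_bitmap() fix above swaps the last two memset() arguments back into the correct order. A tiny reminder of why the buggy order compiles silently but clears nothing:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long map[8];

	/* Correct order: memset(dst, byte value, length in bytes). */
	memset(map, 0, sizeof(map));

	/* The buggy form, memset(map, sizeof(map), 0), compiles fine but
	 * clears nothing because the length argument is 0. */
	printf("first entry after clear: %llu\n", map[0]);
	return 0;
}
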
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb333..17e5b2364312 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
- err = __put_user(instr, addr);
+ __put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606e..0d24ff15f5f6 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_usercopy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- /*
- * We prefetch both the source and destination using enhanced touch
- * instructions. We use a stream ID of 0 for the load side and
- * 1 for the store side.
- */
- clrrdi r6,r4,7
- clrrdi r9,r3,7
- ori r9,r9,1 /* stream=1 */
-
- srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
- cmpldi cr1,r7,0x3FF
- ble cr1,1f
- li r7,0x3FF
-1: lis r0,0x0E00 /* depth=7 */
- sldi r7,r7,7
- or r7,r7,r0
- ori r10,r7,1 /* stream=1 */
-
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
-.machine push
-.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
- eieio
- dcbt r0,r8,0b01010 /* GO */
-.machine pop
-
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc716..7ba6c96de778 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d135..fbdad0e3929a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
+EXPORT_SYMBOL(flush_dcache_icache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39b159751c35..59213cfaeca9 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
/*
* Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid;
+ int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev;
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ changed = 1;
}
- return 1;
+ return changed;
}
static void topology_work_fn(struct work_struct *work)
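
arch_update_cpu_topology() now reports whether anything actually changed instead of always returning 1. A minimal sketch of that accumulate-a-changed-flag pattern, with plain arrays standing in for the cpu/node maps:

#include <stdio.h>

/* Report a change only if at least one element really moved. */
static int update_nodes(int *node_of_cpu, const int *new_node, int ncpus)
{
	int changed = 0;

	for (int cpu = 0; cpu < ncpus; cpu++) {
		if (node_of_cpu[cpu] == new_node[cpu])
			continue;
		node_of_cpu[cpu] = new_node[cpu];
		changed = 1;
	}
	return changed;   /* 1 if the topology changed, 0 otherwise */
}

int main(void)
{
	int cur[3]  = { 0, 0, 1 };
	int want[3] = { 0, 0, 1 };
	printf("changed: %d\n", update_nodes(cur, want, 3));  /* prints 0 */
	return 0;
}
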
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 77b49ddda9d3..7cd2dbd6e4c4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
+ if (pmc_overflow(val)) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3ef46254c35b..7698b6e13c57 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- /* If powersave_nap is enabled, use NAP mode, else just
- * spin aimlessly
- */
- if (!powersave_nap) {
- generic_mach_cpu_die();
- return;
- }
-
/* Standard hot unplug procedure */
local_irq_disable();
idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
- power7_idle();
+ power7_nap();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a7b2a600d0a4..c37f46136321 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
iounmap(hose->cfg_data);
iounmap(hose->cfg_addr);
pcibios_free_controller(hose);
- return 0;
+ return -ENODEV;
}
setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
void __devinit fsl_pci_init(void)
{
+ int ret;
struct device_node *node;
struct pci_controller *hose;
dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
if (!fsl_pci_primary)
fsl_pci_primary = node;
- fsl_add_bridge(node, fsl_pci_primary == node);
- hose = pci_find_hose_for_OF_device(node);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ ret = fsl_add_bridge(node, fsl_pci_primary == node);
+ if (ret == 0) {
+ hose = pci_find_hose_for_OF_device(node);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
}
}
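
fsl_add_bridge() now returns -ENODEV on failure, and fsl_pci_init() only touches the hose when the call succeeded. A small sketch of that check-before-use pattern, with a made-up hose structure:

#include <stdio.h>
#include <errno.h>

struct hose { unsigned long base, size; };

/* Stand-in for fsl_add_bridge(): returns 0 on success, -ENODEV on failure. */
static int add_bridge(int works, struct hose *h)
{
	if (!works)
		return -ENODEV;
	h->base = 0x80000000UL;
	h->size = 0x10000000UL;
	return 0;
}

int main(void)
{
	struct hose h = { 0, 0 };
	unsigned long max = ~0UL;

	/* Only consult the hose when the bridge was actually set up. */
	if (add_bridge(0, &h) == 0)
		max = h.base + h.size;

	printf("max dma addr: %#lx\n", max);
	return 0;
}
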
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 483d8fa72e8b..e961f8c4a8f0 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -14,6 +14,9 @@
#include <linux/list.h>
#include <linux/of_platform.h>
#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 14469cf9df68..df0fc5821469 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
- long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+ long rc;
+
+ /* Make sure all previous accesses are ordered before IPI sending */
+ mb();
+ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index eab3492a45c5..9b49c65ee7a4 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,6 +17,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
+#include <linux/kmsg_dump.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
#endif
default:
printf("Unrecognized command: ");
- do {
+ do {
if (' ' < cmd && cmd <= '~')
putchar(cmd);
else
printf("\\x%x", cmd);
cmd = inchar();
- } while (cmd != '\n');
+ } while (cmd != '\n');
printf(" (type ? for help)\n");
break;
}
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
return 1;
}
-static char *breakpoint_help_string =
+static char *breakpoint_help_string =
"Breakpoint command usage:\n"
"b show breakpoints\n"
"b <addr> [cnt] set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
default:
termch = cmd;
- cmd = skipbl();
+ cmd = skipbl();
if (cmd == '?') {
printf(breakpoint_help_string);
break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
sp + REGS_OFFSET);
break;
}
- printf("--- Exception: %lx %s at ", regs.trap,
+ printf("--- Exception: %lx %s at ", regs.trap,
getvecname(TRAP(&regs)));
pc = regs.nip;
lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
cmd = skipbl();
if (cmd == '\n') {
- unsigned long sp, toc;
+ unsigned long sp, toc;
asm("mr %0,1" : "=r" (sp) :);
asm("mr %0,2" : "=r" (toc) :);
printf("msr = "REG" sprg0= "REG"\n",
mfmsr(), mfspr(SPRN_SPRG0));
printf("pvr = "REG" sprg1= "REG"\n",
- mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
+ mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
printf("dec = "REG" sprg2= "REG"\n",
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
static int brev;
static int mnoread;
-static char *memex_help_string =
+static char *memex_help_string =
"Memory examine command usage:\n"
"m [addr] [flags] examine/change memory\n"
" addr is optional. will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
"NOTE: flags are saved as defaults\n"
"";
-static char *memex_subcmd_help_string =
+static char *memex_subcmd_help_string =
"Memory examine subcommands:\n"
" hexval write this val to current location\n"
" 'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
nr = mread(adrs, temp, r);
adrs += nr;
for (m = 0; m < r; ++m) {
- if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+ if ((m & (sizeof(long) - 1)) == 0 && m > 0)
putchar(' ');
if (m < nr)
printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
printf("%s", fault_chars[fault_type]);
}
for (; m < 16; ++m) {
- if ((m & (sizeof(long) - 1)) == 0)
+ if ((m & (sizeof(long) - 1)) == 0)
putchar(' ');
printf(" ");
}
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
void
dump_log_buf(void)
{
- const unsigned long size = 128;
- unsigned long end, addr;
- unsigned char buf[size + 1];
-
- addr = 0;
- buf[size] = '\0';
-
- if (setjmp(bus_error_jmp) != 0) {
- printf("Unable to lookup symbol __log_buf!\n");
- return;
- }
-
- catch_memory_errors = 1;
- sync();
- addr = kallsyms_lookup_name("__log_buf");
-
- if (! addr)
- printf("Symbol __log_buf not found!\n");
- else {
- end = addr + (1 << CONFIG_LOG_BUF_SHIFT);
- while (addr < end) {
- if (! mread(addr, buf, size)) {
- printf("Can't read memory at address 0x%lx\n", addr);
- break;
- }
-
- printf("%s", buf);
-
- if (strlen(buf) < size)
- break;
-
- addr += size;
- }
- }
-
- sync();
- /* wait a little while to see if we get a machine check */
- __delay(200);
- catch_memory_errors = 0;
+ struct kmsg_dumper dumper = { .active = 1 };
+ unsigned char buf[128];
+ size_t len;
+
+ if (setjmp(bus_error_jmp) != 0) {
+ printf("Error dumping printk buffer!\n");
+ return;
+ }
+
+ catch_memory_errors = 1;
+ sync();
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+ buf[len] = '\0';
+ printf("%s", buf);
+ }
+
+ sync();
+ /* wait a little while to see if we get a machine check */
+ __delay(200);
+ catch_memory_errors = 0;
}
/*
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 799ed0f1643d..2d6e6e380564 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return pte;
}
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = huge_ptep_get(ptep);
-
- mm->context.flush_mm = 1;
- pmd_clear((pmd_t *) ptep);
- return pte;
-}
-
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
__pmd_csp(pmdp);
}
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = huge_ptep_get(ptep);
+
+ huge_ptep_invalidate(mm, addr, ptep);
+ return pte;
+}
+
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
@@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
- (__mm) != current->active_mm) \
- huge_ptep_invalidate(__mm, __addr, __ptep); \
+ huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
} \
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
*/
typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daafa..ce26ac3cb162 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
}
static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 9fde315f3a7c..1d8fe2b17ef6 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
- spin_lock(&mm->page_table_lock);
if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
mm->context.flush_mm = 0;
}
- spin_unlock(&mm->page_table_lock);
}
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f86c81e13c37..40b57693de38 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
+#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
+#endif
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 60ee2b883797..2d37bb861faf 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -2,69 +2,82 @@
* User access functions based on page table walks for enhanced
* system layout without hardware support.
*
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
* Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+
+/*
+ * Returns kernel address for user virtual address. If the returned address is
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
+ * contains the (negative) exception code.
+ */
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+ unsigned long addr, int write)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+ pte_t *ptep;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return (pte_t *) 0x3a;
+ return -0x3aUL;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return (pte_t *) 0x3b;
+ return -0x3bUL;
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return (pte_t *) 0x10;
+ if (pmd_none(*pmd))
+ return -0x10UL;
+ if (pmd_huge(*pmd)) {
+ if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+ }
+ if (unlikely(pmd_bad(*pmd)))
+ return -0x10UL;
+
+ ptep = pte_offset_map(pmd, addr);
+ if (!pte_present(*ptep))
+ return -0x11UL;
+ if (write && !pte_write(*ptep))
+ return -0x04UL;
- return pte_offset_map(pmd, addr);
+ return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, size;
- pte_t *pte;
+ unsigned long offset, done, size, kaddr;
void *from, *to;
done = 0;
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
+ kaddr = follow_table(mm, uaddr, write_user);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- } else if (write_user && !pte_write(*pte)) {
- pte = (pte_t *) 0x04;
- goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE - 1);
+ offset = uaddr & ~PAGE_MASK;
size = min(n - done, PAGE_SIZE - offset);
if (write_user) {
- to = (void *)((pfn << PAGE_SHIFT) + offset);
+ to = (void *) kaddr;
from = kptr + done;
} else {
- from = (void *)((pfn << PAGE_SHIFT) + offset);
+ from = (void *) kaddr;
to = kptr + done;
}
memcpy(to, from, size);
@@ -75,7 +88,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+ if (__handle_fault(uaddr, -kaddr, write_user))
return n - done;
goto retry;
}
@@ -84,27 +97,22 @@ fault:
* Do DAT for user address by page table walk, return kernel address.
* This function needs to be called with current->mm->page_table_lock held.
*/
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
+ int write)
{
struct mm_struct *mm = current->mm;
- unsigned long pfn;
- pte_t *pte;
+ unsigned long kaddr;
int rc;
retry:
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, write);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+ return kaddr;
fault:
spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+ rc = __handle_fault(uaddr, -kaddr, write);
spin_lock(&mm->page_table_lock);
if (!rc)
goto retry;
@@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
- char *addr;
unsigned long uaddr = (unsigned long) src;
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, len;
- pte_t *pte;
+ unsigned long offset, done, len, kaddr;
size_t len_str;
if (segment_eq(get_fs(), KERNEL_DS))
@@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, 0);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE-1);
- addr = (char *)(pfn << PAGE_SHIFT) + offset;
+ offset = uaddr & ~PAGE_MASK;
len = min(count - done, PAGE_SIZE - offset);
- len_str = strnlen(addr, len);
+ len_str = strnlen((char *) kaddr, len);
done += len_str;
uaddr += len_str;
} while ((len_str == len) && (done < count));
@@ -192,7 +192,7 @@ retry:
return done + 1;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, 0))
+ if (__handle_fault(uaddr, -kaddr, 0))
return 0;
goto retry;
}
@@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
const void __user *from)
{
struct mm_struct *mm = current->mm;
- unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
- uaddr, done, size, error_code;
+ unsigned long offset_max, uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to;
- pte_t *pte_from, *pte_to;
+ unsigned long kaddr_to, kaddr_from;
int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -242,38 +241,23 @@ retry:
do {
write_user = 0;
uaddr = uaddr_from;
- pte_from = follow_table(mm, uaddr_from);
- error_code = (unsigned long) pte_from;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_from)) {
- error_code = 0x11;
+ kaddr_from = follow_table(mm, uaddr_from, 0);
+ error_code = kaddr_from;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- }
write_user = 1;
uaddr = uaddr_to;
- pte_to = follow_table(mm, uaddr_to);
- error_code = (unsigned long) pte_to;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_to)) {
- error_code = 0x11;
+ kaddr_to = follow_table(mm, uaddr_to, 1);
+ error_code = (unsigned long) kaddr_to;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- } else if (!pte_write(*pte_to)) {
- error_code = 0x04;
- goto fault;
- }
- pfn_from = pte_pfn(*pte_from);
- pfn_to = pte_pfn(*pte_to);
- offset_from = uaddr_from & (PAGE_SIZE-1);
- offset_to = uaddr_from & (PAGE_SIZE-1);
- offset_max = max(offset_from, offset_to);
+ offset_max = max(uaddr_from & ~PAGE_MASK,
+ uaddr_to & ~PAGE_MASK);
size = min(n - done, PAGE_SIZE - offset_max);
- memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
- (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+ memcpy((void *) kaddr_to, (void *) kaddr_from, size);
done += size;
uaddr_from += size;
uaddr_to += size;
@@ -282,7 +266,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, error_code, write_user))
+ if (__handle_fault(uaddr, -error_code, write_user))
return n - done;
goto retry;
}
@@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
@@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
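
follow_table() now returns either a kernel address or a negative exception code packed into the unsigned long, tested with IS_ERR_VALUE(). A userspace sketch of that encoding, with a stand-in definition of IS_ERR_VALUE() and a hypothetical lookup function:

#include <stdio.h>

/* Mirrors the kernel's IS_ERR_VALUE() idea: the top 4095 values of the
 * unsigned long range are reserved for (negative) error codes. */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-4095)

/* Hypothetical lookup: returns a translated address, or -code on failure. */
static unsigned long follow(unsigned long addr, int fail)
{
	if (fail)
		return -0x11UL;          /* "page not present" exception code */
	return addr | 0x1000;            /* pretend translation succeeded */
}

int main(void)
{
	unsigned long kaddr = follow(0x42, 1);

	if (IS_ERR_VALUE(kaddr))
		printf("fault, exception code %#lx\n", -kaddr);
	else
		printf("kernel address %#lx\n", kaddr);
	return 0;
}
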
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a1e9d69a9c90..584b93674ea4 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val < oprofile_min_interval)
oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0)
return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
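
The oprofile hunks above change the bail-out condition to retval <= 0, because the helper returns the number of bytes consumed on success. A sketch of that return convention with a hypothetical parser, showing why the old "if (retval)" also rejected the success case:

#include <stdio.h>
#include <errno.h>

/* Hypothetical parser with the same convention as the oprofilefs helper:
 * > 0  -> number of bytes consumed, value stored in *val
 * <= 0 -> nothing parsed / error, caller must bail out */
static int ulong_from_buf(const char *buf, int count, unsigned long *val)
{
	if (count <= 0)
		return -EINVAL;
	*val = 0;
	int i = 0;
	while (i < count && buf[i] >= '0' && buf[i] <= '9')
		*val = *val * 10 + (buf[i++] - '0');
	return i ? i : -EINVAL;
}

static int store_interval(const char *buf, int count, unsigned long *interval)
{
	unsigned long val;
	int retval = ulong_from_buf(buf, count, &val);

	if (retval <= 0)        /* the old "if (retval)" also rejected success */
		return retval;
	*interval = val;
	return count;
}

int main(void)
{
	unsigned long interval = 0;
	printf("ret=%d interval=%lu\n",
	       store_interval("123", 3, &interval), interval);
	return 0;
}
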
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b7cf6a547f11..7e605b95592a 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1
- movi _TIF_SIGPENDING, r8
+ movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f67601cb3f1f..b96489d8b27d 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -139,7 +139,7 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #_TIF_SIGPENDING, r0
+ tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
bt/s __restore_all
mov r15, r4
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 15e0a1693976..f1ddc0d23679 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -48,9 +48,7 @@ void *module_alloc(unsigned long size)
return NULL;
ret = module_map(size);
- if (!ret)
- ret = ERR_PTR(-ENOMEM);
- else
+ if (ret)
memset(ret, 0, size);
return ret;
@@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
v = sym->st_value + rel[i].r_addend;
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
+ case R_SPARC_DISP32:
+ v -= (Elf_Addr) location;
+ *loc32 = v;
+ break;
#ifdef CONFIG_SPARC64
case R_SPARC_64:
location[0] = v >> 56;
@@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
location[7] = v >> 0;
break;
- case R_SPARC_DISP32:
- v -= (Elf_Addr) location;
- *loc32 = v;
- break;
-
case R_SPARC_WDISP19:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) |
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index f60238559af3..0748fe0c8a73 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
skew += this_tick - last_tick;
while (skew >= one_tick) {
- alarm_handler(SIGVTALRM, NULL);
+ alarm_handler(SIGVTALRM, NULL, NULL);
skew -= one_tick;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8ec3a1aa4abd..50a1d1f9b6d3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -746,10 +746,10 @@ config SWIOTLB
def_bool y if X86_64
---help---
Support for software bounce buffers used on x86-64 systems
- which don't have a hardware IOMMU (e.g. the current generation
- of Intel's x86-64 CPUs). Using this PCI devices which can only
- access 32-bits of memory can be used on systems with more than
- 3 GB of memory. If unsure, say Y.
+ which don't have a hardware IOMMU. With this, PCI devices
+ which can only access 32 bits of memory can be used on systems
+ with more than 3 GB of memory.
+ If unsure, say Y.
config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b0c5276861ec..474ca35b1bce 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+ # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+ # with nonstandard options
+ KBUILD_CFLAGS += -fno-pic
+
# prevent gcc from keeping the stack 16 byte aligned
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
@@ -138,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
-archscripts:
+archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
###
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5a747dd884db..f7535bedc33f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b315a33867f2..33692eaabab5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,8 +12,7 @@
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
- * These are fair FIFO ticket locks, which are currently limited to 256
- * CPUs.
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
*
* (the type definitions are in asm/spinlock_types.h)
*/
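
The comment fix above notes that x86 ticket locks support up to 2^16 CPUs (16-bit head/tail), not 256. A minimal, single-threaded C11 sketch of a FIFO ticket lock with 16-bit tickets:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal FIFO ticket lock: 16-bit next/owner, so up to 2^16 waiters. */
struct ticket_lock {
	_Atomic uint16_t next;    /* next ticket to hand out */
	_Atomic uint16_t owner;   /* ticket currently being served */
};

static void lock(struct ticket_lock *l)
{
	uint16_t me = atomic_fetch_add(&l->next, 1);
	while (atomic_load(&l->owner) != me)
		;   /* spin until our ticket comes up */
}

static void unlock(struct ticket_lock *l)
{
	atomic_fetch_add(&l->owner, 1);
}

int main(void)
{
	struct ticket_lock l = { 0, 0 };
	lock(&l);
	puts("in critical section");
	unlock(&l);
	return 0;
}
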
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 93971e841dd5..472b9b783019 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
-extern int m2p_remove_override(struct page *page, bool clear_pte);
+extern int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index afb7ff79a29f..ced4534baed5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
#endif
#ifdef P6_NOP1
-static const unsigned char __initconst_or_module p6nops[] =
+static const unsigned char p6nops[] =
{
P6_NOP1,
P6_NOP2,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a6c64aaddf9a..c265593ec2cd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (!IO_APIC_IRQ(irq))
return;
+ /*
+ * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
+ * can handle this irq and the apic driver is finalized at this point,
+ * update the cfg->domain.
+ */
+ if (irq < legacy_pic->nr_legacy_irqs &&
+ cpumask_equal(cfg->domain, cpumask_of(0)))
+ apic->vector_allocation_domain(0, cfg->domain,
+ apic->target_cpus());
+
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 46d8786d655e..a5fbc3c5fccc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ setup_clear_cpu_cap(X86_FEATURE_AVX2);
return 1;
}
__setup("noxsave", x86_xsave_setup);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6605a81ba339..8b6defe7eefc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -586,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
+extern struct event_constraint intel_ivb_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event);
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 7bfb5bec8630..eebd5ffe1bba 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -209,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
return -EOPNOTSUPP;
}
+static const struct perf_event_attr ibs_notsupp = {
+ .exclude_user = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ .exclude_idle = 1,
+ .exclude_host = 1,
+ .exclude_guest = 1,
+};
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -229,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event)
if (event->pmu != &perf_ibs->pmu)
return -ENOENT;
+ if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
+ return -EINVAL;
+
if (config & ~perf_ibs->config_mask)
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 382366977d4c..6bca492b8547 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+ /*
+ * If PMU counter has PEBS enabled it is not enough to disable counter
+ * on a guest entry since PEBS memory write can overshoot guest entry
+ * and corrupt guest memory. Disabling PEBS solves the problem.
+ */
+ arr[1].msr = MSR_IA32_PEBS_ENABLE;
+ arr[1].host = cpuc->pebs_enabled;
+ arr[1].guest = 0;
- *nr = 1;
+ *nr = 2;
return arr;
}
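
intel_guest_get_msrs() now switches MSR_IA32_PEBS_ENABLE as well, so a pending PEBS write cannot land in guest memory after VM entry. A simplified sketch of building that two-entry switch list (the real code derives the GLOBAL_CTRL host/guest values from per-cpu masks; here they are just passed in):

#include <stdio.h>

struct switch_msr { unsigned msr; unsigned long long host, guest; };

#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
#define MSR_IA32_PEBS_ENABLE      0x3f1

/* Besides GLOBAL_CTRL, also switch PEBS_ENABLE: guest value is always 0. */
static int build_switch_list(struct switch_msr *arr,
                             unsigned long long ctrl, unsigned long long pebs)
{
	arr[0] = (struct switch_msr){ MSR_CORE_PERF_GLOBAL_CTRL, ctrl, ctrl };
	arr[1] = (struct switch_msr){ MSR_IA32_PEBS_ENABLE, pebs, 0 };
	return 2;   /* was 1 before the fix */
}

int main(void)
{
	struct switch_msr arr[2];
	int nr = build_switch_list(arr, 0xf, 0x1);
	for (int i = 0; i < nr; i++)
		printf("msr %#x host=%#llx guest=%#llx\n",
		       arr[i].msr, arr[i].host, arr[i].guest);
	return 0;
}
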
@@ -2000,6 +2008,7 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
+ case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2039,7 +2048,6 @@ __init int intel_pmu_init(void)
case 42: /* SandyBridge */
case 45: /* SandyBridge, "Romely-EP" */
x86_add_quirk(intel_sandybridge_quirk);
- case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2064,6 +2072,29 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
+ case 58: /* IvyBridge */
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_snb();
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+ /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
+ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+ X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
+
+ pr_cont("IvyBridge events, ");
+ break;
+
default:
switch (x86_pmu.version) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index e38d97bf4259..826054a4f2ee 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_ivb_pebs_event_constraints[] = {
+ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b4265fcd2..da02e9cc3754 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze
* on PMU interrupt
*/
- if (boot_cpu_data.x86_mask < 10) {
+ if (boot_cpu_data.x86_model == 28
+ && boot_cpu_data.x86_mask < 10) {
pr_cont("LBR disabled due to erratum");
return;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 7563fda9f033..38e4894165b9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
}
}
+static struct uncore_event_desc snb_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ { /* end: all zeroes */ },
+};
+
static struct attribute *snb_uncore_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = {
.constraints = snb_uncore_cbox_constraints,
.ops = &snb_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
+ .event_descs = snb_uncore_events,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
@@ -796,7 +802,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
-DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
@@ -902,16 +907,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = {
.attrs = nhmex_uncore_cbox_formats_attr,
};
+/* msr offset for each instance of cbox */
+static unsigned nhmex_cbox_msr_offsets[] = {
+ 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
+};
+
static struct intel_uncore_type nhmex_uncore_cbox = {
.name = "cbox",
.num_counters = 6,
- .num_boxes = 8,
+ .num_boxes = 10,
.perf_ctr_bits = 48,
.event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
.perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
.box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
- .msr_offset = NHMEX_C_MSR_OFFSET,
+ .msr_offsets = nhmex_cbox_msr_offsets,
.pair_ctr_ctl = 1,
.ops = &nhmex_uncore_ops,
.format_group = &nhmex_uncore_cbox_format_group
@@ -1032,24 +1042,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = {
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
- reg1->config = event->attr.config1;
- reg2->config = event->attr.config2;
- } else {
- reg1->config = ~0ULL;
- reg2->config = ~0ULL;
- }
+ /* only TO_R_PROG_EV event uses the match/mask register */
+ if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
+ NHMEX_S_EVENT_TO_R_PROG_EV)
+ return 0;
if (box->pmu->pmu_idx == 0)
reg1->reg = NHMEX_S0_MSR_MM_CFG;
else
reg1->reg = NHMEX_S1_MSR_MM_CFG;
-
reg1->idx = 0;
-
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
return 0;
}
@@ -1059,8 +1067,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- wrmsrl(reg1->reg, 0);
- if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, 0);
wrmsrl(reg1->reg + 1, reg1->config);
wrmsrl(reg1->reg + 2, reg2->config);
wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
@@ -1074,7 +1082,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
&format_attr_edge.attr,
&format_attr_inv.attr,
&format_attr_thresh8.attr,
- &format_attr_mm_cfg.attr,
&format_attr_match.attr,
&format_attr_mask.attr,
NULL,
@@ -1142,6 +1149,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
EVENT_EXTRA_END
};
+/* Nehalem-EX or Westmere-EX ? */
+bool uncore_nhmex;
+
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
struct intel_uncore_extra_reg *er;
@@ -1171,18 +1181,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64
return false;
/* mask of the shared fields */
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
raw_spin_lock_irqsave(&er->lock, flags);
/* add mask of the non-shared field if it's in use */
- if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
- mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
+ if (uncore_nhmex)
+ mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ }
if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
atomic_add(1 << (idx * 8), &er->ref);
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
- NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
er->config &= ~mask;
er->config |= (config & mask);
ret = true;
@@ -1216,7 +1237,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
/* get the non-shared control bits and shift them */
idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (uncore_nhmex)
+ config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
if (new_idx > orig_idx) {
idx = new_idx - orig_idx;
config <<= 3 * idx;
@@ -1226,6 +1250,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
}
/* add the shared control bits back */
+ if (uncore_nhmex)
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ else
+ config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
if (modify) {
/* adjust the main event selector */
@@ -1264,7 +1292,8 @@ again:
}
/* for the match/mask registers */
- if ((uncore_box_is_fake(box) || !reg2->alloc) &&
+ if (reg2->idx != EXTRA_REG_NONE &&
+ (uncore_box_is_fake(box) || !reg2->alloc) &&
!nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
goto fail;
@@ -1278,7 +1307,8 @@ again:
if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
nhmex_mbox_alter_er(event, idx[0], true);
reg1->alloc |= alloc;
- reg2->alloc = 1;
+ if (reg2->idx != EXTRA_REG_NONE)
+ reg2->alloc = 1;
}
return NULL;
fail:
@@ -1342,9 +1372,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
struct extra_reg *er;
unsigned msr;
int reg_idx = 0;
-
- if (WARN_ON_ONCE(reg1->idx != -1))
- return -EINVAL;
/*
* The mbox events may require 2 extra MSRs at the most. But only
* the lower 32 bits in these MSRs are significant, so we can use
@@ -1355,11 +1382,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
- if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
- er->idx == __BITS_VALUE(reg1->idx, 1, 8))
- continue;
- if (WARN_ON_ONCE(reg_idx >= 2))
- return -EINVAL;
msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
@@ -1368,6 +1390,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
/* always use the 32~63 bits to pass the PLD config */
if (er->idx == EXTRA_REG_NHMEX_M_PLD)
reg_idx = 1;
+ else if (WARN_ON_ONCE(reg_idx > 0))
+ return -EINVAL;
reg1->idx &= ~(0xff << (reg_idx * 8));
reg1->reg &= ~(0xffff << (reg_idx * 16));
@@ -1376,17 +1400,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
reg1->config = event->attr.config1;
reg_idx++;
}
- /* use config2 to pass the filter config */
- reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
- if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
- reg2->config = event->attr.config2;
- else
- reg2->config = ~0ULL;
- if (box->pmu->pmu_idx == 0)
- reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
- else
- reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
-
+ /*
+ * The mbox only provides the ability to perform address matching
+ * for the PLD events.
+ */
+ if (reg_idx == 2) {
+ reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+ if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+ reg2->config = event->attr.config2;
+ else
+ reg2->config = ~0ULL;
+ if (box->pmu->pmu_idx == 0)
+ reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+ else
+ reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+ }
return 0;
}
@@ -1422,34 +1450,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per
wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
nhmex_mbox_shared_reg_config(box, idx));
- wrmsrl(reg2->reg, 0);
- if (reg2->config != ~0ULL) {
- wrmsrl(reg2->reg + 1,
- reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
- wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
- (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
- wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ if (reg2->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg2->reg, 0);
+ if (reg2->config != ~0ULL) {
+ wrmsrl(reg2->reg + 1,
+ reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+ wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+ (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+ wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ }
}
wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
-DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
-DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
-DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
-DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
-DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
-DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
-DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63");
-DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
-DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
-DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
&format_attr_count_mode.attr,
@@ -1458,7 +1488,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
&format_attr_flag_mode.attr,
&format_attr_inc_sel.attr,
&format_attr_set_flag_sel.attr,
- &format_attr_filter_cfg.attr,
+ &format_attr_filter_cfg_en.attr,
&format_attr_filter_match.attr,
&format_attr_filter_mask.attr,
&format_attr_dsp.attr,
@@ -1482,6 +1512,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
{ /* end: all zeroes */ },
};
+static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
+ { /* end: all zeroes */ },
+};
+
static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
NHMEX_UNCORE_OPS_COMMON_INIT(),
.enable_event = nhmex_mbox_msr_enable_event,
@@ -1513,7 +1549,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
int port;
- /* adjust the main event selector */
+ /* adjust the main event selector and extra register index */
if (reg1->idx % 2) {
reg1->idx--;
hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1522,29 +1558,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
}
- /* adjust address or config of extra register */
+ /* adjust extra register config */
port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
switch (reg1->idx % 6) {
- case 0:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
- break;
- case 1:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
- break;
case 2:
- /* the 8~15 bits to the 0~7 bits */
+ /* shift the 8~15 bits to the 0~7 bits */
reg1->config >>= 8;
break;
case 3:
- /* the 0~7 bits to the 8~15 bits */
+ /* shift the 0~7 bits to the 8~15 bits */
reg1->config <<= 8;
break;
- case 4:
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
- break;
- case 5:
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
- break;
};
}
@@ -1671,7 +1695,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
- int port, idx;
+ int idx;
idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1681,27 +1705,11 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
reg1->idx = idx;
reg1->config = event->attr.config1;
- port = idx / 6 + box->pmu->pmu_idx * 4;
- idx %= 6;
- switch (idx) {
- case 0:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
- break;
- case 1:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
- break;
- case 2:
- case 3:
- reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
- break;
+ switch (idx % 6) {
case 4:
case 5:
- if (idx == 4)
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
- else
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
- reg2->config = event->attr.config2;
hwc->config |= event->attr.config & (~0ULL << 32);
+ reg2->config = event->attr.config2;
break;
};
return 0;
@@ -1727,28 +1735,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- int idx, er_idx;
+ int idx, port;
- idx = reg1->idx % 6;
- er_idx = idx;
- if (er_idx > 2)
- er_idx--;
- er_idx += (reg1->idx / 6) * 5;
+ idx = reg1->idx;
+ port = idx / 6 + box->pmu->pmu_idx * 4;
- switch (idx) {
+ switch (idx % 6) {
case 0:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+ break;
case 1:
- wrmsrl(reg1->reg, reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
break;
case 2:
case 3:
- wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
+ wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+ nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
break;
case 4:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+ break;
case 5:
- wrmsrl(reg1->reg, reg1->config);
- wrmsrl(reg1->reg + 1, hwc->config >> 32);
- wrmsrl(reg1->reg + 2, reg2->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
break;
};
@@ -1756,8 +1770,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
@@ -2303,6 +2317,7 @@ int uncore_pmu_event_init(struct perf_event *event)
event->hw.idx = -1;
event->hw.last_tag = ~0ULL;
event->hw.extra_reg.idx = EXTRA_REG_NONE;
+ event->hw.branch_reg.idx = EXTRA_REG_NONE;
if (event->attr.config == UNCORE_FIXED_EVENT) {
/* no fixed counter */
@@ -2373,7 +2388,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
type->attr_groups[1] = NULL;
}
-static void uncore_types_exit(struct intel_uncore_type **types)
+static void __init uncore_types_exit(struct intel_uncore_type **types)
{
int i;
for (i = 0; types[i]; i++)
@@ -2814,7 +2829,13 @@ static int __init uncore_cpu_init(void)
snbep_uncore_cbox.num_boxes = max_cores;
msr_uncores = snbep_msr_uncores;
break;
- case 46:
+ case 46: /* Nehalem-EX */
+ uncore_nhmex = true;
+ case 47: /* Westmere-EX aka. Xeon E7 */
+ if (!uncore_nhmex)
+ nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+ if (nhmex_uncore_cbox.num_boxes > max_cores)
+ nhmex_uncore_cbox.num_boxes = max_cores;
msr_uncores = nhmex_msr_uncores;
break;
default:
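The num_boxes bump to 10 together with the nhmex_cbox_msr_offsets[] table exists because the extra C-box register blocks on Westmere-EX are not laid out at a fixed stride in MSR space, so a single msr_offset multiplier no longer works. A small sketch of how a per-box register address is then derived, mirroring the uncore_msr_box_offset() helper added in the header below; CBOX_BASE_MSR is an illustrative value, not the real NHMEX_C0_* constant:

#include <stdio.h>

#define CBOX_BASE_MSR 0xd00u   /* illustrative base address only */

static const unsigned int cbox_offsets[] = {
        0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

/* Per-box MSR: use the offset table when present, else a fixed stride. */
static unsigned int box_msr(unsigned int base, const unsigned int *offsets,
                            unsigned int stride, unsigned int box_idx)
{
        return base + (offsets ? offsets[box_idx] : stride * box_idx);
}

int main(void)
{
        for (unsigned int i = 0; i < 10; i++)
                printf("cbox %u control MSR: 0x%x\n", i,
                       box_msr(CBOX_BASE_MSR, cbox_offsets, 0, i));
        return 0;
}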
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index c9e5dc56630a..5b81c1856aac 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -230,6 +230,7 @@
#define NHMEX_S1_MSR_MASK 0xe5a
#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
+#define NHMEX_S_EVENT_TO_R_PROG_EV 0
/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
@@ -275,18 +276,12 @@
NHMEX_M_PMON_CTL_INC_SEL_MASK | \
NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
-
-#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f
-#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \
- (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR)
+#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n)))
+
/*
* use the 9~13 bits to select event If the 7th bit is not set,
* otherwise use the 19~21 bits to select event.
@@ -368,6 +363,7 @@ struct intel_uncore_type {
unsigned num_shared_regs:8;
unsigned single_fixed:1;
unsigned pair_ctr_ctl:1;
+ unsigned *msr_offsets;
struct event_constraint unconstrainted;
struct event_constraint *constraints;
struct intel_uncore_pmu *pmus;
@@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
return idx * 8 + box->pmu->type->perf_ctr;
}
-static inline
-unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
+{
+ struct intel_uncore_pmu *pmu = box->pmu;
+ return pmu->type->msr_offsets ?
+ pmu->type->msr_offsets[pmu->pmu_idx] :
+ pmu->type->msr_offset * pmu->pmu_idx;
+}
+
+static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->box_ctl)
return 0;
- return box->pmu->type->box_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->fixed_ctl)
return 0;
- return box->pmu->type->fixed_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
- return box->pmu->type->fixed_ctr +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}
static inline
@@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
return box->pmu->type->event_ctl +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ uncore_msr_box_offset(box);
}
static inline
@@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
return box->pmu->type->perf_ctr +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ uncore_msr_box_offset(box);
}
static inline
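The split into NHMEX_* and WSMEX_* FVC masks reflects a one-bit difference in the register layout: on Westmere-EX the shared part of ZDP_CTL_FVC is one bit wider, which pushes the per-counter 3-bit event-select fields up by one position. A worked expansion of the new macros (values only, taken from the definitions above):

#include <stdio.h>

#define NHMEX_FVC_MASK           (((1u << 11) - 1) | (1u << 23))
#define NHMEX_FVC_EVENT_MASK(n)  (0x7u << (11 + 3 * (n)))
#define WSMEX_FVC_MASK           (((1u << 12) - 1) | (1u << 24))
#define WSMEX_FVC_EVENT_MASK(n)  (0x7u << (12 + 3 * (n)))

int main(void)
{
        printf("shared mask: NHM-EX 0x%x, WSM-EX 0x%x\n",
               NHMEX_FVC_MASK, WSMEX_FVC_MASK);       /* 0x8007ff vs 0x1000fff */
        for (int n = 0; n < 4; n++)
                printf("counter %d event bits: NHM-EX 0x%x, WSM-EX 0x%x\n",
                       n, NHMEX_FVC_EVENT_MASK(n), WSMEX_FVC_EVENT_MASK(n));
        return 0;
}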
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7ad683d78645..d44f7829968e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -270,7 +270,7 @@ void fixup_irqs(void)
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
- affinity = cpu_all_mask;
+ affinity = cpu_online_mask;
}
chip = irq_data_get_irq_chip(data);
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 8a2ce8fd41c0..82746f942cd8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
unsigned int *current_size)
{
struct microcode_header_amd *mc_hdr;
- unsigned int actual_size;
+ unsigned int actual_size, patch_size;
u16 equiv_cpu_id;
/* size of the current patch we're staring at */
- *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+ patch_size = *(u32 *)(ucode_ptr + 4);
+ *current_size = patch_size + SECTION_HDR_SIZE;
equiv_cpu_id = find_equiv_id();
if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
/*
* now that the header looks sane, verify its size
*/
- actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+ actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
if (!actual_size)
return 0;
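The fix above separates the raw patch size (the u32 stored at offset 4 of the section header) from the total section size returned through *current_size, and hands the former to verify_ucode_size(). A hedged sketch of that container layout, assuming the usual 8-byte section header (a u32 type followed by a u32 size, little-endian as on x86); the names below are local to the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTION_HDR_SIZE 8   /* assumed: u32 type + u32 size */

/* Read the patch payload size and derive the size of the whole section. */
static void parse_section(const uint8_t *p, uint32_t *patch_size,
                          uint32_t *section_size)
{
        memcpy(patch_size, p + 4, sizeof(*patch_size));
        *section_size = *patch_size + SECTION_HDR_SIZE;
}

int main(void)
{
        /* Fake section: type = 1, size = 16, followed by 16 payload bytes. */
        uint8_t buf[8 + 16] = { 0x01, 0, 0, 0, 0x10, 0, 0, 0 };
        uint32_t patch_size, section_size;

        parse_section(buf, &patch_size, &section_size);
        printf("patch payload: %u bytes, whole section: %u bytes\n",
               patch_size, section_size);             /* 16 and 24 */
        return 0;
}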
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4873e62db6a1..9e5bcf1e2376 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
if (do_microcode_update(buf, len) == 0)
ret = (ssize_t)len;
+ if (ret > 0)
+ perf_check_microcode();
+
mutex_unlock(&microcode_mutex);
put_online_cpus();
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
return address_mask(ctxt, reg);
}
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+ assign_masked(reg, *reg + inc, mask);
+}
+
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
+ ulong mask;
+
if (ctxt->ad_bytes == sizeof(unsigned long))
- *reg += inc;
+ mask = ~0UL;
else
- *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ mask = ad_mask(ctxt);
+ masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+ masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
}
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ rsp_increment(ctxt, -bytes);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
int rc;
struct segmented_address addr;
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+ rsp_increment(ctxt, len);
return rc;
}
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
- ctxt->op_bytes);
+ rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+ rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
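rsp_increment() applies the same masked update that register_address_increment() already used (reg = (reg & ~mask) | ((reg + inc) & mask)), but always with the stack segment's address size via stack_mask(), so for example a 16-bit stack pointer wraps within 64 KiB while its upper bits stay untouched. A compact standalone sketch of that rule:

#include <stdint.h>
#include <stdio.h>

/* Update only the bits selected by mask, leaving the rest of *reg alone. */
static void masked_increment(uint64_t *reg, uint64_t mask, int64_t inc)
{
        *reg = (*reg & ~mask) | ((*reg + inc) & mask);
}

int main(void)
{
        uint64_t rsp = 0x0000000000000002;       /* SP = 2 on a 16-bit stack */

        masked_increment(&rsp, 0xffff, -4);      /* push 4 bytes */
        printf("16-bit SP after push: 0x%llx\n",
               (unsigned long long)rsp);         /* wraps to 0xfffe */

        rsp = 0x12340002;
        masked_increment(&rsp, 0xffff, -4);      /* high bits are preserved */
        printf("with stale high bits: 0x%llx\n",
               (unsigned long long)rsp);         /* 0x1234fffe */
        return 0;
}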
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18f010c..9fc9aa7ac703 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x10) {
u8 edge_irr = s->irr & ~s->elcr;
int i;
- bool found;
+ bool found = false;
struct kvm_vcpu *vcpu;
s->init4 = val & 1;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
LIST_HEAD(invalid_list);
/*
+ * Never scan more than sc->nr_to_scan VM instances.
+ * Will not hit this condition practically since we do not try
+ * to shrink more than one VM and it is very unlikely to see
+ * !n_used_mmu_pages so many times.
+ */
+ if (!nr_to_scan--)
+ break;
+ /*
* n_used_mmu_pages is accessed without holding kvm->mmu_lock
* here. We may skip a VM instance errorneosly, but we do not
* want to shrink a VM that only started to populate its MMU
* anyway.
*/
- if (kvm->arch.n_used_mmu_pages > 0) {
- if (!nr_to_scan--)
- break;
+ if (!kvm->arch.n_used_mmu_pages)
continue;
- }
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03de1b79..b1eb202ee76a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+ page = gfn_to_page(kvm, 0xfee00);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.apic_access_page = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -3641,6 +3648,7 @@ out:
static int alloc_identity_pagetable(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
- kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.ept_identity_pagetable = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
- best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+ best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
if (best)
- best->ecx &= ~bit(X86_FEATURE_INVPCID);
+ best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
}
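The INVPCID change corrects which CPUID register is tested: leaf 7 (subleaf 0) reports INVPCID in EBX bit 10, not in ECX. A small host-side check of that bit using the compiler's <cpuid.h> helpers, shown purely as an illustration of the bit position rather than as the KVM code path:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid_max(0, NULL) < 7) {
                printf("CPUID leaf 7 not available\n");
                return 0;
        }

        /* CPUID.(EAX=7,ECX=0): structured extended feature flags. */
        __cpuid_count(7, 0, eax, ebx, ecx, edx);

        if (ebx & (1u << 10))
                printf("INVPCID supported (EBX bit 10 set)\n");
        else
                printf("INVPCID not reported\n");
        return 0;
}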
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bce48f6928..2966c847d489 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 9
+#define KVM_SAVE_MSRS_BEGIN 10
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
+ case MSR_KVM_PV_EOI_EN:
+ data = vcpu->arch.pv_eoi.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -5110,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
- return;
+ return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+ if (is_error_page(page))
+ return -EFAULT;
vcpu->arch.apic->vapic_page = page;
+ return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5427,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- vapic_enter(vcpu);
+ r = vapic_enter(vcpu);
+ if (r) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ return r;
+ }
r = 1;
while (r > 0) {
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8ca..b91e48512425 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
}
/*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
struct vm_area_struct *vma = find_vma(mm, addr);
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
struct vm_area_struct *svma;
unsigned long saddr;
pte_t *spte = NULL;
+ pte_t *pte;
if (!vma_shareable(vma, addr))
- return;
+ return (pte_t *)pmd_alloc(mm, pud, addr);
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
put_page(virt_to_page(spte));
spin_unlock(&mm->page_table_lock);
out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
}
/*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
} else {
BUG_ON(sz != PMD_SIZE);
if (pud_none(*pud))
- huge_pmd_share(mm, addr, pud);
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e0e6990723e9..ab1f6a93b527 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr <= 256)
+ if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
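The devmem_is_allowed() change is a plain off-by-one: page frame 256 is the first page at the 1 MiB mark (256 * 4096 = 0x100000), so the old "pagenr <= 256" test let /dev/mem expose one page of ordinary RAM beyond the legacy low-memory region. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
        unsigned long pagenr = 256, page_size = 4096;

        /* 256 * 4096 = 0x100000: the first byte above the legacy low 1 MiB. */
        printf("pfn %lu starts at 0x%lx (= %lu MiB)\n",
               pagenr, pagenr * page_size, (pagenr * page_size) >> 20);
        return 0;
}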
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a96160..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/*
* On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it, in the
- * error case, and during early boot (for EFI) we fall back
- * to cpa_flush_all (which uses wbinvd):
+ * avoid the wbinvd. If the CPU does not support it and in the
+ * error case we fall back to cpa_flush_all (which uses
+ * wbinvd):
*/
- if (early_boot_irqs_disabled)
- __cpa_flush_all((void *)(long)cache);
- else if (!ret && cpu_has_clflush) {
+ if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2dc29f51e75a..92660edaa1e7 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status;
}
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+ efi_time_cap_t *tc)
+{
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ efi_call_phys_prelog();
+ status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+ virt_to_phys(tc));
+ efi_call_phys_epilog();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes;
efi_status_t status;
@@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
return 0;
}
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
{
efi_status_t status;
efi_time_t eft;
@@ -606,13 +621,18 @@ static int __init efi_runtime_init(void)
}
/*
* We will only need *early* access to the following
- * EFI runtime service before set_virtual_address_map
+ * two EFI runtime services before set_virtual_address_map
* is invoked.
*/
+ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
runtime->set_virtual_address_map;
-
+ /*
+ * Make sure efi_get_time can be called before entering
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
early_iounmap(runtime, sizeof(efi_runtime_services_t));
return 0;
@@ -700,10 +720,12 @@ void __init efi_init(void)
efi_enabled = 0;
return;
}
+#ifdef CONFIG_X86_32
if (efi_native) {
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
}
+#endif
#if EFI_DEBUG
print_efi_memmap();
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b2d534cab25f..88692871823f 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 29aed7ac2c02..a582bfed95bb 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -60,8 +60,8 @@
51 common getsockname sys_getsockname
52 common getpeername sys_getpeername
53 common socketpair sys_socketpair
-54 common setsockopt sys_setsockopt
-55 common getsockopt sys_getsockopt
+54 64 setsockopt sys_setsockopt
+55 64 getsockopt sys_getsockopt
56 common clone stub_clone
57 common fork stub_fork
58 common vfork stub_vfork
@@ -353,3 +353,5 @@
538 x32 sendmmsg compat_sys_sendmmsg
539 x32 process_vm_readv compat_sys_process_vm_readv
540 x32 process_vm_writev compat_sys_process_vm_writev
+541 x32 setsockopt compat_sys_setsockopt
+542 x32 getsockopt compat_sys_getsockopt
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bf4bda6d3e9a..1fbe75a95f15 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
-#include <linux/syscore_ops.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -1453,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void)
pci_request_acs();
xen_acpi_sleep_register();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
@@ -1470,130 +1473,38 @@ asmlinkage void __init xen_start_kernel(void)
#endif
}
-#ifdef CONFIG_XEN_PVHVM
-/*
- * The pfn containing the shared_info is located somewhere in RAM. This
- * will cause trouble if the current kernel is doing a kexec boot into a
- * new kernel. The new kernel (and its startup code) can not know where
- * the pfn is, so it can not reserve the page. The hypervisor will
- * continue to update the pfn, and as a result memory corruption occours
- * in the new kernel.
- *
- * One way to work around this issue is to allocate a page in the
- * xen-platform pci device's BAR memory range. But pci init is done very
- * late and the shared_info page is already in use very early to read
- * the pvclock. So moving the pfn from RAM to MMIO is racy because some
- * code paths on other vcpus could access the pfn during the small
- * window when the old pfn is moved to the new pfn. There is even a
- * small window were the old pfn is not backed by a mfn, and during that
- * time all reads return -1.
- *
- * Because it is not known upfront where the MMIO region is located it
- * can not be used right from the start in xen_hvm_init_shared_info.
- *
- * To minimise trouble the move of the pfn is done shortly before kexec.
- * This does not eliminate the race because all vcpus are still online
- * when the syscore_ops will be called. But hopefully there is no work
- * pending at this point in time. Also the syscore_op is run last which
- * reduces the risk further.
- */
-
-static struct shared_info *xen_hvm_shared_info;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
{
+ int cpu;
struct xen_add_to_physmap xatp;
+ static struct shared_info *shared_info_page = 0;
+ if (!shared_info_page)
+ shared_info_page = (struct shared_info *)
+ extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = pfn;
+ xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
-}
-static void xen_hvm_set_shared_info(struct shared_info *sip)
-{
- int cpu;
-
- HYPERVISOR_shared_info = sip;
+ HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions. We don't need the vcpu_info placement
* optimizations because we don't use any pv_mmu or pv_irq op on
* HVM.
- * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_set_shared_info is run at resume time too and
+ * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+ * online but xen_hvm_init_shared_info is run at resume time too and
* in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
}
-/* Reconnect the shared_info pfn to a mfn */
-void xen_hvm_resume_shared_info(void)
-{
- xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
-}
-
-#ifdef CONFIG_KEXEC
-static struct shared_info *xen_hvm_shared_info_kexec;
-static unsigned long xen_hvm_shared_info_pfn_kexec;
-
-/* Remember a pfn in MMIO space for kexec reboot */
-void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
-{
- xen_hvm_shared_info_kexec = sip;
- xen_hvm_shared_info_pfn_kexec = pfn;
-}
-
-static void xen_hvm_syscore_shutdown(void)
-{
- struct xen_memory_reservation reservation = {
- .domid = DOMID_SELF,
- .nr_extents = 1,
- };
- unsigned long prev_pfn;
- int rc;
-
- if (!xen_hvm_shared_info_kexec)
- return;
-
- prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
- set_xen_guest_handle(reservation.extent_start, &prev_pfn);
-
- /* Move pfn to MMIO, disconnects previous pfn from mfn */
- xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
-
- /* Update pointers, following hypercall is also a memory barrier */
- xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
-
- /* Allocate new mfn for previous pfn */
- do {
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc == 0)
- msleep(123);
- } while (rc == 0);
-
- /* Make sure the previous pfn is really connected to a (new) mfn */
- BUG_ON(rc != 1);
-}
-
-static struct syscore_ops xen_hvm_syscore_ops = {
- .shutdown = xen_hvm_syscore_shutdown,
-};
-#endif
-
-/* Use a pfn in RAM, may move to MMIO before kexec. */
-static void __init xen_hvm_init_shared_info(void)
-{
- /* Remember pointer for resume */
- xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
- xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
- xen_hvm_set_shared_info(xen_hvm_shared_info);
-}
-
+#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
int major, minor;
@@ -1644,9 +1555,6 @@ static void __init xen_hvm_guest_init(void)
init_hvm_pv_info();
xen_hvm_init_shared_info();
-#ifdef CONFIG_KEXEC
- register_syscore_ops(&xen_hvm_syscore_ops);
-#endif
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a76133f4f..5141d808e751 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+ if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = start;
}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index b2e91d40a4cb..72213da605f5 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -196,9 +196,11 @@ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
/* When we populate back during bootup, the amount of pages can vary. The
* max we have is seen is 395979, but that does not mean it can't be more.
- * But some machines can have 3GB I/O holes even. So lets reserve enough
- * for 4GB of I/O and E820 holes. */
-RESERVE_BRK(p2m_populated, PMD_SIZE * 4);
+ * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
+ * it can re-use the Xen-provided mfn_list array, so we only need to allocate at
+ * most three P2M top nodes. */
+RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@@ -575,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
}
return true;
}
+
+/*
+ * Skim over the P2M tree looking at pages that are either filled with
+ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
+ * replace the P2M leaf with a p2m_missing or p2m_identity.
+ * Stick the old page in the new P2M tree location.
+ */
+bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
+{
+ unsigned topidx;
+ unsigned mididx;
+ unsigned ident_pfns;
+ unsigned inv_pfns;
+ unsigned long *p2m;
+ unsigned long *mid_mfn_p;
+ unsigned idx;
+ unsigned long pfn;
+
+ /* We only look when this entails a P2M middle layer */
+ if (p2m_index(set_pfn))
+ return false;
+
+ for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+ topidx = p2m_top_index(pfn);
+
+ if (!p2m_top[topidx])
+ continue;
+
+ if (p2m_top[topidx] == p2m_mid_missing)
+ continue;
+
+ mididx = p2m_mid_index(pfn);
+ p2m = p2m_top[topidx][mididx];
+ if (!p2m)
+ continue;
+
+ if ((p2m == p2m_missing) || (p2m == p2m_identity))
+ continue;
+
+ if ((unsigned long)p2m == INVALID_P2M_ENTRY)
+ continue;
+
+ ident_pfns = 0;
+ inv_pfns = 0;
+ for (idx = 0; idx < P2M_PER_PAGE; idx++) {
+ /* IDENTITY_PFNs are 1:1 */
+ if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
+ ident_pfns++;
+ else if (p2m[idx] == INVALID_P2M_ENTRY)
+ inv_pfns++;
+ else
+ break;
+ }
+ if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
+ goto found;
+ }
+ return false;
+found:
+ /* Found one, replace old with p2m_identity or p2m_missing */
+ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
+ /* And the other for save/restore.. */
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ /* NOTE: Even if it is a p2m_identity it should still point to
+ * a page filled with INVALID_P2M_ENTRY entries. */
+ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
+
+ /* Reset where we want to stick the old page in. */
+ topidx = p2m_top_index(set_pfn);
+ mididx = p2m_mid_index(set_pfn);
+
+ /* This shouldn't happen */
+ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
+ early_alloc_p2m(set_pfn);
+
+ if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
+ return false;
+
+ p2m_init(p2m);
+ p2m_top[topidx][mididx] = p2m;
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+ return true;
+}
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!early_alloc_p2m(pfn))
return false;
+ if (early_can_reuse_p2m_middle(pfn, mfn))
+ return __set_phys_to_machine(pfn, mfn);
+
if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
return false;
@@ -739,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
- /* let's use dev_bus_addr to record the old mfn instead */
- kmap_op->dev_bus_addr = page->index;
- page->index = (unsigned long) kmap_op;
}
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
@@ -768,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
-int m2p_remove_override(struct page *page, bool clear_pte)
+int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long mfn;
@@ -798,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
WARN_ON(!PagePrivate(page));
ClearPagePrivate(page);
- if (clear_pte) {
- struct gnttab_map_grant_ref *map_op =
- (struct gnttab_map_grant_ref *) page->index;
- set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ set_phys_to_machine(pfn, page->index);
+ if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
struct gnttab_unmap_grant_ref *unmap_op;
@@ -813,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte)
* issued. In this case handle is going to -1 because
* it hasn't been modified yet.
*/
- if (map_op->handle == -1)
+ if (kmap_op->handle == -1)
xen_mc_flush();
/*
- * Now if map_op->handle is negative it means that the
+ * Now if kmap_op->handle is negative it means that the
* hypercall actually returned an error.
*/
- if (map_op->handle == GNTST_general_error) {
+ if (kmap_op->handle == GNTST_general_error) {
printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
@@ -829,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
mcs = xen_mc_entry(
sizeof(struct gnttab_unmap_grant_ref));
unmap_op = mcs.args;
- unmap_op->host_addr = map_op->host_addr;
- unmap_op->handle = map_op->handle;
+ unmap_op->host_addr = kmap_op->host_addr;
+ unmap_op->handle = kmap_op->handle;
unmap_op->dev_bus_addr = 0;
MULTI_grant_table_op(mcs.mc,
@@ -841,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte)
set_pte_at(&init_mm, address, ptep,
pfn_pte(pfn, PAGE_KERNEL));
__flush_tlb_single(address);
- map_op->host_addr = 0;
+ kmap_op->host_addr = 0;
}
- } else
- set_phys_to_machine(pfn, page->index);
+ }
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
* somewhere in this domain, even before being added to the
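early_can_reuse_p2m_middle() starts with "if (p2m_index(set_pfn)) return false;", i.e. it only considers PFNs that begin a whole P2M leaf page. On the usual 64-bit configuration with 4 KiB pages each level of the P2M tree holds 512 entries, so a PFN splits into (top, mid, index) as sketched below; the 512-wide levels are an assumption about that common configuration, not something stated in this diff:

#include <stdio.h>

/* Assumed P2M geometry: 4 KiB pages, 8-byte entries -> 512 entries per level. */
#define P2M_PER_PAGE      512UL
#define P2M_MID_PER_PAGE  512UL

static unsigned long p2m_top_index(unsigned long pfn)
{ return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); }

static unsigned long p2m_mid_index(unsigned long pfn)
{ return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; }

static unsigned long p2m_index(unsigned long pfn)
{ return pfn % P2M_PER_PAGE; }

int main(void)
{
        unsigned long pfns[] = { 0x100000, 0x100001 };

        for (int i = 0; i < 2; i++) {
                unsigned long pfn = pfns[i];

                printf("pfn 0x%lx -> top %lu, mid %lu, idx %lu%s\n",
                       pfn, p2m_top_index(pfn), p2m_mid_index(pfn),
                       p2m_index(pfn),
                       p2m_index(pfn) ? "  (not a leaf boundary: skip reuse)" : "");
        }
        return 0;
}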
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ead85576d54a..d11ca11d14fc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -78,9 +78,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
memblock_reserve(start, size);
xen_max_p2m_pfn = PFN_DOWN(start + size);
+ for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
+ continue;
+ WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
+ pfn, mfn);
- for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
}
static unsigned long __init xen_do_chunk(unsigned long start,
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_resume_shared_info();
+ xen_hvm_init_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e4329e04e0f..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
void xen_vcpu_restore(void);
void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);