author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-07-10 13:16:43 +0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-07-10 13:16:43 +0400
commit     50bba07d6a16ce4a3b4f6abb44bfd3645c046ef6 (patch)
tree       c76f080377dd611857fa33f38a2b8a2af4576f8b /arch
parent     a8b91e43afd736fcebb0836359e5ddaeae45b2ab (diff)
parent     50fb31cfed9218b439360caf7c0399b00042da15 (diff)
download   linux-50bba07d6a16ce4a3b4f6abb44bfd3645c046ef6.tar.xz
Merge branch 'merge' into next
We want to bring in the latest IRQ fixes
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/atomic.h | 2
-rw-r--r--  arch/arm/include/asm/domain.h | 18
-rw-r--r--  arch/arm/include/asm/thread_info.h | 5
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c | 4
-rw-r--r--  arch/arm/kernel/perf_event.c | 2
-rw-r--r--  arch/arm/kernel/ptrace.c | 3
-rw-r--r--  arch/arm/kernel/signal.c | 46
-rw-r--r--  arch/arm/kernel/signal.h | 2
-rw-r--r--  arch/arm/kernel/traps.c | 2
-rw-r--r--  arch/arm/mach-dove/include/mach/bridge-regs.h | 1
-rw-r--r--  arch/arm/mach-dove/include/mach/dove.h | 1
-rw-r--r--  arch/arm/mach-imx/clk-imx35.c | 9
-rw-r--r--  arch/arm/mach-imx/mach-imx27_visstrim_m10.c | 2
-rw-r--r--  arch/arm/mach-mmp/include/mach/gpio-pxa.h | 29
-rw-r--r--  arch/arm/mach-mv78xx0/include/mach/bridge-regs.h | 1
-rw-r--r--  arch/arm/mach-mv78xx0/include/mach/mv78xx0.h | 2
-rw-r--r--  arch/arm/mach-mxs/mach-apx4devkit.c | 11
-rw-r--r--  arch/arm/mach-omap2/board-overo.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 28
-rw-r--r--  arch/arm/mach-omap2/twl-common.c | 2
-rw-r--r--  arch/arm/mach-pxa/hx4700.c | 15
-rw-r--r--  arch/arm/mach-versatile/pci.c | 1
-rw-r--r--  arch/arm/mm/mm.h | 2
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 6
-rw-r--r--  arch/powerpc/kernel/irq.c | 48
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/pervasive.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/processor_idle.c | 17
-rw-r--r--  arch/powerpc/xmon/xmon.c | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 3
31 files changed, 188 insertions, 93 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 68374ba6a943..c79f61faa3a5 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,7 +243,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
{
u64 result;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d2220498abc..6ddbe446425e 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -60,13 +60,13 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x) \
- do { \
- __asm__ __volatile__( \
- "mcr p15, 0, %0, c3, c0 @ set domain" \
- : : "r" (x)); \
- isb(); \
- } while (0)
+static inline void set_domain(unsigned val)
+{
+ asm volatile(
+ "mcr p15, 0, %0, c3, c0 @ set domain"
+ : : "r" (val));
+ isb();
+}
#define modify_domain(dom,type) \
do { \
@@ -78,8 +78,8 @@
} while (0)
#else
-#define set_domain(x) do { } while (0)
-#define modify_domain(dom,type) do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
/*
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b79f8e97f775..af7b0bda3355 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_RESTARTSYS 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
/*
* Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index ba32b393b3f0..38c1a3b103a0 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
TEST_BF_R ("mov pc, r",0,2f,"")
TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
TEST_BB( "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
- TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+ TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
#endif
TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb982c5..a02eada3aa5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
- return -EPERM;
+ return -EOPNOTSUPP;
}
/*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5700a7ae7f0b..14e38261cd31 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
-#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
- if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
- scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index fd2392a17ac1..536c5d6b340b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -27,6 +27,7 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -47,6 +48,18 @@ const unsigned long sigreturn_codes[7] = {
};
/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled. In the latter case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+ SWI_SYS_RESTART, /* swi __NR_restart_syscall */
+ 0xe49df004, /* ldr pc, [sp], #4 */
+};
+
+/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- case -ERESTART_RESTARTBLOCK:
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->ARM_r0 = -EINTR;
+ break;
}
}
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger has chosen to restart at a different PC.
*/
if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK
+ if (retval == -ERESTARTNOHAND
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
regs->ARM_pc = continue_addr;
}
- clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
* ignore the restart.
*/
if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == restart_addr)
- set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+ && regs->ARM_pc == continue_addr) {
+ if (thumb_mode(regs)) {
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+ regs->ARM_pc -= 2;
+ } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
+ u32 __user *usp;
+
+ regs->ARM_sp -= 4;
+ usp = (u32 __user *)regs->ARM_sp;
+
+ if (put_user(regs->ARM_pc, usp) == 0) {
+ regs->ARM_pc = KERN_RESTART_CODE;
+ } else {
+ regs->ARM_sp += 4;
+ force_sigsegv(0, current);
+ }
+#endif
+ }
+ }
}
restore_saved_sigmask();
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 5ff067b7c752..6fcfe8398aa4 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,5 +8,7 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4928d89758f4..3647170e9a16 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
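(Not part of the patch: the signal.c, signal.h and traps.c hunks above restore the restart-stub handling of -ERESTART_RESTARTBLOCK. The user-visible effect is that a syscall interrupted by a signal that does not run a handler is transparently re-issued through sys_restart_syscall(), with r7 left untouched for OABI user space. The user-space sketch below, assuming only standard POSIX calls, illustrates that behaviour; it is an illustration, not code from this merge.)

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/*
		 * Child: sleep for 2s with no signal handler installed.
		 * When the stop/continue below interrupts nanosleep(), the
		 * kernel sees -ERESTART_RESTARTBLOCK with no handler to run
		 * and re-issues the syscall via sys_restart_syscall() -- on
		 * OABI-compat ARM kernels, through the restart stub set up
		 * above -- instead of returning early.
		 */
		struct timespec req = { .tv_sec = 2, .tv_nsec = 0 };
		struct timespec t0, t1;

		clock_gettime(CLOCK_MONOTONIC, &t0);
		if (nanosleep(&req, NULL) != 0)
			perror("nanosleep");	/* not expected here */
		clock_gettime(CLOCK_MONOTONIC, &t1);
		printf("child slept ~%ld s, no EINTR\n",
		       (long)(t1.tv_sec - t0.tv_sec));
		_exit(0);
	}

	sleep(1);		/* let the child reach nanosleep() */
	kill(child, SIGSTOP);	/* interrupt it without running a handler */
	kill(child, SIGCONT);
	waitpid(child, NULL, 0);
	return 0;
}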
diff --git a/arch/arm/mach-dove/include/mach/bridge-regs.h b/arch/arm/mach-dove/include/mach/bridge-regs.h
index 226949dc4ac0..f953bb54aa9d 100644
--- a/arch/arm/mach-dove/include/mach/bridge-regs.h
+++ b/arch/arm/mach-dove/include/mach/bridge-regs.h
@@ -50,5 +50,6 @@
#define POWER_MANAGEMENT (BRIDGE_VIRT_BASE | 0x011c)
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
index ad1165d488c1..d52b0ef313b7 100644
--- a/arch/arm/mach-dove/include/mach/dove.h
+++ b/arch/arm/mach-dove/include/mach/dove.h
@@ -78,6 +78,7 @@
/* North-South Bridge */
#define BRIDGE_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x20000)
/* Cryptographic Engine */
#define DOVE_CRYPT_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x30000)
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 920a8cc42726..c6422fb10bae 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
pr_err("i.MX35 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
-
clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,6 +263,14 @@ int __init mx35_clocks_init()
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
+ /*
+ * SCC is needed to boot via mmc after a watchdog reset. The clock code
+ * before conversion to common clk also enabled UART1 (which isn't
+ * handled here and not needed for mmc) and IIM (which is enabled
+ * unconditionally above).
+ */
+ clk_prepare_enable(clk[scc_gate]);
+
imx_print_silicon_rev("i.MX35", mx35_revision());
#ifdef CONFIG_MXC_USE_EPIT
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index f76edb96a48a..ba09552fe5fe 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -38,7 +38,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
#include <mach/common.h>
#include <mach/iomux-mx27.h>
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644
index 0e135a599f3e..000000000000
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio) ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
diff --git a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
index c64dbb96dbad..eb187e0e059b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
+++ b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
@@ -31,5 +31,6 @@
#define IRQ_MASK_HIGH_OFF 0x0014
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
index 3674497162e3..e807c4c52a0b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
+++ b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
@@ -42,6 +42,7 @@
#define MV78XX0_CORE0_REGS_PHYS_BASE 0xf1020000
#define MV78XX0_CORE1_REGS_PHYS_BASE 0xf1024000
#define MV78XX0_CORE_REGS_VIRT_BASE 0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE 0xfe400000
#define MV78XX0_CORE_REGS_SIZE SZ_16K
#define MV78XX0_PCIE_IO_PHYS_BASE(i) (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
* Core-specific peripheral registers.
*/
#define BRIDGE_VIRT_BASE (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE (MV78XX0_CORE_REGS_PHYS_BASE)
/*
* Register Map
diff --git a/arch/arm/mach-mxs/mach-apx4devkit.c b/arch/arm/mach-mxs/mach-apx4devkit.c
index 5e90b9dcdef8..f5f061757deb 100644
--- a/arch/arm/mach-mxs/mach-apx4devkit.c
+++ b/arch/arm/mach-mxs/mach-apx4devkit.c
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
return 0;
}
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+ struct clk *clk;
+
+ /* Enable fec phy clock */
+ clk = clk_get_sys("enet_out", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
static void __init apx4devkit_init(void)
{
mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
apx4devkit_phy_fixup);
+ apx4devkit_fec_phy_clk_enable();
mx28_add_fec(0, &mx28_fec_pdata);
mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8fa2fc3a4c3c..779734d8ba37 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -494,8 +494,8 @@ static void __init overo_init(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_hsmmc_init(mmc);
overo_i2c_init();
+ omap_hsmmc_init(mmc);
omap_display_init(&overo_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f30e861ce6d9..b7bcba5221ba 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -1928,7 +1928,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1963,7 +1963,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1998,7 +1998,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2033,7 +2033,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -3864,7 +3864,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
};
/* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
.master = &omap44xx_usb_host_fs_hwmod,
.slave = &omap44xx_l3_main_2_hwmod,
.clk = "l3_div_ck",
@@ -3922,7 +3922,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
};
/* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
.master = &omap44xx_aess_hwmod,
.slave = &omap44xx_l4_abe_hwmod,
.clk = "ocp_abe_iclk",
@@ -4013,7 +4013,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
};
/* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -4031,7 +4031,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
};
/* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -5857,7 +5857,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
};
/* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
.master = &omap44xx_l4_cfg_hwmod,
.slave = &omap44xx_usb_host_fs_hwmod,
.clk = "l4_div_ck",
@@ -6014,13 +6014,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
&omap44xx_l4_cfg__l3_main_2,
- &omap44xx_usb_host_fs__l3_main_2,
+ /* &omap44xx_usb_host_fs__l3_main_2, */
&omap44xx_usb_host_hs__l3_main_2,
&omap44xx_usb_otg_hs__l3_main_2,
&omap44xx_l3_main_1__l3_main_3,
&omap44xx_l3_main_2__l3_main_3,
&omap44xx_l4_cfg__l3_main_3,
- &omap44xx_aess__l4_abe,
+ /* &omap44xx_aess__l4_abe, */
&omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
@@ -6029,8 +6029,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__l4_wkup,
&omap44xx_mpu__mpu_private,
&omap44xx_l4_cfg__ocp_wp_noc,
- &omap44xx_l4_abe__aess,
- &omap44xx_l4_abe__aess_dma,
+ /* &omap44xx_l4_abe__aess, */
+ /* &omap44xx_l4_abe__aess_dma, */
&omap44xx_l3_main_2__c2c,
&omap44xx_l4_wkup__counter_32k,
&omap44xx_l4_cfg__ctrl_module_core,
@@ -6136,7 +6136,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__uart2,
&omap44xx_l4_per__uart3,
&omap44xx_l4_per__uart4,
- &omap44xx_l4_cfg__usb_host_fs,
+ /* &omap44xx_l4_cfg__usb_host_fs, */
&omap44xx_l4_cfg__usb_host_hs,
&omap44xx_l4_cfg__usb_otg_hs,
&omap44xx_l4_cfg__usb_tll_hs,
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 119d5a910f3a..43a979075338 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -32,6 +32,7 @@
#include "twl-common.h"
#include "pm.h"
#include "voltage.h"
+#include "mux.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -77,6 +78,7 @@ void __init omap4_pmic_init(const char *pmic_type,
struct twl6040_platform_data *twl6040_data, int twl6040_irq)
{
/* PMIC part*/
+ omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(omap4_i2c1_board_info[0].type, pmic_type,
sizeof(omap4_i2c1_board_info[0].type));
omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index d09da6a746b8..d3de84b0dcbe 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO19_SSP2_SCLK,
GPIO86_SSP2_RXD,
GPIO87_SSP2_TXD,
- GPIO88_GPIO,
+ GPIO88_GPIO | MFP_LPM_DRIVE_HIGH, /* TSC2046_CS */
+
+ /* BQ24022 Regulator */
+ GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_nCHARGE_EN */
+ GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_ISET2 */
/* HX4700 specific input GPIOs */
GPIO12_GPIO | WAKEUP_ON_EDGE_RISE, /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO14_GPIO, /* nWLAN_IRQ */
/* HX4700 specific output GPIOs */
+ GPIO61_GPIO | MFP_LPM_DRIVE_HIGH, /* W3220_nRESET */
+ GPIO71_GPIO | MFP_LPM_DRIVE_HIGH, /* ASIC3_nRESET */
+ GPIO81_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_GP_nRESET */
+ GPIO116_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_HW_nRESET */
GPIO102_GPIO | MFP_LPM_DRIVE_LOW, /* SYNAPTICS_POWER_ON */
GPIO10_GPIO, /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
{ GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
{ GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
{ GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+ { GPIO61_HX4700_W3220_nRESET, GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
{ GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+ { GPIO81_HX4700_CPU_GP_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
{ GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+ { GPIO116_HX4700_CPU_HW_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
};
static void __init hx4700_init(void)
{
int ret;
+ PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index bec933b04ef0..e95bf84cc837 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
- int devslot = PCI_SLOT(dev->devfn);
/* slot, pin, irq
* 24 1 27
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c471436c7952..2e8a1efdf7b8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 6eb75b80488c..0554ab062bdc 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -86,8 +86,8 @@ static inline bool arch_irqs_disabled(void)
}
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !regs->softe;
}
+extern bool prep_irq_for_idle(void);
+
#else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1b415027ec0e..1f017bb7a7ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
else {
/*
* We should already be hard disabled here. We had bugs
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
__hard_irq_enable();
}
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ hard_irq_disable();
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ local_paca->soft_enabled = 1;
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
+
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
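(Not part of the patch: a minimal sketch of the calling convention the comment above describes, and that the cell/pervasive.c and pseries/processor_idle.c hunks further down follow. example_power_save() and enter_low_power_state() are illustrative names only; prep_irq_for_idle() and __hard_irq_enable() are the real helpers touched by this merge.)

/*
 * Sketch of a ppc_md.power_save hook.  Called with interrupts
 * soft-disabled; enter_low_power_state() stands in for the platform
 * mechanism whose side effect is to hard-enable interrupts
 * (H_CEDE under pHyp, clearing CTRL[RUN]/TE on Cell, ...).
 */
static void example_power_save(void)
{
	/*
	 * Hard-disable and check for anything that happened while we
	 * were soft-disabled.  On false, interrupts are left hard
	 * disabled and marked as such, so the local_irq_enable() in
	 * cpu_idle() will replay whatever is pending.
	 */
	if (!prep_irq_for_idle())
		return;

	enter_low_power_state();	/* interrupts come back on here */

	/*
	 * Make the hard-enable explicit before returning, matching the
	 * soft-enabled state prep_irq_for_idle() established (the
	 * pervasive.c hunk below does the same).
	 */
	__hard_irq_enable();
}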
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..a1044f43becd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -810,7 +810,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
lwz r3,VCORE_NAPPING_THREADS(r5)
lwz r4,VCPU_PTID(r9)
li r0,1
- sldi r0,r0,r4
+ sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7c28589d45bd..39b159751c35 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -641,7 +641,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz;
int nid;
- struct assoc_arrays aa;
+ struct assoc_arrays aa = { .arrays = NULL };
n = of_get_drconf_memory(memory, &dm);
if (!n)
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc829576..d17e98bc0c10 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
{
unsigned long ctrl, thread_switch_control;
- /*
- * We need to hard disable interrupts, the local_irq_enable() done by
- * our caller upon return will hard re-enable.
- */
- hard_irq_disable();
+ /* Ensure our interrupt state is properly tracked */
+ if (!prep_irq_for_idle())
+ return;
ctrl = mfspr(SPRN_CTRLF);
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
*/
ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
mtspr(SPRN_CTRLT, ctrl);
+
+ /* Re-enable interrupts in MSR */
+ __hard_irq_enable();
}
static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index a97ef6692dad..7f5668b94165 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -100,15 +100,18 @@ out:
static void check_and_cede_processor(void)
{
/*
- * Interrupts are soft-disabled at this point,
- * but not hard disabled. So an interrupt might have
- * occurred before entering NAP, and would be potentially
- * lost (edge events, decrementer events, etc...) unless
- * we first hard disable then check.
+ * Ensure our interrupt state is properly tracked,
+ * also checks if no interrupt has occurred while we
+ * were soft-disabled
*/
- hard_irq_disable();
- if (!lazy_irq_pending())
+ if (prep_irq_for_idle()) {
cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+ }
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d2222..eab3492a45c5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
/* print cpus waiting or in xmon */
printf("cpus stopped:");
count = 0;
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
if (count == 0)
printf(" %x", cpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be3cea4407ff..57e168e27b5b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3934,6 +3934,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
{
struct kvm_mmu_page *page;
+ if (list_empty(&kvm->arch.active_mmu_pages))
+ return;
+
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, page, invalid_list);