Diffstat (limited to 'arch/powerpc/include/asm')
 arch/powerpc/include/asm/atomic.h            |  19
 arch/powerpc/include/asm/bitops.h            |   5
 arch/powerpc/include/asm/dbell.h             |   2
 arch/powerpc/include/asm/ehv_pic.h           |  40
 arch/powerpc/include/asm/elf.h               |   4
 arch/powerpc/include/asm/emulated_ops.h      |   2
 arch/powerpc/include/asm/epapr_hcalls.h      | 502
 arch/powerpc/include/asm/exception-64e.h     |  52
 arch/powerpc/include/asm/fsl_hcalls.h        | 655
 arch/powerpc/include/asm/hvsi.h              |  94
 arch/powerpc/include/asm/irq.h               |   4
 arch/powerpc/include/asm/jump_label.h        |  47
 arch/powerpc/include/asm/local.h             |   2
 arch/powerpc/include/asm/macio.h             |   2
 arch/powerpc/include/asm/mmu.h               |  12
 arch/powerpc/include/asm/pSeries_reconfig.h  |   2
 arch/powerpc/include/asm/paca.h              |   7
 arch/powerpc/include/asm/pci-bridge.h        |  50
 arch/powerpc/include/asm/pci.h               |   2
 arch/powerpc/include/asm/pgtable-ppc64.h     |   3
 arch/powerpc/include/asm/processor.h         |   5
 arch/powerpc/include/asm/prom.h              |   2
 arch/powerpc/include/asm/reg.h               |   4
 arch/powerpc/include/asm/setup.h             |   4
 arch/powerpc/include/asm/smp.h               |   3
 arch/powerpc/include/asm/smu.h               |   2
 arch/powerpc/include/asm/system.h            |   1
 arch/powerpc/include/asm/udbg.h              |   1
 28 files changed, 1424 insertions(+), 104 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b8f152ece025..e2a4c26ad377 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -181,21 +181,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
- * atomic_add_unless - add unless the number is a given value
+ * __atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int t;
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1 # atomic_add_unless\n\
+"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
@@ -209,10 +209,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
: "r" (&v->counter), "r" (a), "r" (u)
: "cc", "memory");
- return t != u;
+ return t;
}
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -444,7 +443,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Returns the old value of @v.
*/
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
@@ -452,7 +451,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1: ldarx %0,0,%1 # atomic_add_unless\n\
+"1: ldarx %0,0,%1 # __atomic_add_unless\n\
cmpd 0,%0,%3 \n\
beq- 2f \n\
add %0,%2,%0 \n"
@@ -470,11 +469,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#else /* __powerpc64__ */
-#include <asm-generic/atomic64.h>
-
#endif /* __powerpc64__ */
-#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
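For context, callers of the removed atomic_add_unless()/atomic_inc_not_zero() are served by generic wrappers in <linux/atomic.h> built on top of the arch's __atomic_add_unless(). A rough sketch of that generic layer (an assumption about the companion generic change, not part of this diff):

	/* sketch of the generic <linux/atomic.h> side, for illustration only */
	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		/* __atomic_add_unless() now returns the old value, so compare it */
		return __atomic_add_unless(v, a, u) != u;
	}

	#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)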
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index f18c6d9b9510..e137afcc10fa 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -327,10 +327,7 @@ unsigned long find_next_bit_le(const void *addr,
unsigned long size, unsigned long offset);
/* Bitmap functions for the ext2 filesystem */
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit_le((nr), (unsigned long*)addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit_le((nr), (unsigned long*)addr)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 9c70d0ca96d4..efa74ac44a35 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -18,7 +18,7 @@
#include <asm/ppc-opcode.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
-#define PPC_DBELL_TYPE(x) (((x) & 0xf) << 28)
+#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
enum ppc_dbell {
PPC_DBELL = 0, /* doorbell */
PPC_DBELL_CRIT = 1, /* critical doorbell */
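A note on the arithmetic: (63 - 36) evaluates to 27, one bit less than the old hard-coded 28, and writing it as 63 minus a bit position follows the ISA's big-endian 64-bit register numbering; presumably the doorbell message-type field ends at bit 36, making the previous shift off by one.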
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
new file mode 100644
index 000000000000..a9e1f4f796f6
--- /dev/null
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -0,0 +1,40 @@
+/*
+ * EHV_PIC private definitions and structure.
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __EHV_PIC_H__
+#define __EHV_PIC_H__
+
+#include <linux/irq.h>
+
+#define NR_EHV_PIC_INTS 1024
+
+#define EHV_PIC_INFO(name) EHV_PIC_##name
+
+#define EHV_PIC_VECPRI_POLARITY_NEGATIVE 0
+#define EHV_PIC_VECPRI_POLARITY_POSITIVE 1
+#define EHV_PIC_VECPRI_SENSE_EDGE 0
+#define EHV_PIC_VECPRI_SENSE_LEVEL 0x2
+#define EHV_PIC_VECPRI_POLARITY_MASK 0x1
+#define EHV_PIC_VECPRI_SENSE_MASK 0x2
+
+struct ehv_pic {
+ /* The remapper for this EHV_PIC */
+ struct irq_host *irqhost;
+
+ /* The "linux" controller struct */
+ struct irq_chip hc_irq;
+
+ /* core int flag */
+ int coreint_flag;
+};
+
+void ehv_pic_init(void);
+unsigned int ehv_pic_get_irq(void);
+
+#endif /* __EHV_PIC_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 2b917c69ed15..3bf9cca35147 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
-#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
+#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b)
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (is_32bit_task() ? \
@@ -298,7 +298,7 @@ do { \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base) \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
} while (0)
/* PowerPC64 relocations defined by the ABIs */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 2cc41c715d2b..63f2a22e9954 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -18,7 +18,7 @@
#ifndef _ASM_POWERPC_EMULATED_OPS_H
#define _ASM_POWERPC_EMULATED_OPS_H
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/perf_event.h>
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
new file mode 100644
index 000000000000..f3b0c2cc9fea
--- /dev/null
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -0,0 +1,502 @@
+/*
+ * ePAPR hcall interface
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* A "hypercall" is an "sc 1" instruction. This header file file provides C
+ * wrapper functions for the ePAPR hypervisor interface. It is inteded
+ * for use by Linux device drivers and other operating systems.
+ *
+ * The hypercalls are implemented as inline assembly, rather than assembly
+ * language functions in a .S file, for optimization. It allows
+ * the caller to issue the hypercall instruction directly, improving both
+ * performance and memory footprint.
+ */
+
+#ifndef _EPAPR_HCALLS_H
+#define _EPAPR_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+#define EV_BYTE_CHANNEL_SEND 1
+#define EV_BYTE_CHANNEL_RECEIVE 2
+#define EV_BYTE_CHANNEL_POLL 3
+#define EV_INT_SET_CONFIG 4
+#define EV_INT_GET_CONFIG 5
+#define EV_INT_SET_MASK 6
+#define EV_INT_GET_MASK 7
+#define EV_INT_IACK 9
+#define EV_INT_EOI 10
+#define EV_INT_SEND_IPI 11
+#define EV_INT_SET_TASK_PRIORITY 12
+#define EV_INT_GET_TASK_PRIORITY 13
+#define EV_DOORBELL_SEND 14
+#define EV_MSGSND 15
+#define EV_IDLE 16
+
+/* vendor ID: epapr */
+#define EV_LOCAL_VENDOR_ID 0 /* for private use */
+#define EV_EPAPR_VENDOR_ID 1
+#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
+#define EV_IBM_VENDOR_ID 3 /* IBM */
+#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
+#define EV_ENEA_VENDOR_ID 5 /* Enea */
+#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
+#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
+#define EV_KVM_VENDOR_ID 42 /* KVM */
+
+/* The max number of bytes that a byte channel can send or receive per call */
+#define EV_BYTE_CHANNEL_MAX_BYTES 16
+
+
+#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
+#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
+
+/* epapr error codes */
+#define EV_EPERM 1 /* Operation not permitted */
+#define EV_ENOENT 2 /* Entry Not Found */
+#define EV_EIO 3 /* I/O error occurred */
+#define EV_EAGAIN 4 /* The operation had insufficient
+ * resources to complete and should be
+ * retried
+ */
+#define EV_ENOMEM 5 /* There was insufficient memory to
+ * complete the operation */
+#define EV_EFAULT 6 /* Bad guest address */
+#define EV_ENODEV 7 /* No such device */
+#define EV_EINVAL 8 /* An argument supplied to the hcall
+ was out of range or invalid */
+#define EV_INTERNAL 9 /* An internal error occurred */
+#define EV_CONFIG 10 /* A configuration error was detected */
+#define EV_INVALID_STATE 11 /* The object is in an invalid state */
+#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
+#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
+
+/*
+ * Hypercall register clobber list
+ *
+ * These macros are used to define the list of clobbered registers during a
+ * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
+ * but the gcc inline assembly syntax does not allow us to specify registers
+ * on the clobber list that are also on the input/output list. Therefore,
+ * the list of clobbered registers depends on the number of register
+ * parameters ("+r" and "=r") passed to the hypercall.
+ *
+ * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
+ * general rule, 'x' is the number of parameters passed to the assembly
+ * block *except* for r11.
+ *
+ * If you're not sure, just use the smallest value of 'x' that does not
+ * generate a compilation error. Because these are static inline functions,
+ * the compiler will only check the clobber list for a function if you
+ * compile code that calls that function.
+ *
+ * r3 and r11 are not included in any clobbers list because they are always
+ * listed as output registers.
+ *
+ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
+ * whether they will be clobbered.
+ *
+ * Note that r11 can be used as an output parameter.
+*/
+
+/* List of common clobbered registers. Do not use this macro. */
+#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc"
+
+#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
+#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
+#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
+#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
+#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
+#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
+#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
+#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
+
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * ev_int_set_config - configure the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: configuration for this interrupt
+ * @priority: interrupt priority
+ * @destination: destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_config(unsigned int interrupt,
+ uint32_t config, unsigned int priority, uint32_t destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
+ r3 = interrupt;
+ r4 = config;
+ r5 = priority;
+ r6 = destination;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_config - return the config of the specified interrupt
+ * @interrupt: the interrupt number
+ * @config: returned configuration for this interrupt
+ * @priority: returned interrupt priority
+ * @destination: returned destination CPU number
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_config(unsigned int interrupt,
+ uint32_t *config, unsigned int *priority, uint32_t *destination)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
+ : : EV_HCALL_CLOBBERS4
+ );
+
+ *config = r4;
+ *priority = r5;
+ *destination = r6;
+
+ return r3;
+}
+
+/**
+ * ev_int_set_mask - sets the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: 0=enable interrupts, 1=disable interrupts
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_set_mask(unsigned int interrupt,
+ unsigned int mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
+ r3 = interrupt;
+ r4 = mask;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * ev_int_get_mask - returns the mask for the specified interrupt source
+ * @interrupt: the interrupt number
+ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_get_mask(unsigned int interrupt,
+ unsigned int *mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *mask = r4;
+
+ return r3;
+}
+
+/**
+ * ev_int_eoi - signal the end of interrupt processing
+ * @interrupt: the interrupt number
+ *
+ * This function signals the end of processing for the specified
+ * interrupt, which must be the interrupt currently in service. By
+ * definition, this is also the highest-priority interrupt.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_eoi(unsigned int interrupt)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_EOI);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_send - send characters to a byte stream
+ * @handle: byte stream handle
+ * @count: (input) num of chars to send, (output) num chars sent
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * @buffer must be at least 16 bytes long, because all 16 bytes will be
+ * read from memory into registers, even if count < 16.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_send(unsigned int handle,
+ unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ const uint32_t *p = (const uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
+ r3 = handle;
+ r4 = *count;
+ r5 = be32_to_cpu(p[0]);
+ r6 = be32_to_cpu(p[1]);
+ r7 = be32_to_cpu(p[2]);
+ r8 = be32_to_cpu(p[3]);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3),
+ "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_receive - fetch characters from a byte channel
+ * @handle: byte channel handle
+ * @count: (input) max num of chars to receive, (output) num chars received
+ * @buffer: pointer to a 16-byte buffer
+ *
+ * The size of @buffer must be at least 16 bytes, even if you request fewer
+ * than 16 characters, because we always write 16 bytes to @buffer. This is
+ * for performance reasons.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_receive(unsigned int handle,
+ unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ uint32_t *p = (uint32_t *) buffer;
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
+ r3 = handle;
+ r4 = *count;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4),
+ "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
+ : : EV_HCALL_CLOBBERS6
+ );
+
+ *count = r4;
+ p[0] = cpu_to_be32(r5);
+ p[1] = cpu_to_be32(r6);
+ p[2] = cpu_to_be32(r7);
+ p[3] = cpu_to_be32(r8);
+
+ return r3;
+}
+
+/**
+ * ev_byte_channel_poll - returns the status of the byte channel buffers
+ * @handle: byte channel handle
+ * @rx_count: returned count of bytes in receive queue
+ * @tx_count: returned count of free space in transmit queue
+ *
+ * This function reports the amount of data in the receive queue (i.e. the
+ * number of bytes you can read), and the amount of free space in the transmit
+ * queue (i.e. the number of bytes you can write).
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_byte_channel_poll(unsigned int handle,
+ unsigned int *rx_count, unsigned int *tx_count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ *rx_count = r4;
+ *tx_count = r5;
+
+ return r3;
+}
+
+/**
+ * ev_int_iack - acknowledge an interrupt
+ * @handle: handle to the target interrupt controller
+ * @vector: returned interrupt vector
+ *
+ * If handle is zero, the function returns the next interrupt source
+ * number to be handled irrespective of the hierarchy or cascading
+ * of interrupt controllers. If non-zero, it specifies a handle to the
+ * interrupt controller that is the target of the acknowledge.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_int_iack(unsigned int handle,
+ unsigned int *vector)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = EV_HCALL_TOKEN(EV_INT_IACK);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *vector = r4;
+
+ return r3;
+}
+
+/**
+ * ev_doorbell_send - send a doorbell to another partition
+ * @handle: doorbell send handle
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_doorbell_send(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * ev_idle -- wait for next interrupt on this core
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int ev_idle(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = EV_HCALL_TOKEN(EV_IDLE);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#endif
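Purely as a usage illustration (not part of the header), a byte-channel client might push data out with ev_byte_channel_send() roughly like this; the handle value and the min_t()/memcpy() helpers from <linux/kernel.h> and <linux/string.h> are assumptions, not something this patch provides:

	static void example_bc_write(unsigned int handle, const char *s, unsigned int len)
	{
		char buf[EV_BYTE_CHANNEL_MAX_BYTES];	/* always 16 bytes, as required */
		unsigned int count, ret;

		while (len) {
			count = min_t(unsigned int, len, EV_BYTE_CHANNEL_MAX_BYTES);
			memcpy(buf, s, count);
			ret = ev_byte_channel_send(handle, &count, buf);
			if (ret == EV_EAGAIN)
				continue;		/* queue full, just retry */
			if (ret)
				break;			/* hard error, give up */
			s += count;			/* count now holds bytes accepted */
			len -= count;
		}
	}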
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 6d53f311d942..ac13addb8495 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -48,30 +48,33 @@
#define EX_R14 (4 * 8)
#define EX_R15 (5 * 8)
-/* The TLB miss exception uses different slots */
+/*
+ * The TLB miss exception uses different slots.
+ *
+ * The bolted variant uses only the first six fields,
+ * which in combination with pgd and kernel_pgd fits in
+ * one 64-byte cache line.
+ */
#define EX_TLB_R10 ( 0 * 8)
#define EX_TLB_R11 ( 1 * 8)
-#define EX_TLB_R12 ( 2 * 8)
-#define EX_TLB_R13 ( 3 * 8)
-#define EX_TLB_R14 ( 4 * 8)
-#define EX_TLB_R15 ( 5 * 8)
-#define EX_TLB_R16 ( 6 * 8)
-#define EX_TLB_CR ( 7 * 8)
+#define EX_TLB_R14 ( 2 * 8)
+#define EX_TLB_R15 ( 3 * 8)
+#define EX_TLB_R16 ( 4 * 8)
+#define EX_TLB_CR ( 5 * 8)
+#define EX_TLB_R12 ( 6 * 8)
+#define EX_TLB_R13 ( 7 * 8)
#define EX_TLB_DEAR ( 8 * 8) /* Level 0 and 2 only */
#define EX_TLB_ESR ( 9 * 8) /* Level 0 and 2 only */
#define EX_TLB_SRR0 (10 * 8)
#define EX_TLB_SRR1 (11 * 8)
-#define EX_TLB_MMUCR0 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS1 (12 * 8) /* Level 0 */
-#define EX_TLB_MAS2 (13 * 8) /* Level 0 */
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define EX_TLB_R8 (14 * 8)
-#define EX_TLB_R9 (15 * 8)
-#define EX_TLB_LR (16 * 8)
-#define EX_TLB_SIZE (17 * 8)
+#define EX_TLB_R8 (12 * 8)
+#define EX_TLB_R9 (13 * 8)
+#define EX_TLB_LR (14 * 8)
+#define EX_TLB_SIZE (15 * 8)
#else
-#define EX_TLB_SIZE (14 * 8)
+#define EX_TLB_SIZE (12 * 8)
#endif
#define START_EXCEPTION(label) \
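Spelling out the cache-line arithmetic behind the new comment: the six bolted slots (EX_TLB_R10 through EX_TLB_CR) take 6 x 8 = 48 bytes, and together with the two 8-byte pgd/kernel_pgd pointers that the paca.h hunk below places immediately before extlb, 48 + 16 = 64 bytes, exactly one cache line.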
@@ -168,6 +171,16 @@ exc_##label##_book3e:
ld r9,EX_TLB_R9(r12); \
ld r8,EX_TLB_R8(r12); \
mtlr r16;
+#define TLB_MISS_PROLOG_STATS_BOLTED \
+ mflr r10; \
+ std r8,PACA_EXTLB+EX_TLB_R8(r13); \
+ std r9,PACA_EXTLB+EX_TLB_R9(r13); \
+ std r10,PACA_EXTLB+EX_TLB_LR(r13);
+#define TLB_MISS_RESTORE_STATS_BOLTED \
+ ld r16,PACA_EXTLB+EX_TLB_LR(r13); \
+ ld r9,PACA_EXTLB+EX_TLB_R9(r13); \
+ ld r8,PACA_EXTLB+EX_TLB_R8(r13); \
+ mtlr r16;
#define TLB_MISS_STATS_D(name) \
addi r9,r13,MMSTAT_DSTATS+name; \
bl .tlb_stat_inc;
@@ -183,17 +196,20 @@ exc_##label##_book3e:
61: addi r9,r13,MMSTAT_ISTATS+name; \
62: bl .tlb_stat_inc;
#define TLB_MISS_STATS_SAVE_INFO \
- std r14,EX_TLB_ESR(r12); /* save ESR */ \
-
-
+ std r14,EX_TLB_ESR(r12); /* save ESR */
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED \
+ std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */
#else
#define TLB_MISS_PROLOG_STATS
#define TLB_MISS_RESTORE_STATS
+#define TLB_MISS_PROLOG_STATS_BOLTED
+#define TLB_MISS_RESTORE_STATS_BOLTED
#define TLB_MISS_STATS_D(name)
#define TLB_MISS_STATS_I(name)
#define TLB_MISS_STATS_X(name)
#define TLB_MISS_STATS_Y(name)
#define TLB_MISS_STATS_SAVE_INFO
+#define TLB_MISS_STATS_SAVE_INFO_BOLTED
#endif
#define SET_IVOR(vector_number, vector_offset) \
diff --git a/arch/powerpc/include/asm/fsl_hcalls.h b/arch/powerpc/include/asm/fsl_hcalls.h
new file mode 100644
index 000000000000..922d9b5fe3d5
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_hcalls.h
@@ -0,0 +1,655 @@
+/*
+ * Freescale hypervisor call interface
+ *
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * This file is provided under a dual BSD/GPL license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_HCALLS_H
+#define _FSL_HCALLS_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+#include <asm/epapr_hcalls.h>
+
+#define FH_API_VERSION 1
+
+#define FH_ERR_GET_INFO 1
+#define FH_PARTITION_GET_DTPROP 2
+#define FH_PARTITION_SET_DTPROP 3
+#define FH_PARTITION_RESTART 4
+#define FH_PARTITION_GET_STATUS 5
+#define FH_PARTITION_START 6
+#define FH_PARTITION_STOP 7
+#define FH_PARTITION_MEMCPY 8
+#define FH_DMA_ENABLE 9
+#define FH_DMA_DISABLE 10
+#define FH_SEND_NMI 11
+#define FH_VMPIC_GET_MSIR 12
+#define FH_SYSTEM_RESET 13
+#define FH_GET_CORE_STATE 14
+#define FH_ENTER_NAP 15
+#define FH_EXIT_NAP 16
+#define FH_CLAIM_DEVICE 17
+#define FH_PARTITION_STOP_DMA 18
+
+/* vendor ID: Freescale Semiconductor */
+#define FH_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_FSL_VENDOR_ID, num)
+
+/*
+ * We use "uintptr_t" to define a register because it's guaranteed to be a
+ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
+ * platform.
+ *
+ * All registers are either input/output or output only. Registers that are
+ * initialized before making the hypercall are input/output. All
+ * input/output registers are represented with "+r". Output-only registers
+ * are represented with "=r". Do not specify any unused registers. The
+ * clobber list will tell the compiler that the hypercall modifies those
+ * registers, which is good enough.
+ */
+
+/**
+ * fh_send_nmi - send NMI to virtual cpu(s).
+ * @vcpu_mask: send NMI to virtual cpu(s) specified by this mask.
+ *
+ * Returns 0 for success, or EINVAL for invalid vcpu_mask.
+ */
+static inline unsigned int fh_send_nmi(unsigned int vcpu_mask)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SEND_NMI);
+ r3 = vcpu_mask;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/* Arbitrary limits to avoid excessive memory allocation in hypervisor */
+#define FH_DTPROP_MAX_PATHLEN 4096
+#define FH_DTPROP_MAX_PROPLEN 32768
+
+/**
+ * fh_partition_get_dtprop - get a property from a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value buffer
+ * @propvalue_len: length of buffer on entry, length of property on return
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_get_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t *propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = *propvalue_len;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ *propvalue_len = r4;
+ return r3;
+}
+
+/**
+ * Set a property in a guest device tree.
+ * @handle: handle of partition whose device tree is to be accessed
+ * @dtpath_addr: physical address of device tree path to access
+ * @propname_addr: physical address of name of property
+ * @propvalue_addr: physical address of property value
+ * @propvalue_len: length of property
+ *
+ * Returns zero on success, non-zero on error.
+ */
+static inline unsigned int fh_partition_set_dtprop(int handle,
+ uint64_t dtpath_addr,
+ uint64_t propname_addr,
+ uint64_t propvalue_addr,
+ uint32_t propvalue_len)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r8 __asm__("r8");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r7 __asm__("r7");
+ register uintptr_t r9 __asm__("r9");
+ register uintptr_t r10 __asm__("r10");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_SET_DTPROP);
+ r3 = handle;
+
+#ifdef CONFIG_PHYS_64BIT
+ r4 = dtpath_addr >> 32;
+ r6 = propname_addr >> 32;
+ r8 = propvalue_addr >> 32;
+#else
+ r4 = 0;
+ r6 = 0;
+ r8 = 0;
+#endif
+ r5 = (uint32_t)dtpath_addr;
+ r7 = (uint32_t)propname_addr;
+ r9 = (uint32_t)propvalue_addr;
+ r10 = propvalue_len;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7),
+ "+r" (r8), "+r" (r9), "+r" (r10)
+ : : EV_HCALL_CLOBBERS8
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_restart - reboot the current partition
+ * @partition: partition ID
+ *
+ * Returns an error code if reboot failed. Does not return if it succeeds.
+ */
+static inline unsigned int fh_partition_restart(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_RESTART);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+#define FH_PARTITION_STOPPED 0
+#define FH_PARTITION_RUNNING 1
+#define FH_PARTITION_STARTING 2
+#define FH_PARTITION_STOPPING 3
+#define FH_PARTITION_PAUSING 4
+#define FH_PARTITION_PAUSED 5
+#define FH_PARTITION_RESUMING 6
+
+/**
+ * fh_partition_get_status - gets the status of a partition
+ * @partition: partition ID
+ * @status: returned status code
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_get_status(unsigned int partition,
+ unsigned int *status)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_GET_STATUS);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *status = r4;
+
+ return r3;
+}
+
+/**
+ * fh_partition_start - boots and starts execution of the specified partition
+ * @partition: partition ID
+ * @entry_point: guest physical address to start execution
+ *
+ * The hypervisor creates a 1-to-1 virtual/physical IMA mapping, so at boot
+ * time, guest physical addresses are the same as guest virtual addresses.
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_start(unsigned int partition,
+ uint32_t entry_point, int load)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_START);
+ r3 = partition;
+ r4 = entry_point;
+ r5 = load;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5)
+ : : EV_HCALL_CLOBBERS3
+ );
+
+ return r3;
+}
+
+/**
+ * fh_partition_stop - stops another partition
+ * @partition: partition ID
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop(unsigned int partition)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP);
+ r3 = partition;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * struct fh_sg_list: definition of the fh_partition_memcpy S/G list
+ * @source: guest physical address to copy from
+ * @target: guest physical address to copy to
+ * @size: number of bytes to copy
+ * @reserved: reserved, must be zero
+ *
+ * The scatter/gather list for fh_partition_memcpy() is an array of these
+ * structures. The array must be guest physically contiguous.
+ *
+ * This structure must be aligned on a 32-byte boundary, so that no single
+ * structure can span two pages.
+ */
+struct fh_sg_list {
+ uint64_t source; /**< guest physical address to copy from */
+ uint64_t target; /**< guest physical address to copy to */
+ uint64_t size; /**< number of bytes to copy */
+ uint64_t reserved; /**< reserved, must be zero */
+} __attribute__ ((aligned(32)));
+
+/**
+ * fh_partition_memcpy - copies data from one guest to another
+ * @source: the ID of the partition to copy from
+ * @target: the ID of the partition to copy to
+ * @sg_list: guest physical address of an array of &fh_sg_list structures
+ * @count: the number of entries in @sg_list
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_memcpy(unsigned int source,
+ unsigned int target, phys_addr_t sg_list, unsigned int count)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_MEMCPY);
+ r3 = source;
+ r4 = target;
+ r5 = (uint32_t) sg_list;
+
+#ifdef CONFIG_PHYS_64BIT
+ r6 = sg_list >> 32;
+#else
+ r6 = 0;
+#endif
+ r7 = count;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11),
+ "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ return r3;
+}
+
+/**
+ * fh_dma_enable - enable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to enable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_enable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_ENABLE);
+ r3 = liodn;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * fh_dma_disable - disable DMA for the specified device
+ * @liodn: the LIODN of the I/O device for which to disable DMA
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_dma_disable(unsigned int liodn)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_DMA_DISABLE);
+ r3 = liodn;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_vmpic_get_msir - returns the MPIC-MSI register value
+ * @interrupt: the interrupt number
+ * @msir_val: returned MPIC-MSI register value
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_vmpic_get_msir(unsigned int interrupt,
+ unsigned int *msir_val)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_VMPIC_GET_MSIR);
+ r3 = interrupt;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "=r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *msir_val = r4;
+
+ return r3;
+}
+
+/**
+ * fh_system_reset - reset the system
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_system_reset(void)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_SYSTEM_RESET);
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "=r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+
+/**
+ * fh_err_get_info - get platform error information
+ * @queue id:
+ * 0 for guest error event queue
+ * 1 for global error event queue
+ *
+ * @pointer to store the platform error data:
+ * platform error data is returned in registers r4 - r11
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize,
+ uint32_t addr_hi, uint32_t addr_lo, int peek)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+ register uintptr_t r5 __asm__("r5");
+ register uintptr_t r6 __asm__("r6");
+ register uintptr_t r7 __asm__("r7");
+
+ r11 = FH_HCALL_TOKEN(FH_ERR_GET_INFO);
+ r3 = queue;
+ r4 = *bufsize;
+ r5 = addr_hi;
+ r6 = addr_lo;
+ r7 = peek;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6),
+ "+r" (r7)
+ : : EV_HCALL_CLOBBERS5
+ );
+
+ *bufsize = r4;
+
+ return r3;
+}
+
+
+#define FH_VCPU_RUN 0
+#define FH_VCPU_IDLE 1
+#define FH_VCPU_NAP 2
+
+/**
+ * fh_get_core_state - get the state of a vcpu
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ * @state: the current state of the vcpu, see FH_VCPU_*
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_get_core_state(unsigned int handle,
+ unsigned int vcpu, unsigned int *state)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_GET_CORE_STATE);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ *state = r4;
+ return r3;
+}
+
+/**
+ * fh_enter_nap - enter nap on a vcpu
+ *
+ * Note that though the API supports entering nap on a vcpu other
+ * than the caller, this may not be implemented and may return EINVAL.
+ *
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_enter_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_ENTER_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+
+/**
+ * fh_exit_nap - exit nap on a vcpu
+ * @handle: handle of partition containing the vcpu
+ * @vcpu: vcpu number within the partition
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_exit_nap(unsigned int handle, unsigned int vcpu)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+ register uintptr_t r4 __asm__("r4");
+
+ r11 = FH_HCALL_TOKEN(FH_EXIT_NAP);
+ r3 = handle;
+ r4 = vcpu;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3), "+r" (r4)
+ : : EV_HCALL_CLOBBERS2
+ );
+
+ return r3;
+}
+/**
+ * fh_claim_device - claim a "claimable" shared device
+ * @handle: fsl,hv-device-handle of node to claim
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_claim_device(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_CLAIM_DEVICE);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+
+/**
+ * Run deferred DMA disabling on a partition's private devices
+ *
+ * This applies to devices which a partition owns either privately,
+ * or which are claimable and still actively owned by that partition,
+ * and which do not have the no-dma-disable property.
+ *
+ * @handle: partition (must be stopped) whose DMA is to be disabled
+ *
+ * Returns 0 for success, or an error code.
+ */
+static inline unsigned int fh_partition_stop_dma(unsigned int handle)
+{
+ register uintptr_t r11 __asm__("r11");
+ register uintptr_t r3 __asm__("r3");
+
+ r11 = FH_HCALL_TOKEN(FH_PARTITION_STOP_DMA);
+ r3 = handle;
+
+ __asm__ __volatile__ ("sc 1"
+ : "+r" (r11), "+r" (r3)
+ : : EV_HCALL_CLOBBERS1
+ );
+
+ return r3;
+}
+#endif
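As an illustration of the calling convention only (not part of the header), a caller could poll a partition's state with fh_partition_get_status(); cpu_relax() and the errno values are assumed from ordinary kernel code:

	static int example_wait_for_partition(unsigned int partition)
	{
		unsigned int status;

		for (;;) {
			if (fh_partition_get_status(partition, &status))
				return -EIO;		/* the hcall itself failed */
			if (status == FH_PARTITION_RUNNING)
				return 0;
			if (status == FH_PARTITION_STOPPED)
				return -ENODEV;		/* gave up: it is not starting */
			cpu_relax();			/* STARTING/RESUMING: poll again */
		}
	}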
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
new file mode 100644
index 000000000000..d3f64f361814
--- /dev/null
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -0,0 +1,94 @@
+#ifndef _HVSI_H
+#define _HVSI_H
+
+#define VS_DATA_PACKET_HEADER 0xff
+#define VS_CONTROL_PACKET_HEADER 0xfe
+#define VS_QUERY_PACKET_HEADER 0xfd
+#define VS_QUERY_RESPONSE_PACKET_HEADER 0xfc
+
+/* control verbs */
+#define VSV_SET_MODEM_CTL 1 /* to service processor only */
+#define VSV_MODEM_CTL_UPDATE 2 /* from service processor only */
+#define VSV_CLOSE_PROTOCOL 3
+
+/* query verbs */
+#define VSV_SEND_VERSION_NUMBER 1
+#define VSV_SEND_MODEM_CTL_STATUS 2
+
+/* yes, these masks are not consecutive. */
+#define HVSI_TSDTR 0x01
+#define HVSI_TSCD 0x20
+
+#define HVSI_MAX_OUTGOING_DATA 12
+#define HVSI_VERSION 1
+
+struct hvsi_header {
+ uint8_t type;
+ uint8_t len;
+ uint16_t seqno;
+} __attribute__((packed));
+
+struct hvsi_data {
+ struct hvsi_header hdr;
+ uint8_t data[HVSI_MAX_OUTGOING_DATA];
+} __attribute__((packed));
+
+struct hvsi_control {
+ struct hvsi_header hdr;
+ uint16_t verb;
+ /* optional depending on verb: */
+ uint32_t word;
+ uint32_t mask;
+} __attribute__((packed));
+
+struct hvsi_query {
+ struct hvsi_header hdr;
+ uint16_t verb;
+} __attribute__((packed));
+
+struct hvsi_query_response {
+ struct hvsi_header hdr;
+ uint16_t verb;
+ uint16_t query_seqno;
+ union {
+ uint8_t version;
+ uint32_t mctrl_word;
+ } u;
+} __attribute__((packed));
+
+/* hvsi lib struct definitions */
+#define HVSI_INBUF_SIZE 255
+struct tty_struct;
+struct hvsi_priv {
+ unsigned int inbuf_len; /* data in input buffer */
+ unsigned char inbuf[HVSI_INBUF_SIZE];
+ unsigned int inbuf_cur; /* Cursor in input buffer */
+ unsigned int inbuf_pktlen; /* packet length from cursor */
+ atomic_t seqno; /* packet sequence number */
+ unsigned int opened:1; /* driver opened */
+ unsigned int established:1; /* protocol established */
+ unsigned int is_console:1; /* used as a kernel console device */
+ unsigned int mctrl_update:1; /* modem control updated */
+ unsigned short mctrl; /* modem control */
+ struct tty_struct *tty; /* tty structure */
+ int (*get_chars)(uint32_t termno, char *buf, int count);
+ int (*put_chars)(uint32_t termno, const char *buf, int count);
+ uint32_t termno;
+};
+
+/* hvsi lib functions */
+struct hvc_struct;
+extern void hvsilib_init(struct hvsi_priv *pv,
+ int (*get_chars)(uint32_t termno, char *buf, int count),
+ int (*put_chars)(uint32_t termno, const char *buf,
+ int count),
+ int termno, int is_console);
+extern int hvsilib_open(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern void hvsilib_close(struct hvsi_priv *pv, struct hvc_struct *hp);
+extern int hvsilib_read_mctrl(struct hvsi_priv *pv);
+extern int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr);
+extern void hvsilib_establish(struct hvsi_priv *pv);
+extern int hvsilib_get_chars(struct hvsi_priv *pv, char *buf, int count);
+extern int hvsilib_put_chars(struct hvsi_priv *pv, const char *buf, int count);
+
+#endif /* _HVSI_H */
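A hypothetical sketch of how these packet structures are filled in, assuming (as the consumers of this header appear to) that hdr.len counts the header plus the payload bytes; memcpy() comes from <linux/string.h>:

	static void example_fill_data_packet(struct hvsi_data *pkt, uint16_t seqno,
					     const char *buf, uint8_t n)
	{
		/* n must not exceed HVSI_MAX_OUTGOING_DATA (12 payload bytes) */
		pkt->hdr.type  = VS_DATA_PACKET_HEADER;
		pkt->hdr.len   = sizeof(struct hvsi_header) + n;
		pkt->hdr.seqno = seqno;
		memcpy(pkt->data, buf, n);
	}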
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 1bff591f7f72..c0e1bc319e35 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -14,7 +14,7 @@
#include <linux/radix-tree.h>
#include <asm/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
/* Define a way to iterate across irqs. */
@@ -330,5 +330,7 @@ extern int call_handle_irq(int irq, void *p1,
struct thread_info *tp, void *func);
extern void do_IRQ(struct pt_regs *regs);
+int irq_choose_cpu(const struct cpumask *mask);
+
#endif /* _ASM_IRQ_H */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
new file mode 100644
index 000000000000..1f780b95c0f0
--- /dev/null
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_POWERPC_JUMP_LABEL_H
+#define _ASM_POWERPC_JUMP_LABEL_H
+
+/*
+ * Copyright 2010 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+#include <asm/feature-fixups.h>
+
+#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct jump_label_key *key)
+{
+ asm goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 4\n\t"
+ JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".popsection \n\t"
+ : : "i" (key) : : l_yes);
+ return false;
+l_yes:
+ return true;
+}
+
+#ifdef CONFIG_PPC64
+typedef u64 jump_label_t;
+#else
+typedef u32 jump_label_t;
+#endif
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+ jump_label_t pad;
+};
+
+#endif /* _ASM_POWERPC_JUMP_LABEL_H */
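For illustration, users of this arch hook normally go through the generic <linux/jump_label.h> layer; assuming that layer's static_branch()/jump_label_inc()/jump_label_dec() API of this kernel generation, usage looks roughly like:

	static struct jump_label_key example_key;	/* branch starts out disabled */

	static void example_hot_path(void)
	{
		if (static_branch(&example_key))	/* compiled as a nop until enabled */
			pr_debug("slow path taken\n");
	}

	/* slow-path code flips the branch at runtime:            */
	/*	jump_label_inc(&example_key);	patches the nop in */
	/*	jump_label_dec(&example_key);	restores the nop   */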
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index c2410af6bfd9..b8da91363864 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -2,7 +2,7 @@
#define _ARCH_POWERPC_LOCAL_H
#include <linux/percpu.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
typedef struct
{
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index 7ab82c825a03..27af7f8bbb8d 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -76,7 +76,7 @@ static inline unsigned long macio_resource_len(struct macio_dev *dev, int resour
struct resource *res = &dev->resource[resource_no];
if (res->start == 0 || res->end == 0 || res->end < res->start)
return 0;
- return res->end - res->start + 1;
+ return resource_size(res);
}
extern int macio_enable_devres(struct macio_dev *dev);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4138b21ae80a..698b30638681 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -115,14 +115,24 @@
#ifndef __ASSEMBLY__
#include <asm/cputable.h>
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+#endif
+
static inline int mmu_has_feature(unsigned long feature)
{
return (cur_cpu_spec->mmu_features & feature);
}
+static inline void mmu_clear_feature(unsigned long feature)
+{
+ cur_cpu_spec->mmu_features &= ~feature;
+}
+
extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
-/* MMU initialization (64-bit only fo now) */
+/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index 89d2f99c1bf4..23cd6cc30bcf 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -17,7 +17,7 @@
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
extern void pSeries_reconfig_notifier_unregister(struct notifier_block *);
-extern struct blocking_notifier_head pSeries_reconfig_chain;
+extern int pSeries_reconfig_notify(unsigned long action, void *p);
/* Not the best place to put this, will be fixed when we move some
* of the rtas suspend-me stuff to pseries */
extern void pSeries_coalesce_init(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a6da12859959..516bfb3f47d9 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -103,11 +103,12 @@ struct paca_struct {
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
- pgd_t *pgd; /* Current PGD */
- pgd_t *kernel_pgd; /* Kernel PGD */
u64 exgen[8] __attribute__((aligned(0x80)));
+ /* Keep pgd in the same cacheline as the start of extlb */
+ pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
+ pgd_t *kernel_pgd; /* Kernel PGD */
/* We can have up to 3 levels of reentrancy in the TLB miss handler */
- u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80)));
+ u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
u64 excrit[8]; /* used for crit interrupts */
u64 exdbg[8]; /* used for debug interrupts */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 90bd3ed48165..56b879ab3a40 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -10,58 +10,10 @@
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/ioport.h>
+#include <asm-generic/pci-bridge.h>
struct device_node;
-enum {
- /* Force re-assigning all resources (ignore firmware
- * setup completely)
- */
- PPC_PCI_REASSIGN_ALL_RSRC = 0x00000001,
-
- /* Re-assign all bus numbers */
- PPC_PCI_REASSIGN_ALL_BUS = 0x00000002,
-
- /* Do not try to assign, just use existing setup */
- PPC_PCI_PROBE_ONLY = 0x00000004,
-
- /* Don't bother with ISA alignment unless the bridge has
- * ISA forwarding enabled
- */
- PPC_PCI_CAN_SKIP_ISA_ALIGN = 0x00000008,
-
- /* Enable domain numbers in /proc */
- PPC_PCI_ENABLE_PROC_DOMAINS = 0x00000010,
- /* ... except for domain 0 */
- PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
-};
-#ifdef CONFIG_PCI
-extern unsigned int ppc_pci_flags;
-
-static inline void ppc_pci_set_flags(int flags)
-{
- ppc_pci_flags = flags;
-}
-
-static inline void ppc_pci_add_flags(int flags)
-{
- ppc_pci_flags |= flags;
-}
-
-static inline int ppc_pci_has_flag(int flag)
-{
- return (ppc_pci_flags & flag);
-}
-#else
-static inline void ppc_pci_set_flags(int flags) { }
-static inline void ppc_pci_add_flags(int flags) { }
-static inline int ppc_pci_has_flag(int flag)
-{
- return 0;
-}
-#endif
-
-
/*
* Structure of a PCI controller (host bridge)
*/
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 1f522680ea17..49c3de582be0 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -44,7 +44,7 @@ struct pci_dev;
* bus numbers (don't do that on ppc64 yet !)
*/
#define pcibios_assign_all_busses() \
- (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
+ (pci_has_flag(PCI_REASSIGN_ALL_BUS))
static inline void pcibios_set_master(struct pci_dev *dev)
{
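For platform code that used the removed PPC_PCI_* helpers, the replacement is the generic API, as the pcibios_assign_all_busses() change above suggests; assuming <asm-generic/pci-bridge.h> provides pci_add_flags()/pci_has_flag() and PCI_* counterparts of the old flags, a conversion looks roughly like:

	static void __init example_platform_pci_setup(void)
	{
		/* was: ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS); */
		pci_add_flags(PCI_REASSIGN_ALL_BUS);

		/* was: if (ppc_pci_has_flag(PPC_PCI_PROBE_ONLY)) ... */
		if (pci_has_flag(PCI_PROBE_ONLY))
			pr_info("PCI: using existing firmware resource assignment\n");
	}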
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 81576ee0cfb1..c4205616dfb5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -357,7 +357,8 @@ void pgtable_cache_init(void);
/*
* find_linux_pte returns the address of a linux pte for a given
* effective address and directory. If not found, it returns zero.
- */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+ */
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
pgd_t *pg;
pud_t *pu;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index d50c2b6d9bc3..eb11a446720e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -20,6 +20,7 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/cache.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -156,6 +157,10 @@ struct thread_struct {
#endif
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
+#ifdef CONFIG_BOOKE
+ /* BookE base exception scratch space; align on cacheline */
+ unsigned long normsave[8] ____cacheline_aligned;
+#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
#endif
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index b823536375dc..b5c91901e384 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -18,7 +18,7 @@
*/
#include <linux/types.h>
#include <asm/irq.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#define HAVE_ARCH_DEVTREE_FIXUPS
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ddbe57ae8584..e8aaf6fce38b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -889,8 +889,8 @@
#define SPRN_SPRG_WSCRATCH2 SPRN_SPRG4W
#define SPRN_SPRG_RSCRATCH3 SPRN_SPRG5R
#define SPRN_SPRG_WSCRATCH3 SPRN_SPRG5W
-#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG6R
-#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG6W
+#define SPRN_SPRG_RSCRATCH_MC SPRN_SPRG1
+#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
#ifdef CONFIG_E200
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index dae19342f0b9..186e0fb835bd 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -3,4 +3,8 @@
#include <asm-generic/setup.h>
+#ifndef __ASSEMBLY__
+extern void ppc_printk_progress(char *s, unsigned short hex);
+#endif
+
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 11eb404b5606..15a70b7f638b 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -30,7 +30,7 @@
#include <asm/percpu.h>
extern int boot_cpuid;
-extern int boot_cpu_count;
+extern int spinning_secondaries;
extern void cpu_die(void);
@@ -119,7 +119,6 @@ extern const char *smp_ipi_name[];
/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
-extern void smp_muxed_ipi_resend(void);
extern irqreturn_t smp_ipi_demux(void);
void smp_init_iSeries(void);
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index e3bdada8c542..ae20ce1af4c7 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -547,7 +547,7 @@ struct smu_sdbp_header {
* (currently, afaik, this concerns only the FVT partition
* (0x12)
*/
-#define SMU_U16_MIX(x) le16_to_cpu(x);
+#define SMU_U16_MIX(x) le16_to_cpu(x)
#define SMU_U32_MIX(x) ((((x) & 0xff00ff00u) >> 8)|(((x) & 0x00ff00ffu) << 8))
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 2dc595dda03b..e30a13d1ee76 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -120,7 +120,6 @@ extern void do_dabr(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
#endif
extern void print_backtrace(unsigned long *);
-extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 58580e94a2bb..93e05d1b34b2 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -40,6 +40,7 @@ extern void udbg_adb_init_early(void);
extern void __init udbg_early_init(void);
extern void __init udbg_init_debug_lpar(void);
+extern void __init udbg_init_debug_lpar_hvsi(void);
extern void __init udbg_init_pmac_realmode(void);
extern void __init udbg_init_maple_realmode(void);
extern void __init udbg_init_pas_realmode(void);