Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/abs_addr.h | 56
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 1
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 10
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h | 2
-rw-r--r--  arch/powerpc/include/asm/compat.h | 60
-rw-r--r--  arch/powerpc/include/asm/debug.h | 2
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 141
-rw-r--r--  arch/powerpc/include/asm/eeh_event.h | 6
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h | 6
-rw-r--r--  arch/powerpc/include/asm/fsl_guts.h | 2
-rw-r--r--  arch/powerpc/include/asm/fsl_ifc.h | 14
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 4
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 5
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h | 9
-rw-r--r--  arch/powerpc/include/asm/kprobes.h | 15
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 3
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 12
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 169
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 9
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h | 2
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 19
-rw-r--r--  arch/powerpc/include/asm/paca.h | 3
-rw-r--r--  arch/powerpc/include/asm/page_64.h | 10
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 11
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 1
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-4k.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64-64k.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 19
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 10
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 3
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h | 20
-rw-r--r--  arch/powerpc/include/asm/probes.h | 42
-rw-r--r--  arch/powerpc/include/asm/processor.h | 6
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 18
-rw-r--r--  arch/powerpc/include/asm/reg.h | 54
-rw-r--r--  arch/powerpc/include/asm/setup.h | 2
-rw-r--r--  arch/powerpc/include/asm/siginfo.h | 1
-rw-r--r--  arch/powerpc/include/asm/smp.h | 2
-rw-r--r--  arch/powerpc/include/asm/sparsemem.h | 4
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h | 6
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 4
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 7
-rw-r--r--  arch/powerpc/include/asm/time.h | 6
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 7
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 11
-rw-r--r--  arch/powerpc/include/asm/ucc_fast.h | 2
-rw-r--r--  arch/powerpc/include/asm/ucc_slow.h | 2
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/include/asm/uprobes.h | 54
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 3
54 files changed, 578 insertions, 290 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 7e313f1ed183..ace53dbde2cd 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -35,4 +35,5 @@ header-y += types.h
header-y += ucontext.h
header-y += unistd.h
+generic-y += clkdev.h
generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
deleted file mode 100644
index 9d92ba04b033..000000000000
--- a/arch/powerpc/include/asm/abs_addr.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#ifndef _ASM_POWERPC_ABS_ADDR_H
-#define _ASM_POWERPC_ABS_ADDR_H
-#ifdef __KERNEL__
-
-
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/memblock.h>
-
-#include <asm/types.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-struct mschunks_map {
- unsigned long num_chunks;
- unsigned long chunk_size;
- unsigned long chunk_shift;
- unsigned long chunk_mask;
- u32 *mapping;
-};
-
-extern struct mschunks_map mschunks_map;
-
-/* Chunks are 256 KB */
-#define MSCHUNKS_CHUNK_SHIFT (18)
-#define MSCHUNKS_CHUNK_SIZE (1UL << MSCHUNKS_CHUNK_SHIFT)
-#define MSCHUNKS_OFFSET_MASK (MSCHUNKS_CHUNK_SIZE - 1)
-
-static inline unsigned long chunk_to_addr(unsigned long chunk)
-{
- return chunk << MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long addr_to_chunk(unsigned long addr)
-{
- return addr >> MSCHUNKS_CHUNK_SHIFT;
-}
-
-static inline unsigned long phys_to_abs(unsigned long pa)
-{
- return pa;
-}
-
-/* Convenience macros */
-#define virt_to_abs(va) phys_to_abs(__pa(va))
-#define abs_to_virt(aa) __va(aa)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index da29032ae38f..e3b1d41c89be 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -268,6 +268,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
+#define atomic_dec_if_positive atomic_dec_if_positive
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index efdc92618b38..dc2cf9c6d9e6 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -288,6 +288,16 @@ static __inline__ int test_bit_le(unsigned long nr,
return (tmp[nr >> 3] >> (nr & 7)) & 1;
}
+static inline void set_bit_le(int nr, void *addr)
+{
+ set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
+static inline void clear_bit_le(int nr, void *addr)
+{
+ clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
+}
+
static inline void __set_bit_le(int nr, void *addr)
{
__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
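To make the little-endian swizzle concrete, here is a minimal userspace sketch (not part of the patch), assuming BITOP_LE_SWIZZLE has its usual 64-bit value of ((BITS_PER_LONG - 1) & ~0x7) == 0x38:

#include <stdio.h>

/* Assumed value: on 64-bit, BITOP_LE_SWIZZLE mirrors the byte index
 * within an 8-byte word, so bit nr of a little-endian bitmap lands on
 * bit (nr ^ 0x38) of the native big-endian unsigned long. */
#define BITS_PER_LONG		64
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG - 1) & ~0x7)

int main(void)
{
	for (int nr = 0; nr < 16; nr++)
		printf("LE bit %2d -> native bit %2d\n",
		       nr, nr ^ BITOP_LE_SWIZZLE);
	return 0;
}

set_bit_le(nr, addr) is then just set_bit(nr ^ BITOP_LE_SWIZZLE, addr), exactly as the hunk above adds.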
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index ab9e402518e8..b843e35122e8 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,6 +30,8 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+extern void __flush_disable_L1(void);
+
extern void __flush_icache_range(unsigned long, unsigned long);
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 88e602f6430d..84fdf6857c31 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -38,6 +38,7 @@ typedef s64 compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 compat_u64;
+typedef u32 compat_uptr_t;
struct compat_timespec {
compat_time_t tv_sec;
@@ -114,6 +115,64 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
+
+typedef struct compat_siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE32];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
+ struct {
+ unsigned int _addr; /* faulting insn/memory ref. */
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} compat_siginfo_t;
+
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
@@ -123,7 +182,6 @@ typedef u32 compat_sigset_word;
* as pointers because the syscall entry code will have
* appropriately converted them already.
*/
-typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 716d2f089eb6..32de2577bb6d 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -44,7 +44,7 @@ static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-extern int set_dabr(unsigned long dabr);
+extern int set_dabr(unsigned long dabr, unsigned long dabrx);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int signal_code, int brkpt);
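set_dabr() now takes the DABRX value alongside the DABR. A hedged sketch of a caller using the new prototype (the helper name is hypothetical; thread_struct gains the dabrx field later in this patch):

/* Hypothetical helper: re-arm a thread's data breakpoint, passing both
 * the DABR and the new DABRX argument required by set_dabr(). */
static void rearm_thread_breakpoint(struct thread_struct *thread)
{
	set_dabr(thread->dabr, thread->dabrx);
}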
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index d60f99814ffb..b0ef73882b38 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -32,27 +32,62 @@ struct device_node;
#ifdef CONFIG_EEH
/*
+ * This struct is used to trace PE-related EEH functionality.
+ * In theory, one instance of the struct is created for each
+ * particular PE. In practice, PEs correlate with each other:
+ * the struct has to reflect that hierarchy so that the affected
+ * PEs can easily be picked up when one particular PE has EEH
+ * errors.
+ *
+ * Also, one particular PE might be composed of a PCI device, a
+ * PCI bus and their subordinate components. The struct also needs
+ * to carry that information. Furthermore, one particular PE is only
+ * meaningful within the corresponding PHB. Therefore, the root PEs
+ * should be created against the existing PHBs in one-to-one fashion.
+ */
+#define EEH_PE_INVALID (1 << 0) /* Invalid */
+#define EEH_PE_PHB (1 << 1) /* PHB PE */
+#define EEH_PE_DEVICE (1 << 2) /* Device PE */
+#define EEH_PE_BUS (1 << 3) /* Bus PE */
+
+#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
+#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
+
+struct eeh_pe {
+ int type; /* PE type: PHB/Bus/Device */
+ int state; /* PE EEH dependent mode */
+ int config_addr; /* Traditional PCI address */
+ int addr; /* PE configuration address */
+ struct pci_controller *phb; /* Associated PHB */
+ int check_count; /* Times of ignored error */
+ int freeze_count; /* Times of froze up */
+ int false_positives; /* Times of reported #ff's */
+ struct eeh_pe *parent; /* Parent PE */
+ struct list_head child_list; /* Link PE to the child list */
+ struct list_head edevs; /* Link list of EEH devices */
+ struct list_head child; /* Child PEs */
+};
+
+#define eeh_pe_for_each_dev(pe, edev) \
+ list_for_each_entry(edev, &pe->edevs, list)
+
+/*
* The struct is used to trace EEH state for the associated
* PCI device node or PCI device. In future, it might
* represent PE as well so that the EEH device to form
* another tree except the currently existing tree of PCI
* buses and PCI devices
*/
-#define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */
-#define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */
-#define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */
-#define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */
-#define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */
+#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */
struct eeh_dev {
int mode; /* EEH mode */
int class_code; /* Class code of the device */
int config_addr; /* Config address */
int pe_config_addr; /* PE config address */
- int check_count; /* Times of ignored error */
- int freeze_count; /* Times of froze up */
- int false_positives; /* Times of reported #ff's */
u32 config_space[16]; /* Saved PCI config space */
+ struct eeh_pe *pe; /* Associated PE */
+ struct list_head list; /* Form link list in the PE */
struct pci_controller *phb; /* Associated PHB */
struct device_node *dn; /* Associated device node */
struct pci_dev *pdev; /* Associated PCI device */
@@ -95,19 +130,51 @@ static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
struct eeh_ops {
char *name;
int (*init)(void);
- int (*set_option)(struct device_node *dn, int option);
- int (*get_pe_addr)(struct device_node *dn);
- int (*get_state)(struct device_node *dn, int *state);
- int (*reset)(struct device_node *dn, int option);
- int (*wait_state)(struct device_node *dn, int max_wait);
- int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len);
- int (*configure_bridge)(struct device_node *dn);
+ void* (*of_probe)(struct device_node *dn, void *flag);
+ void* (*dev_probe)(struct pci_dev *dev, void *flag);
+ int (*set_option)(struct eeh_pe *pe, int option);
+ int (*get_pe_addr)(struct eeh_pe *pe);
+ int (*get_state)(struct eeh_pe *pe, int *state);
+ int (*reset)(struct eeh_pe *pe, int option);
+ int (*wait_state)(struct eeh_pe *pe, int max_wait);
+ int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
+ int (*configure_bridge)(struct eeh_pe *pe);
int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
int (*write_config)(struct device_node *dn, int where, int size, u32 val);
};
extern struct eeh_ops *eeh_ops;
extern int eeh_subsystem_enabled;
+extern struct mutex eeh_mutex;
+extern int eeh_probe_mode;
+
+#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
+
+static inline void eeh_probe_mode_set(int flag)
+{
+ eeh_probe_mode = flag;
+}
+
+static inline int eeh_probe_mode_devtree(void)
+{
+ return (eeh_probe_mode == EEH_PROBE_MODE_DEVTREE);
+}
+
+static inline int eeh_probe_mode_dev(void)
+{
+ return (eeh_probe_mode == EEH_PROBE_MODE_DEV);
+}
+
+static inline void eeh_lock(void)
+{
+ mutex_lock(&eeh_mutex);
+}
+
+static inline void eeh_unlock(void)
+{
+ mutex_unlock(&eeh_mutex);
+}
/*
* Max number of EEH freezes allowed before we consider the device
@@ -115,22 +182,26 @@ extern int eeh_subsystem_enabled;
*/
#define EEH_MAX_ALLOWED_FREEZES 5
+typedef void *(*eeh_traverse_func)(void *data, void *flag);
+int __devinit eeh_phb_pe_create(struct pci_controller *phb);
+int eeh_add_to_parent_pe(struct eeh_dev *edev);
+int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe);
+void *eeh_pe_dev_traverse(struct eeh_pe *root,
+ eeh_traverse_func fn, void *flag);
+void eeh_pe_restore_bars(struct eeh_pe *pe);
+struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
+
void * __devinit eeh_dev_init(struct device_node *dn, void *data);
void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb);
-void __init eeh_dev_phb_init(void);
-void __init eeh_init(void);
-#ifdef CONFIG_PPC_PSERIES
-int __init eeh_pseries_init(void);
-#endif
int __init eeh_ops_register(struct eeh_ops *ops);
int __exit eeh_ops_unregister(const char *name);
unsigned long eeh_check_failure(const volatile void __iomem *token,
unsigned long val);
-int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
-void __init pci_addr_cache_build(void);
+int eeh_dev_check_failure(struct eeh_dev *edev);
+void __init eeh_addr_cache_build(void);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
-void eeh_remove_bus_device(struct pci_dev *);
+void eeh_remove_bus_device(struct pci_dev *, int);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -156,34 +227,24 @@ static inline void *eeh_dev_init(struct device_node *dn, void *data)
static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
-static inline void eeh_dev_phb_init(void) { }
-
-static inline void eeh_init(void) { }
-
-#ifdef CONFIG_PPC_PSERIES
-static inline int eeh_pseries_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_PPC_PSERIES */
-
static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
return val;
}
-static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
-{
- return 0;
-}
+#define eeh_dev_check_failure(x) (0)
-static inline void pci_addr_cache_build(void) { }
+static inline void eeh_addr_cache_build(void) { }
static inline void eeh_add_device_tree_early(struct device_node *dn) { }
static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
-static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
+static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
+
+static inline void eeh_lock(void) { }
+static inline void eeh_unlock(void) { }
+
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
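As a usage sketch of the new per-PE device list and the eeh_pe_for_each_dev() iterator above (the counting helper itself is hypothetical, not part of the patch):

/* Hypothetical helper: count the EEH devices attached to a PE by
 * walking pe->edevs with the iterator defined above. */
static int eeh_pe_dev_count(struct eeh_pe *pe)
{
	struct eeh_dev *edev;
	int count = 0;

	eeh_pe_for_each_dev(pe, edev)
		count++;

	return count;
}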
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index c68b012b7797..de67d830151b 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -28,11 +28,11 @@
*/
struct eeh_event {
struct list_head list; /* to form event queue */
- struct eeh_dev *edev; /* EEH device */
+ struct eeh_pe *pe; /* EEH PE */
};
-int eeh_send_failure_event(struct eeh_dev *edev);
-struct eeh_dev *handle_eeh_events(struct eeh_event *);
+int eeh_send_failure_event(struct eeh_pe *pe);
+void eeh_handle_event(struct eeh_pe *pe);
#endif /* __KERNEL__ */
#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index ac13addb8495..51fa43e536b9 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -37,6 +37,7 @@
* critical data
*/
+#define PACA_EXGDBELL PACA_EXGEN
/* We are out of SPRGs so we save some things in the PACA. The normal
* exception frame is smaller than the CRIT or MC one though
@@ -45,8 +46,9 @@
#define EX_CR (1 * 8)
#define EX_R10 (2 * 8)
#define EX_R11 (3 * 8)
-#define EX_R14 (4 * 8)
-#define EX_R15 (5 * 8)
+#define EX_R13 (4 * 8)
+#define EX_R14 (5 * 8)
+#define EX_R15 (6 * 8)
/*
* The TLB miss exception uses different slots.
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index aa4c488589ce..dd5ba2c22771 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -48,6 +48,8 @@ struct ccsr_guts {
__be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */
u8 res06c[0x70 - 0x6c];
__be32 devdisr; /* 0x.0070 - Device Disable Control */
+#define CCSR_GUTS_DEVDISR_TB1 0x00001000
+#define CCSR_GUTS_DEVDISR_TB0 0x00004000
__be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */
u8 res078[0x7c - 0x78];
__be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b955012939a2..b8a4b9bc50b3 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -768,22 +768,24 @@ struct fsl_ifc_gpcm {
*/
struct fsl_ifc_regs {
__be32 ifc_rev;
- u32 res1[0x3];
+ u32 res1[0x2];
struct {
+ __be32 cspr_ext;
__be32 cspr;
- u32 res2[0x2];
+ u32 res2;
} cspr_cs[FSL_IFC_BANK_COUNT];
- u32 res3[0x18];
+ u32 res3[0x19];
struct {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x18];
+ u32 res5[0x17];
struct {
+ __be32 csor_ext;
__be32 csor;
- u32 res6[0x2];
+ u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x18];
+ u32 res7[0x19];
struct {
__be32 ftim[4];
u32 res8[0x8];
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index dfdb95bc59a5..62e11a32c4c2 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -151,6 +151,10 @@ static inline void arch_release_hugepage(struct page *page)
{
}
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 423cf9eaf4a4..7a867065db79 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -152,11 +152,6 @@
#define H_VASI_RESUMED 5
#define H_VASI_COMPLETED 6
-/* DABRX flags */
-#define H_DABRX_HYPERVISOR (1UL<<(63-61))
-#define H_DABRX_KERNEL (1UL<<(63-62))
-#define H_DABRX_USER (1UL<<(63-63))
-
/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index be04330af751..423424599dad 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -27,10 +27,11 @@
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct arch_hw_breakpoint {
- bool extraneous_interrupt;
- u8 len; /* length of the target data symbol */
- int type;
unsigned long address;
+ unsigned long dabrx;
+ int type;
+ u8 len; /* length of the target data symbol */
+ bool extraneous_interrupt;
};
#include <linux/kdebug.h>
@@ -61,7 +62,7 @@ extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- set_dabr(0);
+ set_dabr(0, 0);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index be0171afdc0f..7b6feab6fd26 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -29,21 +29,16 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
+#include <asm/probes.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
struct pt_regs;
struct kprobe;
-typedef unsigned int kprobe_opcode_t;
-#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+typedef ppc_opcode_t kprobe_opcode_t;
#define MAX_INSN_SIZE 1
-#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
-#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
-#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
-#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
-
#ifdef CONFIG_PPC64
/*
* 64bit powerpc uses function descriptors.
@@ -72,12 +67,6 @@ typedef unsigned int kprobe_opcode_t;
addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
} \
}
-
-#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
- IS_TWI(instr) || IS_TDI(instr))
-#else
-/* Use stock kprobe_lookup_name since ppc32 doesn't use function descriptors */
-#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
#endif
#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f0e0c6a66d97..7aefdb3e1ce4 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -59,7 +59,7 @@ struct hpte_cache {
struct hlist_node list_vpte;
struct hlist_node list_vpte_long;
struct rcu_head rcu_head;
- u64 host_va;
+ u64 host_vpn;
u64 pfn;
ulong slot;
struct kvmppc_pte pte;
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index bfcd00c1485d..88609b23b775 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,7 +74,6 @@ struct kvmppc_host_state {
ulong vmhandler;
ulong scratch0;
ulong scratch1;
- ulong sprg3;
u8 in_guest;
u8 restore_hid5;
u8 napping;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index a8bf5c673a3c..28e8f5e5c63e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -53,6 +53,8 @@
struct kvm;
extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
+extern int kvm_unmap_hva_range(struct kvm *kvm,
+ unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -220,6 +222,7 @@ struct revmap_entry {
#define KVMPPC_GOT_PAGE 0x80
struct kvm_arch_memory_slot {
+ unsigned long *rmap;
};
struct kvm_arch {
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 42ce570812c1..c4231973edd3 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -34,19 +34,19 @@ struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
void (*hpte_invalidate)(unsigned long slot,
- unsigned long va,
+ unsigned long vpn,
int psize, int ssize,
int local);
long (*hpte_updatepp)(unsigned long slot,
unsigned long newpp,
- unsigned long va,
+ unsigned long vpn,
int psize, int ssize,
int local);
void (*hpte_updateboltedpp)(unsigned long newpp,
unsigned long ea,
int psize, int ssize);
long (*hpte_insert)(unsigned long hpte_group,
- unsigned long va,
+ unsigned long vpn,
unsigned long prpn,
unsigned long rflags,
unsigned long vflags,
@@ -180,7 +180,8 @@ struct machdep_calls {
void (*enable_pmcs)(void);
/* Set DABR for this platform, leave empty for default implemenation */
- int (*set_dabr)(unsigned long dabr);
+ int (*set_dabr)(unsigned long dabr,
+ unsigned long dabrx);
#ifdef CONFIG_PPC32 /* XXX for now */
/* A general init function, called by ppc_init in init/main.c.
@@ -214,6 +215,9 @@ struct machdep_calls {
/* Called after scan and before resource survey */
void (*pcibios_fixup_phb)(struct pci_controller *hose);
+ /* Called during PCI resource reassignment */
+ resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type);
+
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
*/
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 1c65a59881ea..9673f73eb8db 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,13 @@
#include <asm/page.h>
/*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/pgtable-ppc64.h>
+
+/*
* Segment table
*/
@@ -154,9 +161,25 @@ struct mmu_psize_def
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1
+/*
+ * Encode the page number shift.
+ * In order to fit the 78-bit va in a 64-bit variable we shift the va by
+ * 12 bits. This enables us to address up to a 76-bit va.
+ * For the hpt hash from a va we can ignore the page size bits of the va,
+ * and for hpte encoding we ignore up to 23 bits of the va. So ignoring the
+ * lower 12 bits ensures we work in all cases, including 4k page size.
+ */
+#define VPN_SHIFT 12
#ifndef __ASSEMBLY__
+static inline int segment_shift(int ssize)
+{
+ if (ssize == MMU_SEGSIZE_256M)
+ return SID_SHIFT;
+ return SID_SHIFT_1T;
+}
+
/*
* The current system page and segment sizes
*/
@@ -180,18 +203,39 @@ extern unsigned long tce_alloc_start, tce_alloc_end;
extern int mmu_ci_restrictions;
/*
+ * This computes the AVPN and B fields of the first dword of a HPTE,
+ * for use when we want to match an existing PTE. The bottom 7 bits
+ * of the returned value are zero.
+ */
+static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
+ int ssize)
+{
+ unsigned long v;
+ /*
+ * The AVA field omits the low-order 23 bits of the 78-bit VA.
+ * These bits are not needed in the PTE, because the
+ * low-order b of these bits are part of the byte offset
+ * into the virtual page and, if b < 23, the high-order
+ * 23-b of these bits are always used in selecting the
+ * PTEGs to be searched.
+ */
+ v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
+ v <<= HPTE_V_AVPN_SHIFT;
+ v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+ return v;
+}
+
+/*
* This function sets the AVPN and L fields of the HPTE appropriately
* for the page size
*/
-static inline unsigned long hpte_encode_v(unsigned long va, int psize,
- int ssize)
+static inline unsigned long hpte_encode_v(unsigned long vpn,
+ int psize, int ssize)
{
unsigned long v;
- v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
- v <<= HPTE_V_AVPN_SHIFT;
+ v = hpte_encode_avpn(vpn, psize, ssize);
if (psize != MMU_PAGE_4K)
v |= HPTE_V_LARGE;
- v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
return v;
}
@@ -216,30 +260,37 @@ static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
}
/*
- * Build a VA given VSID, EA and segment size
+ * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
*/
-static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
- int ssize)
+static inline unsigned long hpt_vpn(unsigned long ea,
+ unsigned long vsid, int ssize)
{
- if (ssize == MMU_SEGSIZE_256M)
- return (vsid << 28) | (ea & 0xfffffffUL);
- return (vsid << 40) | (ea & 0xffffffffffUL);
+ unsigned long mask;
+ int s_shift = segment_shift(ssize);
+
+ mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
+ return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
/*
* This hashes a virtual address
*/
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
- int ssize)
+static inline unsigned long hpt_hash(unsigned long vpn,
+ unsigned int shift, int ssize)
{
+ int mask;
unsigned long hash, vsid;
+ /* VPN_SHIFT can be at most 12 */
if (ssize == MMU_SEGSIZE_256M) {
- hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
+ mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
+ hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
+ ((vpn & mask) >> (shift - VPN_SHIFT));
} else {
- vsid = va >> 40;
- hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
+ mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
+ vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
+ hash = vsid ^ (vsid << 25) ^
+ ((vpn & mask) >> (shift - VPN_SHIFT)) ;
}
return hash & 0x7fffffffffUL;
}
@@ -280,63 +331,61 @@ extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
- * VSID allocation
+ * VSID allocation (256MB segment)
+ *
+ * We first generate a 38-bit "proto-VSID". For kernel addresses this
+ * is equal to the ESID | 1 << 37, for user addresses it is:
+ * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1))
*
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- * (context << 15) | (esid & 0x7fff)
+ * This splits the proto-VSID into the below range
+ * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
+ * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
*
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
+ * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
+ * That is, we assign half of the space to user processes and half
+ * to the kernel.
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
*
* VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- * where VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
- * VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
*
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
+ * VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
* a divide or extra multiply (see below).
*
* This scheme has several advantages over older methods:
*
- * - We have VSIDs allocated for every kernel address
+ * - We have VSIDs allocated for every kernel address
* (i.e. everything above 0xC000000000000000), except the very top
* segment, which simplifies several things.
*
- * - We allow for 16 significant bits of ESID and 19 bits of
- * context for user addresses. i.e. 16T (44 bits) of address space for
- * up to half a million contexts.
+ * - We allow for USER_ESID_BITS significant bits of ESID and
+ * CONTEXT_BITS bits of context for user addresses.
+ * i.e. 64T (46 bits) of address space for up to half a million contexts.
*
- * - The scramble function gives robust scattering in the hash
+ * - The scramble function gives robust scattering in the hash
* table (at least based on some initial results). The previous
* method was more susceptible to pathological cases giving excessive
* hash collisions.
*/
+
/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate (slb_low.S), do_stab_bolted
- * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ * This should be computed such that protovsid * vsid_multiplier
+ * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
*/
-
-#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
-#define VSID_BITS_256M 36
+#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
+#define VSID_BITS_256M 38
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 24
+#define VSID_BITS_1T 26
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
#define CONTEXT_BITS 19
-#define USER_ESID_BITS 16
-#define USER_ESID_BITS_1T 4
+#define USER_ESID_BITS 18
+#define USER_ESID_BITS_1T 6
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
@@ -372,6 +421,8 @@ extern void slb_set_size(u16 size);
srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
add rt,rt,rx
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
#ifndef __ASSEMBLY__
@@ -416,7 +467,7 @@ typedef struct {
#ifdef CONFIG_PPC_MM_SLICES
u64 low_slices_psize; /* SLB page size encodings */
- u64 high_slices_psize; /* 4 bits per slice for now */
+ unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
u16 sllp; /* SLB page size encoding */
#endif
@@ -452,12 +503,32 @@ typedef struct {
})
#endif /* 1 */
-/* This is only valid for addresses >= PAGE_OFFSET */
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ * The proto-VSID space is divided into two classes:
+ * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
+ * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
+ *
+ * With KERNEL_START at 0xc000000000000000, the proto vsid for
+ * the kernel ends up with 0xc00000000 (36 bits). With 64TB
+ * support we need to have kernel proto-VSID in the
+ * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
+ */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
- if (ssize == MMU_SEGSIZE_256M)
- return vsid_scramble(ea >> SID_SHIFT, 256M);
- return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
+ unsigned long proto_vsid;
+ /*
+ * We need to make sure proto_vsid for the kernel is
+ * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
+ */
+ if (ssize == MMU_SEGSIZE_256M) {
+ proto_vsid = ea >> SID_SHIFT;
+ proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
+ return vsid_scramble(proto_vsid, 256M);
+ }
+ proto_vsid = ea >> SID_SHIFT_1T;
+ proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
+ return vsid_scramble(proto_vsid, 1T);
}
/* Returns the segment size indicator for a user address */
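To see what the switch from a full virtual address to a VPN buys, here is a small userspace sketch (not kernel code) of hpt_vpn() for a 256M segment; SID_SHIFT = 28 is assumed from the existing headers, and VPN_SHIFT = 12 comes from the hunk above. The VPN is simply the old 78-bit VA shifted right by 12, which is what lets it fit in 64 bits:

#include <stdio.h>

#define SID_SHIFT	28	/* assumed: 256M segment shift */
#define VPN_SHIFT	12	/* from the patch */

/* Mirror of the new hpt_vpn() for the 256M segment case. */
static unsigned long hpt_vpn_256M(unsigned long ea, unsigned long vsid)
{
	unsigned long mask = (1UL << (SID_SHIFT - VPN_SHIFT)) - 1;

	return (vsid << (SID_SHIFT - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

int main(void)
{
	unsigned long ea = 0x10012000UL, vsid = 0x123456UL;
	unsigned long old_va = (vsid << SID_SHIFT) |
			       (ea & ((1UL << SID_SHIFT) - 1));

	/* Both print 0x1234560012: vpn == old_va >> VPN_SHIFT. */
	printf("vpn = %lx, old_va >> 12 = %lx\n",
	       hpt_vpn_256M(ea, vsid), old_va >> VPN_SHIFT);
	return 0;
}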
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f3..5e38eedea218 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
extern u64 ppc64_rma_size;
#endif /* CONFIG_PPC64 */
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 1f41382eda38..0acc7c7c28d1 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -307,6 +307,7 @@ struct mpc52xx_lpbfifo_request {
size_t size;
size_t pos; /* current position of transfer */
int flags;
+ int defer_xfer_start;
/* What to do when finished */
void (*callback)(struct mpc52xx_lpbfifo_request *);
@@ -323,6 +324,7 @@ struct mpc52xx_lpbfifo_request {
extern int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req);
extern void mpc52xx_lpbfifo_poll(void);
+extern int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req);
/* mpc52xx_pic.c */
extern void mpc52xx_init_irq(void);
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index c9f698a994be..c0f9ef90f0b8 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -63,6 +63,7 @@
*/
#define MPIC_TIMER_BASE 0x01100
#define MPIC_TIMER_STRIDE 0x40
+#define MPIC_TIMER_GROUP_STRIDE 0x1000
#define MPIC_TIMER_CURRENT_CNT 0x00000
#define MPIC_TIMER_BASE_CNT 0x00010
@@ -110,10 +111,16 @@
#define MPIC_VECPRI_SENSE_MASK 0x00400000
#define MPIC_IRQ_DESTINATION 0x00010
+#define MPIC_FSL_BRR1 0x00000
+#define MPIC_FSL_BRR1_VER 0x0000ffff
+
#define MPIC_MAX_IRQ_SOURCES 2048
#define MPIC_MAX_CPUS 32
#define MPIC_MAX_ISU 32
+#define MPIC_MAX_ERR 32
+#define MPIC_FSL_ERR_INT 16
+
/*
* Tsi108 implementation of MPIC has many differences from the original one
*/
@@ -266,6 +273,7 @@ struct mpic
struct irq_chip hc_ipi;
#endif
struct irq_chip hc_tm;
+ struct irq_chip hc_err;
const char *name;
/* Flags */
unsigned int flags;
@@ -279,6 +287,8 @@ struct mpic
/* vector numbers used for internal sources (ipi/timers) */
unsigned int ipi_vecs[4];
unsigned int timer_vecs[8];
+ /* vector numbers used for FSL MPIC error interrupts */
+ unsigned int err_int_vecs[MPIC_MAX_ERR];
/* Spurious vector to program into unused sources */
unsigned int spurious_vec;
@@ -296,11 +306,15 @@ struct mpic
phys_addr_t paddr;
/* The various ioremap'ed bases */
+ struct mpic_reg_bank thiscpuregs;
struct mpic_reg_bank gregs;
struct mpic_reg_bank tmregs;
struct mpic_reg_bank cpuregs[MPIC_MAX_CPUS];
struct mpic_reg_bank isus[MPIC_MAX_ISU];
+ /* ioremap'ed base for error interrupt registers */
+ u32 __iomem *err_regs;
+
/* Protected sources */
unsigned long *protected;
@@ -365,6 +379,11 @@ struct mpic
#define MPIC_NO_RESET 0x00004000
/* Freescale MPIC (compatible includes "fsl,mpic") */
#define MPIC_FSL 0x00008000
+/* Freescale MPIC supports EIMR (error interrupt mask register).
+ * This flag is set for MPIC version >= 4.1 (version determined
+ * from the BRR1 register).
+*/
+#define MPIC_FSL_HAS_EIMR 0x00010000
/* MPIC HW modification ID */
#define MPIC_REGSET_MASK 0xf0000000
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index daf813fea91f..e9e7a6999bb8 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -100,7 +100,7 @@ struct paca_struct {
/* SLB related definitions */
u16 vmalloc_sllp;
u16 slb_cache_ptr;
- u16 slb_cache[SLB_CACHE_ENTRIES];
+ u32 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
@@ -136,6 +136,7 @@ struct paca_struct {
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
+ u64 sprg3; /* Saved user-visible sprg */
#ifdef CONFIG_PPC_POWERNV
/* Pointer to OPAL machine check event structure set by the
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index fed85e6290e1..cd915d6b093d 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,11 +78,19 @@ extern u64 ppc64_pft_size;
#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
+/*
+ * 1 bit per slice, and we have one slice per 1TB.
+ * Right now we support only 64TB.
+ * If we change this we will have to change the type
+ * of high_slices.
+ */
+#define SLICE_MASK_SIZE 8
+
#ifndef __ASSEMBLY__
struct slice_mask {
u16 low_slices;
- u16 high_slices;
+ u64 high_slices;
};
struct mm_struct;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 8cccbee61519..025a130729bc 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -182,14 +182,25 @@ static inline int pci_device_from_OF_node(struct device_node *np,
#if defined(CONFIG_EEH)
static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
{
+ /*
+ * OF nodes whose parent isn't a PCI bridge don't actually
+ * have a PCI_DN, so we have to skip them for any EEH
+ * operations.
+ */
+ if (!dn || !PCI_DN(dn))
+ return NULL;
+
return PCI_DN(dn)->edev;
}
+#else
+#define of_node_to_eeh_dev(x) (NULL)
#endif
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
/** Remove all of the PCI devices under this bus */
+extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 078019b5b353..9710be3a2d17 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -49,6 +49,7 @@ struct power_pmu {
#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
+#define PPMU_SIAR_VALID 16 /* Processor has SIAR Valid bit */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 6eefdcffa359..12798c9d4b4b 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,7 @@
*/
#define PTE_INDEX_SIZE 9
#define PMD_INDEX_SIZE 7
-#define PUD_INDEX_SIZE 7
+#define PUD_INDEX_SIZE 9
#define PGD_INDEX_SIZE 9
#ifndef __ASSEMBLY__
@@ -19,7 +19,7 @@
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
/* PMD_SHIFT determines what a second-level page table entry can map */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 90533ddcd703..be4e2878fbc0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -7,7 +7,7 @@
#define PTE_INDEX_SIZE 12
#define PMD_INDEX_SIZE 12
#define PUD_INDEX_SIZE 0
-#define PGD_INDEX_SIZE 4
+#define PGD_INDEX_SIZE 6
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c4205616dfb5..0182c203e411 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-/* Some sanity checking */
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-#endif
-
/*
* Define the address range of the kernel non-linear virtual area
*/
@@ -41,7 +30,7 @@
#else
#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
#endif
-#define KERN_VIRT_SIZE PGTABLE_RANGE
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
/*
* The vmalloc space starts at the beginning of that region, and
@@ -117,9 +106,6 @@
#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-
/*
* This is the default implementation of various PTE accessors, it's
* used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long pte, int huge);
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7ae..a9cbd3ba5c33 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
struct mm_struct;
-#ifdef CONFIG_DEBUG_VM
-extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
-#else /* CONFIG_DEBUG_VM */
-static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
-{
-}
-#endif /* !CONFIG_DEBUG_VM */
-
#endif /* !__ASSEMBLY__ */
#if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
#ifndef __ASSEMBLY__
+#include <asm/tlbflush.h>
+
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 4c25319f2fbc..5f73ce63fcae 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -126,6 +126,7 @@
#define PPC_INST_TLBIVAX 0x7c000624
#define PPC_INST_TLBSRX_DOT 0x7c0006a5
#define PPC_INST_XXLOR 0xf0000510
+#define PPC_INST_XVCPSGNDP 0xf0000780
#define PPC_INST_NAP 0x4c000364
#define PPC_INST_SLEEP 0x4c0003a4
@@ -277,6 +278,8 @@
VSX_XX1((s), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
VSX_XX3((t), a, b))
+#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
+ VSX_XX3((t), (a), (b))))
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 80fa704d410f..ed57fa7920c8 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -47,19 +47,17 @@ extern int rtas_setup_phb(struct pci_controller *phb);
#ifdef CONFIG_EEH
-void pci_addr_cache_build(void);
-void pci_addr_cache_insert_device(struct pci_dev *dev);
-void pci_addr_cache_remove_device(struct pci_dev *dev);
-struct pci_dev *pci_addr_cache_get_device(unsigned long addr);
-void eeh_slot_error_detail(struct eeh_dev *edev, int severity);
-int eeh_pci_enable(struct eeh_dev *edev, int function);
-int eeh_reset_pe(struct eeh_dev *);
-void eeh_restore_bars(struct eeh_dev *);
+void eeh_addr_cache_insert_dev(struct pci_dev *dev);
+void eeh_addr_cache_rmv_dev(struct pci_dev *dev);
+struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr);
+void eeh_slot_error_detail(struct eeh_pe *pe, int severity);
+int eeh_pci_enable(struct eeh_pe *pe, int function);
+int eeh_reset_pe(struct eeh_pe *);
+void eeh_save_bars(struct eeh_dev *edev);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
-void eeh_mark_slot(struct device_node *dn, int mode_flag);
-void eeh_clear_slot(struct device_node *dn, int mode_flag);
-struct device_node *eeh_find_device_pe(struct device_node *dn);
+void eeh_pe_state_mark(struct eeh_pe *pe, int state);
+void eeh_pe_state_clear(struct eeh_pe *pe, int state);
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
new file mode 100644
index 000000000000..5f1e15b68704
--- /dev/null
+++ b/arch/powerpc/include/asm/probes.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_POWERPC_PROBES_H
+#define _ASM_POWERPC_PROBES_H
+#ifdef __KERNEL__
+/*
+ * Definitions common to probes files
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2012
+ */
+#include <linux/types.h>
+
+typedef u32 ppc_opcode_t;
+#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+
+/* Trap definitions per ISA */
+#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
+#define IS_TD(instr) (((instr) & 0xfc0007fe) == 0x7c000088)
+#define IS_TDI(instr) (((instr) & 0xfc000000) == 0x08000000)
+#define IS_TWI(instr) (((instr) & 0xfc000000) == 0x0c000000)
+
+#ifdef CONFIG_PPC64
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+#else
+#define is_trap(instr) (IS_TW(instr) || IS_TWI(instr))
+#endif /* CONFIG_PPC64 */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PROBES_H */
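A quick standalone check (not part of the patch) that the breakpoint opcode is recognized by the trap patterns the new header defines; the macros are copied verbatim from probes.h above:

#include <stdio.h>

#define BREAKPOINT_INSTRUCTION	0x7fe00008	/* tw 31,0,0 */

#define IS_TW(instr)	(((instr) & 0xfc0007fe) == 0x7c000008)
#define IS_TD(instr)	(((instr) & 0xfc0007fe) == 0x7c000088)
#define IS_TDI(instr)	(((instr) & 0xfc000000) == 0x08000000)
#define IS_TWI(instr)	(((instr) & 0xfc000000) == 0x0c000000)
#define is_trap(instr)	(IS_TW(instr) || IS_TD(instr) || \
			 IS_TWI(instr) || IS_TDI(instr))

int main(void)
{
	/* 0x7fe00008 & 0xfc0007fe == 0x7c000008, so IS_TW() matches. */
	printf("is_trap(0x%08x) = %d\n",
	       BREAKPOINT_INSTRUCTION, is_trap(BREAKPOINT_INSTRUCTION));
	return 0;
}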
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 5376453d90cc..8734b3855272 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -94,8 +94,8 @@ extern struct task_struct *last_task_used_spe;
#endif
#ifdef CONFIG_PPC64
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
+/* 64-bit user address space is 46-bits (64TB user VM) */
+#define TASK_SIZE_USER64 (0x0000400000000000UL)
/*
* 32-bit user address space is 4GB - 1 page
@@ -216,6 +216,8 @@ struct thread_struct {
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
unsigned long dabr; /* Data address breakpoint register */
+ unsigned long dabrx; /* ... extension */
+ unsigned long trap_nr; /* last trap # on this thread */
#ifdef CONFIG_ALTIVEC
/* Complete AltiVec register set */
vector128 vr[32] __attribute__((aligned(16)));
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index 7f065e178ec4..0e15db4d703b 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -24,7 +24,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
-#include "cell-pmu.h"
+#include <asm/cell-pmu.h>
union ps3_firmware_version {
u64 raw;
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e816ac5..eedf427c9124 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -58,14 +58,16 @@
/* Trick: we set __end to va + 64k, which happens works for
* a 16M page as well as we want only one iteration
*/
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- unsigned long __end = va + PAGE_SIZE; \
- unsigned __split = (psize == MMU_PAGE_4K || \
- psize == MMU_PAGE_64K_AP); \
- shift = mmu_psize_defs[psize].shift; \
- for (index = 0; va < __end; index++, va += (1L << shift)) { \
- if (!__split || __rpte_sub_valid(rpte, index)) do { \
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
+ do { \
+ unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
+ unsigned __split = (psize == MMU_PAGE_4K || \
+ psize == MMU_PAGE_64K_AP); \
+ shift = mmu_psize_defs[psize].shift; \
+ for (index = 0; vpn < __end; index++, \
+ vpn += (1L << (shift - VPN_SHIFT))) { \
+ if (!__split || __rpte_sub_valid(rpte, index)) \
+ do {
#define pte_iterate_hashed_end() } while(0); } } while(0)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 638608677e2a..d24c14163966 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -208,6 +208,9 @@
#define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */
#define DABRX_USER (1UL << 0)
#define DABRX_KERNEL (1UL << 1)
+#define DABRX_HYP (1UL << 2)
+#define DABRX_BTI (1UL << 3)
+#define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
@@ -521,6 +524,7 @@
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
+#define HSRR1_DENORM 0x00100000 /* Denorm exception */
#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
@@ -602,6 +606,10 @@
#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
#define POWER6_MMCRA_THRM 0x00000020UL
#define POWER6_MMCRA_OTHER 0x0000000EUL
+
+#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
+#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
+
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -761,7 +769,8 @@
* 64-bit embedded
* - SPRG0 generic exception scratch
* - SPRG2 TLB exception stack
- * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
+ * - SPRG3 critical exception scratch and
+ * CPU and NUMA node for VDSO getcpu (user visible)
* - SPRG4 unused (user visible)
* - SPRG6 TLB miss scratch (user visible, sorry !)
* - SPRG7 critical exception scratch
@@ -858,11 +867,12 @@
#ifdef CONFIG_PPC_BOOK3E_64
#define SPRN_SPRG_MC_SCRATCH SPRN_SPRG8
-#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG7
+#define SPRN_SPRG_CRIT_SCRATCH SPRN_SPRG3
#define SPRN_SPRG_DBG_SCRATCH SPRN_SPRG9
#define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2
#define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6
#define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0
+#define SPRN_SPRG_GDBELL_SCRATCH SPRN_SPRG_GEN_SCRATCH
#define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX
#define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA
@@ -937,7 +947,7 @@
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
-#define __is_processor(pv) (PVR_VER(mfspr(SPRN_PVR)) == (pv))
+#define pvr_version_is(pvr) (PVR_VER(mfspr(SPRN_PVR)) == (pvr))
/*
* IBM has further subdivided the standard PowerPC 16-bit version and
@@ -1002,25 +1012,25 @@
#define PVR_476_ISS 0x00052000
/* 64-bit processors */
-/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
-#define PV_NORTHSTAR 0x0033
-#define PV_PULSAR 0x0034
-#define PV_POWER4 0x0035
-#define PV_ICESTAR 0x0036
-#define PV_SSTAR 0x0037
-#define PV_POWER4p 0x0038
-#define PV_970 0x0039
-#define PV_POWER5 0x003A
-#define PV_POWER5p 0x003B
-#define PV_970FX 0x003C
-#define PV_POWER6 0x003E
-#define PV_POWER7 0x003F
-#define PV_630 0x0040
-#define PV_630p 0x0041
-#define PV_970MP 0x0044
-#define PV_970GX 0x0045
-#define PV_BE 0x0070
-#define PV_PA6T 0x0090
+#define PVR_NORTHSTAR 0x0033
+#define PVR_PULSAR 0x0034
+#define PVR_POWER4 0x0035
+#define PVR_ICESTAR 0x0036
+#define PVR_SSTAR 0x0037
+#define PVR_POWER4p 0x0038
+#define PVR_970 0x0039
+#define PVR_POWER5 0x003A
+#define PVR_POWER5p 0x003B
+#define PVR_970FX 0x003C
+#define PVR_POWER6 0x003E
+#define PVR_POWER7 0x003F
+#define PVR_630 0x0040
+#define PVR_630p 0x0041
+#define PVR_970MP 0x0044
+#define PVR_970GX 0x0045
+#define PVR_POWER7p 0x004A
+#define PVR_BE 0x0070
+#define PVR_PA6T 0x0090
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
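Callers move from __is_processor(PV_xxx) to pvr_version_is(PVR_xxx); a hedged sketch of the new style (the helper name is hypothetical):

/* Hypothetical helper using the renamed macro and constants. */
static inline bool cpu_is_power7_family(void)
{
	return pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p);
}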
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index d084ce195fc3..8b9a306260b2 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,7 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern int init_bootmem_done; /* set once bootmem is available */
-extern phys_addr_t memory_limit;
+extern unsigned long long memory_limit;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/include/asm/siginfo.h b/arch/powerpc/include/asm/siginfo.h
index 49495b0534ed..ccce3ef5cd86 100644
--- a/arch/powerpc/include/asm/siginfo.h
+++ b/arch/powerpc/include/asm/siginfo.h
@@ -10,7 +10,6 @@
#ifdef __powerpc64__
# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-# define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
#endif
#include <asm-generic/siginfo.h>
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ebc24dc5b1a1..e807e9d8e3f7 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -65,6 +65,7 @@ int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
void generic_set_cpu_dead(unsigned int cpu);
+void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
#endif
@@ -190,6 +191,7 @@ extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern void __early_start(void);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 0c5fa3145615..f6fc0ee813d7 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,8 +10,8 @@
*/
#define SECTION_SIZE_BITS 24
-#define MAX_PHYSADDR_BITS 44
-#define MAX_PHYSMEM_BITS 44
+#define MAX_PHYSADDR_BITS 46
+#define MAX_PHYSMEM_BITS 46
#endif /* CONFIG_SPARSEMEM */
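
To put the new limits in numbers, here is an illustrative stand-alone calculation (not kernel code): 46 physical address bits give 64 TB, and with 2^24-byte (16 MB) sections that is 2^22 sparsemem sections.

/* Illustrative only: what the 44 -> 46 bit bump means in numbers. */
#include <stdio.h>

#define SECTION_SIZE_BITS	24
#define MAX_PHYSMEM_BITS	46

int main(void)
{
	unsigned long long max_phys = 1ULL << MAX_PHYSMEM_BITS;
	unsigned long long sections = 1ULL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS);

	printf("max physical address space: %llu TB\n", max_phys >> 40);	/* 64 TB */
	printf("sparsemem sections: %llu\n", sections);				/* 4194304 */
	return 0;
}
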
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 8979d4cd3d70..de99d6e29430 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -22,4 +22,10 @@ int __init swiotlb_setup_bus_notifier(void);
extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev);
+#ifdef CONFIG_SWIOTLB
+void swiotlb_detect_4g(void);
+#else
+static inline void swiotlb_detect_4g(void) {}
+#endif
+
#endif /* __ASM_SWIOTLB_H */
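
The hunk above follows the usual "real function or empty inline stub" idiom, so callers never need their own #ifdef CONFIG_SWIOTLB around swiotlb_detect_4g(). A minimal stand-alone sketch of that idiom, with hypothetical CONFIG_FOO/foo_detect names:

#include <stdio.h>

/* #define CONFIG_FOO 1 */		/* flip to emulate the enabled build */

#ifdef CONFIG_FOO
void foo_detect(void) { printf("probing...\n"); }
#else
static inline void foo_detect(void) { }	/* no-op when the feature is off */
#endif

int main(void)
{
	foo_detect();			/* no #ifdef needed at the call site */
	return 0;
}
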
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 559ae1ee6706..840838769853 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -189,7 +189,7 @@ SYSCALL_SPU(getcwd)
SYSCALL_SPU(capget)
SYSCALL_SPU(capset)
COMPAT_SYS(sigaltstack)
-SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
+SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
PPC_SYS(vfork)
@@ -229,7 +229,7 @@ COMPAT_SYS_SPU(sched_setaffinity)
COMPAT_SYS_SPU(sched_getaffinity)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
-SYS32ONLY(sendfile64)
+SYSX(sys_ni_syscall,compat_sys_sendfile64_wrapper,sys_sendfile64)
COMPAT_SYS_SPU(io_setup)
SYSCALL_SPU(io_destroy)
COMPAT_SYS_SPU(io_getevents)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 5b1974f651c0..406b7b9a1341 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -102,7 +102,10 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
+#define TIF_UPROBE 14 /* breakpointed or single-stepping */
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
+#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
+ for stack store? */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -118,12 +121,14 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
- _TIF_NOTIFY_RESUME)
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
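
For readers new to these flags, the pattern is: a bit number (TIF_*), a mask built from it (_TIF_*), and aggregate masks that the return-to-user path tests in one go, which is why _TIF_UPROBE is OR'd into _TIF_USER_WORK_MASK above. A stand-alone sketch with a trimmed-down flag set (the bit values mirror the header, the rest is illustrative):

#include <stdio.h>

#define TIF_SIGPENDING		1
#define TIF_NEED_RESCHED	2
#define TIF_UPROBE		14

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_UPROBE		(1 << TIF_UPROBE)

#define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_UPROBE)

int main(void)
{
	unsigned long flags = _TIF_UPROBE;	/* set when a uprobe breakpoint fires */

	if (flags & _TIF_USER_WORK_MASK)
		printf("pending work before returning to userspace\n");
	return 0;
}
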
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 3b4b4a8da922..c1f267694acb 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -197,12 +197,6 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
-#define account_process_vtime(tsk) account_process_tick(tsk, 0)
-#else
-#define account_process_vtime(tsk) do { } while (0)
-#endif
-
extern void secondary_cpu_time_init(void);
DECLARE_PER_CPU(u64, decrementers_next_tb);
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 81143fcbd113..61a59271665b 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -95,7 +95,7 @@ struct ppc64_tlb_batch {
unsigned long index;
struct mm_struct *mm;
real_pte_t pte[PPC64_TLB_BATCH_NR];
- unsigned long vaddr[PPC64_TLB_BATCH_NR];
+ unsigned long vpn[PPC64_TLB_BATCH_NR];
unsigned int psize;
int ssize;
};
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, unsigned long pte, int huge);
-
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
@@ -127,7 +124,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define arch_flush_lazy_mmu_mode() do {} while (0)
-extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
+extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 17bb40cad5bf..4db49590acf5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -98,11 +98,6 @@ struct exception_table_entry {
* PowerPC, we can just do these as direct assignments. (Of course, the
* exception handling means that it's no longer "just"...)
*
- * The "user64" versions of the user access functions are versions that
- * allow access of 64-bit data. The "get_user" functions do not
- * properly handle 64-bit data because the value gets down cast to a long.
- * The "put_user" functions already handle 64-bit data properly but we add
- * "user64" versions for completeness
*/
#define get_user(x, ptr) \
__get_user_check((x), (ptr), sizeof(*(ptr)))
@@ -114,12 +109,6 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-#ifndef __powerpc64__
-#define __get_user64(x, ptr) \
- __get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
-#define __put_user64(x, ptr) __put_user(x, ptr)
-#endif
-
#define __get_user_inatomic(x, ptr) \
__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
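
The calling convention these macros keep is unchanged: get_user()/put_user() return 0 on success or -EFAULT. A stand-alone sketch with mock macros (the real ones do access checking and exception-table fixups, not the NULL test used here for illustration):

#include <stdio.h>
#include <errno.h>

#define get_user(x, ptr)	((ptr) ? ((x) = *(ptr), 0) : -EFAULT)
#define put_user(x, ptr)	((ptr) ? (*(ptr) = (x), 0) : -EFAULT)

int main(void)
{
	int user_word = 42, kval = 0;

	if (get_user(kval, &user_word))		/* 0 on success */
		return 1;
	if (put_user(kval + 1, &user_word))
		return 1;
	printf("kval=%d user_word=%d\n", kval, user_word);
	return 0;
}
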
diff --git a/arch/powerpc/include/asm/ucc_fast.h b/arch/powerpc/include/asm/ucc_fast.h
index 839aab8bf37d..4644c840e2fa 100644
--- a/arch/powerpc/include/asm/ucc_fast.h
+++ b/arch/powerpc/include/asm/ucc_fast.h
@@ -19,7 +19,7 @@
#include <asm/immap_qe.h>
#include <asm/qe.h>
-#include "ucc.h"
+#include <asm/ucc.h>
/* Receive BD's status */
#define R_E 0x80000000 /* buffer empty */
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index 0980e6ad335b..cf131ffdb8d1 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -20,7 +20,7 @@
#include <asm/immap_qe.h>
#include <asm/qe.h>
-#include "ucc.h"
+#include <asm/ucc.h>
/* transmit BD's status */
#define T_R 0x80000000 /* ready bit */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 26a6825909b6..2533752af30f 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -419,6 +419,7 @@
#define __ARCH_WANT_COMPAT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_KERNEL_EXECVE
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
new file mode 100644
index 000000000000..b532060d0916
--- /dev/null
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -0,0 +1,54 @@
+#ifndef _ASM_UPROBES_H
+#define _ASM_UPROBES_H
+/*
+ * User-space Probes (UProbes) for powerpc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2007-2012
+ *
+ * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+ */
+
+#include <linux/notifier.h>
+#include <asm/probes.h>
+
+typedef ppc_opcode_t uprobe_opcode_t;
+
+#define MAX_UINSN_BYTES 4
+#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
+
+/* The following alias is needed for reference from arch-agnostic code */
+#define UPROBE_SWBP_INSN BREAKPOINT_INSTRUCTION
+#define UPROBE_SWBP_INSN_SIZE 4 /* swbp insn size in bytes */
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u32 ainsn;
+ };
+};
+
+struct arch_uprobe_task {
+ unsigned long saved_trap_nr;
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+#endif /* _ASM_UPROBES_H */
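
The union in arch_uprobe lets the probed instruction be handled either as raw bytes (insn[]) or as one 32-bit opcode word (ainsn). A stand-alone illustration with the types stubbed locally; the nop encoding is the only powerpc-specific fact assumed here:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_UINSN_BYTES 4

struct arch_uprobe {
	union {
		uint8_t  insn[MAX_UINSN_BYTES];	/* byte view, copied from user text */
		uint32_t ainsn;			/* word view, decoded/emulated */
	};
};

int main(void)
{
	struct arch_uprobe au;
	uint32_t nop = 0x60000000;		/* powerpc "ori 0,0,0", i.e. nop */

	memcpy(au.insn, &nop, sizeof(nop));	/* byte-wise copy-in */
	printf("primary opcode: %u\n", au.ainsn >> 26);	/* 24 == ori */
	return 0;
}
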
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..baebb3da1d44
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -0,0 +1,3 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+