path: root/arch/powerpc/include
author     Ingo Molnar <mingo@kernel.org>  2014-08-22 12:04:15 +0400
committer  Ingo Molnar <mingo@kernel.org>  2014-08-22 12:04:15 +0400
commit     80b304fd00e8b667775ff791121b61ecd7cd0c03 (patch)
tree       b4f2ec59fe062c43343ee4c2f10a6bcd0e4dcd1b /arch/powerpc/include
parent     fb21b84e7f809ef04b1e5aed5d463cf0d4866638 (diff)
parent     6a7519e81321343165f89abb8b616df186d3e57a (diff)
download   linux-80b304fd00e8b667775ff791121b61ecd7cd0c03.tar.xz
Merge tag 'efi-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi into x86/urgent
Pull EFI fixes from Matt Fleming:

 * WARN_ON(!spin_is_locked()) always triggers on non-SMP machines. Swap it
   for the more canonical lockdep_assert_held() which always does the right
   thing - Guenter Roeck

 * Assign the correct value to efi.runtime_version on arm64 so that all
   the runtime services can be invoked - Semen Protsenko

Signed-off-by: Ingo Molnar <mingo@kernel.org>
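For reference, a minimal C sketch of the locking-assertion swap described in the first fix above (illustrative only, not the actual EFI patch; my_lock and must_hold_lock() are hypothetical names):

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock */

    static void must_hold_lock(void)
    {
    	/* Broken on !CONFIG_SMP: spin_is_locked() always returns 0 there,
    	 * so this WARN_ON fires even when the lock is correctly held. */
    	WARN_ON(!spin_is_locked(&my_lock));

    	/* Canonical replacement: a no-op without lockdep, and correct on
    	 * both SMP and non-SMP configurations. */
    	lockdep_assert_held(&my_lock);
    }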
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h | 4
-rw-r--r--  arch/powerpc/include/asm/cache.h | 7
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 33
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 62
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 14
-rw-r--r--  arch/powerpc/include/asm/fs_pd.h | 1
-rw-r--r--  arch/powerpc/include/asm/hardirq.h | 1
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 6
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 1
-rw-r--r--  arch/powerpc/include/asm/irqflags.h | 8
-rw-r--r--  arch/powerpc/include/asm/jump_label.h | 9
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h | 67
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 3
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 51
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 29
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 15
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 28
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 116
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 11
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 8
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 22
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 8
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 6
-rw-r--r--  arch/powerpc/include/asm/mpc85xx.h | 2
-rw-r--r--  arch/powerpc/include/asm/mpc8xx.h | 12
-rw-r--r--  arch/powerpc/include/asm/opal.h | 205
-rw-r--r--  arch/powerpc/include/asm/oprofile_impl.h | 1
-rw-r--r--  arch/powerpc/include/asm/paca.h | 5
-rw-r--r--  arch/powerpc/include/asm/page.h | 3
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 26
-rw-r--r--  arch/powerpc/include/asm/pte-fsl-booke.h | 2
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 35
-rw-r--r--  arch/powerpc/include/asm/reg.h | 18
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 57
-rw-r--r--  arch/powerpc/include/asm/scatterlist.h | 17
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 1
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 4
-rw-r--r--  arch/powerpc/include/asm/time.h | 9
-rw-r--r--  arch/powerpc/include/asm/trace.h | 45
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 2
43 files changed, 603 insertions, 359 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 3fb1bc432f4f..7f23f162ce9c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -4,5 +4,6 @@ generic-y += hash.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
+generic-y += scatterlist.h
generic-y += trace_clock.h
generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 4b237aa35660..21be8ae8f809 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -34,10 +34,14 @@
#define PPC_MIN_STKFRM 112
#ifdef __BIG_ENDIAN__
+#define LWZX_BE stringify_in_c(lwzx)
#define LDX_BE stringify_in_c(ldx)
+#define STWX_BE stringify_in_c(stwx)
#define STDX_BE stringify_in_c(stdx)
#else
+#define LWZX_BE stringify_in_c(lwbrx)
#define LDX_BE stringify_in_c(ldbrx)
+#define STWX_BE stringify_in_c(stwbrx)
#define STDX_BE stringify_in_c(stdbrx)
#endif
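
The new LWZX_BE/STWX_BE macros mirror the existing LDX_BE/STDX_BE pair at word size: on big-endian builds they emit plain indexed loads/stores (lwzx/stwx), on little-endian builds the byte-reversed forms (lwbrx/stwbrx), so big-endian data is read and written correctly either way. A rough C analogue of the load side, assuming a __be32 field (a sketch, not the asm macro):

    #include <asm/byteorder.h>

    /* Read a 32-bit big-endian field and return a native-endian value on
     * either host endianness: a no-op on BE, an lwbrx-like swap on LE. */
    static inline u32 read_be32_field(const __be32 *p)
    {
    	return be32_to_cpu(*p);
    }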
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index ed0afc1e44a4..34a05a1a990b 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
+#include <asm/reg.h>
/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -39,6 +40,12 @@ struct ppc64_caches {
};
extern struct ppc64_caches ppc64_caches;
+
+static inline void logmpp(u64 x)
+{
+ asm volatile(PPC_LOGMPP(R1) : : "r" (x));
+}
+
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 0fdd7eece6d9..daa5af91163c 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -195,8 +195,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
-#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \
- MMU_FTR_16M_PAGE)
+#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
/* We only set the altivec features if the kernel was compiled with altivec
* support
@@ -268,10 +267,6 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_MAYBE_CAN_NAP 0
#endif
-#define CLASSIC_PPC (!defined(CONFIG_8xx) && !defined(CONFIG_4xx) && \
- !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
- !defined(CONFIG_BOOKE))
-
#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | \
@@ -396,15 +391,10 @@ extern const char *powerpc_base_platform;
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_CELL_TB_BUG)
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
-#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | \
- CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | \
- CPU_FTR_IABR | \
- CPU_FTR_MMCRA | CPU_FTR_CTRL)
#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \
@@ -467,15 +457,15 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2)
#else
#define CPU_FTRS_POSSIBLE \
- (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
- CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
- CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
- CPU_FTRS_CELL | CPU_FTRS_PA6T | CPU_FTR_VSX)
+ (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
+ CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
+ CPU_FTRS_POWER8 | CPU_FTRS_POWER8_DD1 | CPU_FTRS_CELL | \
+ CPU_FTRS_PA6T | CPU_FTR_VSX)
#endif
#else
enum {
CPU_FTRS_POSSIBLE =
-#if CLASSIC_PPC
+#ifdef CONFIG_PPC_BOOK3S_32
CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
@@ -518,14 +508,15 @@ enum {
#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2)
#else
#define CPU_FTRS_ALWAYS \
- (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
- CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
- CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+ (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
+ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
+ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
+ CPU_FTRS_POWER8_DD1 & CPU_FTRS_POSSIBLE)
#endif
#else
enum {
CPU_FTRS_ALWAYS =
-#if CLASSIC_PPC
+#ifdef CONFIG_PPC_BOOK3S_32
CPU_FTRS_PPC601 & CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
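
The intent behind the two masks being edited here: CPU_FTRS_POSSIBLE is the OR (union) of the feature sets of every CPU the kernel is built for, while CPU_FTRS_ALWAYS is the AND (intersection), so feature tests can constant-fold in both directions. A hedged sketch of that pattern (not the kernel's exact cpu_has_feature() helper):

    static inline int my_cpu_has_feature(unsigned long feature)
    {
    	if (CPU_FTRS_ALWAYS & feature)		/* every supported CPU has it */
    		return 1;
    	if (!(CPU_FTRS_POSSIBLE & feature))	/* no supported CPU has it */
    		return 0;
    	/* otherwise fall back to the runtime-detected feature word */
    	return !!(cur_cpu_spec->cpu_features & feature);
    }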
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index fab7743c2640..9983c3d26bca 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/string.h>
#include <linux/time.h>
+#include <linux/atomic.h>
struct pci_dev;
struct pci_bus;
@@ -33,10 +34,11 @@ struct device_node;
#ifdef CONFIG_EEH
/* EEH subsystem flags */
-#define EEH_ENABLED 0x1 /* EEH enabled */
-#define EEH_FORCE_DISABLED 0x2 /* EEH disabled */
-#define EEH_PROBE_MODE_DEV 0x4 /* From PCI device */
-#define EEH_PROBE_MODE_DEVTREE 0x8 /* From device tree */
+#define EEH_ENABLED 0x01 /* EEH enabled */
+#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
+#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
+#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
+#define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */
/*
* Delay for PE reset, all in ms
@@ -84,7 +86,9 @@ struct eeh_pe {
int freeze_count; /* Times of froze up */
struct timeval tstamp; /* Time on first-time freeze */
int false_positives; /* Times of reported #ff's */
+ atomic_t pass_dev_cnt; /* Count of passed through devs */
struct eeh_pe *parent; /* Parent PE */
+ void *data; /* PE auxiliary data */
struct list_head child_list; /* Link PE to the child list */
struct list_head edevs; /* Link list of EEH devices */
struct list_head child; /* Child PEs */
@@ -93,6 +97,11 @@ struct eeh_pe {
#define eeh_pe_for_each_dev(pe, edev, tmp) \
list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
+static inline bool eeh_pe_passed(struct eeh_pe *pe)
+{
+ return pe ? !!atomic_read(&pe->pass_dev_cnt) : false;
+}
+
/*
* The struct is used to trace EEH state for the associated
* PCI device node or PCI device. In future, it might
@@ -165,6 +174,11 @@ enum {
#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
+#define EEH_PE_STATE_NORMAL 0 /* Normal state */
+#define EEH_PE_STATE_RESET 1 /* PE reset asserted */
+#define EEH_PE_STATE_STOPPED_IO_DMA 2 /* Frozen PE */
+#define EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA, Enabled IO */
+#define EEH_PE_STATE_UNAVAIL 5 /* Unavailable */
#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
#define EEH_RESET_HOT 1 /* Hot reset */
#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
@@ -194,36 +208,28 @@ extern int eeh_subsystem_flags;
extern struct eeh_ops *eeh_ops;
extern raw_spinlock_t confirm_error_lock;
-static inline bool eeh_enabled(void)
+static inline void eeh_add_flag(int flag)
{
- if ((eeh_subsystem_flags & EEH_FORCE_DISABLED) ||
- !(eeh_subsystem_flags & EEH_ENABLED))
- return false;
-
- return true;
+ eeh_subsystem_flags |= flag;
}
-static inline void eeh_set_enable(bool mode)
+static inline void eeh_clear_flag(int flag)
{
- if (mode)
- eeh_subsystem_flags |= EEH_ENABLED;
- else
- eeh_subsystem_flags &= ~EEH_ENABLED;
+ eeh_subsystem_flags &= ~flag;
}
-static inline void eeh_probe_mode_set(int flag)
+static inline bool eeh_has_flag(int flag)
{
- eeh_subsystem_flags |= flag;
+ return !!(eeh_subsystem_flags & flag);
}
-static inline int eeh_probe_mode_devtree(void)
+static inline bool eeh_enabled(void)
{
- return (eeh_subsystem_flags & EEH_PROBE_MODE_DEVTREE);
-}
+ if (eeh_has_flag(EEH_FORCE_DISABLED) ||
+ !eeh_has_flag(EEH_ENABLED))
+ return false;
-static inline int eeh_probe_mode_dev(void)
-{
- return (eeh_subsystem_flags & EEH_PROBE_MODE_DEV);
+ return true;
}
static inline void eeh_serialize_lock(unsigned long *flags)
@@ -243,6 +249,7 @@ static inline void eeh_serialize_unlock(unsigned long flags)
#define EEH_MAX_ALLOWED_FREEZES 5
typedef void *(*eeh_traverse_func)(void *data, void *flag);
+void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
@@ -272,6 +279,13 @@ void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);
void eeh_remove_device(struct pci_dev *);
+int eeh_dev_open(struct pci_dev *pdev);
+void eeh_dev_release(struct pci_dev *pdev);
+struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group);
+int eeh_pe_set_option(struct eeh_pe *pe, int option);
+int eeh_pe_get_state(struct eeh_pe *pe);
+int eeh_pe_reset(struct eeh_pe *pe, int option);
+int eeh_pe_configure(struct eeh_pe *pe);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -295,8 +309,6 @@ static inline bool eeh_enabled(void)
return false;
}
-static inline void eeh_set_enable(bool mode) { }
-
static inline int eeh_init(void)
{
return 0;
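
The EEH refactoring above replaces single-purpose setters (eeh_set_enable(), eeh_probe_mode_set(), ...) with the generic eeh_add_flag()/eeh_clear_flag()/eeh_has_flag() trio. A hedged usage sketch of a call site (illustrative, not taken from this series):

    static void example_eeh_setup(bool from_device_tree)
    {
    	eeh_add_flag(EEH_ENABLED);
    	eeh_add_flag(from_device_tree ? EEH_PROBE_MODE_DEVTREE
    				      : EEH_PROBE_MODE_DEV);

    	if (eeh_has_flag(EEH_FORCE_DISABLED))
    		eeh_clear_flag(EEH_ENABLED);	/* eeh_enabled() is now false */
    }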
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8f35cd7d59cc..77f52b26dad6 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -425,6 +425,8 @@ label##_relon_hv: \
#define SOFTEN_VALUE_0xa00 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe80 PACA_IRQ_DBELL
#define SOFTEN_VALUE_0xe82 PACA_IRQ_DBELL
+#define SOFTEN_VALUE_0xe60 PACA_IRQ_HMI
+#define SOFTEN_VALUE_0xe62 PACA_IRQ_HMI
#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
@@ -513,8 +515,11 @@ label##_relon_hv: \
* runlatch, etc...
*/
-/* Exception addition: Hard disable interrupts */
-#define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11)
+/*
+ * This addition reconciles our actual IRQ state with the various software
+ * flags that track it. This may call C code.
+ */
+#define ADD_RECONCILE RECONCILE_IRQ_STATE(r10,r11)
#define ADD_NVGPRS \
bl save_nvgprs
@@ -532,6 +537,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
.globl label##_common; \
label##_common: \
EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
+ /* Volatile regs are potentially clobbered here */ \
additions; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
@@ -539,7 +545,7 @@ label##_common: \
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
- ADD_NVGPRS;DISABLE_INTS)
+ ADD_NVGPRS;ADD_RECONCILE)
/*
* Like STD_EXCEPTION_COMMON, but for exceptions that can occur
@@ -548,7 +554,7 @@ label##_common: \
*/
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
- FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
+ FINISH_NAP;ADD_RECONCILE;RUNLATCH_ON)
/*
* When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
index 9361cd5342cc..f79d6c74eb2a 100644
--- a/arch/powerpc/include/asm/fs_pd.h
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -28,7 +28,6 @@
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
-#include <asm/mpc8xx.h>
extern immap_t __iomem *mpc8xx_immr;
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 418fb654370d..1bbb3013d6aa 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -11,6 +11,7 @@ typedef struct {
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
+ unsigned int hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
unsigned int doorbell_irqs;
#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5dbbb29f5c3e..85bc8c0d257b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -279,6 +279,12 @@
#define H_GET_24X7_DATA 0xF07C
#define H_GET_PERF_COUNTER_INFO 0xF080
+/* Values for 2nd argument to H_SET_MODE */
+#define H_SET_MODE_RESOURCE_SET_CIABR 1
+#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
+#define H_SET_MODE_RESOURCE_LE 4
+
#ifndef __ASSEMBLY__
/**
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 10be1dd01c6b..b59ac27a6b7d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -25,6 +25,7 @@
#define PACA_IRQ_EE 0x04
#define PACA_IRQ_DEC 0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
+#define PACA_IRQ_HMI 0x20
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index e20eb95429a8..f2149066fe5d 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -32,9 +32,8 @@
#endif
/*
- * Most of the CPU's IRQ-state tracing is done from assembly code; we
- * have to call a C function so call a wrapper that saves all the
- * C-clobbered registers.
+ * These are calls to C code, so the caller must be prepared for volatiles to
+ * be clobbered.
*/
#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
@@ -42,6 +41,9 @@
/*
* This is used by assembly code to soft-disable interrupts first and
* reconcile irq state.
+ *
+ * NB: This may call C code, so the caller must be prepared for volatiles to
+ * be clobbered.
*/
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index f016bb699b5f..efbf9a322a23 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -10,6 +10,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <asm/feature-fixups.h>
@@ -42,4 +43,12 @@ struct jump_entry {
jump_label_t key;
};
+#else
+#define ARCH_STATIC_BRANCH(LABEL, KEY) \
+1098: nop; \
+ .pushsection __jump_table, "aw"; \
+ FTR_ENTRY_LONG 1098b, LABEL, KEY; \
+ .popsection
+#endif
+
#endif /* _ASM_POWERPC_JUMP_LABEL_H */
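
The new #else branch makes ARCH_STATIC_BRANCH usable from assembly by emitting the same nop plus __jump_table entry that the C side produces. For comparison, the 2014-era C usage that feeds the same table (a sketch; my_key and slow_path_work() are hypothetical):

    #include <linux/jump_label.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;	/* hypothetical */

    static void slow_path_work(void) { /* rarely-taken work */ }

    static void hot_path(void)
    {
    	/* Compiles to a single nop until the key is enabled, at which
    	 * point the branch is patched in at runtime. */
    	if (static_key_false(&my_key))
    		slow_path_work();
    }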
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
deleted file mode 100644
index a0e57618ff33..000000000000
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright IBM Corp. 2008
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#ifndef __ASM_44X_H__
-#define __ASM_44X_H__
-
-#include <linux/kvm_host.h>
-
-#define PPC44x_TLB_SIZE 64
-
-/* If the guest is expecting it, this can be as large as we like; we'd just
- * need to find some way of advertising it. */
-#define KVM44x_GUEST_TLB_SIZE 64
-
-struct kvmppc_44x_tlbe {
- u32 tid; /* Only the low 8 bits are used. */
- u32 word0;
- u32 word1;
- u32 word2;
-};
-
-struct kvmppc_44x_shadow_ref {
- struct page *page;
- u16 gtlb_index;
- u8 writeable;
- u8 tid;
-};
-
-struct kvmppc_vcpu_44x {
- /* Unmodified copy of the guest's TLB. */
- struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
-
- /* References to guest pages in the hardware TLB. */
- struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
-
- /* State of the shadow TLB at guest context switch time. */
- struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
- u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
-
- struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
-{
- return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
-}
-
-void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
-void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
-
-#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 9601741080e5..465dfcb82c92 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -33,7 +33,6 @@
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
-#define VCPU_TLB_PGSZ PPC44x_TLB_64K
#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
#define BOOKE_INTERRUPT_CRITICAL 0
@@ -98,6 +97,7 @@
#define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00
#define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20
#define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40
+#define BOOK3S_INTERRUPT_HMI 0xe60
#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
@@ -131,6 +131,7 @@
#define BOOK3S_HFLAG_NATIVE_PS 0x8
#define BOOK3S_HFLAG_MULTI_PGSIZE 0x10
#define BOOK3S_HFLAG_NEW_TLBIE 0x20
+#define BOOK3S_HFLAG_SPLIT_HACK 0x40
#define RESUME_FLAG_NV (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index f52f65694527..6acf0c2a0f99 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -83,8 +83,6 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 purr_offset;
- u64 spurr_offset;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
u32 vsid_next;
@@ -148,9 +146,10 @@ extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
+extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
+/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
-extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
unsigned int vec);
@@ -159,13 +158,13 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
unsigned long *nb_ret);
@@ -183,12 +182,16 @@ extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
+extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
+extern int kvmppc_hcall_impl_pr(unsigned long cmd);
+extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
@@ -274,32 +277,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}
-static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
-{
- /* Load the instruction manually if it failed to do so in the
- * exit path */
- if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
- kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
-
- return kvmppc_need_byteswap(vcpu) ? swab32(vcpu->arch.last_inst) :
- vcpu->arch.last_inst;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
- return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu));
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
- return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4);
-}
-
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dar;
@@ -310,6 +287,13 @@ static inline bool is_kvmppc_resume_guest(int r)
return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+ /* Only PR KVM supports the magic page */
+ return !is_kvmppc_hv_enabled(vcpu->kvm);
+}
+
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3 0x113724FA
@@ -322,4 +306,7 @@ static inline bool is_kvmppc_resume_guest(int r)
/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+#define SPLIT_HACK_MASK 0xff000000
+#define SPLIT_HACK_OFFS 0xfb000000
+
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d645428a65a4..0aa817933e6a 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED HPTE_GR_MODIFIED
-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
unsigned long tmp, old;
+ __be64 be_lockbit, be_bits;
+
+ /*
+ * We load/store in native endian, but the HTAB is in big endian. If
+ * we byte swap all data we apply on the PTE we're implicitly correct
+ * again.
+ */
+ be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+ be_bits = cpu_to_be64(bits);
asm volatile(" ldarx %0,0,%2\n"
" and. %1,%0,%3\n"
" bne 2f\n"
- " ori %0,%0,%4\n"
+ " or %0,%0,%4\n"
" stdcx. %0,0,%2\n"
" beq+ 2f\n"
" mr %1,%3\n"
"2: isync"
: "=&r" (tmp), "=&r" (old)
- : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+ : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
: "cc", "memory");
return old == 0;
}
@@ -110,16 +119,12 @@ static inline int __hpte_actual_psize(unsigned int lp, int psize)
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
unsigned long pte_index)
{
- int b_psize, a_psize;
+ int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
unsigned int penc;
unsigned long rb = 0, va_low, sllp;
unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
- if (!(v & HPTE_V_LARGE)) {
- /* both base and actual psize is 4k */
- b_psize = MMU_PAGE_4K;
- a_psize = MMU_PAGE_4K;
- } else {
+ if (v & HPTE_V_LARGE) {
for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
/* valid entries have a shift value */
@@ -142,6 +147,8 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
*/
/* This covers 14..54 bits of va*/
rb = (v & ~0x7fUL) << 16; /* AVA field */
+
+ rb |= v >> (62 - 8); /* B field */
/*
* AVA in v had cleared lower 23 bits. We need to derive
* that from pteg index
@@ -172,10 +179,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
{
int aval_shift;
/*
- * remaining 7bits of AVA/LP fields
+ * remaining bits of AVA/LP fields
* Also contain the rr bits of LP
*/
- rb |= (va_low & 0x7f) << 16;
+ rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
/*
* Now clear not needed LP bits based on actual psize
*/
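
The endian trick in try_lock_hpte() above generalizes: keep loads and stores of the big-endian HTAB in native endianness, but byte-swap every constant applied to the entry, so the stored result stays correct on either host. A non-atomic C sketch of the same idea (illustrative only; the real code must keep the ldarx/stdcx. sequence for atomicity):

    static inline void set_hpte_bits(__be64 *hpte, unsigned long bits)
    {
    	u64 raw = *(__force u64 *)hpte;		/* native load of BE data */

    	raw |= (__force u64)cpu_to_be64(bits);	/* swap the constant instead */
    	*(__force u64 *)hpte = raw;		/* store back, still unswapped */
    }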
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index c7aed6105ff9..f7aa5cc395c4 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -69,11 +69,6 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
return false;
}
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.last_inst;
-}
-
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.ctr = val;
@@ -108,4 +103,14 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dear;
}
+
+static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
+{
+ /* Magic page is only supported on e500v2 */
+#ifdef CONFIG_KVM_E500V2
+ return true;
+#else
+ return false;
+#endif
+}
#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b8efdf..98d9dd50d063 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/hvcall.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
@@ -48,7 +49,6 @@
#define KVM_NR_IRQCHIPS 1
#define KVM_IRQCHIP_NUM_PINS 256
-#if !defined(CONFIG_KVM_440)
#include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -61,8 +61,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-#endif
-
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
@@ -96,7 +94,6 @@ struct kvm_vm_stat {
struct kvm_vcpu_stat {
u32 sum_exits;
u32 mmio_exits;
- u32 dcr_exits;
u32 signal_exits;
u32 light_exits;
/* Account for special types of light exits: */
@@ -113,22 +110,21 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
u32 dbell_exits;
u32 gdbell_exits;
+ u32 ld;
+ u32 st;
#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
u32 sp_storage;
u32 sp_instruc;
u32 queue_intr;
- u32 ld;
u32 ld_slow;
- u32 st;
u32 st_slow;
#endif
};
enum kvm_exit_types {
MMIO_EXITS,
- DCR_EXITS,
SIGNAL_EXITS,
ITLB_REAL_MISS_EXITS,
ITLB_VIRT_MISS_EXITS,
@@ -254,7 +250,6 @@ struct kvm_arch {
atomic_t hpte_mod_interest;
spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
- struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -263,6 +258,7 @@ struct kvm_arch {
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
struct list_head rtas_tokens;
+ DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
#endif
#ifdef CONFIG_KVM_MPIC
struct openpic *mpic;
@@ -271,6 +267,10 @@ struct kvm_arch {
struct kvmppc_xics *xics;
#endif
struct kvmppc_ops *kvm_ops;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* This array can grow quite large, keep it at the end */
+ struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
+#endif
};
/*
@@ -305,6 +305,8 @@ struct kvmppc_vcore {
u32 arch_compat;
ulong pcr;
ulong dpdes; /* doorbell state (POWER8) */
+ void *mpp_buffer; /* Micro Partition Prefetch buffer */
+ bool mpp_buffer_is_valid;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -503,8 +505,10 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_BOOKE
u32 decar;
#endif
- u32 tbl;
- u32 tbu;
+ /* Time base value when we entered the guest */
+ u64 entry_tb;
+ u64 entry_vtb;
+ u64 entry_ic;
u32 tcr;
ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
u32 ivor[64];
@@ -580,6 +584,8 @@ struct kvm_vcpu_arch {
u32 mmucfg;
u32 eptcfg;
u32 epr;
+ u64 sprg9;
+ u32 pwrmgtcr0;
u32 crit_save;
/* guest debug registers*/
struct debug_reg dbg_reg;
@@ -593,8 +599,6 @@ struct kvm_vcpu_arch {
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
u8 mmio_sign_extend;
- u8 dcr_needed;
- u8 dcr_is_write;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd067a6..fb86a2299d8a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -41,12 +41,26 @@
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
- EMULATE_DO_DCR, /* kvm_run filled with DCR request */
EMULATE_FAIL, /* can't emulate this instruction */
EMULATE_AGAIN, /* something went wrong. go again */
EMULATE_EXIT_USER, /* emulation requires exit to user-space */
};
+enum instruction_type {
+ INST_GENERIC,
+ INST_SC, /* system call */
+};
+
+enum xlate_instdata {
+ XLATE_INST, /* translate instruction address */
+ XLATE_DATA /* translate data address */
+};
+
+enum xlate_readwrite {
+ XLATE_READ, /* check for read permissions */
+ XLATE_WRITE /* check for write permissions */
+};
+
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);
@@ -62,8 +76,16 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
+extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
+ enum instruction_type type, u32 *inst);
+
+extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data);
+extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
+ bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
@@ -86,6 +108,9 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
+ enum xlate_instdata xlid, enum xlate_readwrite xlrw,
+ struct kvmppc_pte *pte);
extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
unsigned int id);
@@ -106,6 +131,14 @@ extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+ ulong esr_flags);
+extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+ ulong dear_flags,
+ ulong esr_flags);
+extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
+ ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
@@ -228,12 +261,35 @@ struct kvmppc_ops {
void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
unsigned long arg);
-
+ int (*hcall_implemented)(unsigned long hcall);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
+static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
+ enum instruction_type type, u32 *inst)
+{
+ int ret = EMULATE_DONE;
+ u32 fetched_inst;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
+
+ /* Write fetch_failed unswapped if the fetch failed */
+ if (ret == EMULATE_DONE)
+ fetched_inst = kvmppc_need_byteswap(vcpu) ?
+ swab32(vcpu->arch.last_inst) :
+ vcpu->arch.last_inst;
+ else
+ fetched_inst = vcpu->arch.last_inst;
+
+ *inst = fetched_inst;
+ return ret;
+}
+
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
return kvm->arch.kvm_ops == kvmppc_hv_ops;
@@ -392,6 +448,17 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif
+static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GEPR);
+#elif defined(CONFIG_BOOKE)
+ return vcpu->arch.epr;
+#else
+ return 0;
+#endif
+}
+
static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
@@ -472,8 +539,20 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
#endif
}
+#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ return mfspr(bookehv_spr); \
+} \
+
+#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
+{ \
+ mtspr(bookehv_spr, val); \
+} \
+
#define SHARED_WRAPPER_GET(reg, size) \
-static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
if (kvmppc_shared_big_endian(vcpu)) \
return be##size##_to_cpu(vcpu->arch.shared->reg); \
@@ -494,14 +573,31 @@ static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
SHARED_WRAPPER_GET(reg, size) \
SHARED_WRAPPER_SET(reg, size) \
+#define SPRNG_WRAPPER(reg, bookehv_spr) \
+ SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+ SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+
+#ifdef CONFIG_KVM_BOOKE_HV
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
+ SPRNG_WRAPPER(reg, bookehv_spr) \
+
+#else
+
+#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
+ SHARED_WRAPPER(reg, size) \
+
+#endif
+
SHARED_WRAPPER(critical, 64)
-SHARED_WRAPPER(sprg0, 64)
-SHARED_WRAPPER(sprg1, 64)
-SHARED_WRAPPER(sprg2, 64)
-SHARED_WRAPPER(sprg3, 64)
-SHARED_WRAPPER(srr0, 64)
-SHARED_WRAPPER(srr1, 64)
-SHARED_WRAPPER(dar, 64)
+SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
+SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
+SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
+SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
+SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
+SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
+SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
+SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
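
To make the new accessor scheme concrete, here is a hand expansion of SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) from the macros above (a sketch; the little-endian branch of SHARED_WRAPPER_GET is cut off in this hunk, so it is elided here too):

    #ifdef CONFIG_KVM_BOOKE_HV
    /* SPRNG_WRAPPER: access the guest SPR directly */
    static inline ulong kvmppc_get_srr0(struct kvm_vcpu *vcpu)
    {
    	return mfspr(SPRN_GSRR0);
    }

    static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, ulong val)
    {
    	mtspr(SPRN_GSRR0, val);
    }
    #else
    /* SHARED_WRAPPER: go through the endian-aware shared-page copy */
    static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
    {
    	if (kvmppc_shared_big_endian(vcpu))
    		return be64_to_cpu(vcpu->arch.shared->srr0);
    	/* ... little-endian case elided in this hunk ... */
    }
    #endif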
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index f92b0b54e921..b125ceab149c 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -57,10 +57,10 @@ struct machdep_calls {
void (*hpte_removebolted)(unsigned long ea,
int psize, int ssize);
void (*flush_hash_range)(unsigned long number, int local);
- void (*hugepage_invalidate)(struct mm_struct *mm,
+ void (*hugepage_invalidate)(unsigned long vsid,
+ unsigned long addr,
unsigned char *hpte_slot_array,
- unsigned long addr, int psize);
-
+ int psize, int ssize);
/* special for kexec, to be called in real mode, linear mapping is
* destroyed as well */
void (*hpte_clear_all)(void);
@@ -174,6 +174,10 @@ struct machdep_calls {
/* Exception handlers */
int (*system_reset_exception)(struct pt_regs *regs);
int (*machine_check_exception)(struct pt_regs *regs);
+ int (*handle_hmi_exception)(struct pt_regs *regs);
+
+ /* Early exception handlers called in realmode */
+ int (*hmi_exception_early)(struct pt_regs *regs);
/* Called during machine check exception to retrive fixup address. */
bool (*mce_check_early_recovery)(struct pt_regs *regs);
@@ -366,6 +370,7 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
} \
__define_initcall(__machine_initcall_##mach##_##fn, id);
+#define machine_early_initcall(mach, fn) __define_machine_initcall(mach, fn, early)
#define machine_core_initcall(mach, fn) __define_machine_initcall(mach, fn, 1)
#define machine_core_initcall_sync(mach, fn) __define_machine_initcall(mach, fn, 1s)
#define machine_postcore_initcall(mach, fn) __define_machine_initcall(mach, fn, 2)
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index d0918e09557f..cd4f04a74802 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,7 +40,11 @@
/* MAS registers bit definitions */
-#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
+#define MAS0_TLBSEL_MASK 0x30000000
+#define MAS0_TLBSEL_SHIFT 28
+#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
+#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \
+ MAS0_TLBSEL_SHIFT)
#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_ESEL_SHIFT 16
#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
@@ -58,6 +62,7 @@
#define MAS1_TSIZE_MASK 0x00000f80
#define MAS1_TSIZE_SHIFT 7
#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT)
#define MAS2_EPN (~0xFFFUL)
#define MAS2_X0 0x00000040
@@ -86,6 +91,7 @@
#define MAS3_SPSIZE 0x0000003e
#define MAS3_SPSIZE_SHIFT 1
+#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK
#define MAS4_TLBSELD(x) MAS0_TLBSEL(x)
#define MAS4_INDD 0x00008000 /* Default IND */
#define MAS4_TSIZED(x) MAS1_TSIZE(x)
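
The new MAS0_GET_TLBSEL()/MAS1_GET_TSIZE() extractors are the inverses of the existing packing macros. A tiny round-trip sketch with illustrative values:

    static void mas0_roundtrip_example(void)
    {
    	u32 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(5);	/* illustrative */

    	/* (mas0 & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT recovers the 1 */
    	BUG_ON(MAS0_GET_TLBSEL(mas0) != 1);
    }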
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c2b4dcf23d03..d76514487d6f 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -25,26 +25,6 @@
#include <asm/processor.h>
/*
- * Segment table
- */
-
-#define STE_ESID_V 0x80
-#define STE_ESID_KS 0x20
-#define STE_ESID_KP 0x10
-#define STE_ESID_N 0x08
-
-#define STE_VSID_SHIFT 12
-
-/* Location of cpu0's segment table */
-#define STAB0_PAGE 0x8
-#define STAB0_OFFSET (STAB0_PAGE << 12)
-#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
-
-#ifndef __ASSEMBLY__
-extern char initial_stab[];
-#endif /* ! __ASSEMBLY */
-
-/*
* SLB
*/
@@ -370,10 +350,8 @@ extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);
-extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
-extern void stab_initialize(unsigned long stab);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e61f24ed4e65..3d5abfe6ba67 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -64,9 +64,9 @@
*/
#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
-/* MMU is SLB-based
+/* Doesn't support the B bit (1T segment) in SLBIE
*/
-#define MMU_FTR_SLB ASM_CONST(0x02000000)
+#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x02000000)
/* Support 16M large pages
*/
@@ -88,10 +88,6 @@
*/
#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
-/* Doesn't support the B bit (1T segment) in SLBIE
- */
-#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000)
-
/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b467530e2485..73382eba02dc 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -18,7 +18,6 @@ extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
-extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
@@ -77,10 +76,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* sub architectures.
*/
#ifdef CONFIG_PPC_STD_MMU_64
- if (mmu_has_feature(MMU_FTR_SLB))
- switch_slb(tsk, next);
- else
- switch_stab(tsk, next);
+ switch_slb(tsk, next);
#else
/* Out of line for now */
switch_mmu_context(prev, next);
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
index 736d4acc05a8..3bef74a9914b 100644
--- a/arch/powerpc/include/asm/mpc85xx.h
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -77,6 +77,8 @@
#define SVR_T1020 0x852100
#define SVR_T1021 0x852101
#define SVR_T1022 0x852102
+#define SVR_T2080 0x853000
+#define SVR_T2081 0x853100
#define SVR_8610 0x80A000
#define SVR_8641 0x809000
diff --git a/arch/powerpc/include/asm/mpc8xx.h b/arch/powerpc/include/asm/mpc8xx.h
deleted file mode 100644
index 98f3c4f17328..000000000000
--- a/arch/powerpc/include/asm/mpc8xx.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* This is the single file included by all MPC8xx build options.
- * Since there are many different boards and no standard configuration,
- * we have a unique include file for each. Rather than change every
- * file that has to include MPC8xx configuration, they all include
- * this one and the configuration switching is done here.
- */
-#ifndef __CONFIG_8xx_DEFS
-#define __CONFIG_8xx_DEFS
-
-extern struct mpc8xx_pcmcia_ops m8xx_pcmcia_ops;
-
-#endif /* __CONFIG_8xx_DEFS */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 0da1dbd42e02..86055e598269 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -147,6 +147,10 @@ struct opal_sg_list {
#define OPAL_SET_PARAM 90
#define OPAL_DUMP_RESEND 91
#define OPAL_DUMP_INFO2 94
+#define OPAL_PCI_EEH_FREEZE_SET 97
+#define OPAL_HANDLE_HMI 98
+#define OPAL_REGISTER_DUMP_REGION 101
+#define OPAL_UNREGISTER_DUMP_REGION 102
#ifndef __ASSEMBLY__
@@ -170,7 +174,11 @@ enum OpalFreezeState {
enum OpalEehFreezeActionToken {
OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
+
+ OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
};
enum OpalPciStatusToken {
@@ -240,6 +248,7 @@ enum OpalMessageType {
OPAL_MSG_MEM_ERR,
OPAL_MSG_EPOW,
OPAL_MSG_SHUTDOWN,
+ OPAL_MSG_HMI_EVT,
OPAL_MSG_TYPE_MAX,
};
@@ -340,6 +349,12 @@ enum OpalMveEnableAction {
OPAL_ENABLE_MVE = 1
};
+enum OpalM64EnableAction {
+ OPAL_DISABLE_M64 = 0,
+ OPAL_ENABLE_M64_SPLIT = 1,
+ OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
enum OpalPciResetScope {
OPAL_PHB_COMPLETE = 1, OPAL_PCI_LINK = 2, OPAL_PHB_ERROR = 3,
OPAL_PCI_HOT_RESET = 4, OPAL_PCI_FUNDAMENTAL_RESET = 5,
@@ -502,6 +517,50 @@ struct OpalMemoryErrorData {
} u;
};
+/* HMI interrupt event */
+enum OpalHMI_Version {
+ OpalHMIEvt_V1 = 1,
+};
+
+enum OpalHMI_Severity {
+ OpalHMI_SEV_NO_ERROR = 0,
+ OpalHMI_SEV_WARNING = 1,
+ OpalHMI_SEV_ERROR_SYNC = 2,
+ OpalHMI_SEV_FATAL = 3,
+};
+
+enum OpalHMI_Disposition {
+ OpalHMI_DISPOSITION_RECOVERED = 0,
+ OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalHMI_ErrType {
+ OpalHMI_ERROR_MALFUNC_ALERT = 0,
+ OpalHMI_ERROR_PROC_RECOV_DONE,
+ OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
+ OpalHMI_ERROR_PROC_RECOV_MASKED,
+ OpalHMI_ERROR_TFAC,
+ OpalHMI_ERROR_TFMR_PARITY,
+ OpalHMI_ERROR_HA_OVERFLOW_WARN,
+ OpalHMI_ERROR_XSCOM_FAIL,
+ OpalHMI_ERROR_XSCOM_DONE,
+ OpalHMI_ERROR_SCOM_FIR,
+ OpalHMI_ERROR_DEBUG_TRIG_FIR,
+ OpalHMI_ERROR_HYP_RESOURCE,
+};
+
+struct OpalHMIEvent {
+ uint8_t version; /* 0x00 */
+ uint8_t severity; /* 0x01 */
+ uint8_t type; /* 0x02 */
+ uint8_t disposition; /* 0x03 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ __be64 hmer;
+ /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
+ __be64 tfmr;
+};
+
enum {
OPAL_P7IOC_DIAG_TYPE_NONE = 0,
OPAL_P7IOC_DIAG_TYPE_RGC = 1,
@@ -513,40 +572,40 @@ enum {
};
struct OpalIoP7IOCErrorData {
- uint16_t type;
+ __be16 type;
/* GEM */
- uint64_t gemXfir;
- uint64_t gemRfir;
- uint64_t gemRirqfir;
- uint64_t gemMask;
- uint64_t gemRwof;
+ __be64 gemXfir;
+ __be64 gemRfir;
+ __be64 gemRirqfir;
+ __be64 gemMask;
+ __be64 gemRwof;
/* LEM */
- uint64_t lemFir;
- uint64_t lemErrMask;
- uint64_t lemAction0;
- uint64_t lemAction1;
- uint64_t lemWof;
+ __be64 lemFir;
+ __be64 lemErrMask;
+ __be64 lemAction0;
+ __be64 lemAction1;
+ __be64 lemWof;
union {
struct OpalIoP7IOCRgcErrorData {
- uint64_t rgcStatus; /* 3E1C10 */
- uint64_t rgcLdcp; /* 3E1C18 */
+ __be64 rgcStatus; /* 3E1C10 */
+ __be64 rgcLdcp; /* 3E1C18 */
}rgc;
struct OpalIoP7IOCBiErrorData {
- uint64_t biLdcp0; /* 3C0100, 3C0118 */
- uint64_t biLdcp1; /* 3C0108, 3C0120 */
- uint64_t biLdcp2; /* 3C0110, 3C0128 */
- uint64_t biFenceStatus; /* 3C0130, 3C0130 */
+ __be64 biLdcp0; /* 3C0100, 3C0118 */
+ __be64 biLdcp1; /* 3C0108, 3C0120 */
+ __be64 biLdcp2; /* 3C0110, 3C0128 */
+ __be64 biFenceStatus; /* 3C0130, 3C0130 */
- uint8_t biDownbound; /* BI Downbound or Upbound */
+ u8 biDownbound; /* BI Downbound or Upbound */
}bi;
struct OpalIoP7IOCCiErrorData {
- uint64_t ciPortStatus; /* 3Dn008 */
- uint64_t ciPortLdcp; /* 3Dn010 */
+ __be64 ciPortStatus; /* 3Dn008 */
+ __be64 ciPortLdcp; /* 3Dn010 */
- uint8_t ciPort; /* Index of CI port: 0/1 */
+ u8 ciPort; /* Index of CI port: 0/1 */
}ci;
};
};
@@ -578,60 +637,60 @@ struct OpalIoPhbErrorCommon {
struct OpalIoP7IOCPhbErrorData {
struct OpalIoPhbErrorCommon common;
- uint32_t brdgCtl;
+ __be32 brdgCtl;
// P7IOC utl regs
- uint32_t portStatusReg;
- uint32_t rootCmplxStatus;
- uint32_t busAgentStatus;
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
// P7IOC cfg regs
- uint32_t deviceStatus;
- uint32_t slotStatus;
- uint32_t linkStatus;
- uint32_t devCmdStatus;
- uint32_t devSecStatus;
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
// cfg AER regs
- uint32_t rootErrorStatus;
- uint32_t uncorrErrorStatus;
- uint32_t corrErrorStatus;
- uint32_t tlpHdr1;
- uint32_t tlpHdr2;
- uint32_t tlpHdr3;
- uint32_t tlpHdr4;
- uint32_t sourceId;
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
- uint32_t rsv3;
+ __be32 rsv3;
// Record data about the call to allocate a buffer.
- uint64_t errorClass;
- uint64_t correlator;
+ __be64 errorClass;
+ __be64 correlator;
//P7IOC MMIO Error Regs
- uint64_t p7iocPlssr; // n120
- uint64_t p7iocCsr; // n110
- uint64_t lemFir; // nC00
- uint64_t lemErrorMask; // nC18
- uint64_t lemWOF; // nC40
- uint64_t phbErrorStatus; // nC80
- uint64_t phbFirstErrorStatus; // nC88
- uint64_t phbErrorLog0; // nCC0
- uint64_t phbErrorLog1; // nCC8
- uint64_t mmioErrorStatus; // nD00
- uint64_t mmioFirstErrorStatus; // nD08
- uint64_t mmioErrorLog0; // nD40
- uint64_t mmioErrorLog1; // nD48
- uint64_t dma0ErrorStatus; // nD80
- uint64_t dma0FirstErrorStatus; // nD88
- uint64_t dma0ErrorLog0; // nDC0
- uint64_t dma0ErrorLog1; // nDC8
- uint64_t dma1ErrorStatus; // nE00
- uint64_t dma1FirstErrorStatus; // nE08
- uint64_t dma1ErrorLog0; // nE40
- uint64_t dma1ErrorLog1; // nE48
- uint64_t pestA[OPAL_P7IOC_NUM_PEST_REGS];
- uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
+ __be64 p7iocPlssr; // n120
+ __be64 p7iocCsr; // n110
+ __be64 lemFir; // nC00
+ __be64 lemErrorMask; // nC18
+ __be64 lemWOF; // nC40
+ __be64 phbErrorStatus; // nC80
+ __be64 phbFirstErrorStatus; // nC88
+ __be64 phbErrorLog0; // nCC0
+ __be64 phbErrorLog1; // nCC8
+ __be64 mmioErrorStatus; // nD00
+ __be64 mmioFirstErrorStatus; // nD08
+ __be64 mmioErrorLog0; // nD40
+ __be64 mmioErrorLog1; // nD48
+ __be64 dma0ErrorStatus; // nD80
+ __be64 dma0FirstErrorStatus; // nD88
+ __be64 dma0ErrorLog0; // nDC0
+ __be64 dma0ErrorLog1; // nDC8
+ __be64 dma1ErrorStatus; // nE00
+ __be64 dma1FirstErrorStatus; // nE08
+ __be64 dma1ErrorLog0; // nE40
+ __be64 dma1ErrorLog1; // nE48
+ __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
};
struct OpalIoPhb3ErrorData {
@@ -758,6 +817,8 @@ int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
__be64 *phb_status);
int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
uint64_t eeh_action_token);
+int64_t opal_pci_eeh_freeze_set(uint64_t phb_id, uint64_t pe_number,
+ uint64_t eeh_action_token);
int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -768,7 +829,7 @@ int64_t opal_pci_set_phb_mem_window(uint64_t phb_id, uint16_t window_type,
uint16_t window_num,
uint64_t starting_real_address,
uint64_t starting_pci_address,
- uint16_t segment_size);
+ uint64_t size);
int64_t opal_pci_map_pe_mmio_window(uint64_t phb_id, uint16_t pe_number,
uint16_t window_type, uint16_t window_num,
uint16_t segment_num);
@@ -860,6 +921,9 @@ int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
uint64_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
+int64_t opal_handle_hmi(void);
+int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
+int64_t opal_unregister_dump_region(uint32_t id);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -902,6 +966,8 @@ extern void opal_msglog_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
+extern int opal_hmi_exception_early(struct pt_regs *regs);
+extern int opal_handle_hmi_exception(struct pt_regs *regs);
extern void opal_shutdown(void);
extern int opal_resync_timebase(void);
@@ -912,6 +978,13 @@ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);
+/*
+ * Dump region ID range usable by the OS
+ */
+#define OPAL_DUMP_REGION_HOST_START 0x80
+#define OPAL_DUMP_REGION_LOG_BUF 0x80
+#define OPAL_DUMP_REGION_HOST_END 0xFF
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
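
Retyping the diagnostic fields from uint64_t/uint32_t to __be64/__be32 records that OPAL fills these structures big-endian, so sparse now forces an explicit swap at every use on little-endian hosts. A hedged consumer-side sketch (hypothetical print helper):

    static void print_p7ioc_lem(const struct OpalIoP7IOCErrorData *data)
    {
    	pr_err("LEM FIR: %016llx WOF: %016llx\n",
    	       (unsigned long long)be64_to_cpu(data->lemFir),
    	       (unsigned long long)be64_to_cpu(data->lemWof));
    }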
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
index d697b08994c9..61fe5d6f18e1 100644
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ b/arch/powerpc/include/asm/oprofile_impl.h
@@ -61,7 +61,6 @@ struct op_powerpc_model {
};
extern struct op_powerpc_model op_model_fsl_emb;
-extern struct op_powerpc_model op_model_rs64;
extern struct op_powerpc_model op_model_power4;
extern struct op_powerpc_model op_model_7450;
extern struct op_powerpc_model op_model_cell;
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index bb0bd25f20d0..a5139ea6910b 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -78,10 +78,6 @@ struct paca_struct {
u64 kernel_toc; /* Kernel TOC address */
u64 kernelbase; /* Base address of kernel */
u64 kernel_msr; /* MSR while running in kernel */
-#ifdef CONFIG_PPC_STD_MMU_64
- u64 stab_real; /* Absolute address of segment table */
- u64 stab_addr; /* Virtual address of segment table */
-#endif /* CONFIG_PPC_STD_MMU_64 */
void *emergency_sp; /* pointer to emergency stack */
u64 data_offset; /* per cpu data offset */
s16 hw_cpu_id; /* Physical processor number */
@@ -171,6 +167,7 @@ struct paca_struct {
* and already using emergency stack.
*/
u16 in_mce;
+ u8 hmi_event_available; /* HMI event is available */
#endif
/* Stuff for accurate time accounting */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 32e4e212b9c1..26fe1ae15212 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -48,9 +48,6 @@ extern unsigned int HPAGE_SHIFT;
#define HUGE_MAX_HSTATE (MMU_PAGE_COUNT-1)
#endif
-/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
-#define __HAVE_ARCH_GATE_AREA 1
-
/*
* Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
* assign PAGE_MASK to a larger type it gets extended the way we want
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index b3e936027b26..814622146d5a 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -19,6 +19,8 @@
#define MAX_EVENT_ALTERNATIVES 8
#define MAX_LIMITED_HWCOUNTERS 2
+struct perf_event;
+
/*
* This struct provides the constants and functions needed to
* describe the PMU on a particular POWER-family CPU.
@@ -30,7 +32,8 @@ struct power_pmu {
unsigned long add_fields;
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[]);
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[]);
int (*get_constraint)(u64 event_id, unsigned long *mskp,
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
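The struct perf_event forward declaration exists so the widened compute_mmcr() hook can mention the type without pulling perf headers into this one; backends now see the events themselves, not just their raw codes. A hedged stub conforming to the new signature (all names illustrative):

/* Hedged sketch: PMU backend stub using the new compute_mmcr() form. */
static int sketch_compute_mmcr(u64 events[], int n_ev, unsigned int hwc[],
			       unsigned long mmcr[],
			       struct perf_event *pevents[])
{
	int i;

	for (i = 0; i < n_ev; i++)
		hwc[i] = i;	/* pevents[i] allows per-event decisions
				 * the old hook had no way to make */
	mmcr[0] = 0;		/* value to program into MMCR0 */
	return 0;
}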
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index eb9261024f51..7b3d54fae46f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
}
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp);
+ pmd_t *pmdp, unsigned long old_pmd);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
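hpte_do_hugepage_flush() gains the prior PMD value because, by the time the flush runs, the PMD may already have been cleared or rewritten; the old value is what still encodes the information needed to find the stale hash entries. A hedged call-site sketch, assuming the pmd_hugepage_update() helper from the same area (the clear/set masks are illustrative):

/* Hedged sketch: capture the old PMD while updating, then flush. */
static void clear_and_flush_hugepage(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	hpte_do_hugepage_flush(mm, addr, pmdp, old_pmd);
}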
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 3132bb9365f3..6f8536208049 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -139,6 +139,7 @@
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
+#define PPC_INST_LOGMPP 0x7c0007e4
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
@@ -150,8 +151,10 @@
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGSNDP 0x7c00011c
+#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
@@ -275,6 +278,20 @@
#define __PPC_EH(eh) 0
#endif
+/* POWER8 Micro Partition Prefetch (MPP) parameters */
+/* Address mask is common for LOGMPP instruction and MPPR SPR */
+#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
+
+/* Bits 60 and 61 of MPP SPR should be set to one of the following */
+/* Aborting the fetch is indeed setting 00 in the table size bits */
+#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
+#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
+
+/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
+#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
+#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
+#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
+
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
__PPC_RA(a) | __PPC_RB(b))
@@ -283,6 +300,8 @@
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
+ __PPC_RB(b))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
@@ -369,4 +388,11 @@
#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \
| __PPC_RA(r))
+/* book3e thread control instructions */
+#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
+#define MTTMR(tmr, r) stringify_in_c(.long PPC_INST_MTTMR | \
+ TMRN(tmr) | ___PPC_RS(r))
+#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \
+ TMRN(tmr) | ___PPC_RT(r))
+
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
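Read together, the MPP constants and PPC_LOGMPP() say: the register handed to LOGMPP carries a 16K-aligned destination address under PPC_MPPE_ADDRESS_MASK, with bits 54-55 selecting what gets logged. A hedged sketch of a wrapper and one use; the %0 substitution mirrors how the other stringified encodings in this file are driven from C, and buf_phys is an illustrative buffer address:

/* Hedged sketch: ask the MPP engine to log L2 contents to a buffer. */
static inline void logmpp_sketch(u64 operand)
{
	asm volatile(PPC_LOGMPP(%0) : : "r" (operand));
}

static void log_l2(u64 buf_phys)
{
	logmpp_sketch((buf_phys & PPC_MPPE_ADDRESS_MASK) |
		      PPC_LOGMPP_LOG_L2);
}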
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index 2c12be5f677a..e84dd7ed505e 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -37,5 +37,7 @@
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
+#define PTE_WIMGE_SHIFT (6)
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */
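PTE_WIMGE_SHIFT names where the WIMGE storage-attribute bits (write-through, cache-inhibited, memory-coherent, guarded, endian) begin in an FSL Book-E PTE, so consumers can lift them into MAS2-style attribute fields. A hedged accessor sketch; the five-bit 0x1f mask is an assumption here, not something this header defines:

/* Hedged sketch: extract the WIMGE attribute field from a PTE. */
static inline unsigned long pte_wimge_sketch(pte_t pte)
{
	return (pte_val(pte) >> PTE_WIMGE_SHIFT) & 0x1f;
}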
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index d836d945068d..4f4ec2ab45c9 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -46,11 +46,31 @@
* in order to deal with 64K made of 4K HW pages. Thus we override the
* generic accessors and iterators here
*/
-#define __real_pte(e,p) ((real_pte_t) { \
- (e), (pte_val(e) & _PAGE_COMBO) ? \
- (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
-#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
- (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+ real_pte_t rpte;
+
+ rpte.pte = pte;
+ rpte.hidx = 0;
+ if (pte_val(pte) & _PAGE_COMBO) {
+ /*
+ * Make sure we order the hidx load against the _PAGE_COMBO
+ * check. The store side ordering is done in __hash_page_4K
+ */
+ smp_rmb();
+ rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+ }
+ return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+ if ((pte_val(rpte.pte) & _PAGE_COMBO))
+ return (rpte.hidx >> (index<<2)) & 0xf;
+ return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
#define __rpte_to_pte(r) ((r).pte)
#define __rpte_sub_valid(rpte, index) \
(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
@@ -75,7 +95,8 @@
(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#define remap_4k_pfn(vma, addr, pfn, prot) \
- remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
- __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
+ (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL : \
+ remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
+ __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
#endif /* __ASSEMBLY__ */
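The macro-to-inline conversion is not cosmetic: writing __real_pte() as a function gives the smp_rmb() a precise home between the _PAGE_COMBO test and the hidx load, pairing with the store-side ordering in __hash_page_4K, and the WARN_ON() added to remap_4k_pfn() turns a silently truncated PFN into -EINVAL. A hedged sketch of the accessors combining to walk the sixteen 4K subpages of a combo 64K PTE (what is done per subpage is illustrative):

/* Hedged sketch: visit each valid 4K subpage hash index. */
static void walk_subpage_hidx(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte = __real_pte(pte, ptep);
	unsigned long index, hidx;

	for (index = 0; index < 16; index++) {	/* 64K / 4K subpages */
		if (!__rpte_sub_valid(rpte, index))
			continue;
		hidx = __rpte_to_hidx(rpte, index);
		/* hidx selects the HPTE slot backing this subpage */
		(void)hidx;
	}
}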
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bffd89d27301..0c0505956a29 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -213,9 +213,8 @@
#define SPRN_ACOP 0x1F /* Available Coprocessor Register */
#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
#define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
-#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
#define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
-#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
+#define TEXASR_FS __MASK(63-36) /* TEXASR Failure Summary */
#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
@@ -225,6 +224,7 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
+#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
#define SPRN_RPR 0xBA /* Relative Priority Register */
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
@@ -254,7 +254,7 @@
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
-#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
+#define DSISR_NOSEGMENT 0x00200000 /* SLB miss */
#define DSISR_KEYFAULT 0x00200000 /* Key fault */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
@@ -944,9 +944,6 @@
* readable variant for reads, which can avoid a fault
* with KVM type virtualization.
*
- * (*) Under KVM, the host SPRG1 is used to point to
- * the current VCPU data structure
- *
* 32-bit 8xx:
* - SPRG0 scratch for exception vectors
* - SPRG1 scratch for exception vectors
@@ -1203,6 +1200,15 @@
: "r" ((unsigned long)(v)) \
: "memory")
+static inline unsigned long mfvtb (void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mfspr(SPRN_VTB);
+#endif
+ return 0;
+}
+
#ifdef __powerpc64__
#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 464f1089b532..1d653308a33c 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -15,16 +15,28 @@
#ifndef __ASM_POWERPC_REG_BOOKE_H__
#define __ASM_POWERPC_REG_BOOKE_H__
+#include <asm/ppc-opcode.h>
+
/* Machine State Register (MSR) Fields */
-#define MSR_GS (1<<28) /* Guest state */
-#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
-#define MSR_SPE (1<<25) /* Enable SPE */
-#define MSR_DWE (1<<10) /* Debug Wait Enable */
-#define MSR_UBLE (1<<10) /* BTB lock enable (e500) */
-#define MSR_IS MSR_IR /* Instruction Space */
-#define MSR_DS MSR_DR /* Data Space */
-#define MSR_PMM (1<<2) /* Performance monitor mark bit */
-#define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */
+#define MSR_GS_LG 28 /* Guest state */
+#define MSR_UCLE_LG 26 /* User-mode cache lock enable */
+#define MSR_SPE_LG 25 /* Enable SPE */
+#define MSR_DWE_LG 10 /* Debug Wait Enable */
+#define MSR_UBLE_LG 10 /* BTB lock enable (e500) */
+#define MSR_IS_LG MSR_IR_LG /* Instruction Space */
+#define MSR_DS_LG MSR_DR_LG /* Data Space */
+#define MSR_PMM_LG 2 /* Performance monitor mark bit */
+#define MSR_CM_LG 31 /* Computation Mode (0=32-bit, 1=64-bit) */
+
+#define MSR_GS __MASK(MSR_GS_LG)
+#define MSR_UCLE __MASK(MSR_UCLE_LG)
+#define MSR_SPE __MASK(MSR_SPE_LG)
+#define MSR_DWE __MASK(MSR_DWE_LG)
+#define MSR_UBLE __MASK(MSR_UBLE_LG)
+#define MSR_IS __MASK(MSR_IS_LG)
+#define MSR_DS __MASK(MSR_DS_LG)
+#define MSR_PMM __MASK(MSR_PMM_LG)
+#define MSR_CM __MASK(MSR_CM_LG)
#if defined(CONFIG_PPC_BOOK3E_64)
#define MSR_64BIT MSR_CM
@@ -260,7 +272,7 @@
/* e500mc */
#define MCSR_DCPERR_MC 0x20000000UL /* D-Cache Parity Error */
-#define MCSR_L2MMU_MHIT 0x04000000UL /* Hit on multiple TLB entries */
+#define MCSR_L2MMU_MHIT 0x08000000UL /* Hit on multiple TLB entries */
#define MCSR_NMI 0x00100000UL /* Non-Maskable Interrupt */
#define MCSR_MAV 0x00080000UL /* MCAR address valid */
#define MCSR_MEA 0x00040000UL /* MCAR is effective address */
@@ -598,6 +610,13 @@
/* Bit definitions for L1CSR2. */
#define L1CSR2_DCWS 0x40000000 /* Data Cache write shadow */
+/* Bit definitions for BUCSR. */
+#define BUCSR_STAC_EN 0x01000000 /* Segment Target Address Cache */
+#define BUCSR_LS_EN 0x00400000 /* Link Stack */
+#define BUCSR_BBFI 0x00000200 /* Branch Buffer flash invalidate */
+#define BUCSR_BPEN 0x00000001 /* Branch prediction enable */
+#define BUCSR_INIT (BUCSR_STAC_EN | BUCSR_LS_EN | BUCSR_BBFI | BUCSR_BPEN)
+
/* Bit definitions for L2CSR0. */
#define L2CSR0_L2E 0x80000000 /* L2 Cache Enable */
#define L2CSR0_L2PE 0x40000000 /* L2 Cache Parity/ECC Enable */
@@ -721,5 +740,23 @@
#define MMUBE1_VBE4 0x00000002
#define MMUBE1_VBE5 0x00000001
+#define TMRN_IMSR0 0x120 /* Initial MSR Register 0 (e6500) */
+#define TMRN_IMSR1 0x121 /* Initial MSR Register 1 (e6500) */
+#define TMRN_INIA0 0x140 /* Next Instruction Address Register 0 */
+#define TMRN_INIA1 0x141 /* Next Instruction Address Register 1 */
+#define SPRN_TENSR 0x1b5 /* Thread Enable Status Register */
+#define SPRN_TENS 0x1b6 /* Thread Enable Set Register */
+#define SPRN_TENC 0x1b7 /* Thread Enable Clear Register */
+
+#define TEN_THREAD(x) (1 << (x))
+
+#ifndef __ASSEMBLY__
+#define mftmr(rn) ({unsigned long rval; \
+ asm volatile(MFTMR(rn, %0) : "=r" (rval)); rval;})
+#define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+#endif /* !__ASSEMBLY__ */
+
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
#endif /* __KERNEL__ */
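With the *_LG bit positions, the TMRN_* thread-management register numbers, and the mftmr()/mttmr() accessors in place, releasing a second e6500 hardware thread reduces to seeding its initial MSR and next-instruction address and then setting its enable bit. A hedged sketch; the entry point and MSR value are placeholders, not values from this patch:

/* Hedged sketch: start thread 1 of an e6500 core at 'entry'. */
static void start_thread1_sketch(unsigned long entry, unsigned long msr)
{
	mttmr(TMRN_IMSR1, msr);		/* initial MSR for thread 1 */
	mttmr(TMRN_INIA1, entry);	/* first instruction address */
	asm volatile("isync" : : : "memory");
	mtspr(SPRN_TENS, TEN_THREAD(1));	/* set the enable bit */

	while (!(mfspr(SPRN_TENSR) & TEN_THREAD(1)))
		cpu_relax();		/* wait until it reports enabled */
}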
diff --git a/arch/powerpc/include/asm/scatterlist.h b/arch/powerpc/include/asm/scatterlist.h
deleted file mode 100644
index de1f620bd5c9..000000000000
--- a/arch/powerpc/include/asm/scatterlist.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_POWERPC_SCATTERLIST_H
-#define _ASM_POWERPC_SCATTERLIST_H
-/*
- * Copyright (C) 2001 PPC64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/dma.h>
-#include <asm-generic/scatterlist.h>
-
-#define ARCH_HAS_SG_CHAIN
-
-#endif /* _ASM_POWERPC_SCATTERLIST_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 35aa339410bd..4dbe072eecbe 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -61,6 +61,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
+ smp_mb();
return !arch_spin_value_unlocked(*lock);
}
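The smp_mb() closes a hole for callers that use arch_spin_is_locked() as half of a handshake: without it, the load of the lock word could be reordered before the caller's earlier stores, letting both sides miss each other. A hedged illustration of the pattern being protected (names invented):

/*
 * Hedged sketch: publish intent, then check the lock. The barrier
 * inside arch_spin_is_locked() keeps the store ordered before the
 * load of the lock word.
 */
static int try_fast_path(arch_spinlock_t *lock, int *flag)
{
	*flag = 1;			/* must be visible before the check */
	if (!arch_spin_is_locked(lock))
		return 1;		/* fast path is safe */
	return 0;			/* contend via the slow path */
}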
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index babbeca6850f..542bc0f0673f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -77,10 +77,10 @@ SYSCALL_SPU(setreuid)
SYSCALL_SPU(setregid)
#define compat_sys_sigsuspend sys_sigsuspend
SYS32ONLY(sigsuspend)
-COMPAT_SYS(sigpending)
+SYSX(sys_ni_syscall,compat_sys_sigpending,sys_sigpending)
SYSCALL_SPU(sethostname)
COMPAT_SYS_SPU(setrlimit)
-COMPAT_SYS(old_getrlimit)
+SYSX(sys_ni_syscall,compat_sys_old_getrlimit,sys_old_getrlimit)
COMPAT_SYS_SPU(getrusage)
COMPAT_SYS_SPU(gettimeofday)
COMPAT_SYS_SPU(settimeofday)
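Switching these two entries from COMPAT_SYS() to SYSX() decouples the three slots a call can occupy: native 64-bit, 32-bit compat on a 64-bit kernel, and native 32-bit. Routing sys_ni_syscall into the first slot leaves sigpending and old_getrlimit unimplemented for 64-bit tasks while 32-bit users keep the real handlers. A heavily hedged sketch of how an including site might expand SYSX entries; the real definitions also decorate symbols for the ABI in use, so this is simplified:

#ifdef CONFIG_PPC64
/* 64-bit build: native slot, then compat slot */
#define SYSX(f, f3264, f32)	.llong	f, f3264
#else
/* 32-bit build: single native slot */
#define SYSX(f, f3264, f32)	.long	f32
#endif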
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 1d428e6007ca..03cbada59d3a 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -102,6 +102,15 @@ static inline u64 get_rtc(void)
return (u64)hi * 1000000000 + lo;
}
+static inline u64 get_vtb(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return mfvtb();
+#endif
+ return 0;
+}
+
#ifdef CONFIG_PPC64
static inline u64 get_tb(void)
{
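The Virtual Time Base is meant to track time the thread actually runs, so differencing two get_vtb() reads brackets virtualized run time on CPUs with CPU_FTR_ARCH_207S and collapses to zero elsewhere. A hedged usage sketch with a placeholder workload:

/* Hedged sketch: VTB ticks consumed by a section of code. */
static u64 vtb_ticks_for(void (*do_work)(void))
{
	u64 start = get_vtb();

	do_work();
	return get_vtb() - start;	/* 0 - 0 without VTB support */
}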
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 5712f06905a9..c15da6073cb8 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -99,6 +99,51 @@ TRACE_EVENT_FN(hcall_exit,
);
#endif
+#ifdef CONFIG_PPC_POWERNV
+extern void opal_tracepoint_regfunc(void);
+extern void opal_tracepoint_unregfunc(void);
+
+TRACE_EVENT_FN(opal_entry,
+
+ TP_PROTO(unsigned long opcode, unsigned long *args),
+
+ TP_ARGS(opcode, args),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ ),
+
+ TP_printk("opcode=%lu", __entry->opcode),
+
+ opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+
+TRACE_EVENT_FN(opal_exit,
+
+ TP_PROTO(unsigned long opcode, unsigned long retval),
+
+ TP_ARGS(opcode, retval),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, opcode)
+ __field(unsigned long, retval)
+ ),
+
+ TP_fast_assign(
+ __entry->opcode = opcode;
+ __entry->retval = retval;
+ ),
+
+ TP_printk("opcode=%lu retval=%lu", __entry->opcode, __entry->retval),
+
+ opal_tracepoint_regfunc, opal_tracepoint_unregfunc
+);
+#endif
+
#endif /* _TRACE_POWERPC_H */
#undef TRACE_INCLUDE_PATH
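TRACE_EVENT_FN wires in opal_tracepoint_regfunc()/unregfunc() so the OPAL call path only pays for tracing while the tracepoints are enabled; some wrapper around the firmware call is then expected to emit trace_opal_entry()/trace_opal_exit(). A hedged sketch of such a wrapper; __raw_opal_call() is a hypothetical stand-in for the real low-level entry:

/* Hedged sketch: bracket a firmware call with the new tracepoints. */
static long traced_opal_call(unsigned long opcode, unsigned long *args)
{
	long rc;

	trace_opal_entry(opcode, args);
	rc = __raw_opal_call(opcode, args);	/* hypothetical entry */
	trace_opal_exit(opcode, rc);
	return rc;
}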
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 2bc4a9409a93..e0e49dbb145d 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -548,6 +548,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
/* Architecture compatibility level */
@@ -555,6 +556,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
+#define KVM_REG_PPC_SPRG9 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
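These one-reg IDs are consumed from userspace through KVM_GET_ONE_REG/KVM_SET_ONE_REG; KVM_REG_PPC_LPCR_64 reuses index 0xb5 with a 64-bit size so callers can transfer the whole LPCR. A hedged userspace sketch, assuming vcpu_fd is an open KVM vCPU descriptor:

/* Hedged sketch: set the full 64-bit LPCR via the one-reg API. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_lpcr64(int vcpu_fd, __u64 lpcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR_64,
		.addr = (__u64)(unsigned long)&lpcr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}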
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs