path: root/arch/s390/include/asm
Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/appldata.h | 2
-rw-r--r--  arch/s390/include/asm/atomic.h | 2
-rw-r--r--  arch/s390/include/asm/barrier.h | 8
-rw-r--r--  arch/s390/include/asm/bitops.h | 55
-rw-r--r--  arch/s390/include/asm/cio.h | 11
-rw-r--r--  arch/s390/include/asm/cmb.h | 1
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 30
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 5
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 2
-rw-r--r--  arch/s390/include/asm/diag.h | 29
-rw-r--r--  arch/s390/include/asm/elf.h | 13
-rw-r--r--  arch/s390/include/asm/etr.h | 10
-rw-r--r--  arch/s390/include/asm/fpu/api.h | 30
-rw-r--r--  arch/s390/include/asm/fpu/internal.h (renamed from arch/s390/include/asm/fpu-internal.h) | 51
-rw-r--r--  arch/s390/include/asm/fpu/types.h | 25
-rw-r--r--  arch/s390/include/asm/idle.h | 2
-rw-r--r--  arch/s390/include/asm/ipl.h | 3
-rw-r--r--  arch/s390/include/asm/irq.h | 14
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 4
-rw-r--r--  arch/s390/include/asm/kvm_para.h | 67
-rw-r--r--  arch/s390/include/asm/lowcore.h | 11
-rw-r--r--  arch/s390/include/asm/nmi.h | 98
-rw-r--r--  arch/s390/include/asm/pci.h | 4
-rw-r--r--  arch/s390/include/asm/pci_dma.h | 5
-rw-r--r--  arch/s390/include/asm/pgtable.h | 66
-rw-r--r--  arch/s390/include/asm/processor.h | 99
-rw-r--r--  arch/s390/include/asm/ptrace.h | 11
-rw-r--r--  arch/s390/include/asm/setup.h | 50
-rw-r--r--  arch/s390/include/asm/spinlock.h | 3
-rw-r--r--  arch/s390/include/asm/switch_to.h | 2
-rw-r--r--  arch/s390/include/asm/thread_info.h | 22
-rw-r--r--  arch/s390/include/asm/trace/diag.h | 43
32 files changed, 491 insertions, 287 deletions
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 16887c5fd989..a6263d4e8e56 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -7,6 +7,7 @@
#ifndef _ASM_S390_APPLDATA_H
#define _ASM_S390_APPLDATA_H
+#include <asm/diag.h>
#include <asm/io.h>
#define APPLDATA_START_INTERVAL_REC 0x80
@@ -53,6 +54,7 @@ static inline int appldata_asm(struct appldata_product_id *id,
parm_list.buffer_length = length;
parm_list.product_id_addr = (unsigned long) id;
parm_list.buffer_addr = virt_to_phys(buffer);
+ diag_stat_inc(DIAG_STAT_X0DC);
asm volatile(
" diag %1,%0,0xdc"
: "=d" (ry)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 117fa5c921c1..911064aa59b2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,7 +36,6 @@
\
typecheck(atomic_t *, ptr); \
asm volatile( \
- __barrier \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
@@ -180,7 +179,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
- __barrier \
op_string " %0,%2,%1\n" \
__barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d48fe0162331..d68e11e0df5e 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -22,10 +22,10 @@
#define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
-#define rmb() mb()
-#define wmb() mb()
-#define dma_rmb() rmb()
-#define dma_wmb() wmb()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 9b68e98a724f..8043f10da6b5 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -11,30 +11,25 @@
* big-endian system because, unlike little endian, the number of each
* bit depends on the word size.
*
- * The bitop functions are defined to work on unsigned longs, so for an
- * s390x system the bits end up numbered:
+ * The bitop functions are defined to work on unsigned longs, so the bits
+ * end up numbered:
* |63..............0|127............64|191...........128|255...........192|
- * and on s390:
- * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
*
* There are a few little-endian macros used mostly for filesystem
- * bitmaps, these work on similar bit arrays layouts, but
- * byte-oriented:
+ * bitmaps, these work on similar bit array layouts, but byte-oriented:
* |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
*
- * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
- * number field needs to be reversed compared to the big-endian bit
- * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ * The main difference is that bit 3-5 in the bit number field needs to be
+ * reversed compared to the big-endian bit fields. This can be achieved by
+ * XOR with 0x38.
*
- * We also have special functions which work with an MSB0 encoding:
- * on an s390x system the bits are numbered:
+ * We also have special functions which work with an MSB0 encoding.
+ * The bits are numbered:
* |0..............63|64............127|128...........191|192...........255|
- * and on s390:
- * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*
- * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
- * number field needs to be reversed compared to the LSB0 encoded bit
- * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
+ * The main difference is that bit 0-63 in the bit number field needs to be
+ * reversed compared to the LSB0 encoded bit fields. This can be achieved by
+ * XOR with 0x3f.
*
*/
@@ -64,7 +59,6 @@
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
- __barrier \
__op_string " %0,%2,%1\n" \
__barrier \
: "=d" (__old), "+Q" (*(__addr)) \
@@ -276,12 +270,32 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
return (*addr >> (nr & 7)) & 1;
}
+static inline int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *ptr)
+{
+ if (test_bit(nr, ptr))
+ return 1;
+ return test_and_set_bit(nr, ptr);
+}
+
+static inline void clear_bit_unlock(unsigned long nr,
+ volatile unsigned long *ptr)
+{
+ smp_mb__before_atomic();
+ clear_bit(nr, ptr);
+}
+
+static inline void __clear_bit_unlock(unsigned long nr,
+ volatile unsigned long *ptr)
+{
+ smp_mb();
+ __clear_bit(nr, ptr);
+}
+
/*
* Functions which use MSB0 bit numbering.
- * On an s390x system the bits are numbered:
+ * The bits are numbered:
* |0..............63|64............127|128...........191|192...........255|
- * and on s390:
- * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
*/
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
@@ -446,7 +460,6 @@ static inline int fls(int word)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
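
For reference, a minimal sketch of the LSB0-to-MSB0 conversion the updated comment describes; the helper name is illustrative, only the XOR trick comes from the text above.

	/* Within each 64-bit word, MSB0 bit i is LSB0 bit 63 - i, and
	 * 63 - (nr & 63) == (nr & 63) ^ 0x3f, so a plain XOR converts the
	 * bit number while leaving the word index (nr / 64) untouched. */
	static inline unsigned long msb0_to_lsb0(unsigned long nr)
	{
		return nr ^ 0x3f;	/* e.g. 0 -> 63, 64 -> 127 */
	}
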
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 096339207764..d1e7b0a0feeb 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -5,6 +5,7 @@
#define _ASM_S390_CIO_H_
#include <linux/spinlock.h>
+#include <linux/bitops.h>
#include <asm/types.h>
#define LPM_ANYPATH 0xff
@@ -296,12 +297,22 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
+/**
+ * pathmask_to_pos() - find the position of the left-most bit in a pathmask
+ * @mask: pathmask with at least one bit set
+ */
+static inline u8 pathmask_to_pos(u8 mask)
+{
+ return 8 - ffs(mask);
+}
+
void channel_subsystem_reinit(void);
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
struct cio_iplinfo {
+ u8 ssid;
u16 devno;
int is_qdio;
};
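
A quick worked example for the new helper (the values follow directly from the definition above; positions count from the most significant bit of the 8-bit path mask):

	/* pathmask_to_pos(0x80): ffs(0x80) == 8, so the position is 0.
	 * pathmask_to_pos(0x01): ffs(0x01) == 1, so the position is 7. */
	static inline u8 first_path_pos_example(void)
	{
		return pathmask_to_pos(0x80);	/* == 0 */
	}
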
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 806eac12e3bd..ed2630c23f90 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -6,6 +6,7 @@
struct ccw_device;
extern int enable_cmf(struct ccw_device *cdev);
extern int disable_cmf(struct ccw_device *cdev);
+extern int __disable_cmf(struct ccw_device *cdev);
extern u64 cmf_read(struct ccw_device *cdev, int index);
extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 411464f4c97a..24ea6948e32b 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -32,7 +32,7 @@
__old; \
})
-#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
+#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
register __typeof__(*(p2)) __old2 asm("3") = (o2); \
@@ -40,7 +40,7 @@
register __typeof__(*(p2)) __new2 asm("5") = (n2); \
int cc; \
asm volatile( \
- insn " %[old],%[new],%[ptr]\n" \
+ " cdsg %[old],%[new],%[ptr]\n" \
" ipm %[cc]\n" \
" srl %[cc],28" \
: [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \
@@ -50,30 +50,6 @@
!cc; \
})
-#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \
- __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds")
-
-#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \
- __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg")
-
-extern void __cmpxchg_double_called_with_bad_pointer(void);
-
-#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
-({ \
- int __ret; \
- switch (sizeof(*(p1))) { \
- case 4: \
- __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \
- break; \
- case 8: \
- __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \
- break; \
- default: \
- __cmpxchg_double_called_with_bad_pointer(); \
- } \
- __ret; \
-})
-
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
__typeof__(p1) __p1 = (p1); \
@@ -81,7 +57,7 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
+ __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
})
#define system_has_cmpxchg_double() 1
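
A minimal usage sketch for the simplified cmpxchg_double() (the struct and function names here are illustrative): cdsg compares and swaps a pair of adjacent doublewords, so both fields must be 8 bytes, contiguous, and quadword aligned, which the BUILD_BUG_ON/VM_BUG_ON checks above enforce.

	struct pair {
		unsigned long first;
		unsigned long second;
	} __aligned(16);	/* cdsg needs quadword alignment */

	static inline int pair_update(struct pair *p,
				      unsigned long old1, unsigned long old2,
				      unsigned long new1, unsigned long new2)
	{
		/* returns 1 if both words still held old1/old2 and were replaced */
		return cmpxchg_double(&p->first, &p->second,
				      old1, old2, new1, new2);
	}
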
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 5243a8679a1d..9dd04b9e9782 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -22,15 +22,10 @@
#define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */
#define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */
#define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */
-#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */
-#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program
- buffer full */
-
#define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA)
#define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \
CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \
CPU_MF_INT_SF_LSDA)
-#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL)
/* CPU measurement facility support */
static inline int cpum_cf_avail(void)
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 17a373576868..d7697ab802f6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -46,8 +46,6 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
__ctl_load(reg, cr, cr);
}
-void __ctl_set_vx(void);
-
void smp_ctl_set_bit(int cr, int bit);
void smp_ctl_clear_bit(int cr, int bit);
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 7e91c58072e2..5fac921c1c42 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -8,6 +8,34 @@
#ifndef _ASM_S390_DIAG_H
#define _ASM_S390_DIAG_H
+#include <linux/percpu.h>
+
+enum diag_stat_enum {
+ DIAG_STAT_X008,
+ DIAG_STAT_X00C,
+ DIAG_STAT_X010,
+ DIAG_STAT_X014,
+ DIAG_STAT_X044,
+ DIAG_STAT_X064,
+ DIAG_STAT_X09C,
+ DIAG_STAT_X0DC,
+ DIAG_STAT_X204,
+ DIAG_STAT_X210,
+ DIAG_STAT_X224,
+ DIAG_STAT_X250,
+ DIAG_STAT_X258,
+ DIAG_STAT_X288,
+ DIAG_STAT_X2C4,
+ DIAG_STAT_X2FC,
+ DIAG_STAT_X304,
+ DIAG_STAT_X308,
+ DIAG_STAT_X500,
+ NR_DIAG_STAT
+};
+
+void diag_stat_inc(enum diag_stat_enum nr);
+void diag_stat_inc_norecursion(enum diag_stat_enum nr);
+
/*
* Diagnose 10: Release page range
*/
@@ -18,6 +46,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
start_addr = start_pfn << PAGE_SHIFT;
end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+ diag_stat_inc(DIAG_STAT_X010);
asm volatile(
"0: diag %0,%1,0x10\n"
"1:\n"
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f22de78..bab6739a1154 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@ do { \
} while (0)
#endif /* CONFIG_COMPAT */
-extern unsigned long mmap_rnd_mask;
-
-#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
+/*
+ * Cache aliasing on the latest machines calls for a mapping granularity
+ * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
+ * of up to 1GB. For 31-bit processes the virtual address space is limited,
+ * use no alignment and limit the randomization to 8MB.
+ */
+#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
+#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
+#define STACK_RND_MASK MMAP_RND_MASK
#define ARCH_DLINFO \
do { \
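
The masks above are page counts, so the figures in the comment can be checked directly; a small illustrative userspace calculation, assuming the 4 KiB base page size:

	#include <stdio.h>

	int main(void)
	{
		unsigned long page = 1UL << 12;

		printf("64-bit randomization: %lu MiB\n", ((0x3ffffUL + 1) * page) >> 20); /* 1024 */
		printf("mmap step size:       %lu KiB\n", ((0x7fUL + 1) * page) >> 10);    /*  512 */
		printf("31-bit randomization: %lu MiB\n", ((0x7ffUL + 1) * page) >> 20);   /*    8 */
		return 0;
	}
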
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index f7e5c36688c3..105f90e63a0e 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -211,8 +211,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func)
#define ETR_PTFF_SGS 0x43 /* set gross steering rate */
/* Functions needed by the machine check handler */
-void etr_switch_to_local(void);
-void etr_sync_check(void);
+int etr_switch_to_local(void);
+int etr_sync_check(void);
+void etr_queue_work(void);
/* notifier for syncs */
extern struct atomic_notifier_head s390_epoch_delta_notifier;
@@ -253,7 +254,8 @@ struct stp_sstpi {
} __attribute__ ((packed));
/* Functions needed by the machine check handler */
-void stp_sync_check(void);
-void stp_island_check(void);
+int stp_sync_check(void);
+int stp_island_check(void);
+void stp_queue_work(void);
#endif /* __S390_ETR_H */
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
new file mode 100644
index 000000000000..5e04f3cbd320
--- /dev/null
+++ b/arch/s390/include/asm/fpu/api.h
@@ -0,0 +1,30 @@
+/*
+ * In-kernel FPU support functions
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_API_H
+#define _ASM_S390_FPU_API_H
+
+void save_fpu_regs(void);
+
+static inline int test_fp_ctl(u32 fpc)
+{
+ u32 orig_fpc;
+ int rc;
+
+ asm volatile(
+ " efpc %1\n"
+ " sfpc %2\n"
+ "0: sfpc %1\n"
+ " la %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "=d" (rc), "=d" (orig_fpc)
+ : "d" (fpc), "0" (-EINVAL));
+ return rc;
+}
+
+#endif /* _ASM_S390_FPU_API_H */
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu/internal.h
index 55dc2c0fb40a..2559b16da525 100644
--- a/arch/s390/include/asm/fpu-internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -1,5 +1,5 @@
/*
- * General floating pointer and vector register helpers
+ * FPU state and register content conversion primitives
*
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
@@ -8,50 +8,9 @@
#ifndef _ASM_S390_FPU_INTERNAL_H
#define _ASM_S390_FPU_INTERNAL_H
-#define FPU_USE_VX 1 /* Vector extension is active */
-
-#ifndef __ASSEMBLY__
-
-#include <linux/errno.h>
#include <linux/string.h>
-#include <asm/linkage.h>
#include <asm/ctl_reg.h>
-#include <asm/sigcontext.h>
-
-struct fpu {
- __u32 fpc; /* Floating-point control */
- __u32 flags;
- union {
- void *regs;
- freg_t *fprs; /* Floating-point register save area */
- __vector128 *vxrs; /* Vector register save area */
- };
-};
-
-void save_fpu_regs(void);
-
-#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
-#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
-
-/* VX array structure for address operand constraints in inline assemblies */
-struct vx_array { __vector128 _[__NUM_VXRS]; };
-
-static inline int test_fp_ctl(u32 fpc)
-{
- u32 orig_fpc;
- int rc;
-
- asm volatile(
- " efpc %1\n"
- " sfpc %2\n"
- "0: sfpc %1\n"
- " la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
- : "d" (fpc), "0" (-EINVAL));
- return rc;
-}
+#include <asm/fpu/types.h>
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
@@ -89,7 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
fpregs->pad = 0;
- if (is_vx_fpu(fpu))
+ if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
@@ -98,13 +57,11 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
- if (is_vx_fpu(fpu))
+ if (MACHINE_HAS_VX)
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
sizeof(fpregs->fprs));
}
-#endif
-
#endif /* _ASM_S390_FPU_INTERNAL_H */
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
new file mode 100644
index 000000000000..14a8b0c14f87
--- /dev/null
+++ b/arch/s390/include/asm/fpu/types.h
@@ -0,0 +1,25 @@
+/*
+ * FPU data structures
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_TYPES_H
+#define _ASM_S390_FPU_TYPES_H
+
+#include <asm/sigcontext.h>
+
+struct fpu {
+ __u32 fpc; /* Floating-point control */
+ union {
+ void *regs;
+ freg_t *fprs; /* Floating-point register save area */
+ __vector128 *vxrs; /* Vector register save area */
+ };
+};
+
+/* VX array structure for address operand constraints in inline assemblies */
+struct vx_array { __vector128 _[__NUM_VXRS]; };
+
+#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 113cd963dbbe..51ff96d9f287 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -24,4 +24,6 @@ struct s390_idle_data {
extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
+void psw_idle(struct s390_idle_data *, unsigned long);
+
#endif /* _S390_IDLE_H */
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a359747..86634e71b69f 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@ struct ipl_block_fcp {
struct ipl_block_ccw {
u8 reserved1[84];
- u8 reserved2[2];
+ u16 reserved2 : 13;
+ u8 ssid : 3;
u16 devno;
u8 vm_flags;
u8 reserved3[3];
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index ff95d15a2384..f97b055de76a 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,7 +47,6 @@ enum interruption_class {
IRQEXT_IUC,
IRQEXT_CMS,
IRQEXT_CMC,
- IRQEXT_CMR,
IRQEXT_FTP,
IRQIO_CIO,
IRQIO_QAI,
@@ -96,6 +95,19 @@ enum irq_subclass {
IRQ_SUBCLASS_SERVICE_SIGNAL = 9,
};
+#define CR0_IRQ_SUBCLASS_MASK \
+ ((1UL << (63 - 30)) /* Warning Track */ | \
+ (1UL << (63 - 48)) /* Malfunction Alert */ | \
+ (1UL << (63 - 49)) /* Emergency Signal */ | \
+ (1UL << (63 - 50)) /* External Call */ | \
+ (1UL << (63 - 52)) /* Clock Comparator */ | \
+ (1UL << (63 - 53)) /* CPU Timer */ | \
+ (1UL << (63 - 54)) /* Service Signal */ | \
+ (1UL << (63 - 57)) /* Interrupt Key */ | \
+ (1UL << (63 - 58)) /* Measurement Alert */ | \
+ (1UL << (63 - 59)) /* Timing Alert */ | \
+ (1UL << (63 - 62))) /* IUCV */
+
void irq_subclass_register(enum irq_subclass subclass);
void irq_subclass_unregister(enum irq_subclass subclass);
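
A sketch of how such a mask is typically applied (illustrative; the actual call sites are outside this diff): save control register 0, reload it with all external-interrupt subclass bits cleared, and restore it afterwards.

	unsigned long cr0, cr0_new;

	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	__ctl_load(cr0_new, 0, 0);
	/* ... run with external-interrupt subclasses masked in CR0 ... */
	__ctl_load(cr0, 0, 0);
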
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8ced426091e1..efaac2c3bb77 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -22,7 +22,7 @@
#include <linux/kvm.h>
#include <asm/debug.h>
#include <asm/cpu.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
#include <asm/isc.h>
#define KVM_MAX_VCPUS 64
@@ -644,5 +644,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#endif
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index e0f842308a68..41393052ac57 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -27,10 +27,9 @@
#define __S390_KVM_PARA_H
#include <uapi/asm/kvm_para.h>
+#include <asm/diag.h>
-
-
-static inline long kvm_hypercall0(unsigned long nr)
+static inline long __kvm_hypercall0(unsigned long nr)
{
register unsigned long __nr asm("1") = nr;
register long __rc asm("2");
@@ -40,7 +39,13 @@ static inline long kvm_hypercall0(unsigned long nr)
return __rc;
}
-static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+static inline long kvm_hypercall0(unsigned long nr)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall0(nr);
+}
+
+static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
{
register unsigned long __nr asm("1") = nr;
register unsigned long __p1 asm("2") = p1;
@@ -51,7 +56,13 @@ static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
return __rc;
}
-static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall1(nr, p1);
+}
+
+static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
unsigned long p2)
{
register unsigned long __nr asm("1") = nr;
@@ -65,7 +76,14 @@ static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
return __rc;
}
-static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+ unsigned long p2)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall2(nr, p1, p2);
+}
+
+static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
register unsigned long __nr asm("1") = nr;
@@ -80,8 +98,14 @@ static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
return __rc;
}
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall3(nr, p1, p2, p3);
+}
-static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
@@ -98,7 +122,15 @@ static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
return __rc;
}
-static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall4(nr, p1, p2, p3, p4);
+}
+
+static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5)
{
@@ -116,7 +148,15 @@ static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
return __rc;
}
-static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
+}
+
+static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4, unsigned long p5,
unsigned long p6)
@@ -137,6 +177,15 @@ static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
return __rc;
}
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+ unsigned long p2, unsigned long p3,
+ unsigned long p4, unsigned long p5,
+ unsigned long p6)
+{
+ diag_stat_inc(DIAG_STAT_X500);
+ return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
+}
+
/* kvm on s390 is always paravirtualization enabled */
static inline int kvm_para_available(void)
{
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 663f23e37460..afe1cfebf1a4 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -67,7 +67,7 @@ struct _lowcore {
__u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
__u32 stfl_fac_list; /* 0x00c8 */
__u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */
- __u32 mcck_interruption_code[2]; /* 0x00e8 */
+ __u64 mcck_interruption_code; /* 0x00e8 */
__u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
__u32 external_damage_code; /* 0x00f4 */
__u64 failing_storage_address; /* 0x00f8 */
@@ -132,7 +132,14 @@ struct _lowcore {
/* Address space pointer. */
__u64 kernel_asce; /* 0x0358 */
__u64 user_asce; /* 0x0360 */
- __u64 current_pid; /* 0x0368 */
+
+ /*
+ * The lpp and current_pid fields form a
+ * 64-bit value that is set as program
+ * parameter with the LPP instruction.
+ */
+ __u32 lpp; /* 0x0368 */
+ __u32 current_pid; /* 0x036c */
/* SMP info area */
__u32 cpu_nr; /* 0x0370 */
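
Because the two 32-bit fields are adjacent, the doubleword at lowcore offset 0x368 that is set as program parameter is simply the concatenation of both; a small illustrative accessor:

	static inline u64 program_parameter_value(void)
	{
		return ((u64) S390_lowcore.lpp << 32) | S390_lowcore.current_pid;
	}
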
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 3027a5a72b74..b75fd910386a 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -11,51 +11,62 @@
#ifndef _ASM_S390_NMI_H
#define _ASM_S390_NMI_H
+#include <linux/const.h>
#include <linux/types.h>
-struct mci {
- __u32 sd : 1; /* 00 system damage */
- __u32 pd : 1; /* 01 instruction-processing damage */
- __u32 sr : 1; /* 02 system recovery */
- __u32 : 1; /* 03 */
- __u32 cd : 1; /* 04 timing-facility damage */
- __u32 ed : 1; /* 05 external damage */
- __u32 : 1; /* 06 */
- __u32 dg : 1; /* 07 degradation */
- __u32 w : 1; /* 08 warning pending */
- __u32 cp : 1; /* 09 channel-report pending */
- __u32 sp : 1; /* 10 service-processor damage */
- __u32 ck : 1; /* 11 channel-subsystem damage */
- __u32 : 2; /* 12-13 */
- __u32 b : 1; /* 14 backed up */
- __u32 : 1; /* 15 */
- __u32 se : 1; /* 16 storage error uncorrected */
- __u32 sc : 1; /* 17 storage error corrected */
- __u32 ke : 1; /* 18 storage-key error uncorrected */
- __u32 ds : 1; /* 19 storage degradation */
- __u32 wp : 1; /* 20 psw mwp validity */
- __u32 ms : 1; /* 21 psw mask and key validity */
- __u32 pm : 1; /* 22 psw program mask and cc validity */
- __u32 ia : 1; /* 23 psw instruction address validity */
- __u32 fa : 1; /* 24 failing storage address validity */
- __u32 vr : 1; /* 25 vector register validity */
- __u32 ec : 1; /* 26 external damage code validity */
- __u32 fp : 1; /* 27 floating point register validity */
- __u32 gr : 1; /* 28 general register validity */
- __u32 cr : 1; /* 29 control register validity */
- __u32 : 1; /* 30 */
- __u32 st : 1; /* 31 storage logical validity */
- __u32 ie : 1; /* 32 indirect storage error */
- __u32 ar : 1; /* 33 access register validity */
- __u32 da : 1; /* 34 delayed access exception */
- __u32 : 7; /* 35-41 */
- __u32 pr : 1; /* 42 tod programmable register validity */
- __u32 fc : 1; /* 43 fp control register validity */
- __u32 ap : 1; /* 44 ancillary report */
- __u32 : 1; /* 45 */
- __u32 ct : 1; /* 46 cpu timer validity */
- __u32 cc : 1; /* 47 clock comparator validity */
- __u32 : 16; /* 47-63 */
+#define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63)
+#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
+#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
+#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
+
+#ifndef __ASSEMBLY__
+
+union mci {
+ unsigned long val;
+ struct {
+ u64 sd : 1; /* 00 system damage */
+ u64 pd : 1; /* 01 instruction-processing damage */
+ u64 sr : 1; /* 02 system recovery */
+ u64 : 1; /* 03 */
+ u64 cd : 1; /* 04 timing-facility damage */
+ u64 ed : 1; /* 05 external damage */
+ u64 : 1; /* 06 */
+ u64 dg : 1; /* 07 degradation */
+ u64 w : 1; /* 08 warning pending */
+ u64 cp : 1; /* 09 channel-report pending */
+ u64 sp : 1; /* 10 service-processor damage */
+ u64 ck : 1; /* 11 channel-subsystem damage */
+ u64 : 2; /* 12-13 */
+ u64 b : 1; /* 14 backed up */
+ u64 : 1; /* 15 */
+ u64 se : 1; /* 16 storage error uncorrected */
+ u64 sc : 1; /* 17 storage error corrected */
+ u64 ke : 1; /* 18 storage-key error uncorrected */
+ u64 ds : 1; /* 19 storage degradation */
+ u64 wp : 1; /* 20 psw mwp validity */
+ u64 ms : 1; /* 21 psw mask and key validity */
+ u64 pm : 1; /* 22 psw program mask and cc validity */
+ u64 ia : 1; /* 23 psw instruction address validity */
+ u64 fa : 1; /* 24 failing storage address validity */
+ u64 vr : 1; /* 25 vector register validity */
+ u64 ec : 1; /* 26 external damage code validity */
+ u64 fp : 1; /* 27 floating point register validity */
+ u64 gr : 1; /* 28 general register validity */
+ u64 cr : 1; /* 29 control register validity */
+ u64 : 1; /* 30 */
+ u64 st : 1; /* 31 storage logical validity */
+ u64 ie : 1; /* 32 indirect storage error */
+ u64 ar : 1; /* 33 access register validity */
+ u64 da : 1; /* 34 delayed access exception */
+ u64 : 7; /* 35-41 */
+ u64 pr : 1; /* 42 tod programmable register validity */
+ u64 fc : 1; /* 43 fp control register validity */
+ u64 ap : 1; /* 44 ancillary report */
+ u64 : 1; /* 45 */
+ u64 ct : 1; /* 46 cpu timer validity */
+ u64 cc : 1; /* 47 clock comparator validity */
+ u64 : 16; /* 47-63 */
+ };
};
struct pt_regs;
@@ -63,4 +74,5 @@ struct pt_regs;
extern void s390_handle_mcck(void);
extern void s390_do_machine_check(struct pt_regs *regs);
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
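
A minimal sketch of how the union pairs with the lowcore change earlier in this series: the machine-check interruption code is loaded once and can be tested through either the bitfields or the MCCK_CODE_* masks (the function body here is illustrative).

	static void inspect_mcck_example(void)
	{
		union mci mci;

		mci.val = S390_lowcore.mcck_interruption_code;
		if (mci.sd)					/* bit 0: system damage */
			panic("System damage machine check");
		if (mci.val & MCCK_CODE_CPU_TIMER_VALID) {	/* same bit as mci.ct */
			/* the CPU timer save area in the lowcore is usable */
		}
	}
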
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 34d960353a08..c873e682b67f 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -62,6 +62,8 @@ struct zpci_bar_struct {
u8 size; /* order 2 exponent */
};
+struct s390_domain;
+
/* Private data per function */
struct zpci_dev {
struct pci_dev *pdev;
@@ -118,6 +120,8 @@ struct zpci_dev {
struct dentry *debugfs_dev;
struct dentry *debugfs_perf;
+
+ struct s390_domain *s390_domain; /* s390 IOMMU domain data */
};
static inline bool zdev_enabled(struct zpci_dev *zdev)
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 30b4c179c38c..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -192,5 +192,10 @@ static inline unsigned long *get_st_pto(unsigned long entry)
/* Prototypes */
int zpci_dma_init_device(struct zpci_dev *);
void zpci_dma_exit_device(struct zpci_dev *);
+void dma_free_seg_table(unsigned long);
+unsigned long *dma_alloc_cpu_table(void);
+void dma_cleanup_tables(unsigned long *);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
#endif
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index bdb2f51124ed..024f85f947ae 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -193,9 +193,15 @@ static inline int is_module_addr(void *addr)
#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */
+#else
+#define _PAGE_SOFT_DIRTY 0x000
+#endif
+
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
- _PAGE_YOUNG)
+ _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
* handle_pte_fault uses pte_present and pte_none to find out the pte type
@@ -285,6 +291,12 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
+#else
+#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
+#endif
+
/*
* Segment table entry encoding (R = read-only, I = invalid, y = young bit):
* dy..R...I...wr
@@ -589,6 +601,43 @@ static inline int pmd_protnone(pmd_t pmd)
}
#endif
+static inline int pte_soft_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SOFT_DIRTY;
+}
+#define pte_swp_soft_dirty pte_soft_dirty
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SOFT_DIRTY;
+ return pte;
+}
+#define pte_swp_mksoft_dirty pte_mksoft_dirty
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+ pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
+ return pte;
+}
+#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+ return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+ pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
+ return pmd;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
+ return pmd;
+}
+
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
unsigned long new = 0;
@@ -889,7 +938,7 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= _PAGE_DIRTY;
+ pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
@@ -1218,8 +1267,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
pte_t entry, int dirty)
{
pgste_t pgste;
+ pte_t oldpte;
- if (pte_same(*ptep, entry))
+ oldpte = *ptep;
+ if (pte_same(oldpte, entry))
return 0;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_get_lock(ptep);
@@ -1229,7 +1280,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
ptep_flush_direct(vma->vm_mm, address, ptep);
if (mm_has_pgste(vma->vm_mm)) {
- pgste_set_key(ptep, pgste, entry, vma->vm_mm);
+ if (pte_val(oldpte) & _PAGE_INVALID)
+ pgste_set_key(ptep, pgste, entry, vma->vm_mm);
pgste = pgste_set_pte(ptep, pgste, entry);
pgste_set_unlock(ptep, pgste);
} else
@@ -1340,7 +1392,8 @@ static inline pmd_t pmd_mkclean(pmd_t pmd)
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
if (pmd_large(pmd)) {
- pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
+ _SEGMENT_ENTRY_SOFT_DIRTY;
if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
}
@@ -1371,7 +1424,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
if (pmd_large(pmd)) {
pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
- _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
+ _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT |
+ _SEGMENT_ENTRY_SOFT_DIRTY;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
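
A short sketch of the soft-dirty cycle these helpers support (the wrapper names are illustrative): the bit is cleared on request, set again by pte_mkdirty() on the next write, and can then be read back to tell which pages were written in between.

	static inline pte_t soft_dirty_reset(pte_t pte)
	{
		return pte_clear_soft_dirty(pte);
	}

	static inline int soft_dirty_seen(pte_t pte)
	{
		return pte_soft_dirty(pte) != 0;
	}
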
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 085fb0d3c54e..b16c3d0a1b9f 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,15 +11,19 @@
#ifndef __ASM_S390_PROCESSOR_H
#define __ASM_S390_PROCESSOR_H
+#include <linux/const.h>
+
#define CIF_MCCK_PENDING 0 /* machine check handling is pending */
#define CIF_ASCE 1 /* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
-#define CIF_FPU 3 /* restore vector registers */
+#define CIF_FPU 3 /* restore FPU registers */
+#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
-#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING)
-#define _CIF_ASCE (1<<CIF_ASCE)
-#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
-#define _CIF_FPU (1<<CIF_FPU)
+#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
+#define _CIF_ASCE _BITUL(CIF_ASCE)
+#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
+#define _CIF_FPU _BITUL(CIF_FPU)
+#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#ifndef __ASSEMBLY__
@@ -30,21 +34,22 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/runtime_instr.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/types.h>
+#include <asm/fpu/internal.h>
static inline void set_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags |= (1U << flag);
+ S390_lowcore.cpu_flags |= (1UL << flag);
}
static inline void clear_cpu_flag(int flag)
{
- S390_lowcore.cpu_flags &= ~(1U << flag);
+ S390_lowcore.cpu_flags &= ~(1UL << flag);
}
static inline int test_cpu_flag(int flag)
{
- return !!(S390_lowcore.cpu_flags & (1U << flag));
+ return !!(S390_lowcore.cpu_flags & (1UL << flag));
}
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
@@ -102,7 +107,6 @@ struct thread_struct {
struct list_head list;
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
- int ri_signum;
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
};
@@ -139,8 +143,10 @@ struct stack_frame {
#define ARCH_MIN_TASKALIGN 8
+extern __vector128 init_task_fpu_regs[__NUM_VXRS];
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+ .fpu.regs = (void *)&init_task_fpu_regs, \
}
/*
@@ -217,7 +223,7 @@ static inline void __load_psw(psw_t psw)
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static inline void __load_psw_mask (unsigned long mask)
+static inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
@@ -243,6 +249,16 @@ static inline unsigned long __extract_psw(void)
return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
+static inline void local_mcck_enable(void)
+{
+ __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
+}
+
+static inline void local_mcck_disable(void)
+{
+ __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
+}
+
/*
* Rewind PSW instruction address by specified number of bytes.
*/
@@ -266,65 +282,14 @@ void enabled_wait(void);
*/
static inline void __noreturn disabled_wait(unsigned long code)
{
- unsigned long ctl_buf;
- psw_t dw_psw;
-
- dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
- dw_psw.addr = code;
- /*
- * Store status and then load disabled wait psw,
- * the processor is dead afterwards
- */
- asm volatile(
- " stctg 0,0,0(%2)\n"
- " ni 4(%2),0xef\n" /* switch off protection */
- " lctlg 0,0,0(%2)\n"
- " lghi 1,0x1000\n"
- " stpt 0x328(1)\n" /* store timer */
- " stckc 0x330(1)\n" /* store clock comparator */
- " stpx 0x318(1)\n" /* store prefix register */
- " stam 0,15,0x340(1)\n"/* store access registers */
- " stfpc 0x31c(1)\n" /* store fpu control */
- " std 0,0x200(1)\n" /* store f0 */
- " std 1,0x208(1)\n" /* store f1 */
- " std 2,0x210(1)\n" /* store f2 */
- " std 3,0x218(1)\n" /* store f3 */
- " std 4,0x220(1)\n" /* store f4 */
- " std 5,0x228(1)\n" /* store f5 */
- " std 6,0x230(1)\n" /* store f6 */
- " std 7,0x238(1)\n" /* store f7 */
- " std 8,0x240(1)\n" /* store f8 */
- " std 9,0x248(1)\n" /* store f9 */
- " std 10,0x250(1)\n" /* store f10 */
- " std 11,0x258(1)\n" /* store f11 */
- " std 12,0x260(1)\n" /* store f12 */
- " std 13,0x268(1)\n" /* store f13 */
- " std 14,0x270(1)\n" /* store f14 */
- " std 15,0x278(1)\n" /* store f15 */
- " stmg 0,15,0x280(1)\n"/* store general registers */
- " stctg 0,15,0x380(1)\n"/* store control registers */
- " oi 0x384(1),0x10\n"/* fake protection bit */
- " lpswe 0(%1)"
- : "=m" (ctl_buf)
- : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
- while (1);
-}
+ psw_t psw;
-/*
- * Use to set psw mask except for the first byte which
- * won't be changed by this function.
- */
-static inline void
-__set_psw_mask(unsigned long mask)
-{
- __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
+ psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
+ psw.addr = code;
+ __load_psw(psw);
+ while (1);
}
-#define local_mcck_enable() \
- __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
-#define local_mcck_disable() \
- __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
-
/*
* Basic Machine Check/Program Check Handler.
*/
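
A typical pairing of the new inline variants above (the function and the critical-section body are hypothetical):

	static void mcck_protected_update_example(void)
	{
		local_mcck_disable();
		/* ... modify state the machine-check handler also inspects ... */
		local_mcck_enable();
	}
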
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 6feda2599282..37cbc50947f2 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -6,13 +6,14 @@
#ifndef _S390_PTRACE_H
#define _S390_PTRACE_H
+#include <linux/const.h>
#include <uapi/asm/ptrace.h>
#define PIF_SYSCALL 0 /* inside a system call */
#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */
-#define _PIF_SYSCALL (1<<PIF_SYSCALL)
-#define _PIF_PER_TRAP (1<<PIF_PER_TRAP)
+#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
+#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
#ifndef __ASSEMBLY__
@@ -128,17 +129,17 @@ struct per_struct_kernel {
static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
{
- regs->flags |= (1U << flag);
+ regs->flags |= (1UL << flag);
}
static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
{
- regs->flags &= ~(1U << flag);
+ regs->flags &= ~(1UL << flag);
}
static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
{
- return !!(regs->flags & (1U << flag));
+ return !!(regs->flags & (1UL << flag));
}
/*
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8ffc1bd0a9f..23537661da0e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -5,11 +5,38 @@
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
+#include <linux/const.h>
#include <uapi/asm/setup.h>
#define PARMAREA 0x10400
+/*
+ * Machine features detected in head.S
+ */
+
+#define MACHINE_FLAG_VM _BITUL(0)
+#define MACHINE_FLAG_IEEE _BITUL(1)
+#define MACHINE_FLAG_CSP _BITUL(2)
+#define MACHINE_FLAG_MVPG _BITUL(3)
+#define MACHINE_FLAG_DIAG44 _BITUL(4)
+#define MACHINE_FLAG_IDTE _BITUL(5)
+#define MACHINE_FLAG_DIAG9C _BITUL(6)
+#define MACHINE_FLAG_KVM _BITUL(8)
+#define MACHINE_FLAG_ESOP _BITUL(9)
+#define MACHINE_FLAG_EDAT1 _BITUL(10)
+#define MACHINE_FLAG_EDAT2 _BITUL(11)
+#define MACHINE_FLAG_LPAR _BITUL(12)
+#define MACHINE_FLAG_LPP _BITUL(13)
+#define MACHINE_FLAG_TOPOLOGY _BITUL(14)
+#define MACHINE_FLAG_TE _BITUL(15)
+#define MACHINE_FLAG_TLB_LC _BITUL(17)
+#define MACHINE_FLAG_VX _BITUL(18)
+#define MACHINE_FLAG_CAD _BITUL(19)
+
+#define LPP_MAGIC _BITUL(31)
+#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
+
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
@@ -28,29 +55,6 @@ extern unsigned long max_physmem_end;
extern void detect_memory_memblock(void);
-/*
- * Machine features detected in head.S
- */
-
-#define MACHINE_FLAG_VM (1UL << 0)
-#define MACHINE_FLAG_IEEE (1UL << 1)
-#define MACHINE_FLAG_CSP (1UL << 2)
-#define MACHINE_FLAG_MVPG (1UL << 3)
-#define MACHINE_FLAG_DIAG44 (1UL << 4)
-#define MACHINE_FLAG_IDTE (1UL << 5)
-#define MACHINE_FLAG_DIAG9C (1UL << 6)
-#define MACHINE_FLAG_KVM (1UL << 8)
-#define MACHINE_FLAG_ESOP (1UL << 9)
-#define MACHINE_FLAG_EDAT1 (1UL << 10)
-#define MACHINE_FLAG_EDAT2 (1UL << 11)
-#define MACHINE_FLAG_LPAR (1UL << 12)
-#define MACHINE_FLAG_LPP (1UL << 13)
-#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
-#define MACHINE_FLAG_TE (1UL << 15)
-#define MACHINE_FLAG_TLB_LC (1UL << 17)
-#define MACHINE_FLAG_VX (1UL << 18)
-#define MACHINE_FLAG_CAD (1UL << 19)
-
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 0e37cd041241..63ebf37d3143 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -87,7 +87,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(unsigned int, lp->lock);
asm volatile(
- __ASM_BARRIER
"st %1,%0\n"
: "+Q" (lp->lock)
: "d" (0)
@@ -169,7 +168,6 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw)
\
typecheck(unsigned int *, ptr); \
asm volatile( \
- "bcr 14,0\n" \
op_string " %0,%2,%1\n" \
: "=d" (old_val), "+Q" (*ptr) \
: "d" (op_val) \
@@ -243,7 +241,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->owner = 0;
asm volatile(
- __ASM_BARRIER
"st %1,%0\n"
: "+Q" (rw->lock)
: "d" (0)
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index dcadfde32265..12d45f0cfdd9 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -8,7 +8,7 @@
#define __ASM_SWITCH_TO_H
#include <linux/thread_info.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
#include <asm/ptrace.h>
extern struct task_struct *__switch_to(void *, void *);
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 4c27ec764c36..692b9247c019 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -7,6 +7,8 @@
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
+#include <linux/const.h>
+
/*
* Size of kernel stack for each process
*/
@@ -83,16 +85,16 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_BLOCK_STEP 20 /* This task is block stepped */
#define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
-#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP (1<<TIF_SECCOMP)
-#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_UPROBE (1<<TIF_UPROBE)
-#define _TIF_31BIT (1<<TIF_31BIT)
-#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
+#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
+#define _TIF_UPROBE _BITUL(TIF_UPROBE)
+#define _TIF_31BIT _BITUL(TIF_31BIT)
+#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
new file mode 100644
index 000000000000..cc6cfe7889da
--- /dev/null
+++ b/arch/s390/include/asm/trace/diag.h
@@ -0,0 +1,43 @@
+/*
+ * Tracepoint header for s390 diagnose calls
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM s390
+
+#if !defined(_TRACE_S390_DIAG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_S390_DIAG_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE diag
+
+TRACE_EVENT(s390_diagnose,
+ TP_PROTO(unsigned short nr),
+ TP_ARGS(nr),
+ TP_STRUCT__entry(
+ __field(unsigned short, nr)
+ ),
+ TP_fast_assign(
+ __entry->nr = nr;
+ ),
+ TP_printk("nr=0x%x", __entry->nr)
+);
+
+#ifdef CONFIG_TRACEPOINTS
+void trace_s390_diagnose_norecursion(int diag_nr);
+#else
+static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
+#endif
+
+#endif /* _TRACE_S390_DIAG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
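
A sketch of the glue this tracepoint implies; diag_stat and diag_map are hypothetical names here, and the real diag_stat_inc() body lives outside this diff (the headers only declare it): each call bumps a per-CPU counter and emits the s390_diagnose event, which can then be enabled at runtime via /sys/kernel/debug/tracing/events/s390/s390_diagnose/enable.

	void diag_stat_inc(enum diag_stat_enum nr)
	{
		this_cpu_inc(diag_stat.counter[nr]);		/* per-CPU statistics    */
		trace_s390_diagnose(diag_map[nr].code);		/* tracepoint from above */
	}
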