Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/appldata.h | 24
-rw-r--r--  arch/s390/include/asm/atomic.h | 95
-rw-r--r--  arch/s390/include/asm/bitops.h | 28
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 7
-rw-r--r--  arch/s390/include/asm/cputime.h | 26
-rw-r--r--  arch/s390/include/asm/ctl_reg.h | 14
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/s390/include/asm/elf.h | 18
-rw-r--r--  arch/s390/include/asm/idals.h | 16
-rw-r--r--  arch/s390/include/asm/irq.h | 1
-rw-r--r--  arch/s390/include/asm/jump_label.h | 15
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 58
-rw-r--r--  arch/s390/include/asm/livepatch.h | 43
-rw-r--r--  arch/s390/include/asm/lowcore.h | 159
-rw-r--r--  arch/s390/include/asm/mman.h | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 6
-rw-r--r--  arch/s390/include/asm/page.h | 11
-rw-r--r--  arch/s390/include/asm/pci.h | 10
-rw-r--r--  arch/s390/include/asm/percpu.h | 4
-rw-r--r--  arch/s390/include/asm/perf_event.h | 3
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 24
-rw-r--r--  arch/s390/include/asm/pgtable.h | 123
-rw-r--r--  arch/s390/include/asm/processor.h | 66
-rw-r--r--  arch/s390/include/asm/ptrace.h | 4
-rw-r--r--  arch/s390/include/asm/qdio.h | 10
-rw-r--r--  arch/s390/include/asm/runtime_instr.h | 10
-rw-r--r--  arch/s390/include/asm/rwsem.h | 81
-rw-r--r--  arch/s390/include/asm/setup.h | 35
-rw-r--r--  arch/s390/include/asm/sfp-util.h | 10
-rw-r--r--  arch/s390/include/asm/sparsemem.h | 9
-rw-r--r--  arch/s390/include/asm/switch_to.h | 21
-rw-r--r--  arch/s390/include/asm/syscall.h | 2
-rw-r--r--  arch/s390/include/asm/thread_info.h | 13
-rw-r--r--  arch/s390/include/asm/tlb.h | 4
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 7
-rw-r--r--  arch/s390/include/asm/types.h | 17
-rw-r--r--  arch/s390/include/asm/uaccess.h | 1
-rw-r--r--  arch/s390/include/asm/unistd.h | 8
-rw-r--r--  arch/s390/include/asm/vdso.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 4
-rw-r--r--  arch/s390/include/uapi/asm/sie.h | 4
41 files changed, 133 insertions(+), 864 deletions(-)
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 32a705987156..16887c5fd989 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -9,28 +9,6 @@
#include <asm/io.h>
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
-#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_REC 0x02
-#define APPLDATA_START_CONFIG_REC 0x03
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-struct appldata_parameter_list {
- u16 diag; /* The DIAGNOSE code X'00DC' */
- u8 function; /* The function code for the DIAGNOSE */
- u8 parlist_length; /* Length of the parameter list */
- u32 product_id_addr; /* Address of the 16-byte product ID */
- u16 reserved;
- u16 buffer_length; /* Length of the application data buffer */
- u32 buffer_addr; /* Address of the application data buffer */
-} __attribute__ ((packed));
-
-#else /* CONFIG_64BIT */
-
#define APPLDATA_START_INTERVAL_REC 0x80
#define APPLDATA_STOP_REC 0x81
#define APPLDATA_GEN_EVENT_REC 0x82
@@ -51,8 +29,6 @@ struct appldata_parameter_list {
u64 buffer_addr;
} __attribute__ ((packed));
-#endif /* CONFIG_64BIT */
-
struct appldata_product_id {
char prod_nr[7]; /* product number */
u16 prod_fn; /* product function */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa934fe080c1..adbe3802e377 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -160,8 +160,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define ATOMIC64_INIT(i) { (i) }
-#ifdef CONFIG_64BIT
-
#define __ATOMIC64_NO_BARRIER "\n"
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -274,99 +272,6 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
#undef __ATOMIC64_LOOP
-#else /* CONFIG_64BIT */
-
-typedef struct {
- long long counter;
-} atomic64_t;
-
-static inline long long atomic64_read(const atomic64_t *v)
-{
- register_pair rp;
-
- asm volatile(
- " lm %0,%N0,%1"
- : "=&d" (rp) : "Q" (v->counter) );
- return rp.pair;
-}
-
-static inline void atomic64_set(atomic64_t *v, long long i)
-{
- register_pair rp = {.pair = i};
-
- asm volatile(
- " stm %1,%N1,%0"
- : "=Q" (v->counter) : "d" (rp) );
-}
-
-static inline long long atomic64_xchg(atomic64_t *v, long long new)
-{
- register_pair rp_new = {.pair = new};
- register_pair rp_old;
-
- asm volatile(
- " lm %0,%N0,%1\n"
- "0: cds %0,%2,%1\n"
- " jl 0b\n"
- : "=&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-static inline long long atomic64_cmpxchg(atomic64_t *v,
- long long old, long long new)
-{
- register_pair rp_old = {.pair = old};
- register_pair rp_new = {.pair = new};
-
- asm volatile(
- " cds %0,%2,%1"
- : "+&d" (rp_old), "+Q" (v->counter)
- : "d" (rp_new)
- : "cc");
- return rp_old.pair;
-}
-
-
-static inline long long atomic64_add_return(long long i, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old + i;
- } while (atomic64_cmpxchg(v, old, new) != old);
- return new;
-}
-
-static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old | mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
-{
- long long old, new;
-
- do {
- old = atomic64_read(v);
- new = old & mask;
- } while (atomic64_cmpxchg(v, old, new) != old);
-}
-
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
- atomic64_add_return(i, v);
-}
-
-#endif /* CONFIG_64BIT */
-
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 520542477678..9b68e98a724f 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -51,32 +51,6 @@
#define __BITOPS_NO_BARRIER "\n"
-#ifndef CONFIG_64BIT
-
-#define __BITOPS_OR "or"
-#define __BITOPS_AND "nr"
-#define __BITOPS_XOR "xr"
-#define __BITOPS_BARRIER "\n"
-
-#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
-({ \
- unsigned long __old, __new; \
- \
- typecheck(unsigned long *, (__addr)); \
- asm volatile( \
- " l %0,%2\n" \
- "0: lr %1,%0\n" \
- __op_string " %1,%3\n" \
- " cs %0,%1,%2\n" \
- " jl 0b" \
- : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
- : "d" (__val) \
- : "cc", "memory"); \
- __old; \
-})
-
-#else /* CONFIG_64BIT */
-
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __BITOPS_OR "laog"
@@ -125,8 +99,6 @@
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
-#endif /* CONFIG_64BIT */
-
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 6259895fcd97..4eadec466b8c 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -80,15 +80,10 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
- int __ret; \
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \
BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \
VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
- if (sizeof(long) == 4) \
- __ret = __cmpxchg_double_4(__p1, __p2, o1, o2, n1, n2); \
- else \
- __ret = __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
- __ret; \
+ __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \
})
#define system_has_cmpxchg_double() 1
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index b91e960e4045..221b454c734a 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -22,15 +22,7 @@ typedef unsigned long long __nocast cputime64_t;
static inline unsigned long __div(unsigned long long n, unsigned long base)
{
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = n >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
- return rp.subreg.odd;
-#else /* CONFIG_64BIT */
return n / base;
-#endif /* CONFIG_64BIT */
}
#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -101,17 +93,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
struct timespec *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
- value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
@@ -129,17 +112,8 @@ static inline void cputime_to_timeval(const cputime_t cputime,
struct timeval *value)
{
unsigned long long __cputime = (__force unsigned long long) cputime;
-#ifndef CONFIG_64BIT
- register_pair rp;
-
- rp.pair = __cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_USEC / 2));
- value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
- value->tv_sec = rp.subreg.odd;
-#else
value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
value->tv_sec = __cputime / CPUTIME_PER_SEC;
-#endif
}
/*
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 31ab9f346d7e..cfad7fca01d6 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -9,20 +9,12 @@
#include <linux/bug.h>
-#ifdef CONFIG_64BIT
-# define __CTL_LOAD "lctlg"
-# define __CTL_STORE "stctg"
-#else
-# define __CTL_LOAD "lctl"
-# define __CTL_STORE "stctl"
-#endif
-
#define __ctl_load(array, low, high) { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_LOAD " %1,%2,%0\n" \
+ " lctlg %1,%2,%0\n" \
: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
}
@@ -31,7 +23,7 @@
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
- __CTL_STORE " %1,%2,%0\n" \
+ " stctg %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
}
@@ -60,9 +52,7 @@ void smp_ctl_clear_bit(int cr, int bit);
union ctlreg0 {
unsigned long val;
struct {
-#ifdef CONFIG_64BIT
unsigned long : 32;
-#endif
unsigned long : 3;
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 709955ddaa4d..9d395961e713 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -42,7 +42,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size - 1 <= *dev->dma_mask;
}
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c9df40b5c0ac..3ad48f22de78 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,7 @@
/*
* These are used to set parameters in the core dumps.
*/
-#ifndef CONFIG_64BIT
-#define ELF_CLASS ELFCLASS32
-#else /* CONFIG_64BIT */
#define ELF_CLASS ELFCLASS64
-#endif /* CONFIG_64BIT */
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_S390
@@ -161,10 +157,11 @@ extern unsigned int vdso_enabled;
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-extern unsigned long randomize_et_dyn(void);
-#define ELF_ET_DYN_BASE randomize_et_dyn()
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -211,7 +208,7 @@ do { \
extern unsigned long mmap_rnd_mask;
-#define STACK_RND_MASK (mmap_rnd_mask)
+#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
@@ -225,9 +222,6 @@ struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
int arch_setup_additional_pages(struct linux_binprm *, int);
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
#endif
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index ea5a6e45fd93..a7b2d7504049 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -19,11 +19,7 @@
#include <asm/cio.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
-#else
-#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
-#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
@@ -32,11 +28,7 @@
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
-#ifdef CONFIG_64BIT
return ((__pa(vaddr) + length - 1) >> 31) != 0;
-#else
- return 0;
-#endif
}
@@ -77,7 +69,6 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
static inline int
set_normalized_cda(struct ccw1 * ccw, void *vaddr)
{
-#ifdef CONFIG_64BIT
unsigned int nridaws;
unsigned long *idal;
@@ -93,7 +84,6 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
ccw->flags |= CCW_FLAG_IDA;
vaddr = idal;
}
-#endif
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
@@ -104,12 +94,10 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
static inline void
clear_normalized_cda(struct ccw1 * ccw)
{
-#ifdef CONFIG_64BIT
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
-#endif
ccw->cda = 0;
}
@@ -181,12 +169,8 @@ idal_buffer_free(struct idal_buffer *ib)
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
-#ifdef CONFIG_64BIT
return ib->size > (4096ul << ib->page_order) ||
idal_is_needed(ib->data[0], ib->size);
-#else
- return ib->size > (4096ul << ib->page_order);
-#endif
}
/*
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 343ea7c987aa..ff95d15a2384 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -57,7 +57,6 @@ enum interruption_class {
IRQIO_TAP,
IRQIO_VMR,
IRQIO_LCS,
- IRQIO_CLW,
IRQIO_CTC,
IRQIO_APB,
IRQIO_ADM,
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 58642fd29c87..69972b7957ee 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -1,19 +1,13 @@
#ifndef _ASM_S390_JUMP_LABEL_H
#define _ASM_S390_JUMP_LABEL_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
-#ifdef CONFIG_64BIT
-#define ASM_PTR ".quad"
-#define ASM_ALIGN ".balign 8"
-#else
-#define ASM_PTR ".long"
-#define ASM_ALIGN ".balign 4"
-#endif
-
/*
* We use a brcl 0,2 instruction for jump labels at compile time so it
* can be easily distinguished from a hotpatch generated instruction.
@@ -22,8 +16,8 @@ static __always_inline bool arch_static_branch(struct static_key *key)
{
asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
- ASM_ALIGN "\n"
- ASM_PTR " 0b, %l[label], %0\n"
+ ".balign 8\n"
+ ".quad 0b, %l[label], %0\n"
".popsection\n"
: : "X" (key) : : label);
return false;
@@ -39,4 +33,5 @@ struct jump_entry {
jump_label_t key;
};
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index d84559e31f32..d01fc588b5c3 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
__u32 fac; /* 0x01a0 */
__u8 reserved1a4[20]; /* 0x01a4 */
__u64 cbrlo; /* 0x01b8 */
- __u8 reserved1c0[30]; /* 0x01c0 */
+ __u8 reserved1c0[8]; /* 0x01c0 */
+ __u32 ecd; /* 0x01c8 */
+ __u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
__u8 data[256];
} __packed;
+struct kvm_s390_vregs {
+ __vector128 vrs[32];
+ __u8 reserved200[512]; /* for future vector expansion */
+} __packed;
+
struct sie_page {
struct kvm_s390_sie_block sie_block;
__u8 reserved200[1024]; /* 0x0200 */
struct kvm_s390_itdb itdb; /* 0x0600 */
- __u8 reserved700[2304]; /* 0x0700 */
+ __u8 reserved700[1280]; /* 0x0700 */
+ struct kvm_s390_vregs vregs; /* 0x0c00 */
} __packed;
struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_stop;
u32 instruction_sigp_stop_store_status;
u32 instruction_sigp_store_status;
+ u32 instruction_sigp_store_adtl_status;
u32 instruction_sigp_arch;
u32 instruction_sigp_prefix;
u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
#define PGM_SPECIAL_OPERATION 0x13
#define PGM_OPERAND 0x15
#define PGM_TRACE_TABEL 0x16
+#define PGM_VECTOR_PROCESSING 0x1b
#define PGM_SPACE_SWITCH 0x1c
#define PGM_HFP_SQUARE_ROOT 0x1d
#define PGM_PC_TRANSLATION_SPEC 0x1f
@@ -334,6 +344,11 @@ enum irq_types {
IRQ_PEND_COUNT
};
+/* We have 2M for virtio device descriptor pages. Smallest amount of
+ * memory per page is 24 bytes (1 queue), so (2048*1024) / 24 = 87381
+ */
+#define KVM_S390_MAX_VIRTIO_IRQS 87381
+
/*
* Repressible (non-floating) machine check interrupts
* subclass bits in MCIC
@@ -411,13 +426,32 @@ struct kvm_s390_local_interrupt {
unsigned long pending_irqs;
};
+#define FIRQ_LIST_IO_ISC_0 0
+#define FIRQ_LIST_IO_ISC_1 1
+#define FIRQ_LIST_IO_ISC_2 2
+#define FIRQ_LIST_IO_ISC_3 3
+#define FIRQ_LIST_IO_ISC_4 4
+#define FIRQ_LIST_IO_ISC_5 5
+#define FIRQ_LIST_IO_ISC_6 6
+#define FIRQ_LIST_IO_ISC_7 7
+#define FIRQ_LIST_PFAULT 8
+#define FIRQ_LIST_VIRTIO 9
+#define FIRQ_LIST_COUNT 10
+#define FIRQ_CNTR_IO 0
+#define FIRQ_CNTR_SERVICE 1
+#define FIRQ_CNTR_VIRTIO 2
+#define FIRQ_CNTR_PFAULT 3
+#define FIRQ_MAX_COUNT 4
+
struct kvm_s390_float_interrupt {
+ unsigned long pending_irqs;
spinlock_t lock;
- struct list_head list;
- atomic_t active;
+ struct list_head lists[FIRQ_LIST_COUNT];
+ int counters[FIRQ_MAX_COUNT];
+ struct kvm_s390_mchk_info mchk;
+ struct kvm_s390_ext_info srv_signal;
int next_rr_cpu;
unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int irq_count;
};
struct kvm_hw_wp_info_arch {
@@ -465,6 +499,7 @@ struct kvm_vcpu_arch {
s390_fp_regs host_fpregs;
unsigned int host_acrs[NUM_ACRS];
s390_fp_regs guest_fpregs;
+ struct kvm_s390_vregs *host_vregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
@@ -515,15 +550,15 @@ struct s390_io_adapter {
#define S390_ARCH_FAC_MASK_SIZE_U64 \
(S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64))
-struct s390_model_fac {
- /* facilities used in SIE context */
- __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64];
- /* subset enabled by kvm */
- __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64];
+struct kvm_s390_fac {
+ /* facility list requested by guest */
+ __u64 list[S390_ARCH_FAC_LIST_SIZE_U64];
+ /* facility mask supported by kvm & hosting machine */
+ __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64];
};
struct kvm_s390_cpu_model {
- struct s390_model_fac *fac;
+ struct kvm_s390_fac *fac;
struct cpuid cpu_id;
unsigned short ibc;
};
@@ -553,6 +588,7 @@ struct kvm_arch{
int use_cmma;
int user_cpu_state_ctrl;
int user_sigp;
+ int user_stsi;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
new file mode 100644
index 000000000000..7aa799134a11
--- /dev/null
+++ b/arch/s390/include/asm/livepatch.h
@@ -0,0 +1,43 @@
+/*
+ * livepatch.h - s390-specific Kernel Live Patching Core
+ *
+ * Copyright (c) 2013-2015 SUSE
+ * Authors: Jiri Kosina
+ * Vojtech Pavlik
+ * Jiri Slaby
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef ASM_LIVEPATCH_H
+#define ASM_LIVEPATCH_H
+
+#include <linux/module.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+ return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+ type, unsigned long loc, unsigned long value)
+{
+ /* not supported yet */
+ return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->psw.addr = ip;
+}
+#else
+#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#endif
+
+#endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 34fbcac61133..663f23e37460 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -13,163 +13,6 @@
#include <asm/cpu.h>
#include <asm/types.h>
-#ifdef CONFIG_32BIT
-
-#define LC_ORDER 0
-#define LC_PAGES 1
-
-struct save_area {
- u32 ext_save;
- u64 timer;
- u64 clk_cmp;
- u8 pad1[24];
- u8 psw[8];
- u32 pref_reg;
- u8 pad2[20];
- u32 acc_regs[16];
- u64 fp_regs[4];
- u32 gp_regs[16];
- u32 ctrl_regs[16];
-} __packed;
-
-struct save_area_ext {
- struct save_area sa;
- __vector128 vx_regs[32];
-};
-
-struct _lowcore {
- psw_t restart_psw; /* 0x0000 */
- psw_t restart_old_psw; /* 0x0008 */
- __u8 pad_0x0010[0x0014-0x0010]; /* 0x0010 */
- __u32 ipl_parmblock_ptr; /* 0x0014 */
- psw_t external_old_psw; /* 0x0018 */
- psw_t svc_old_psw; /* 0x0020 */
- psw_t program_old_psw; /* 0x0028 */
- psw_t mcck_old_psw; /* 0x0030 */
- psw_t io_old_psw; /* 0x0038 */
- __u8 pad_0x0040[0x0058-0x0040]; /* 0x0040 */
- psw_t external_new_psw; /* 0x0058 */
- psw_t svc_new_psw; /* 0x0060 */
- psw_t program_new_psw; /* 0x0068 */
- psw_t mcck_new_psw; /* 0x0070 */
- psw_t io_new_psw; /* 0x0078 */
- __u32 ext_params; /* 0x0080 */
- __u16 ext_cpu_addr; /* 0x0084 */
- __u16 ext_int_code; /* 0x0086 */
- __u16 svc_ilc; /* 0x0088 */
- __u16 svc_code; /* 0x008a */
- __u16 pgm_ilc; /* 0x008c */
- __u16 pgm_code; /* 0x008e */
- __u32 trans_exc_code; /* 0x0090 */
- __u16 mon_class_num; /* 0x0094 */
- __u8 per_code; /* 0x0096 */
- __u8 per_atmid; /* 0x0097 */
- __u32 per_address; /* 0x0098 */
- __u32 monitor_code; /* 0x009c */
- __u8 exc_access_id; /* 0x00a0 */
- __u8 per_access_id; /* 0x00a1 */
- __u8 op_access_id; /* 0x00a2 */
- __u8 ar_mode_id; /* 0x00a3 */
- __u8 pad_0x00a4[0x00b8-0x00a4]; /* 0x00a4 */
- __u16 subchannel_id; /* 0x00b8 */
- __u16 subchannel_nr; /* 0x00ba */
- __u32 io_int_parm; /* 0x00bc */
- __u32 io_int_word; /* 0x00c0 */
- __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */
- __u32 stfl_fac_list; /* 0x00c8 */
- __u8 pad_0x00cc[0x00d4-0x00cc]; /* 0x00cc */
- __u32 extended_save_area_addr; /* 0x00d4 */
- __u32 cpu_timer_save_area[2]; /* 0x00d8 */
- __u32 clock_comp_save_area[2]; /* 0x00e0 */
- __u32 mcck_interruption_code[2]; /* 0x00e8 */
- __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */
- __u32 external_damage_code; /* 0x00f4 */
- __u32 failing_storage_address; /* 0x00f8 */
- __u8 pad_0x00fc[0x0100-0x00fc]; /* 0x00fc */
- psw_t psw_save_area; /* 0x0100 */
- __u32 prefixreg_save_area; /* 0x0108 */
- __u8 pad_0x010c[0x0120-0x010c]; /* 0x010c */
-
- /* CPU register save area: defined by architecture */
- __u32 access_regs_save_area[16]; /* 0x0120 */
- __u32 floating_pt_save_area[8]; /* 0x0160 */
- __u32 gpregs_save_area[16]; /* 0x0180 */
- __u32 cregs_save_area[16]; /* 0x01c0 */
-
- /* Save areas. */
- __u32 save_area_sync[8]; /* 0x0200 */
- __u32 save_area_async[8]; /* 0x0220 */
- __u32 save_area_restart[1]; /* 0x0240 */
-
- /* CPU flags. */
- __u32 cpu_flags; /* 0x0244 */
-
- /* Return psws. */
- psw_t return_psw; /* 0x0248 */
- psw_t return_mcck_psw; /* 0x0250 */
-
- /* CPU time accounting values */
- __u64 sync_enter_timer; /* 0x0258 */
- __u64 async_enter_timer; /* 0x0260 */
- __u64 mcck_enter_timer; /* 0x0268 */
- __u64 exit_timer; /* 0x0270 */
- __u64 user_timer; /* 0x0278 */
- __u64 system_timer; /* 0x0280 */
- __u64 steal_timer; /* 0x0288 */
- __u64 last_update_timer; /* 0x0290 */
- __u64 last_update_clock; /* 0x0298 */
- __u64 int_clock; /* 0x02a0 */
- __u64 mcck_clock; /* 0x02a8 */
- __u64 clock_comparator; /* 0x02b0 */
-
- /* Current process. */
- __u32 current_task; /* 0x02b8 */
- __u32 thread_info; /* 0x02bc */
- __u32 kernel_stack; /* 0x02c0 */
-
- /* Interrupt, panic and restart stack. */
- __u32 async_stack; /* 0x02c4 */
- __u32 panic_stack; /* 0x02c8 */
- __u32 restart_stack; /* 0x02cc */
-
- /* Restart function and parameter. */
- __u32 restart_fn; /* 0x02d0 */
- __u32 restart_data; /* 0x02d4 */
- __u32 restart_source; /* 0x02d8 */
-
- /* Address space pointer. */
- __u32 kernel_asce; /* 0x02dc */
- __u32 user_asce; /* 0x02e0 */
- __u32 current_pid; /* 0x02e4 */
-
- /* SMP info area */
- __u32 cpu_nr; /* 0x02e8 */
- __u32 softirq_pending; /* 0x02ec */
- __u32 percpu_offset; /* 0x02f0 */
- __u32 machine_flags; /* 0x02f4 */
- __u8 pad_0x02f8[0x02fc-0x02f8]; /* 0x02f8 */
- __u32 spinlock_lockval; /* 0x02fc */
-
- __u8 pad_0x0300[0x0e00-0x0300]; /* 0x0300 */
-
- /*
- * 0xe00 contains the address of the IPL Parameter Information
- * block. Dump tools need IPIB for IPL after dump.
- * Note: do not change the position of any fields in 0x0e00-0x0f00
- */
- __u32 ipib; /* 0x0e00 */
- __u32 ipib_checksum; /* 0x0e04 */
- __u32 vmcore_info; /* 0x0e08 */
- __u8 pad_0x0e0c[0x0e18-0x0e0c]; /* 0x0e0c */
- __u32 os_info; /* 0x0e18 */
- __u8 pad_0x0e1c[0x0f00-0x0e1c]; /* 0x0e1c */
-
- /* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
-} __packed;
-
-#else /* CONFIG_32BIT */
-
#define LC_ORDER 1
#define LC_PAGES 2
@@ -354,8 +197,6 @@ struct _lowcore {
__u8 vector_save_area[1024]; /* 0x1c00 */
} __packed;
-#endif /* CONFIG_32BIT */
-
#define S390_lowcore (*((struct _lowcore *) 0))
extern struct _lowcore *lowcore_ptr[];
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index 9977e08df5bd..b55a59e1d134 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -8,7 +8,7 @@
#include <uapi/asm/mman.h>
-#if !defined(__ASSEMBLY__) && defined(CONFIG_64BIT)
+#ifndef __ASSEMBLY__
int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
#define arch_mmap_check(addr, len, flags) s390_mmap_check(addr, len, flags)
#endif
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index f49b71954654..d25d9ff10ba8 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,9 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
-#endif
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.asce_limit = STACK_TOP_MAX;
@@ -62,6 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
+ S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
@@ -73,7 +72,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
atomic_dec(&prev->context.attach_count);
if (MACHINE_HAS_TLB_LC)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -110,10 +108,8 @@ static inline void activate_mm(struct mm_struct *prev,
static inline void arch_dup_mmap(struct mm_struct *oldmm,
struct mm_struct *mm)
{
-#ifdef CONFIG_64BIT
if (oldmm->context.asce_limit < mm->context.asce_limit)
crst_table_downgrade(mm, oldmm->context.asce_limit);
-#endif
}
static inline void arch_exit_mmap(struct mm_struct *mm)
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 7b2ac6e44166..53eacbd4f09b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end
#endif
}
-static inline void clear_page(void *page)
-{
- register unsigned long reg1 asm ("1") = 0;
- register void *reg2 asm ("2") = page;
- register unsigned long reg3 asm ("3") = 4096;
- asm volatile(
- " mvcl 2,0"
- : "+d" (reg2), "+d" (reg3) : "d" (reg1)
- : "memory", "cc");
-}
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
/*
* copy_page uses the mvcl instruction with 0xb0 padding byte in order to
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index ef803c202d42..a648338c434a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -7,6 +7,7 @@
#define PCI_BAR_COUNT 6
#include <linux/pci.h>
+#include <linux/mutex.h>
#include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h>
@@ -44,10 +45,6 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
- /* software counters */
- atomic64_t allocated_pages;
- atomic64_t mapped_pages;
- atomic64_t unmapped_pages;
} __packed __aligned(16);
enum zpci_state {
@@ -80,6 +77,7 @@ struct zpci_dev {
u8 pft; /* pci function type */
u16 domain;
+ struct mutex lock;
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
@@ -111,6 +109,10 @@ struct zpci_dev {
/* Function measurement block */
struct zpci_fmb *fmb;
u16 fmb_update; /* update interval */
+ /* software counters */
+ atomic64_t allocated_pages;
+ atomic64_t mapped_pages;
+ atomic64_t unmapped_pages;
enum pci_bus_speed max_bus_speed;
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 933355e0d091..6d6556ca24aa 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -10,8 +10,6 @@
*/
#define __my_cpu_offset S390_lowcore.percpu_offset
-#ifdef CONFIG_64BIT
-
/*
* For 64 bit module code, the module may be more than 4G above the
* per cpu area, use weak definitions to force the compiler to
@@ -183,8 +181,6 @@
#define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif /* CONFIG_64BIT */
-
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 159a8ec6da9a..4cb19fe76dd9 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -9,8 +9,6 @@
#ifndef _ASM_S390_PERF_EVENT_H
#define _ASM_S390_PERF_EVENT_H
-#ifdef CONFIG_64BIT
-
#include <linux/perf_event.h>
#include <linux/device.h>
#include <asm/cpu_mf.h>
@@ -92,5 +90,4 @@ struct sf_raw_sample {
int perf_reserve_sampling(void);
void perf_release_sampling(void);
-#endif /* CONFIG_64BIT */
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 3009c2ba46d2..51e7fb634ebc 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -33,11 +33,7 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
*s = val;
n = (n / 256) - 1;
asm volatile(
-#ifdef CONFIG_64BIT
" mvc 8(248,%0),0(%0)\n"
-#else
- " mvc 4(252,%0),0(%0)\n"
-#endif
"0: mvc 256(256,%0),0(%0)\n"
" la %0,256(%0)\n"
" brct %1,0b\n"
@@ -50,24 +46,6 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
clear_table(crst, entry, sizeof(unsigned long)*2048);
}
-#ifndef CONFIG_64BIT
-
-static inline unsigned long pgd_entry_type(struct mm_struct *mm)
-{
- return _SEGMENT_ENTRY_EMPTY;
-}
-
-#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(mm, x) do { } while (0)
-
-#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x) do { } while (0)
-
-#define pgd_populate(mm, pgd, pud) BUG()
-#define pud_populate(mm, pud, pmd) BUG()
-
-#else /* CONFIG_64BIT */
-
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
if (mm->context.asce_limit <= (1UL << 31))
@@ -119,8 +97,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
-#endif /* CONFIG_64BIT */
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
spin_lock_init(&mm->context.list_lock);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fbb5ee3ae57c..989cfae9e202 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -66,15 +66,9 @@ extern unsigned long zero_page_mask;
* table can map
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
-#ifndef CONFIG_64BIT
-# define PMD_SHIFT 20
-# define PUD_SHIFT 20
-# define PGDIR_SHIFT 20
-#else /* CONFIG_64BIT */
-# define PMD_SHIFT 20
-# define PUD_SHIFT 31
-# define PGDIR_SHIFT 42
-#endif /* CONFIG_64BIT */
+#define PMD_SHIFT 20
+#define PUD_SHIFT 31
+#define PGDIR_SHIFT 42
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
@@ -90,13 +84,8 @@ extern unsigned long zero_page_mask;
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
-#ifndef CONFIG_64BIT
-#define PTRS_PER_PMD 1
-#define PTRS_PER_PUD 1
-#else /* CONFIG_64BIT */
#define PTRS_PER_PMD 2048
#define PTRS_PER_PUD 2048
-#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0UL
@@ -125,23 +114,19 @@ extern struct page *vmemmap;
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
-#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR MODULES_VADDR
#define MODULES_END MODULES_END
#define MODULES_LEN (1UL << 31)
-#endif
static inline int is_module_addr(void *addr)
{
-#ifdef CONFIG_64BIT
BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
if (addr < (void *)MODULES_VADDR)
return 0;
if (addr > (void *)MODULES_END)
return 0;
-#endif
return 1;
}
@@ -282,56 +267,6 @@ static inline int is_module_addr(void *addr)
* pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
-#ifndef CONFIG_64BIT
-
-/* Bits in the segment table address-space-control-element */
-#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
-#define _ASCE_ORIGIN_MASK 0x7ffff000UL /* segment table origin */
-#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
-#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
-#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
-
-/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
-#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
-#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
-#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
-
-#define _SEGMENT_ENTRY_DIRTY 0 /* No sw dirty bit for 31-bit */
-#define _SEGMENT_ENTRY_YOUNG 0 /* No sw young bit for 31-bit */
-#define _SEGMENT_ENTRY_READ 0 /* No sw read bit for 31-bit */
-#define _SEGMENT_ENTRY_WRITE 0 /* No sw write bit for 31-bit */
-#define _SEGMENT_ENTRY_LARGE 0 /* No large pages for 31-bit */
-#define _SEGMENT_ENTRY_BITS_LARGE 0
-#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
-
-#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
-
-/*
- * Segment table entry encoding (I = invalid, R = read-only bit):
- * ..R...I.....
- * prot-none ..1...1.....
- * read-only ..1...0.....
- * read-write ..0...0.....
- * empty ..0...1.....
- */
-
-/* Page status table bits for virtualization */
-#define PGSTE_ACC_BITS 0xf0000000UL
-#define PGSTE_FP_BIT 0x08000000UL
-#define PGSTE_PCL_BIT 0x00800000UL
-#define PGSTE_HR_BIT 0x00400000UL
-#define PGSTE_HC_BIT 0x00200000UL
-#define PGSTE_GR_BIT 0x00040000UL
-#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */
-
-#else /* CONFIG_64BIT */
-
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
@@ -415,8 +350,6 @@ static inline int is_module_addr(void *addr)
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
-#endif /* CONFIG_64BIT */
-
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL
@@ -507,19 +440,6 @@ static inline int mm_use_skey(struct mm_struct *mm)
/*
* pgd/pmd/pte query functions
*/
-#ifndef CONFIG_64BIT
-
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-
-static inline int pud_present(pud_t pud) { return 1; }
-static inline int pud_none(pud_t pud) { return 0; }
-static inline int pud_large(pud_t pud) { return 0; }
-static inline int pud_bad(pud_t pud) { return 0; }
-
-#else /* CONFIG_64BIT */
-
static inline int pgd_present(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
@@ -581,8 +501,6 @@ static inline int pud_bad(pud_t pud)
return (pud_val(pud) & mask) != 0;
}
-#endif /* CONFIG_64BIT */
-
static inline int pmd_present(pmd_t pmd)
{
return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
@@ -914,18 +832,14 @@ static inline int pte_unused(pte_t pte)
static inline void pgd_clear(pgd_t *pgd)
{
-#ifdef CONFIG_64BIT
if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
-#endif
}
static inline void pud_clear(pud_t *pud)
{
-#ifdef CONFIG_64BIT
if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-#endif
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -1024,10 +938,6 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + global TLB flush for the pte */
asm volatile(
" ipte %2,%3"
@@ -1038,10 +948,6 @@ static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidation + local TLB flush for the pte */
asm volatile(
" .insn rrf,0xb2210000,%2,%3,0,1"
@@ -1052,10 +958,6 @@ static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
unsigned long pto = (unsigned long) ptep;
-#ifndef CONFIG_64BIT
- /* pto in ESA mode must point to the start of the segment table */
- pto &= 0x7ffffc00;
-#endif
/* Invalidate a range of ptes + global TLB flush of the ptes */
do {
asm volatile(
@@ -1374,17 +1276,6 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#ifndef CONFIG_64BIT
-
-#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
-#define pud_deref(pmd) ({ BUG(); 0UL; })
-#define pgd_deref(pmd) ({ BUG(); 0UL; })
-
-#define pud_offset(pgd, address) ((pud_t *) pgd)
-#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
-
-#else /* CONFIG_64BIT */
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
@@ -1405,8 +1296,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
return pmd + pmd_index(address);
}
-#endif /* CONFIG_64BIT */
-
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -1727,11 +1616,9 @@ static inline int has_transparent_hugepage(void)
* 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
* 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
*/
-#ifndef CONFIG_64BIT
-#define __SWP_OFFSET_MASK (~0UL >> 12)
-#else
+
#define __SWP_OFFSET_MASK (~0UL >> 11)
-#endif
+
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index e7cbbdcdee13..dedb6218544b 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -19,7 +19,6 @@
#define _CIF_ASCE (1<<CIF_ASCE)
#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY)
-
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -66,13 +65,6 @@ extern void execve_tail(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
*/
-#ifndef CONFIG_64BIT
-
-#define TASK_SIZE (1UL << 31)
-#define TASK_MAX_SIZE (1UL << 31)
-#define TASK_UNMAPPED_BASE (1UL << 30)
-
-#else /* CONFIG_64BIT */
#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
@@ -80,15 +72,8 @@ extern void execve_tail(void);
#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_MAX_SIZE (1UL << 53)
-#endif /* CONFIG_64BIT */
-
-#ifndef CONFIG_64BIT
-#define STACK_TOP (1UL << 31)
-#define STACK_TOP_MAX (1UL << 31)
-#else /* CONFIG_64BIT */
#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX (1UL << 42)
-#endif /* CONFIG_64BIT */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -115,10 +100,8 @@ struct thread_struct {
/* cpu runtime instrumentation */
struct runtime_instr_cb *ri_cb;
int ri_signum;
-#ifdef CONFIG_64BIT
unsigned char trap_tdb[256]; /* Transaction abort diagnose block */
__vector128 *vxrs; /* Vector register save area */
-#endif
};
/* Flag to disable transactions. */
@@ -181,11 +164,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-#ifdef CONFIG_64BIT
-extern void show_cacheinfo(struct seq_file *m);
-#else
-static inline void show_cacheinfo(struct seq_file *m) { }
-#endif
+void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -229,11 +208,7 @@ static inline void psw_set_key(unsigned int key)
*/
static inline void __load_psw(psw_t psw)
{
-#ifndef CONFIG_64BIT
- asm volatile("lpsw %0" : : "Q" (psw) : "cc");
-#else
asm volatile("lpswe %0" : : "Q" (psw) : "cc");
-#endif
}
/*
@@ -247,22 +222,12 @@ static inline void __load_psw_mask (unsigned long mask)
psw.mask = mask;
-#ifndef CONFIG_64BIT
- asm volatile(
- " basr %0,0\n"
- "0: ahi %0,1f-0b\n"
- " st %0,%O1+4(%R1)\n"
- " lpsw %1\n"
- "1:"
- : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" larl %0,1f\n"
" stg %0,%O1+8(%R1)\n"
" lpswe %1\n"
"1:"
: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
-#endif /* CONFIG_64BIT */
}
/*
@@ -270,20 +235,12 @@ static inline void __load_psw_mask (unsigned long mask)
*/
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
-#ifndef CONFIG_64BIT
- if (psw.addr & PSW_ADDR_AMODE)
- /* 31 bit mode */
- return (psw.addr - ilc) | PSW_ADDR_AMODE;
- /* 24 bit mode */
- return (psw.addr - ilc) & ((1UL << 24) - 1);
-#else
unsigned long mask;
mask = (psw.mask & PSW_MASK_EA) ? -1UL :
(psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
(1UL << 24) - 1;
return (psw.addr - ilc) & mask;
-#endif
}
/*
@@ -305,26 +262,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
* Store status and then load disabled wait psw,
* the processor is dead afterwards
*/
-#ifndef CONFIG_64BIT
- asm volatile(
- " stctl 0,0,0(%2)\n"
- " ni 0(%2),0xef\n" /* switch off protection */
- " lctl 0,0,0(%2)\n"
- " stpt 0xd8\n" /* store timer */
- " stckc 0xe0\n" /* store clock comparator */
- " stpx 0x108\n" /* store prefix register */
- " stam 0,15,0x120\n" /* store access registers */
- " std 0,0x160\n" /* store f0 */
- " std 2,0x168\n" /* store f2 */
- " std 4,0x170\n" /* store f4 */
- " std 6,0x178\n" /* store f6 */
- " stm 0,15,0x180\n" /* store general registers */
- " stctl 0,15,0x1c0\n" /* store control registers */
- " oi 0x1c0,0x10\n" /* fake protection bit */
- " lpsw 0(%1)"
- : "=m" (ctl_buf)
- : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
-#else /* CONFIG_64BIT */
asm volatile(
" stctg 0,0,0(%2)\n"
" ni 4(%2),0xef\n" /* switch off protection */
@@ -357,7 +294,6 @@ static inline void __noreturn disabled_wait(unsigned long code)
" lpswe 0(%1)"
: "=m" (ctl_buf)
: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
-#endif /* CONFIG_64BIT */
while (1);
}
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index be317feff7ac..6feda2599282 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -40,12 +40,8 @@ struct psw_bits {
unsigned long long ri : 1; /* Runtime Instrumentation */
unsigned long long : 6;
unsigned long long eaba : 2; /* Addressing Mode */
-#ifdef CONFIG_64BIT
unsigned long long : 31;
unsigned long long ia : 64;/* Instruction Address */
-#else
- unsigned long long ia : 31;/* Instruction Address */
-#endif
};
enum {
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 06f3034605a1..998b61cd0e56 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -211,11 +211,6 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
-#ifdef CONFIG_32BIT
- /* private: */
- void *res2;
- /* public: */
-#endif
void *addr;
} __attribute__ ((packed, aligned(16)));
@@ -232,11 +227,6 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
-#ifdef CONFIG_32BIT
- /* private: */
- unsigned long reserved;
- /* public: */
-#endif
unsigned long sbal;
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 830da737ff85..402ad6df4897 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -72,27 +72,19 @@ static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_prev)
store_runtime_instr_cb(cb_prev);
-#endif
}
static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
struct runtime_instr_cb *cb_prev)
{
-#ifdef CONFIG_64BIT
if (cb_next)
load_runtime_instr_cb(cb_next);
else if (cb_prev)
load_runtime_instr_cb(&runtime_instr_empty_cb);
-#endif
}
-#ifdef CONFIG_64BIT
-extern void exit_thread_runtime_instr(void);
-#else
-static inline void exit_thread_runtime_instr(void) { }
-#endif
+void exit_thread_runtime_instr(void);
#endif /* _RUNTIME_INSTR_H */
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 487f9b64efb9..4b43ee7e6776 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -39,17 +39,10 @@
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif
-#ifndef CONFIG_64BIT
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-#define RWSEM_ACTIVE_BIAS 0x00000001
-#define RWSEM_ACTIVE_MASK 0x0000ffff
-#define RWSEM_WAITING_BIAS (-0x00010000)
-#else /* CONFIG_64BIT */
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#endif /* CONFIG_64BIT */
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
@@ -61,19 +54,11 @@ static inline void __down_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -89,15 +74,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: ltr %1,%0\n"
- " jm 1f\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b\n"
- "1:"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
@@ -105,7 +81,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -121,19 +96,11 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -154,19 +121,11 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
signed long old;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%1\n"
- "0: ltr %0,%0\n"
- " jnz 1f\n"
- " cs %0,%3,%1\n"
- " jl 0b\n"
-#else /* CONFIG_64BIT */
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
-#endif /* CONFIG_64BIT */
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -182,19 +141,11 @@ static inline void __up_read(struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ahi %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
@@ -212,19 +163,11 @@ static inline void __up_write(struct rw_semaphore *sem)
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -242,19 +185,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
tmp = -RWSEM_WAITING_BIAS;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " a %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
@@ -270,19 +205,11 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
@@ -296,19 +223,11 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
signed long old, new;
asm volatile(
-#ifndef CONFIG_64BIT
- " l %0,%2\n"
- "0: lr %1,%0\n"
- " ar %1,%4\n"
- " cs %0,%1,%2\n"
- " jl 0b"
-#else /* CONFIG_64BIT */
" lg %0,%2\n"
"0: lgr %1,%0\n"
" agr %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
-#endif /* CONFIG_64BIT */
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "d" (delta)
: "cc", "memory");
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b8d1e54b4733..b8ffc1bd0a9f 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -15,19 +15,11 @@
#include <asm/lowcore.h>
#include <asm/types.h>
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE (*(unsigned long *) (0x10404))
-#define INITRD_START (*(unsigned long *) (0x1040C))
-#define INITRD_SIZE (*(unsigned long *) (0x10414))
-#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
-#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
-#else /* CONFIG_64BIT */
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE ((char *) (0x10480))
extern int memory_end_set;
@@ -68,26 +60,8 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
-#ifndef CONFIG_64BIT
-#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
-#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
-#define MACHINE_HAS_IDTE (0)
-#define MACHINE_HAS_DIAG44 (1)
-#define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_EDAT1 (0)
-#define MACHINE_HAS_EDAT2 (0)
-#define MACHINE_HAS_LPP (0)
-#define MACHINE_HAS_TOPOLOGY (0)
-#define MACHINE_HAS_TE (0)
-#define MACHINE_HAS_TLB_LC (0)
-#define MACHINE_HAS_VX (0)
-#define MACHINE_HAS_CAD (0)
-#else /* CONFIG_64BIT */
-#define MACHINE_HAS_IEEE (1)
-#define MACHINE_HAS_CSP (1)
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
-#define MACHINE_HAS_MVPG (1)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
#define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
#define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
@@ -96,7 +70,6 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
-#endif /* CONFIG_64BIT */
/*
* Console mode. Override with conmode=
@@ -135,19 +108,11 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
-#ifndef CONFIG_64BIT
-#define IPL_DEVICE 0x10404
-#define INITRD_START 0x1040C
-#define INITRD_SIZE 0x10414
-#define OLDMEM_BASE 0x1041C
-#define OLDMEM_SIZE 0x10424
-#else /* CONFIG_64BIT */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
-#endif /* CONFIG_64BIT */
#define COMMAND_LINE 0x10480
#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index 5959bfb3b693..c8b7cf9d6279 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,6 @@
wl = __wl; \
})
-#ifdef CONFIG_64BIT
#define udiv_qrnnd(q, r, n1, n0, d) \
do { unsigned long __n; \
unsigned int __r, __d; \
@@ -60,15 +59,6 @@
(q) = __n / __d; \
(r) = __n % __d; \
} while (0)
-#else
-#define udiv_qrnnd(q, r, n1, n0, d) \
- do { unsigned int __r; \
- (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
- (r) = __r; \
- } while (0)
-extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int,
- unsigned int , unsigned int);
-#endif
#define UDIV_NEEDS_NORMALIZATION 0
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index a60d085ddb4d..487428b6d099 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -1,16 +1,7 @@
#ifndef _ASM_S390_SPARSEMEM_H
#define _ASM_S390_SPARSEMEM_H
-#ifdef CONFIG_64BIT
-
#define SECTION_SIZE_BITS 28
#define MAX_PHYSMEM_BITS 46
-#else
-
-#define SECTION_SIZE_BITS 25
-#define MAX_PHYSMEM_BITS 31
-
-#endif /* CONFIG_64BIT */
-
#endif /* _ASM_S390_SPARSEMEM_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 2542a7e4c8b4..d62e7a69605f 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -18,9 +18,6 @@ static inline int test_fp_ctl(u32 fpc)
u32 orig_fpc;
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" efpc %1\n"
" sfpc %2\n"
@@ -35,9 +32,6 @@ static inline int test_fp_ctl(u32 fpc)
static inline void save_fp_ctl(u32 *fpc)
{
- if (!MACHINE_HAS_IEEE)
- return;
-
asm volatile(
" stfpc %0\n"
: "+Q" (*fpc));
@@ -47,9 +41,6 @@ static inline int restore_fp_ctl(u32 *fpc)
{
int rc;
- if (!MACHINE_HAS_IEEE)
- return 0;
-
asm volatile(
" lfpc %1\n"
"0: la %0,0\n"
@@ -65,8 +56,6 @@ static inline void save_fp_regs(freg_t *fprs)
asm volatile("std 2,%0" : "=Q" (fprs[2]));
asm volatile("std 4,%0" : "=Q" (fprs[4]));
asm volatile("std 6,%0" : "=Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("std 1,%0" : "=Q" (fprs[1]));
asm volatile("std 3,%0" : "=Q" (fprs[3]));
asm volatile("std 5,%0" : "=Q" (fprs[5]));
@@ -87,8 +76,6 @@ static inline void restore_fp_regs(freg_t *fprs)
asm volatile("ld 2,%0" : : "Q" (fprs[2]));
asm volatile("ld 4,%0" : : "Q" (fprs[4]));
asm volatile("ld 6,%0" : : "Q" (fprs[6]));
- if (!MACHINE_HAS_IEEE)
- return;
asm volatile("ld 1,%0" : : "Q" (fprs[1]));
asm volatile("ld 3,%0" : : "Q" (fprs[3]));
asm volatile("ld 5,%0" : : "Q" (fprs[5]));
@@ -140,22 +127,18 @@ static inline void restore_vx_regs(__vector128 *vxrs)
static inline void save_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
save_vx_regs(task->thread.vxrs);
else
-#endif
- save_fp_regs(task->thread.fp_regs.fprs);
+ save_fp_regs(task->thread.fp_regs.fprs);
}
static inline void restore_fp_vx_regs(struct task_struct *task)
{
-#ifdef CONFIG_64BIT
if (task->thread.vxrs)
restore_vx_regs(task->thread.vxrs);
else
-#endif
- restore_fp_regs(task->thread.fp_regs.fprs);
+ restore_fp_regs(task->thread.fp_regs.fprs);
}
static inline void save_access_regs(unsigned int *acrs)
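
With the #ifdef gone, save_fp_vx_regs() and restore_fp_vx_regs() always prefer the vector save area when task->thread.vxrs is populated. Reassembling the save side from the hunk above, the post-patch form reads roughly as:

static inline void save_fp_vx_regs(struct task_struct *task)
{
	if (task->thread.vxrs)
		save_vx_regs(task->thread.vxrs);		/* full vector state */
	else
		save_fp_regs(task->thread.fp_regs.fprs);	/* classic FP registers only */
}
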
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 5bc12598ae9e..6ba0bf928909 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -95,6 +95,6 @@ static inline int syscall_get_arch(void)
if (test_tsk_thread_flag(current, TIF_31BIT))
return AUDIT_ARCH_S390;
#endif
- return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+ return AUDIT_ARCH_S390X;
}
#endif /* _ASM_SYSCALL_H */
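
On the audit side, the 64-bit return value is now unconditional: only a compat task flagged TIF_31BIT still reports AUDIT_ARCH_S390. The post-patch function should read roughly as below; the CONFIG_COMPAT guard around the TIF_31BIT test is inferred from the #endif visible in the hunk, not shown in it:

static inline int syscall_get_arch(void)
{
#ifdef CONFIG_COMPAT	/* inferred guard -- the hunk only shows its #endif */
	if (test_tsk_thread_flag(current, TIF_31BIT))
		return AUDIT_ARCH_S390;
#endif
	return AUDIT_ARCH_S390X;
}
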
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ef1df718642d..4c27ec764c36 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -10,13 +10,8 @@
/*
* Size of kernel stack for each process
*/
-#ifndef CONFIG_64BIT
-#define THREAD_ORDER 1
-#define ASYNC_ORDER 1
-#else /* CONFIG_64BIT */
#define THREAD_ORDER 2
#define ASYNC_ORDER 2
-#endif /* CONFIG_64BIT */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
@@ -34,7 +29,6 @@
*/
struct thread_info {
struct task_struct *task; /* main task structure */
- struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned long sys_call_table; /* System call table address */
unsigned int cpu; /* current CPU */
@@ -51,7 +45,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
@@ -66,6 +59,8 @@ static inline struct thread_info *current_thread_info(void)
return (struct thread_info *) S390_lowcore.thread_info;
}
+void arch_release_task_struct(struct task_struct *tsk);
+
#define THREAD_SIZE_ORDER THREAD_ORDER
#endif
@@ -99,10 +94,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
-#ifdef CONFIG_64BIT
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
-#else
-#define is_32bit_task() (1)
-#endif
#endif /* _ASM_THREAD_INFO_H */
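
THREAD_ORDER and ASYNC_ORDER are now always 2, so with the 4 KiB s390 PAGE_SIZE both THREAD_SIZE and ASYNC_SIZE work out to 16 KiB. The arithmetic, as a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096UL;	/* s390 PAGE_SIZE */
	unsigned long thread_order = 2;		/* THREAD_ORDER after this patch */
	unsigned long thread_size = page_size << thread_order;

	printf("THREAD_SIZE = %lu KiB\n", thread_size >> 10);	/* 16 KiB */
	return 0;
}
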
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 06d8741ad6f4..7a92e69c50bc 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -118,12 +118,10 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
-#endif
}
/*
@@ -136,11 +134,9 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
-#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
tlb_remove_table(tlb, pud);
-#endif
}
#define tlb_start_vma(tlb, vma) do { } while (0)
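
The remaining asce_limit checks in pmd_free_tlb() and pud_free_tlb() skip table levels that are folded away for small address spaces: nothing is freed at or below a 2 GiB limit in the pmd case, or at or below a 4 TiB limit in the pud case (my reading of the thresholds; the folding rules themselves live in the s390 mm code). The thresholds in plain numbers:

#include <stdio.h>

int main(void)
{
	printf("pmd freed only above %llu GiB\n", (1ULL << 31) >> 30);	/* 2 GiB */
	printf("pud freed only above %llu TiB\n", (1ULL << 42) >> 40);	/* 4 TiB */
	return 0;
}
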
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 16c9c88658c8..ca148f7c3eaa 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -49,13 +49,6 @@ static inline void __tlb_flush_global(void)
register unsigned long reg4 asm("4");
long dummy;
-#ifndef CONFIG_64BIT
- if (!MACHINE_HAS_CSP) {
- smp_ptlb_all();
- return;
- }
-#endif /* CONFIG_64BIT */
-
dummy = 0;
reg2 = reg3 = 0;
reg4 = ((unsigned long) &dummy) + 1;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index dccef3ca91fa..6740f4f9781f 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -8,21 +8,4 @@
#include <uapi/asm/types.h>
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-
-#ifndef __ASSEMBLY__
-
-#ifndef CONFIG_64BIT
-typedef union {
- unsigned long long pair;
- struct {
- unsigned long even;
- unsigned long odd;
- } subreg;
-} register_pair;
-
-#endif /* ! CONFIG_64BIT */
-#endif /* __ASSEMBLY__ */
#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cd4c68e0398d..d64a7a62164f 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -372,5 +372,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
}
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
+void s390_kernel_write(void *dst, const void *src, size_t size);
#endif /* __S390_UACCESS_H */
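
The newly declared s390_kernel_write() appears to be a helper for writing into otherwise write-protected kernel memory; unlike copy_to_user_real() next to it, both pointers are kernel addresses. A hypothetical call site, purely as a usage sketch -- patch_nop() and the patched address are invented for illustration:

/* Hypothetical kernel-context caller; s390_kernel_write() is declared in <asm/uaccess.h>. */
#include <asm/uaccess.h>

static void patch_nop(void *insn_addr)
{
	const unsigned char nop[2] = { 0x07, 0x00 };	/* "bcr 0,0", a 2-byte s390 nop */

	/* Writes through the helper instead of the (possibly read-only) direct mapping. */
	s390_kernel_write(insn_addr, nop, sizeof(nop));
}
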
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 651886353551..91f56b1d8156 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,11 +9,7 @@
#include <uapi/asm/unistd.h>
-#ifndef CONFIG_64BIT
-#define __IGNORE_select
-#else
#define __IGNORE_time
-#endif
/* Ignore NUMA system calls. Not wired up on s390. */
#define __IGNORE_mbind
@@ -43,10 +39,6 @@
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
-# ifndef CONFIG_64BIT
-# define __ARCH_WANT_STAT64
-# define __ARCH_WANT_SYS_TIME
-# endif
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
# endif
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a62526d09201..787acd4f9668 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -42,10 +42,8 @@ struct vdso_per_cpu_data {
extern struct vdso_data *vdso_data;
-#ifdef CONFIG_64BIT
int vdso_alloc_per_cpu(struct _lowcore *lowcore);
void vdso_free_per_cpu(struct _lowcore *lowcore);
-#endif
#endif /* __ASSEMBLY__ */
#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9c77e60b9a26..ef1a5fcc6c66 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_CRS (1UL << 3)
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS (1UL << 6)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
__u64 pft; /* pfault token [PFAULT] */
__u64 pfs; /* pfault select [PFAULT] */
__u64 pfc; /* pfault compare [PFAULT] */
+ __u64 vrs[32][2]; /* vector registers */
+ __u8 reserved[512]; /* for future vector expansion */
+ __u32 fpc; /* only valid with vector registers */
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
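
The vector-register sync block added to kvm_sync_regs carries 32 registers of 128 bits each, so vrs[32][2] occupies 32 * 2 * 8 = 512 bytes, next to a 512-byte reserved area for future expansion and the 4-byte fpc that is only valid together with the vector registers. A quick size check with stand-in types (sketch, not the UAPI header):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Stand-ins for the __u64/__u8/__u32 UAPI fields added above. */
	uint64_t vrs[32][2];
	uint8_t reserved[512];
	uint32_t fpc;

	printf("vrs: %zu bytes, reserved: %zu bytes, fpc: %zu bytes\n",
	       sizeof(vrs), sizeof(reserved), sizeof(fpc));	/* 512, 512, 4 */
	return 0;
}
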
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index d4096fdfc6ab..ee69c0854c88 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -230,7 +230,7 @@
* and returns a key, which can be used to find a mnemonic name
* of the instruction in the icpt_insn_codes table.
*/
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
- INSN_DECODE(insn)
+ INSN_DECODE(insn))
#endif /* _UAPI_ASM_S390_SIE_H */
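
Wrapping the icpt_insn_decoder() expansion in one pair of parentheses makes the whole ?:-chain a single operand, so an operator applied at the call site can no longer bind into the last branch of the chain. A reduced demonstration with made-up DECODE_* macros (not the real INSN_DECODE helpers):

#include <stdio.h>

/* Unparenthesized chain: a call-site operator can be absorbed into the else branch. */
#define DECODE_BAD(x)	(x) == 1 ? 100 : (x)
#define DECODE_GOOD(x)	((x) == 1 ? 100 : (x))

int main(void)
{
	printf("bad:  %d\n", DECODE_BAD(1) + 1);	/* 100: the "+ 1" was swallowed by the ?: */
	printf("good: %d\n", DECODE_GOOD(1) + 1);	/* 101: the whole macro is one operand */
	return 0;
}
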