Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile              |   24
-rw-r--r--  arch/s390/kernel/asm-offsets.c         |    6
-rw-r--r--  arch/s390/kernel/base.S                |   76
-rw-r--r--  arch/s390/kernel/cache.c               |    4
-rw-r--r--  arch/s390/kernel/compat_signal.c       |   14
-rw-r--r--  arch/s390/kernel/cpcmd.c               |   10
-rw-r--r--  arch/s390/kernel/diag.c                |   15
-rw-r--r--  arch/s390/kernel/dis.c                 |   48
-rw-r--r--  arch/s390/kernel/dumpstack.c           |   26
-rw-r--r--  arch/s390/kernel/early.c               |   69
-rw-r--r--  arch/s390/kernel/entry.S               | 1005
-rw-r--r--  arch/s390/kernel/entry64.S             | 1059
-rw-r--r--  arch/s390/kernel/ftrace.c              |   73
-rw-r--r--  arch/s390/kernel/head.S                |   49
-rw-r--r--  arch/s390/kernel/head31.S              |  106
-rw-r--r--  arch/s390/kernel/head_kdump.S          |    8
-rw-r--r--  arch/s390/kernel/ipl.c                 |  157
-rw-r--r--  arch/s390/kernel/irq.c                 |    4
-rw-r--r--  arch/s390/kernel/jump_label.c          |   14
-rw-r--r--  arch/s390/kernel/kprobes.c             |    2
-rw-r--r--  arch/s390/kernel/module.c              |   13
-rw-r--r--  arch/s390/kernel/nmi.c                 |   92
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c        |    7
-rw-r--r--  arch/s390/kernel/pgm_check.S           |   22
-rw-r--r--  arch/s390/kernel/process.c             |   29
-rw-r--r--  arch/s390/kernel/processor.c           |    2
-rw-r--r--  arch/s390/kernel/ptrace.c              |   46
-rw-r--r--  arch/s390/kernel/reipl.S               |  133
-rw-r--r--  arch/s390/kernel/reipl64.S             |  155
-rw-r--r--  arch/s390/kernel/relocate_kernel.S     |   63
-rw-r--r--  arch/s390/kernel/relocate_kernel64.S   |  121
-rw-r--r--  arch/s390/kernel/sclp.S                |   10
-rw-r--r--  arch/s390/kernel/setup.c               |   72
-rw-r--r--  arch/s390/kernel/signal.c              |   24
-rw-r--r--  arch/s390/kernel/smp.c                 |   36
-rw-r--r--  arch/s390/kernel/suspend.c             |    4
-rw-r--r--  arch/s390/kernel/swsusp.S (renamed from arch/s390/kernel/swsusp_asm64.S) | 11
-rw-r--r--  arch/s390/kernel/sys_s390.c            |   49
-rw-r--r--  arch/s390/kernel/syscalls.S            |  716
-rw-r--r--  arch/s390/kernel/time.c                |   20
-rw-r--r--  arch/s390/kernel/topology.c            |    2
-rw-r--r--  arch/s390/kernel/traps.c               |  155
-rw-r--r--  arch/s390/kernel/uprobes.c             |    4
-rw-r--r--  arch/s390/kernel/vdso.c                |   16
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S         |    7
45 files changed, 1259 insertions, 3319 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 31fab2676fe9..ffb87617a36c 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -26,25 +26,21 @@ CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
#
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
+CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
-obj-y += dumpstack.o
+obj-y += runtime_instr.o cache.o dumpstack.o
+obj-y += entry.o reipl.o relocate_kernel.o
-obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
-obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
-obj-y += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
-
-extra-y += head.o vmlinux.lds
-extra-y += $(if $(CONFIG_64BIT),head64.o,head31.o)
+extra-y += head.o head64.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
-obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o
+obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
@@ -56,13 +52,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
-ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
- perf_cpum_cf_events.o
-obj-y += runtime_instr.o cache.o
-endif
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
+obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
# vdso
-obj-$(CONFIG_64BIT) += vdso64/
-obj-$(CONFIG_32BIT) += vdso32/
+obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e07e91605353..c7d1b9d09011 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -34,7 +34,6 @@ int main(void)
DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
BLANK();
DEFINE(__TI_task, offsetof(struct thread_info, task));
- DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
DEFINE(__TI_flags, offsetof(struct thread_info, flags));
DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
@@ -166,11 +165,9 @@ int main(void)
DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
-#ifdef CONFIG_32BIT
- DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, extended_save_area_addr));
-#else /* CONFIG_32BIT */
DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+ DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
@@ -183,6 +180,5 @@ int main(void)
DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
-#endif /* CONFIG_32BIT */
return 0;
}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f74a53d339b0..daed3fde42ec 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -11,8 +11,6 @@
#include <asm/ptrace.h>
#include <asm/sigp.h>
-#ifdef CONFIG_64BIT
-
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
@@ -131,77 +129,3 @@ ENTRY(diag308_reset)
.Lfpctl:
.long 0
.previous
-
-#else /* CONFIG_64BIT */
-
-ENTRY(s390_base_mcck_handler)
- basr %r13,0
-0: l %r15,__LC_PANIC_STACK # load panic stack
- ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
- lpsw __LC_MCK_OLD_PSW
-
-2: .long s390_base_mcck_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_mcck_handler_fn
-s390_base_mcck_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_ext_handler)
- stm %r0,%r15,__LC_SAVE_AREA_ASYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_SAVE_AREA_ASYNC
- ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
- lpsw __LC_EXT_OLD_PSW
-
-2: .long s390_base_ext_handler_fn
-
- .section .bss
- .align 4
- .globl s390_base_ext_handler_fn
-s390_base_ext_handler_fn:
- .long 0
- .previous
-
-ENTRY(s390_base_pgm_handler)
- stm %r0,%r15,__LC_SAVE_AREA_SYNC
- basr %r13,0
-0: ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,2f-0b(%r13)
- l %r1,0(%r1)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
- lm %r0,%r15,__LC_SAVE_AREA_SYNC
- lpsw __LC_PGM_OLD_PSW
-
-1: lpsw disabled_wait_psw-0b(%r13)
-
-2: .long s390_base_pgm_handler_fn
-
-disabled_wait_psw:
- .align 8
- .long 0x000a0000,0x00000000 + s390_base_pgm_handler
-
- .section .bss
- .align 4
- .globl s390_base_pgm_handler_fn
-s390_base_pgm_handler_fn:
- .long 0
- .previous
-
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 0969d113b3d6..bff5e3b6d822 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -70,6 +70,8 @@ void show_cacheinfo(struct seq_file *m)
struct cacheinfo *cache;
int idx;
+ if (!test_facility(34))
+ return;
get_online_cpus();
this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
@@ -159,6 +161,8 @@ int populate_cache_leaves(unsigned int cpu)
union cache_topology ct;
enum cache_type ctype;
+ if (!test_facility(34))
+ return -EOPNOTSUPP;
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
idx < this_cpu_ci->num_leaves; idx++, level++) {
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index bc1df12dd4f8..fe8d6924efaa 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -370,16 +370,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static inline int map_signal(int sig)
-{
- if (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32)
- return current_thread_info()->exec_domain->signal_invmap[sig];
- else
- return sig;
-}
-
static int setup_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
@@ -449,7 +439,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;
- regs->gprs[2] = map_signal(sig);
+ regs->gprs[2] = sig;
regs->gprs[3] = (__force __u64) &frame->sc;
/* We forgot to include these in the sigcontext.
@@ -532,7 +522,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;
- regs->gprs[2] = map_signal(ksig->sig);
+ regs->gprs[2] = ksig->sig;
regs->gprs[3] = (__force __u64) &frame->info;
regs->gprs[4] = (__force __u64) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d7b0c4d27880..199ec92ef4fe 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -27,13 +27,9 @@ static int diag8_noresponse(int cmdlen)
register unsigned long reg3 asm ("3") = cmdlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %1,%0,0x8\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %1,%0,0x8\n"
" sam64\n"
-#endif /* CONFIG_64BIT */
: "+d" (reg3) : "d" (reg2) : "cc");
return reg3;
}
@@ -46,17 +42,11 @@ static int diag8_response(int cmdlen, char *response, int *rlen)
register unsigned long reg5 asm ("5") = *rlen;
asm volatile(
-#ifndef CONFIG_64BIT
- " diag %2,%0,0x8\n"
- " brc 8,1f\n"
- " ar %1,%4\n"
-#else /* CONFIG_64BIT */
" sam31\n"
" diag %2,%0,0x8\n"
" sam64\n"
" brc 8,1f\n"
" agr %1,%4\n"
-#endif /* CONFIG_64BIT */
"1:\n"
: "+d" (reg4), "+d" (reg5)
: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 8237fc07ac79..2f69243bf700 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -18,13 +18,9 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
int rc = 0;
asm volatile(
-#ifdef CONFIG_64BIT
" sam31\n"
" diag %2,2,0x14\n"
" sam64\n"
-#else
- " diag %2,2,0x14\n"
-#endif
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc), "+d" (_ry2)
@@ -52,7 +48,6 @@ int diag210(struct diag210 *addr)
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
-#ifdef CONFIG_64BIT
asm volatile(
" lhi %0,-1\n"
" sam31\n"
@@ -62,16 +57,6 @@ int diag210(struct diag210 *addr)
"1: sam64\n"
EX_TABLE(0b, 1b)
: "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#else
- asm volatile(
- " lhi %0,-1\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-#endif
*addr = diag210_tmp;
spin_unlock_irqrestore(&diag210_lock, flags);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 533430307da8..8140d10c6785 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -32,12 +32,6 @@
#include <asm/debug.h>
#include <asm/irq.h>
-#ifndef CONFIG_64BIT
-#define ONELONG "%08lx: "
-#else /* CONFIG_64BIT */
-#define ONELONG "%016lx: "
-#endif /* CONFIG_64BIT */
-
enum {
UNUSED, /* Indicates the end of the operand list */
R_8, /* GPR starting at position 8 */
@@ -536,12 +530,10 @@ static char *long_insn_name[] = {
};
static struct s390_insn opcode[] = {
-#ifdef CONFIG_64BIT
{ "bprp", 0xc5, INSTR_MII_UPI },
{ "bpp", 0xc7, INSTR_SMI_U0RDP },
{ "trtr", 0xd0, INSTR_SS_L0RDRD },
{ "lmd", 0xef, INSTR_SS_RRRDRD3 },
-#endif
{ "spm", 0x04, INSTR_RR_R0 },
{ "balr", 0x05, INSTR_RR_RR },
{ "bctr", 0x06, INSTR_RR_RR },
@@ -725,11 +717,9 @@ static struct s390_insn opcode[] = {
};
static struct s390_insn opcode_01[] = {
-#ifdef CONFIG_64BIT
{ "ptff", 0x04, INSTR_E },
{ "pfpo", 0x0a, INSTR_E },
{ "sam64", 0x0e, INSTR_E },
-#endif
{ "pr", 0x01, INSTR_E },
{ "upt", 0x02, INSTR_E },
{ "sckpf", 0x07, INSTR_E },
@@ -741,7 +731,6 @@ static struct s390_insn opcode_01[] = {
};
static struct s390_insn opcode_a5[] = {
-#ifdef CONFIG_64BIT
{ "iihh", 0x00, INSTR_RI_RU },
{ "iihl", 0x01, INSTR_RI_RU },
{ "iilh", 0x02, INSTR_RI_RU },
@@ -758,12 +747,10 @@ static struct s390_insn opcode_a5[] = {
{ "llihl", 0x0d, INSTR_RI_RU },
{ "llilh", 0x0e, INSTR_RI_RU },
{ "llill", 0x0f, INSTR_RI_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_a7[] = {
-#ifdef CONFIG_64BIT
{ "tmhh", 0x02, INSTR_RI_RU },
{ "tmhl", 0x03, INSTR_RI_RU },
{ "brctg", 0x07, INSTR_RI_RP },
@@ -771,7 +758,6 @@ static struct s390_insn opcode_a7[] = {
{ "aghi", 0x0b, INSTR_RI_RI },
{ "mghi", 0x0d, INSTR_RI_RI },
{ "cghi", 0x0f, INSTR_RI_RI },
-#endif
{ "tmlh", 0x00, INSTR_RI_RU },
{ "tmll", 0x01, INSTR_RI_RU },
{ "brc", 0x04, INSTR_RI_UP },
@@ -785,18 +771,15 @@ static struct s390_insn opcode_a7[] = {
};
static struct s390_insn opcode_aa[] = {
-#ifdef CONFIG_64BIT
{ { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
{ "rion", 0x01, INSTR_RI_RI },
{ "tric", 0x02, INSTR_RI_RI },
{ "rioff", 0x03, INSTR_RI_RI },
{ { 0, LONG_INSN_RIEMIT }, 0x04, INSTR_RI_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_b2[] = {
-#ifdef CONFIG_64BIT
{ "stckf", 0x7c, INSTR_S_RD },
{ "lpp", 0x80, INSTR_S_RD },
{ "lcctl", 0x84, INSTR_S_RD },
@@ -819,7 +802,6 @@ static struct s390_insn opcode_b2[] = {
{ "tend", 0xf8, INSTR_S_00 },
{ "niai", 0xfa, INSTR_IE_UU },
{ { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
-#endif
{ "stidp", 0x02, INSTR_S_RD },
{ "sck", 0x04, INSTR_S_RD },
{ "stck", 0x05, INSTR_S_RD },
@@ -908,7 +890,6 @@ static struct s390_insn opcode_b2[] = {
};
static struct s390_insn opcode_b3[] = {
-#ifdef CONFIG_64BIT
{ "maylr", 0x38, INSTR_RRF_F0FF },
{ "mylr", 0x39, INSTR_RRF_F0FF },
{ "mayr", 0x3a, INSTR_RRF_F0FF },
@@ -996,7 +977,6 @@ static struct s390_insn opcode_b3[] = {
{ "qaxtr", 0xfd, INSTR_RRF_FUFF },
{ "iextr", 0xfe, INSTR_RRF_F0FR },
{ "rrxtr", 0xff, INSTR_RRF_FFRU },
-#endif
{ "lpebr", 0x00, INSTR_RRE_FF },
{ "lnebr", 0x01, INSTR_RRE_FF },
{ "ltebr", 0x02, INSTR_RRE_FF },
@@ -1091,7 +1071,6 @@ static struct s390_insn opcode_b3[] = {
};
static struct s390_insn opcode_b9[] = {
-#ifdef CONFIG_64BIT
{ "lpgr", 0x00, INSTR_RRE_RR },
{ "lngr", 0x01, INSTR_RRE_RR },
{ "ltgr", 0x02, INSTR_RRE_RR },
@@ -1204,7 +1183,6 @@ static struct s390_insn opcode_b9[] = {
{ "srk", 0xf9, INSTR_RRF_R0RR2 },
{ "alrk", 0xfa, INSTR_RRF_R0RR2 },
{ "slrk", 0xfb, INSTR_RRF_R0RR2 },
-#endif
{ "kmac", 0x1e, INSTR_RRE_RR },
{ "lrvr", 0x1f, INSTR_RRE_RR },
{ "km", 0x2e, INSTR_RRE_RR },
@@ -1224,7 +1202,6 @@ static struct s390_insn opcode_b9[] = {
};
static struct s390_insn opcode_c0[] = {
-#ifdef CONFIG_64BIT
{ "lgfi", 0x01, INSTR_RIL_RI },
{ "xihf", 0x06, INSTR_RIL_RU },
{ "xilf", 0x07, INSTR_RIL_RU },
@@ -1236,7 +1213,6 @@ static struct s390_insn opcode_c0[] = {
{ "oilf", 0x0d, INSTR_RIL_RU },
{ "llihf", 0x0e, INSTR_RIL_RU },
{ "llilf", 0x0f, INSTR_RIL_RU },
-#endif
{ "larl", 0x00, INSTR_RIL_RP },
{ "brcl", 0x04, INSTR_RIL_UP },
{ "brasl", 0x05, INSTR_RIL_RP },
@@ -1244,7 +1220,6 @@ static struct s390_insn opcode_c0[] = {
};
static struct s390_insn opcode_c2[] = {
-#ifdef CONFIG_64BIT
{ "msgfi", 0x00, INSTR_RIL_RI },
{ "msfi", 0x01, INSTR_RIL_RI },
{ "slgfi", 0x04, INSTR_RIL_RU },
@@ -1257,12 +1232,10 @@ static struct s390_insn opcode_c2[] = {
{ "cfi", 0x0d, INSTR_RIL_RI },
{ "clgfi", 0x0e, INSTR_RIL_RU },
{ "clfi", 0x0f, INSTR_RIL_RU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c4[] = {
-#ifdef CONFIG_64BIT
{ "llhrl", 0x02, INSTR_RIL_RP },
{ "lghrl", 0x04, INSTR_RIL_RP },
{ "lhrl", 0x05, INSTR_RIL_RP },
@@ -1274,12 +1247,10 @@ static struct s390_insn opcode_c4[] = {
{ "lrl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
{ "strl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c6[] = {
-#ifdef CONFIG_64BIT
{ "exrl", 0x00, INSTR_RIL_RP },
{ "pfdrl", 0x02, INSTR_RIL_UP },
{ "cghrl", 0x04, INSTR_RIL_RP },
@@ -1292,35 +1263,29 @@ static struct s390_insn opcode_c6[] = {
{ "crl", 0x0d, INSTR_RIL_RP },
{ { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
{ "clrl", 0x0f, INSTR_RIL_RP },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_c8[] = {
-#ifdef CONFIG_64BIT
{ "mvcos", 0x00, INSTR_SSF_RRDRD },
{ "ectg", 0x01, INSTR_SSF_RRDRD },
{ "csst", 0x02, INSTR_SSF_RRDRD },
{ "lpd", 0x04, INSTR_SSF_RRDRD2 },
{ "lpdg", 0x05, INSTR_SSF_RRDRD2 },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_cc[] = {
-#ifdef CONFIG_64BIT
{ "brcth", 0x06, INSTR_RIL_RP },
{ "aih", 0x08, INSTR_RIL_RI },
{ "alsih", 0x0a, INSTR_RIL_RI },
{ { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
{ "cih", 0x0d, INSTR_RIL_RI },
{ "clih", 0x0f, INSTR_RIL_RI },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_e3[] = {
-#ifdef CONFIG_64BIT
{ "ltg", 0x02, INSTR_RXY_RRRD },
{ "lrag", 0x03, INSTR_RXY_RRRD },
{ "lg", 0x04, INSTR_RXY_RRRD },
@@ -1414,7 +1379,6 @@ static struct s390_insn opcode_e3[] = {
{ "clhf", 0xcf, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
{ { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
-#endif
{ "lrv", 0x1e, INSTR_RXY_RRRD },
{ "lrvh", 0x1f, INSTR_RXY_RRRD },
{ "strv", 0x3e, INSTR_RXY_RRRD },
@@ -1426,7 +1390,6 @@ static struct s390_insn opcode_e3[] = {
};
static struct s390_insn opcode_e5[] = {
-#ifdef CONFIG_64BIT
{ "strag", 0x02, INSTR_SSE_RDRD },
{ "mvhhi", 0x44, INSTR_SIL_RDI },
{ "mvghi", 0x48, INSTR_SIL_RDI },
@@ -1439,7 +1402,6 @@ static struct s390_insn opcode_e5[] = {
{ { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
{ { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
-#endif
{ "lasp", 0x00, INSTR_SSE_RDRD },
{ "tprot", 0x01, INSTR_SSE_RDRD },
{ "mvcsk", 0x0e, INSTR_SSE_RDRD },
@@ -1448,7 +1410,6 @@ static struct s390_insn opcode_e5[] = {
};
static struct s390_insn opcode_e7[] = {
-#ifdef CONFIG_64BIT
{ "lcbb", 0x27, INSTR_RXE_RRRDM },
{ "vgef", 0x13, INSTR_VRV_VVRDM },
{ "vgeg", 0x12, INSTR_VRV_VVRDM },
@@ -1588,11 +1549,9 @@ static struct s390_insn opcode_e7[] = {
{ "vfsq", 0xce, INSTR_VRR_VV000MM },
{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
{ "vftci", 0x4a, INSTR_VRI_VVIMM },
-#endif
};
static struct s390_insn opcode_eb[] = {
-#ifdef CONFIG_64BIT
{ "lmg", 0x04, INSTR_RSY_RRRD },
{ "srag", 0x0a, INSTR_RSY_RRRD },
{ "slag", 0x0b, INSTR_RSY_RRRD },
@@ -1659,7 +1618,6 @@ static struct s390_insn opcode_eb[] = {
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
{ { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
-#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
{ "tp", 0xc0, INSTR_RSL_R0RD },
@@ -1667,7 +1625,6 @@ static struct s390_insn opcode_eb[] = {
};
static struct s390_insn opcode_ec[] = {
-#ifdef CONFIG_64BIT
{ "brxhg", 0x44, INSTR_RIE_RRP },
{ "brxlg", 0x45, INSTR_RIE_RRP },
{ { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
@@ -1701,12 +1658,10 @@ static struct s390_insn opcode_ec[] = {
{ "clgib", 0xfd, INSTR_RIS_RURDU },
{ "cib", 0xfe, INSTR_RIS_RURDI },
{ "clib", 0xff, INSTR_RIS_RURDU },
-#endif
{ "", 0, INSTR_INVALID }
};
static struct s390_insn opcode_ed[] = {
-#ifdef CONFIG_64BIT
{ "mayl", 0x38, INSTR_RXF_FRRDF },
{ "myl", 0x39, INSTR_RXF_FRRDF },
{ "may", 0x3a, INSTR_RXF_FRRDF },
@@ -1731,7 +1686,6 @@ static struct s390_insn opcode_ed[] = {
{ "czxt", 0xa9, INSTR_RSL_LRDFU },
{ "cdzt", 0xaa, INSTR_RSL_LRDFU },
{ "cxzt", 0xab, INSTR_RSL_LRDFU },
-#endif
{ "ldeb", 0x04, INSTR_RXE_FRRD },
{ "lxdb", 0x05, INSTR_RXE_FRRD },
{ "lxeb", 0x06, INSTR_RXE_FRRD },
@@ -2051,7 +2005,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, ONELONG, addr);
+ ptr += sprintf(ptr, "%016lx: ", addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index a99852e96a77..dc8e20473484 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -18,16 +18,6 @@
#include <asm/dis.h>
#include <asm/ipl.h>
-#ifndef CONFIG_64BIT
-#define LONG "%08lx "
-#define FOURLONG "%08lx %08lx %08lx %08lx\n"
-static int kstack_depth_to_print = 12;
-#else /* CONFIG_64BIT */
-#define LONG "%016lx "
-#define FOURLONG "%016lx %016lx %016lx %016lx\n"
-static int kstack_depth_to_print = 20;
-#endif /* CONFIG_64BIT */
-
/*
* For show_trace we have tree different stack to consider:
* - the panic stack which is used if the kernel stack has overflown
@@ -115,12 +105,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
+ for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
if ((i * sizeof(long) % 32) == 0)
printk("%s ", i == 0 ? "" : "\n");
- printk(LONG, *stack++);
+ printk("%016lx ", *stack++);
}
printk("\n");
show_trace(task, sp);
@@ -128,10 +118,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
static void show_last_breaking_event(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
-#endif
}
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
@@ -155,16 +143,14 @@ void show_registers(struct pt_regs *regs)
mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
-#ifdef CONFIG_64BIT
printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
-#endif
- printk("\n%s GPRS: " FOURLONG, mode,
+ printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
- printk(" " FOURLONG,
+ printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
show_code(regs);
}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 4427ab7ac23a..549a73a4b543 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -64,7 +64,6 @@ asm(
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
-#ifdef CONFIG_64BIT
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
@@ -72,13 +71,6 @@ asm(
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
-#else
- " stm 6,15,24(15)\n"
- " lr 14,3\n"
- " diag 2,14,0x8\n"
- " lr 2,14\n"
- " lm 6,15,24(15)\n"
-#endif
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n"
" .previous\n");
@@ -240,7 +232,6 @@ static noinline __init void detect_machine_type(void)
static __init void setup_topology(void)
{
-#ifdef CONFIG_64BIT
int max_mnest;
if (!test_facility(11))
@@ -251,7 +242,6 @@ static __init void setup_topology(void)
break;
}
topology_max_mnest = max_mnest;
-#endif
}
static void early_pgm_check_handler(void)
@@ -290,58 +280,6 @@ static noinline __init void setup_facility_list(void)
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
-static __init void detect_mvpg(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " mvpg %2,%2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
-#endif
-}
-
-static __init void detect_ieee(void)
-{
-#ifndef CONFIG_64BIT
- int rc, tmp;
-
- asm volatile(
- " efpc %1,0\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
-#endif
-}
-
-static __init void detect_csp(void)
-{
-#ifndef CONFIG_64BIT
- int rc;
-
- asm volatile(
- " la 0,0\n"
- " la 1,0\n"
- " la 2,4\n"
- " csp 0,2\n"
- "0: la %0,0\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
- if (!rc)
- S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
-#endif
-}
-
static __init void detect_diag9c(void)
{
unsigned int cpu_address;
@@ -360,7 +298,6 @@ static __init void detect_diag9c(void)
static __init void detect_diag44(void)
{
-#ifdef CONFIG_64BIT
int rc;
asm volatile(
@@ -371,12 +308,10 @@ static __init void detect_diag44(void)
: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
if (!rc)
S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
-#endif
}
static __init void detect_machine_facilities(void)
{
-#ifdef CONFIG_64BIT
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
__ctl_set_bit(0, 23);
@@ -393,7 +328,6 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-#endif
}
static int __init cad_setup(char *str)
@@ -501,9 +435,6 @@ void __init startup_init(void)
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
- detect_mvpg();
- detect_ieee();
- detect_csp();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 398329b2b518..99b44acbfcc7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -22,27 +22,28 @@
#include <asm/irq.h>
__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 4
-__PT_R2 = __PT_GPRS + 8
-__PT_R3 = __PT_GPRS + 12
-__PT_R4 = __PT_GPRS + 16
-__PT_R5 = __PT_GPRS + 20
-__PT_R6 = __PT_GPRS + 24
-__PT_R7 = __PT_GPRS + 28
-__PT_R8 = __PT_GPRS + 32
-__PT_R9 = __PT_GPRS + 36
-__PT_R10 = __PT_GPRS + 40
-__PT_R11 = __PT_GPRS + 44
-__PT_R12 = __PT_GPRS + 48
-__PT_R13 = __PT_GPRS + 52
-__PT_R14 = __PT_GPRS + 56
-__PT_R15 = __PT_GPRS + 60
+__PT_R1 = __PT_GPRS + 8
+__PT_R2 = __PT_GPRS + 16
+__PT_R3 = __PT_GPRS + 24
+__PT_R4 = __PT_GPRS + 32
+__PT_R5 = __PT_GPRS + 40
+__PT_R6 = __PT_GPRS + 48
+__PT_R7 = __PT_GPRS + 56
+__PT_R8 = __PT_GPRS + 64
+__PT_R9 = __PT_GPRS + 72
+__PT_R10 = __PT_GPRS + 80
+__PT_R11 = __PT_GPRS + 88
+__PT_R12 = __PT_GPRS + 96
+__PT_R13 = __PT_GPRS + 104
+__PT_R14 = __PT_GPRS + 112
+__PT_R15 = __PT_GPRS + 120
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
+STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+ _TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
@@ -53,16 +54,14 @@ _PIF_WORK = (_PIF_PER_TRAP)
.macro TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_on)
- basr %r14,%r1 # call trace_hardirqs_on_caller
+ brasl %r14,trace_hardirqs_on_caller
#endif
.endm
.macro TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
basr %r2,%r0
- l %r1,BASED(.Lc_hardirqs_off)
- basr %r14,%r1 # call trace_hardirqs_off_caller
+ brasl %r14,trace_hardirqs_off_caller
#endif
.endm
@@ -70,73 +69,104 @@ _PIF_WORK = (_PIF_PER_TRAP)
#ifdef CONFIG_LOCKDEP
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jz .+10
- l %r1,BASED(.Lc_lockdep_sys_exit)
- basr %r14,%r1 # call lockdep_sys_exit
+ brasl %r14,lockdep_sys_exit
+#endif
+ .endm
+
+ .macro LPP newpp
+#if IS_ENABLED(CONFIG_KVM)
+ tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
+ jz .+8
+ .insn s,0xb2800000,\newpp
+#endif
+ .endm
+
+ .macro HANDLE_SIE_INTERCEPT scratch,reason
+#if IS_ENABLED(CONFIG_KVM)
+ tmhh %r8,0x0001 # interrupting from user ?
+ jnz .+62
+ lgr \scratch,%r9
+ slg \scratch,BASED(.Lsie_critical)
+ clg \scratch,BASED(.Lsie_critical_length)
+ .if \reason==1
+ # Some program interrupts are suppressing (e.g. protection).
+ # We must also check the instruction after SIE in that case.
+ # do_protection_exception will rewind to .Lrewind_pad
+ jh .+42
+ .else
+ jhe .+42
+ .endif
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ mvi __SF_EMPTY+31(%r15),\reason # set exit reason
#endif
.endm
.macro CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
tml %r15,\stacksize - CONFIG_STACK_GUARD
- la %r14,\savearea
+ lghi %r14,\savearea
jz stack_overflow
#endif
.endm
.macro SWITCH_ASYNC savearea,stack,shift
- tmh %r8,0x0001 # interrupting from user ?
+ tmhh %r8,0x0001 # interrupting from user ?
jnz 1f
- lr %r14,%r9
- sl %r14,BASED(.Lc_critical_start)
- cl %r14,BASED(.Lc_critical_length)
+ lgr %r14,%r9
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
jhe 0f
- la %r11,\savearea # inside critical section, do cleanup
- bras %r14,cleanup_critical
- tmh %r8,0x0001 # retest problem state after cleanup
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
jnz 1f
-0: l %r14,\stack # are we already on the target stack?
- slr %r14,%r15
- sra %r14,\shift
+0: lg %r14,\stack # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,\shift
jnz 1f
CHECK_STACK 1<<\shift,\savearea
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: l %r15,\stack # load target stack
+1: lg %r15,\stack # load target stack
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
- .macro ADD64 high,low,timer
- al \high,\timer
- al \low,4+\timer
- brc 12,.+8
- ahi \high,1
- .endm
-
- .macro SUB64 high,low,timer
- sl \high,\timer
- sl \low,4+\timer
- brc 3,.+8
- ahi \high,-1
+ .macro UPDATE_VTIME scratch,enter_timer
+ lg \scratch,__LC_EXIT_TIMER
+ slg \scratch,\enter_timer
+ alg \scratch,__LC_USER_TIMER
+ stg \scratch,__LC_USER_TIMER
+ lg \scratch,__LC_LAST_UPDATE_TIMER
+ slg \scratch,__LC_EXIT_TIMER
+ alg \scratch,__LC_SYSTEM_TIMER
+ stg \scratch,__LC_SYSTEM_TIMER
+ mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
- .macro UPDATE_VTIME high,low,enter_timer
- lm \high,\low,__LC_EXIT_TIMER
- SUB64 \high,\low,\enter_timer
- ADD64 \high,\low,__LC_USER_TIMER
- stm \high,\low,__LC_USER_TIMER
- lm \high,\low,__LC_LAST_UPDATE_TIMER
- SUB64 \high,\low,__LC_EXIT_TIMER
- ADD64 \high,\low,__LC_SYSTEM_TIMER
- stm \high,\low,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
+ .macro LAST_BREAK scratch
+ srag \scratch,%r10,23
+ jz .+10
+ stg %r10,__TI_last_break(%r12)
.endm
.macro REENABLE_IRQS
- st %r8,__LC_RETURN_PSW
+ stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
ssm __LC_RETURN_PSW
.endm
+ .macro STCK savearea
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+ .insn s,0xb27c0000,\savearea # store clock fast
+#else
+ .insn s,0xb2050000,\savearea # store clock
+#endif
+ .endm
+
.section .kprobes.text, "ax"
/*
@@ -147,19 +177,19 @@ _PIF_WORK = (_PIF_PER_TRAP)
* gpr2 = prev
*/
ENTRY(__switch_to)
- stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- l %r4,__THREAD_info(%r2) # get thread_info of prev
- l %r5,__THREAD_info(%r3) # get thread_info of next
- lr %r15,%r5
- ahi %r15,STACK_INIT # end of kernel stack of next
- st %r3,__LC_CURRENT # store task struct of next
- st %r5,__LC_THREAD_INFO # store thread info of next
- st %r15,__LC_KERNEL_STACK # store end of kernel stack
+ stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
+ stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
+ lg %r4,__THREAD_info(%r2) # get thread_info of prev
+ lg %r5,__THREAD_info(%r3) # get thread_info of next
+ lgr %r15,%r5
+ aghi %r15,STACK_INIT # end of kernel stack of next
+ stg %r3,__LC_CURRENT # store task struct of next
+ stg %r5,__LC_THREAD_INFO # store thread info of next
+ stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
- l %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
+ mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+ lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
+ lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
br %r14
.L__critical_start:
@@ -170,75 +200,83 @@ ENTRY(__switch_to)
ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stm:
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lhi %r14,_PIF_SYSCALL
+.Lsysc_stmg:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ lghi %r14,_PIF_SYSCALL
.Lsysc_per:
- l %r15,__LC_KERNEL_STACK
+ lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
.Lsysc_vtime:
- UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+ UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r13
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- st %r14,__PT_FLAGS(%r11)
+ stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- lh %r8,__PT_INT_CODE+2(%r11)
- sla %r8,2 # shift and test for svc0
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ llgh %r8,__PT_INT_CODE+2(%r11)
+ slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
- cl %r1,BASED(.Lnr_syscalls)
+ llgfr %r1,%r1 # clear high word in r1
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
- lr %r8,%r1
- sla %r8,2
+ slag %r8,%r1,2
.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- st %r2,__PT_ORIG_GPR2(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r9,0(%r8,%r10) # get system call addr.
- tm __TI_flags+3(%r12),_TIF_TRACE
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ stg %r2,__PT_ORIG_GPR2(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lgf %r9,0(%r8,%r10) # get system call add.
+ tm __TI_flags+7(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
LOCKDEP_SYS_EXIT
.Lsysc_tif:
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lsysc_restore
- tm __PT_FLAGS+3(%r11),_PIF_WORK
+ tm __PT_FLAGS+7(%r11),_PIF_WORK
jnz .Lsysc_work
- tm __TI_flags+3(%r12),_TIF_WORK
- jnz .Lsysc_work # check for thread work
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
+ jnz .Lsysc_work # check for work
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lsysc_work
.Lsysc_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lsysc_done:
#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lsysc_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
- tm __PT_FLAGS+3(%r11),_PIF_PER_TRAP
+#ifdef CONFIG_UPROBES
+ tm __TI_flags+7(%r12),_TIF_UPROBE
+ jo .Lsysc_uprobe_notify
+#endif
+ tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lsysc_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lsysc_uaccess
j .Lsysc_return # beware of critical section cleanup
@@ -246,109 +284,109 @@ ENTRY(system_call)
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
- l %r1,BASED(.Lc_schedule)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call schedule
+ larl %r14,.Lsysc_return
+ jg schedule
#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
- l %r1,BASED(.Lc_handle_mcck)
- la %r14,BASED(.Lsysc_return)
- br %r1 # TIF bit will be cleared by handler
+ larl %r14,.Lsysc_return
+ jg s390_handle_mcck # TIF bit will be cleared by handler
#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lsysc_return
#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_signal)
- basr %r14,%r1 # call do_signal
- tm __PT_FLAGS+3(%r11),_PIF_SYSCALL
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
+ tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
jno .Lsysc_return
- lm %r2,%r7,__PT_R2(%r11) # load svc arguments
- l %r10,__TI_sysc_table(%r12) # 31 bit system call table
- xr %r8,%r8 # svc 0 returns -ENOSYS
- clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+ lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
+ lghi %r8,0 # svc 0 returns -ENOSYS
+ llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
+ cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- lh %r8,__PT_INT_CODE+2(%r11) # load new svc number
- sla %r8,2
+ slag %r8,%r1,2
j .Lsysc_nr_ok # restart svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_notify_resume)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_notify_resume
+
+#
+# _TIF_UPROBE is set, call uprobe_notify_resume
+#
+#ifdef CONFIG_UPROBES
+.Lsysc_uprobe_notify:
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg uprobe_notify_resume
+#endif
#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
- ni __PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
- lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Lc_do_per_trap)
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_per_trap
+ ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_per_trap
#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
- l %r1,BASED(.Lc_trace_enter)
- lr %r2,%r11 # pass pointer to pt_regs
+ lgr %r2,%r11 # pass pointer to pt_regs
la %r3,0
- xr %r0,%r0
- icm %r0,3,__PT_INT_CODE+2(%r11)
- st %r0,__PT_R2(%r11)
- basr %r14,%r1 # call do_syscall_trace_enter
- cl %r2,BASED(.Lnr_syscalls)
- jnl .Lsysc_tracenogo
- lr %r8,%r2
- sll %r8,2
- l %r9,0(%r8,%r10)
+ llgh %r0,__PT_INT_CODE+2(%r11)
+ stg %r0,__PT_R2(%r11)
+ brasl %r14,do_syscall_trace_enter
+ lghi %r0,NR_syscalls
+ clgr %r0,%r2
+ jnh .Lsysc_tracenogo
+ sllg %r8,%r2,2
+ lgf %r9,0(%r8,%r10)
.Lsysc_tracego:
- lm %r3,%r7,__PT_R3(%r11)
- st %r7,STACK_FRAME_OVERHEAD(%r15)
- l %r2,__PT_ORIG_GPR2(%r11)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
basr %r14,%r9 # call sys_xxx
- st %r2,__PT_R2(%r11) # store return value
+ stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
- tm __TI_flags+3(%r12),_TIF_TRACE
+ tm __TI_flags+7(%r12),_TIF_TRACE
jz .Lsysc_return
- l %r1,BASED(.Lc_trace_exit)
- lr %r2,%r11 # pass pointer to pt_regs
- la %r14,BASED(.Lsysc_return)
- br %r1 # call do_syscall_trace_exit
+ lgr %r2,%r11 # pass pointer to pt_regs
+ larl %r14,.Lsysc_return
+ jg do_syscall_trace_exit
#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
la %r11,STACK_FRAME_OVERHEAD(%r15)
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- l %r1,BASED(.Lc_schedule_tail)
- basr %r14,%r1 # call schedule_tail
+ lg %r12,__LC_THREAD_INFO
+ brasl %r14,schedule_tail
TRACE_IRQS_ON
ssm __LC_SVC_NEW_PSW # reenable interrupts
tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
jne .Lsysc_tracenogo
# it's a kernel thread
- lm %r9,%r10,__PT_R9(%r11) # load gprs
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
basr %r14,%r9
@@ -360,46 +398,54 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_SYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_PGM_OLD_PSW
- tmh %r8,0x0001 # test problem state bit
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,1
+ tmhh %r8,0x0001 # test problem state bit
jnz 1f # -> fault in user space
- tmh %r8,0x4000 # PER bit set in old PSW ?
+ tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 0f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 2f
-1: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
- l %r15,__LC_KERNEL_STACK
+1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
+ LAST_BREAK %r14
+ lg %r15,__LC_KERNEL_STACK
+ lg %r14,__TI_task(%r12)
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+ jz 2f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
jz 0f
- l %r1,__TI_task(%r12)
- tmh %r8,0x0001 # kernel per event ?
+ tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
- oi __PT_FLAGS+3(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r1),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
0: REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_jump_table)
- la %r10,0x7f
- n %r10,__PT_INT_CODE(%r11)
- je .Lsysc_return
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+ nill %r10,0x007f
sll %r10,2
- l %r1,0(%r10,%r1) # load address of handler routine
- lr %r2,%r11 # pass pointer to pt_regs
+ je .Lsysc_return
+ lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
basr %r14,%r1 # branch to interrupt-handler
j .Lsysc_return
@@ -408,54 +454,55 @@ ENTRY(pgm_check_handler)
#
.Lpgm_kprobe:
REENABLE_IRQS
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_per_trap)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_per_trap
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_per_trap
j .Lsysc_return
#
# single stepped system call
#
.Lpgm_svcper:
- mvc __LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
- mvc __LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
- lhi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpsw __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
+ larl %r14,.Lsysc_per
+ stg %r14,__LC_RETURN_PSW+8
+ lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
+ lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
/*
* IO interrupt handler routine
*/
-
ENTRY(io_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_IO_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_IO_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,2
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user?
jz .Lio_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lio_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,IO_INTERRUPT
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,IO_INTERRUPT
tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
jz .Lio_call
- lhi %r3,THIN_INTERRUPT
+ lghi %r3,THIN_INTERRUPT
.Lio_call:
- basr %r14,%r1 # call do_IRQ
- tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
+ brasl %r14,do_IRQ
+ tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
jz .Lio_return
tpi 0
jz .Lio_return
@@ -465,21 +512,26 @@ ENTRY(io_int_handler)
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
.Lio_tif:
- tm __TI_flags+3(%r12),_TIF_WORK
+ tm __TI_flags+7(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+3,_CIF_WORK
+ tm __LC_CPU_FLAGS+7,_CIF_WORK
jnz .Lio_work
.Lio_restore:
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r11)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
stpt __LC_EXIT_TIMER
- lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
.Lio_done:
#
# There is work todo, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and preemptive scheduling is enabled check
+# 2) if we return to kernel code and kvm is enabled check if we need to
+# modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
@@ -489,21 +541,20 @@ ENTRY(io_int_handler)
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption disabled
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ jnz .Lio_restore # preemption is disabled
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
# switch to kernel stack
- l %r1,__PT_R15(%r11)
- ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r1,__PT_R15(%r11)
+ aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
# TRACE_IRQS_ON already done at .Lio_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_preempt_irq)
- basr %r14,%r1 # call preempt_schedule_irq
+ brasl %r14,preempt_schedule_irq
j .Lio_return
#else
j .Lio_restore
@@ -513,25 +564,25 @@ ENTRY(io_int_handler)
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
- l %r1,__LC_KERNEL_STACK
+ lg %r1,__LC_KERNEL_STACK
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
la %r11,STACK_FRAME_OVERHEAD(%r1)
- lr %r15,%r1
+ lgr %r15,%r1
#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
- tm __LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jo .Lio_mcck_pending
- tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
+ tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
- tm __TI_flags+3(%r12),_TIF_SIGPENDING
+ tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo .Lio_sigpending
- tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
+ tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+3,_CIF_ASCE
+ tm __LC_CPU_FLAGS+7,_CIF_ASCE
jo .Lio_uaccess
j .Lio_return # beware of critical section cleanup
@@ -540,8 +591,7 @@ ENTRY(io_int_handler)
#
.Lio_mcck_pending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # TIF bit will be cleared by handler
+ brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
TRACE_IRQS_OFF
j .Lio_return
@@ -549,8 +599,8 @@ ENTRY(io_int_handler)
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
- ni __LC_CPU_FLAGS+3,255-_CIF_ASCE
- lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
j .Lio_return
#
@@ -558,35 +608,32 @@ ENTRY(io_int_handler)
#
.Lio_reschedule:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_schedule)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- basr %r14,%r1 # call scheduler
+ brasl %r14,schedule # call scheduler
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_SIGPENDING or is set, call do_signal
#
.Lio_sigpending:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_signal)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_signal
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_signal
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
#
-# _TIF_SIGPENDING is set, call do_signal
+# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
#
.Lio_notify_resume:
# TRACE_IRQS_ON already done at .Lio_return
- l %r1,BASED(.Lc_do_notify_resume)
ssm __LC_SVC_NEW_PSW # reenable interrupts
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call do_notify_resume
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,do_notify_resume
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
@@ -594,45 +641,47 @@ ENTRY(io_int_handler)
/*
* External interrupt handler routine
*/
-
ENTRY(ext_int_handler)
- stck __LC_INT_CLOCK
+ STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
- stm %r8,%r15,__LC_SAVE_AREA_ASYNC
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_EXT_OLD_PSW
- tmh %r8,0x0001 # interrupting from user ?
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_EXT_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,3
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+ tmhh %r8,0x0001 # interrupting from user ?
jz .Lext_skip
- UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
+ LAST_BREAK %r14
.Lext_skip:
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
- stm %r8,%r9,__PT_PSW(%r11)
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ lghi %r1,__LC_EXT_PARAMS2
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
+ mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_do_IRQ)
- lr %r2,%r11 # pass pointer to pt_regs
- lhi %r3,EXT_INTERRUPT
- basr %r14,%r1 # call do_IRQ
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,EXT_INTERRUPT
+ brasl %r14,do_IRQ
j .Lio_return
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
*/
ENTRY(psw_idle)
- st %r3,__SF_EMPTY(%r15)
- basr %r1,0
- la %r1,.Lpsw_idle_lpsw+4-.(%r1)
- st %r1,__SF_EMPTY+4(%r15)
- oi __SF_EMPTY+4(%r15),0x80
- stck __CLOCK_IDLE_ENTER(%r2)
+ stg %r3,__SF_EMPTY(%r15)
+ larl %r1,.Lpsw_idle_lpsw+4
+ stg %r1,__SF_EMPTY+8(%r15)
+ STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
- lpsw __SF_EMPTY(%r15)
+ lpswe __SF_EMPTY(%r15)
br %r14
.Lpsw_idle_end:
@@ -641,17 +690,19 @@ ENTRY(psw_idle)
/*
* Machine check handler routines
*/
-
ENTRY(mcck_int_handler)
- stck __LC_MCCK_CLOCK
- spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
- lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
- l %r12,__LC_THREAD_INFO
- l %r13,__LC_SVC_NEW_PSW+4
- lm %r8,%r9,__LC_MCK_OLD_PSW
+ STCK __LC_MCCK_CLOCK
+ la %r1,4095 # revalidate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_THREAD_INFO
+ larl %r13,system_call
+ lmg %r8,%r9,__LC_MCK_OLD_PSW
+ HANDLE_SIE_INTERCEPT %r14,4
tm __LC_MCCK_CODE,0x80 # system damage?
jo .Lmcck_panic # yes -> rest of mcck code invalid
- la %r14,__LC_CPU_TIMER_SAVE_AREA
+ lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 3f
@@ -669,76 +720,76 @@ ENTRY(mcck_int_handler)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno .Lmcck_panic # no -> skip cleanup critical
+ SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
tm %r8,0x0001 # interrupting from user ?
jz .Lmcck_skip
- UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
+ UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
+ LAST_BREAK %r14
.Lmcck_skip:
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
- stm %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
- stm %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- l %r1,BASED(.Lc_do_machine_check)
- lr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # call s390_do_machine_check
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ brasl %r14,s390_do_machine_check
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lmcck_return
- l %r1,__LC_KERNEL_STACK # switch to kernel stack
+ lg %r1,__LC_KERNEL_STACK # switch to kernel stack
mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lr %r15,%r1
+ xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
+ la %r11,STACK_FRAME_OVERHEAD(%r1)
+ lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
+ tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
- l %r1,BASED(.Lc_handle_mcck)
- basr %r14,%r1 # call s390_handle_mcck
+ brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
.Lmcck_return:
- mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
- lm %r0,%r15,__PT_R0(%r11)
stpt __LC_EXIT_TIMER
- lpsw __LC_RETURN_MCCK_PSW
-0: lm %r0,%r15,__PT_R0(%r11)
- lpsw __LC_RETURN_MCCK_PSW
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+0: lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_MCCK_PSW
.Lmcck_panic:
- l %r14,__LC_PANIC_STACK
- slr %r14,%r15
- sra %r14,PAGE_SHIFT
+ lg %r14,__LC_PANIC_STACK
+ slgr %r14,%r15
+ srag %r14,%r14,PAGE_SHIFT
jz 0f
- l %r15,__LC_PANIC_STACK
- j .Lmcck_skip
-0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ lg %r15,__LC_PANIC_STACK
+0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j .Lmcck_skip
#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
- st %r15,__LC_SAVE_AREA_RESTART
- l %r15,__LC_RESTART_STACK
- ahi %r15,-__PT_SIZE # create pt_regs on stack
+ stg %r15,__LC_SAVE_AREA_RESTART
+ lg %r15,__LC_RESTART_STACK
+ aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
- stm %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
- ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
+ stmg %r0,%r14,__PT_R0(%r15)
+ mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
+ mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
+ aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- l %r1,__LC_RESTART_FN # load fn, parm & source cpu
- l %r2,__LC_RESTART_DATA
- l %r3,__LC_RESTART_SOURCE
- ltr %r3,%r3 # test source cpu address
+ lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA
+ lg %r3,__LC_RESTART_SOURCE
+ ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
- lh %r3,__SF_EMPTY(%r15)
+ llgh %r3,__SF_EMPTY(%r15)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
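For orientation, the restart handler consumes three lowcore values that the requesting CPU must have set up before raising the restart interrupt: a function pointer, one argument, and the source CPU address (negative when there is no source CPU to stop). A hypothetical C rendering of that contract follows; the struct and function names are not the kernel's.

#include <stdint.h>

/* Illustrative view of the lowcore fields used by restart_int_handler. */
struct restart_request {
	void (*fn)(void *data);		/* __LC_RESTART_FN     */
	void *data;			/* __LC_RESTART_DATA   */
	int64_t source_cpu;		/* __LC_RESTART_SOURCE */
};

static void handle_restart(struct restart_request *rq)
{
	if (rq->source_cpu >= 0) {
		/* sigp SENSE loop: wait until the source CPU reports stopped */
	}
	rq->fn(rq->data);
	/* stap + sigp STOP against our own CPU address; does not return */
}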
@@ -752,215 +803,257 @@ ENTRY(restart_int_handler)
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
stack_overflow:
- l %r15,__LC_PANIC_STACK # change to panic stack
+ lg %r15,__LC_PANIC_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
- stm %r0,%r7,__PT_R0(%r11)
- stm %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(32,%r11),0(%r14)
- l %r1,BASED(1f)
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
- lr %r2,%r11 # pass pointer to pt_regs
- br %r1 # branch to kernel_stack_overflow
-1: .long kernel_stack_overflow
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+ stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ jg kernel_stack_overflow
#endif
+ .align 8
.Lcleanup_table:
- .long system_call + 0x80000000
- .long .Lsysc_do_svc + 0x80000000
- .long .Lsysc_tif + 0x80000000
- .long .Lsysc_restore + 0x80000000
- .long .Lsysc_done + 0x80000000
- .long .Lio_tif + 0x80000000
- .long .Lio_restore + 0x80000000
- .long .Lio_done + 0x80000000
- .long psw_idle + 0x80000000
- .long .Lpsw_idle_end + 0x80000000
+ .quad system_call
+ .quad .Lsysc_do_svc
+ .quad .Lsysc_tif
+ .quad .Lsysc_restore
+ .quad .Lsysc_done
+ .quad .Lio_tif
+ .quad .Lio_restore
+ .quad .Lio_done
+ .quad psw_idle
+ .quad .Lpsw_idle_end
cleanup_critical:
- cl %r9,BASED(.Lcleanup_table) # system_call
+ clg %r9,BASED(.Lcleanup_table) # system_call
jl 0f
- cl %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc
+ clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
jl .Lcleanup_system_call
- cl %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif
+ clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+12) # .Lsysc_restore
+ clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
jl .Lcleanup_sysc_tif
- cl %r9,BASED(.Lcleanup_table+16) # .Lsysc_done
+ clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
jl .Lcleanup_sysc_restore
- cl %r9,BASED(.Lcleanup_table+20) # .Lio_tif
+ clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
jl 0f
- cl %r9,BASED(.Lcleanup_table+24) # .Lio_restore
+ clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
jl .Lcleanup_io_tif
- cl %r9,BASED(.Lcleanup_table+28) # .Lio_done
+ clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
jl .Lcleanup_io_restore
- cl %r9,BASED(.Lcleanup_table+32) # psw_idle
+ clg %r9,BASED(.Lcleanup_table+64) # psw_idle
jl 0f
- cl %r9,BASED(.Lcleanup_table+36) # .Lpsw_idle_end
+ clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
jl .Lcleanup_idle
0: br %r14
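cleanup_critical is a range dispatcher: it compares the interrupted instruction address in %r9 against the ten label addresses in .Lcleanup_table and picks the fix-up routine for the half-open range the address falls into. The same decision, written out as a C sketch with illustrative names (the table entries stand for the label addresses above):

/* Conceptual sketch of the cleanup_critical range dispatch. */
typedef unsigned long addr_t;

enum cleanup_action {
	CLEANUP_NONE,
	CLEANUP_SYSTEM_CALL,	/* [system_call, .Lsysc_do_svc)  */
	CLEANUP_SYSC_TIF,	/* [.Lsysc_tif, .Lsysc_restore)  */
	CLEANUP_SYSC_RESTORE,	/* [.Lsysc_restore, .Lsysc_done) */
	CLEANUP_IO_TIF,		/* [.Lio_tif, .Lio_restore)      */
	CLEANUP_IO_RESTORE,	/* [.Lio_restore, .Lio_done)     */
	CLEANUP_IDLE,		/* [psw_idle, .Lpsw_idle_end)    */
};

static enum cleanup_action classify(addr_t ip, const addr_t tbl[10])
{
	if (ip < tbl[0]) return CLEANUP_NONE;
	if (ip < tbl[1]) return CLEANUP_SYSTEM_CALL;
	if (ip < tbl[2]) return CLEANUP_NONE;
	if (ip < tbl[3]) return CLEANUP_SYSC_TIF;
	if (ip < tbl[4]) return CLEANUP_SYSC_RESTORE;
	if (ip < tbl[5]) return CLEANUP_NONE;
	if (ip < tbl[6]) return CLEANUP_IO_TIF;
	if (ip < tbl[7]) return CLEANUP_IO_RESTORE;
	if (ip < tbl[8]) return CLEANUP_NONE;
	if (ip < tbl[9]) return CLEANUP_IDLE;
	return CLEANUP_NONE;
}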
+
.Lcleanup_system_call:
# check if stpt has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn)
+ clg %r9,BASED(.Lcleanup_system_call_insn)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stm has been executed
- cl %r9,BASED(.Lcleanup_system_call_insn+4)
+0: # check if stmg has been executed
+ clg %r9,BASED(.Lcleanup_system_call_insn+8)
jh 0f
- mvc __LC_SAVE_AREA_SYNC(32),0(%r11)
-0: # set up saved registers r12, and r13
- st %r12,16(%r11) # r12 thread-info pointer
- st %r13,20(%r11) # r13 literal-pool pointer
- # check if the user time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+8)
+ mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
+0: # check if base register setup + TIF bit load has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+16)
+ jhe 0f
+ # set up saved registers r10 and r12
+ stg %r10,16(%r11) # r10 last break
+ stg %r12,32(%r11) # r12 thread-info pointer
+0: # check if the user time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
- l %r10,__LC_EXIT_TIMER
- l %r15,__LC_EXIT_TIMER+4
- SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER
- ADD64 %r10,%r15,__LC_USER_TIMER
- st %r10,__LC_USER_TIMER
- st %r15,__LC_USER_TIMER+4
-0: # check if the system time calculation has been done
- cl %r9,BASED(.Lcleanup_system_call_insn+12)
+ lg %r15,__LC_EXIT_TIMER
+ slg %r15,__LC_SYNC_ENTER_TIMER
+ alg %r15,__LC_USER_TIMER
+ stg %r15,__LC_USER_TIMER
+0: # check if the system time update has been done
+ clg %r9,BASED(.Lcleanup_system_call_insn+32)
jh 0f
- l %r10,__LC_LAST_UPDATE_TIMER
- l %r15,__LC_LAST_UPDATE_TIMER+4
- SUB64 %r10,%r15,__LC_EXIT_TIMER
- ADD64 %r10,%r15,__LC_SYSTEM_TIMER
- st %r10,__LC_SYSTEM_TIMER
- st %r15,__LC_SYSTEM_TIMER+4
+ lg %r15,__LC_LAST_UPDATE_TIMER
+ slg %r15,__LC_EXIT_TIMER
+ alg %r15,__LC_SYSTEM_TIMER
+ stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # set up saved register 11
- l %r15,__LC_KERNEL_STACK
+ # do LAST_BREAK
+ lg %r9,16(%r11)
+ srag %r9,%r9,23
+ jz 0f
+ mvc __TI_last_break(8,%r12),16(%r11)
+0: # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
- st %r9,12(%r11) # r11 pt_regs pointer
+ stg %r9,24(%r11) # r11 pt_regs pointer
# fill pt_regs
- mvc __PT_R8(32,%r9),__LC_SAVE_AREA_SYNC
- stm %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(8,%r9),__LC_SVC_OLD_PSW
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+ stmg %r0,%r7,__PT_R0(%r9)
+ mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(4,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+3(%r9),_PIF_SYSCALL
- # setup saved register 15
- st %r15,28(%r11) # r15 stack pointer
+ xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
+ mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
+ # setup saved register r15
+ stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
- l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
+ larl %r9,.Lsysc_do_svc
br %r14
.Lcleanup_system_call_insn:
- .long system_call + 0x80000000
- .long .Lsysc_stm + 0x80000000
- .long .Lsysc_vtime + 0x80000000 + 36
- .long .Lsysc_vtime + 0x80000000 + 76
+ .quad system_call
+ .quad .Lsysc_stmg
+ .quad .Lsysc_per
+ .quad .Lsysc_vtime+18
+ .quad .Lsysc_vtime+42
.Lcleanup_sysc_tif:
- l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
+ larl %r9,.Lsysc_tif
br %r14
.Lcleanup_sysc_restore:
- cl %r9,BASED(.Lcleanup_sysc_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
- .long .Lsysc_done - 4 + 0x80000000
+ .quad .Lsysc_done - 4
.Lcleanup_io_tif:
- l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
+ larl %r9,.Lio_tif
br %r14
.Lcleanup_io_restore:
- cl %r9,BASED(.Lcleanup_io_restore_insn)
- jhe 0f
- l %r9,12(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
- mvc 0(32,%r11),__PT_R8(%r9)
- lm %r0,%r7,__PT_R0(%r9)
-0: lm %r8,%r9,__LC_RETURN_PSW
+ clg %r9,BASED(.Lcleanup_io_restore_insn)
+ je 0f
+ lg %r9,24(%r11) # get saved r11 pointer to pt_regs
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+0: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
- .long .Lio_done - 4 + 0x80000000
+ .quad .Lio_done - 4
.Lcleanup_idle:
# copy interrupt clock & cpu timer
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- chi %r11,__LC_SAVE_AREA_ASYNC
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck has been executed
- cl %r9,BASED(.Lcleanup_idle_insn)
+0: # check if stck & stpt have been executed
+ clg %r9,BASED(.Lcleanup_idle_insn)
jhe 1f
mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1: # account system time going idle
- lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
- SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
- stm %r9,%r10,__LC_STEAL_TIMER
+ lg %r9,__LC_STEAL_TIMER
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
+ slg %r9,__LC_LAST_UPDATE_CLOCK
+ stg %r9,__LC_STEAL_TIMER
mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lm %r9,%r10,__LC_SYSTEM_TIMER
- ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
- stm %r9,%r10,__LC_SYSTEM_TIMER
+ lg %r9,__LC_SYSTEM_TIMER
+ alg %r9,__LC_LAST_UPDATE_TIMER
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
+ stg %r9,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
- l %r9,24(%r11) # return from psw_idle
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
br %r14
.Lcleanup_idle_insn:
- .long .Lpsw_idle_lpsw + 0x80000000
-.Lcleanup_idle_wait:
- .long 0xfcfdffff
+ .quad .Lpsw_idle_lpsw
/*
* Integer constants
*/
- .align 4
-.Lnr_syscalls:
- .long NR_syscalls
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
+ .align 8
+.Lcritical_start:
+ .quad .L__critical_start
+.Lcritical_length:
+ .quad .L__critical_end - .L__critical_start
+
+#if IS_ENABLED(CONFIG_KVM)
/*
- * Symbol constants
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
*/
-.Lc_do_machine_check: .long s390_do_machine_check
-.Lc_handle_mcck: .long s390_handle_mcck
-.Lc_do_IRQ: .long do_IRQ
-.Lc_do_signal: .long do_signal
-.Lc_do_notify_resume: .long do_notify_resume
-.Lc_do_per_trap: .long do_per_trap
-.Lc_jump_table: .long pgm_check_table
-.Lc_schedule: .long schedule
-#ifdef CONFIG_PREEMPT
-.Lc_preempt_irq: .long preempt_schedule_irq
-#endif
-.Lc_trace_enter: .long do_syscall_trace_enter
-.Lc_trace_exit: .long do_syscall_trace_exit
-.Lc_schedule_tail: .long schedule_tail
-.Lc_sysc_per: .long .Lsysc_per + 0x80000000
-#ifdef CONFIG_TRACE_IRQFLAGS
-.Lc_hardirqs_on: .long trace_hardirqs_on_caller
-.Lc_hardirqs_off: .long trace_hardirqs_off_caller
-#endif
-#ifdef CONFIG_LOCKDEP
-.Lc_lockdep_sys_exit: .long lockdep_sys_exit
+ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+ lmg %r0,%r13,0(%r3) # load guest gprs 0-13
+ lg %r14,__LC_GMAP # get gmap pointer
+ ltgr %r14,%r14
+ jz .Lsie_gmap
+ lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
+.Lsie_gmap:
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
+ tm __SIE_PROG20+3(%r14),1 # last exit...
+ jnz .Lsie_done
+ LPP __SF_EMPTY(%r15) # set guest id
+ sie 0(%r14)
+.Lsie_done:
+ LPP __SF_EMPTY+16(%r15) # set host id
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
+# See also HANDLE_SIE_INTERCEPT
+.Lrewind_pad:
+ nop 0
+ .globl sie_exit
+sie_exit:
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
+ br %r14
+.Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
+ j sie_exit
+
+ .align 8
+.Lsie_critical:
+ .quad .Lsie_gmap
+.Lsie_critical_length:
+ .quad .Lsie_done - .Lsie_gmap
+
+ EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(sie_exit,.Lsie_fault)
#endif
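Seen from C, sie64a takes the SIE control block in %r2 and a guest general-register save area in %r3 (registers 0-13 are loaded before SIE and stored back at sie_exit), and returns an exit reason code: 0 unless an intercept path such as HANDLE_SIE_INTERCEPT or the .Lsie_fault path stored one (-EFAULT in the latter case). A hedged sketch of the prototype and a call site; the type and helper names are illustrative rather than the kernel's exact declarations.

/* Sketch only: parameter roles follow the register comments above. */
struct sie_block;	/* hardware SIE control block */

extern int sie64a(struct sie_block *scb, unsigned long *guest_gprs);

static int run_guest_once(struct sie_block *scb, unsigned long gprs[16])
{
	/* gprs[0..13] are loaded before SIE and stored back at sie_exit */
	return sie64a(scb, gprs);
}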
-.Lc_critical_start: .long .L__critical_start + 0x80000000
-.Lc_critical_length: .long .L__critical_end - .L__critical_start
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esa
+ .section .rodata, "a"
+#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
+
+#ifdef CONFIG_COMPAT
+
+#define SYSCALL(esame,emu) .long emu
+ .globl sys_call_table_emu
+sys_call_table_emu:
+#include "syscalls.S"
+#undef SYSCALL
+#endif
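With the ESA column removed, each SYSCALL() line in syscalls.S now names only the 64-bit handler and its compat handler, and the same include is expanded twice to build sys_call_table and, under CONFIG_COMPAT, sys_call_table_emu. The C analogue of that double expansion is the classic X-macro pattern; the sketch below uses hypothetical handler names purely for illustration.

typedef long (*syscall_fn)(void);

extern long sys_read_64(void);
extern long sys_write_64(void);
extern long compat_sys_read_32(void);
extern long compat_sys_write_32(void);

/* One list of entries, expanded twice with different SYSCALL() bodies. */
#define SYSCALL_LIST \
	SYSCALL(sys_read_64,  compat_sys_read_32) \
	SYSCALL(sys_write_64, compat_sys_write_32)

#define SYSCALL(esame, emu) (syscall_fn) esame,
static const syscall_fn sys_call_table[] = { SYSCALL_LIST };
#undef SYSCALL

#define SYSCALL(esame, emu) (syscall_fn) emu,
static const syscall_fn sys_call_table_emu[] = { SYSCALL_LIST };
#undef SYSCALL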
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
deleted file mode 100644
index c329446a951d..000000000000
--- a/arch/s390/kernel/entry64.S
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * S390 low-level entry points.
- *
- * Copyright IBM Corp. 1999, 2012
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Hartmut Penner (hp@de.ibm.com),
- * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/page.h>
-#include <asm/sigp.h>
-#include <asm/irq.h>
-
-__PT_R0 = __PT_GPRS
-__PT_R1 = __PT_GPRS + 8
-__PT_R2 = __PT_GPRS + 16
-__PT_R3 = __PT_GPRS + 24
-__PT_R4 = __PT_GPRS + 32
-__PT_R5 = __PT_GPRS + 40
-__PT_R6 = __PT_GPRS + 48
-__PT_R7 = __PT_GPRS + 56
-__PT_R8 = __PT_GPRS + 64
-__PT_R9 = __PT_GPRS + 72
-__PT_R10 = __PT_GPRS + 80
-__PT_R11 = __PT_GPRS + 88
-__PT_R12 = __PT_GPRS + 96
-__PT_R13 = __PT_GPRS + 104
-__PT_R14 = __PT_GPRS + 112
-__PT_R15 = __PT_GPRS + 120
-
-STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
-STACK_SIZE = 1 << STACK_SHIFT
-STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
-
-_TIF_WORK = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_UPROBE)
-_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
- _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK = (_CIF_MCCK_PENDING | _CIF_ASCE)
-_PIF_WORK = (_PIF_PER_TRAP)
-
-#define BASED(name) name-system_call(%r13)
-
- .macro TRACE_IRQS_ON
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_on_caller
-#endif
- .endm
-
- .macro TRACE_IRQS_OFF
-#ifdef CONFIG_TRACE_IRQFLAGS
- basr %r2,%r0
- brasl %r14,trace_hardirqs_off_caller
-#endif
- .endm
-
- .macro LOCKDEP_SYS_EXIT
-#ifdef CONFIG_LOCKDEP
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jz .+10
- brasl %r14,lockdep_sys_exit
-#endif
- .endm
-
- .macro LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz .+8
- .insn s,0xb2800000,\newpp
-#endif
- .endm
-
- .macro HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
- tmhh %r8,0x0001 # interrupting from user ?
- jnz .+62
- lgr \scratch,%r9
- slg \scratch,BASED(.Lsie_critical)
- clg \scratch,BASED(.Lsie_critical_length)
- .if \reason==1
- # Some program interrupts are suppressing (e.g. protection).
- # We must also check the instruction after SIE in that case.
- # do_protection_exception will rewind to .Lrewind_pad
- jh .+42
- .else
- jhe .+42
- .endif
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- larl %r9,sie_exit # skip forward to sie_exit
- mvi __SF_EMPTY+31(%r15),\reason # set exit reason
-#endif
- .endm
-
- .macro CHECK_STACK stacksize,savearea
-#ifdef CONFIG_CHECK_STACK
- tml %r15,\stacksize - CONFIG_STACK_GUARD
- lghi %r14,\savearea
- jz stack_overflow
-#endif
- .endm
-
- .macro SWITCH_ASYNC savearea,stack,shift
- tmhh %r8,0x0001 # interrupting from user ?
- jnz 1f
- lgr %r14,%r9
- slg %r14,BASED(.Lcritical_start)
- clg %r14,BASED(.Lcritical_length)
- jhe 0f
- lghi %r11,\savearea # inside critical section, do cleanup
- brasl %r14,cleanup_critical
- tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,\stack # are we already on the target stack?
- slgr %r14,%r15
- srag %r14,%r14,\shift
- jnz 1f
- CHECK_STACK 1<<\shift,\savearea
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: lg %r15,\stack # load target stack
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- .endm
-
- .macro UPDATE_VTIME scratch,enter_timer
- lg \scratch,__LC_EXIT_TIMER
- slg \scratch,\enter_timer
- alg \scratch,__LC_USER_TIMER
- stg \scratch,__LC_USER_TIMER
- lg \scratch,__LC_LAST_UPDATE_TIMER
- slg \scratch,__LC_EXIT_TIMER
- alg \scratch,__LC_SYSTEM_TIMER
- stg \scratch,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
- .endm
-
- .macro LAST_BREAK scratch
- srag \scratch,%r10,23
- jz .+10
- stg %r10,__TI_last_break(%r12)
- .endm
-
- .macro REENABLE_IRQS
- stg %r8,__LC_RETURN_PSW
- ni __LC_RETURN_PSW,0xbf
- ssm __LC_RETURN_PSW
- .endm
-
- .macro STCK savearea
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- .insn s,0xb27c0000,\savearea # store clock fast
-#else
- .insn s,0xb2050000,\savearea # store clock
-#endif
- .endm
-
- .section .kprobes.text, "ax"
-
-/*
- * Scheduler resume function, called by switch_to
- * gpr2 = (task_struct *) prev
- * gpr3 = (task_struct *) next
- * Returns:
- * gpr2 = prev
- */
-ENTRY(__switch_to)
- stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
- lg %r4,__THREAD_info(%r2) # get thread_info of prev
- lg %r5,__THREAD_info(%r3) # get thread_info of next
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
- stg %r3,__LC_CURRENT # store task struct of next
- stg %r5,__LC_THREAD_INFO # store thread info of next
- stg %r15,__LC_KERNEL_STACK # store end of kernel stack
- lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
- lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
- lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
- br %r14
-
-.L__critical_start:
-/*
- * SVC interrupt handler routine. System calls are synchronous events and
- * are executed with interrupts enabled.
- */
-
-ENTRY(system_call)
- stpt __LC_SYNC_ENTER_TIMER
-.Lsysc_stmg:
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- lghi %r14,_PIF_SYSCALL
-.Lsysc_per:
- lg %r15,__LC_KERNEL_STACK
- la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
-.Lsysc_vtime:
- UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r13
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
- stg %r14,__PT_FLAGS(%r11)
-.Lsysc_do_svc:
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,2 # shift and test for svc 0
- jnz .Lsysc_nr_ok
- # svc 0: system call number in %r1
- llgfr %r1,%r1 # clear high word in r1
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok
- sth %r1,__PT_INT_CODE+2(%r11)
- slag %r8,%r1,2
-.Lsysc_nr_ok:
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- stg %r2,__PT_ORIG_GPR2(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lgf %r9,0(%r8,%r10) # get system call add.
- tm __TI_flags+7(%r12),_TIF_TRACE
- jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
- stg %r2,__PT_R2(%r11) # store return value
-
-.Lsysc_return:
- LOCKDEP_SYS_EXIT
-.Lsysc_tif:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lsysc_restore
- tm __PT_FLAGS+7(%r11),_PIF_WORK
- jnz .Lsysc_work
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lsysc_work # check for work
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lsysc_work
-.Lsysc_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lsysc_done:
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lsysc_work:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lsysc_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lsysc_reschedule
-#ifdef CONFIG_UPROBES
- tm __TI_flags+7(%r12),_TIF_UPROBE
- jo .Lsysc_uprobe_notify
-#endif
- tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- jo .Lsysc_singlestep
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lsysc_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lsysc_uaccess
- j .Lsysc_return # beware of critical section cleanup
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lsysc_reschedule:
- larl %r14,.Lsysc_return
- jg schedule
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lsysc_mcck_pending:
- larl %r14,.Lsysc_return
- jg s390_handle_mcck # TIF bit will be cleared by handler
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lsysc_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lsysc_return
-
-#
-# _TIF_SIGPENDING is set, call do_signal
-#
-.Lsysc_sigpending:
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
- jno .Lsysc_return
- lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
- lg %r10,__TI_sysc_table(%r12) # address of system call table
- lghi %r8,0 # svc 0 returns -ENOSYS
- llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
- cghi %r1,NR_syscalls
- jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
- slag %r8,%r1,2
- j .Lsysc_nr_ok # restart svc
-
-#
-# _TIF_NOTIFY_RESUME is set, call do_notify_resume
-#
-.Lsysc_notify_resume:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_notify_resume
-
-#
-# _TIF_UPROBE is set, call uprobe_notify_resume
-#
-#ifdef CONFIG_UPROBES
-.Lsysc_uprobe_notify:
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg uprobe_notify_resume
-#endif
-
-#
-# _PIF_PER_TRAP is set, call do_per_trap
-#
-.Lsysc_singlestep:
- ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_per_trap
-
-#
-# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
-# and after the system call
-#
-.Lsysc_tracesys:
- lgr %r2,%r11 # pass pointer to pt_regs
- la %r3,0
- llgh %r0,__PT_INT_CODE+2(%r11)
- stg %r0,__PT_R2(%r11)
- brasl %r14,do_syscall_trace_enter
- lghi %r0,NR_syscalls
- clgr %r0,%r2
- jnh .Lsysc_tracenogo
- sllg %r8,%r2,2
- lgf %r9,0(%r8,%r10)
-.Lsysc_tracego:
- lmg %r3,%r7,__PT_R3(%r11)
- stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
- stg %r2,__PT_R2(%r11) # store return value
-.Lsysc_tracenogo:
- tm __TI_flags+7(%r12),_TIF_TRACE
- jz .Lsysc_return
- lgr %r2,%r11 # pass pointer to pt_regs
- larl %r14,.Lsysc_return
- jg do_syscall_trace_exit
-
-#
-# a new process exits the kernel with ret_from_fork
-#
-ENTRY(ret_from_fork)
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- lg %r12,__LC_THREAD_INFO
- brasl %r14,schedule_tail
- TRACE_IRQS_ON
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
- jne .Lsysc_tracenogo
- # it's a kernel thread
- lmg %r9,%r10,__PT_R9(%r11) # load gprs
-ENTRY(kernel_thread_starter)
- la %r2,0(%r10)
- basr %r14,%r9
- j .Lsysc_tracenogo
-
-/*
- * Program check handler routine
- */
-
-ENTRY(pgm_check_handler)
- stpt __LC_SYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_PGM_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,1
- tmhh %r8,0x0001 # test problem state bit
- jnz 1f # -> fault in user space
- tmhh %r8,0x4000 # PER bit set in old PSW ?
- jnz 0f # -> enabled, can't be a double fault
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jnz .Lpgm_svcper # -> single stepped svc
-0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
- aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 2f
-1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
- LAST_BREAK %r14
- lg %r15,__LC_KERNEL_STACK
- lg %r14,__TI_task(%r12)
- lghi %r13,__LC_PGM_TDB
- tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 2f
- mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
- mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- stg %r10,__PT_ARGS(%r11)
- tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 0f
- tmhh %r8,0x0001 # kernel per event ?
- jz .Lpgm_kprobe
- oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
- mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
- mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
- mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0: REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- larl %r1,pgm_check_table
- llgh %r10,__PT_INT_CODE+2(%r11)
- nill %r10,0x007f
- sll %r10,2
- je .Lsysc_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
- lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
- j .Lsysc_return
-
-#
-# PER event in supervisor state, must be kprobes
-#
-.Lpgm_kprobe:
- REENABLE_IRQS
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_per_trap
- j .Lsysc_return
-
-#
-# single stepped system call
-#
-.Lpgm_svcper:
- mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
- larl %r14,.Lsysc_per
- stg %r14,__LC_RETURN_PSW+8
- lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
- lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
-
-/*
- * IO interrupt handler routine
- */
-ENTRY(io_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_IO_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,2
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user?
- jz .Lio_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lio_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-.Lio_loop:
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,IO_INTERRUPT
- tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
- jz .Lio_call
- lghi %r3,THIN_INTERRUPT
-.Lio_call:
- brasl %r14,do_IRQ
- tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
- jz .Lio_return
- tpi 0
- jz .Lio_return
- mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
- j .Lio_loop
-.Lio_return:
- LOCKDEP_SYS_EXIT
- TRACE_IRQS_ON
-.Lio_tif:
- tm __TI_flags+7(%r12),_TIF_WORK
- jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+7,_CIF_WORK
- jnz .Lio_work
-.Lio_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
-.Lio_done:
-
-#
-# There is work todo, find out in which context we have been interrupted:
-# 1) if we return to user space we can do all _TIF_WORK work
-# 2) if we return to kernel code and kvm is enabled check if we need to
-# modify the psw to leave SIE
-# 3) if we return to kernel code and preemptive scheduling is enabled check
-# the preemption counter and if it is zero call preempt_schedule_irq
-# Before any work can be done, a switch to the kernel stack is required.
-#
-.Lio_work:
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jo .Lio_work_user # yes -> do resched & signal
-#ifdef CONFIG_PREEMPT
- # check for preemptive scheduling
- icm %r0,15,__TI_precount(%r12)
- jnz .Lio_restore # preemption is disabled
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jno .Lio_restore
- # switch to kernel stack
- lg %r1,__PT_R15(%r11)
- aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- # TRACE_IRQS_ON already done at .Lio_return, call
- # TRACE_IRQS_OFF to keep things symmetrical
- TRACE_IRQS_OFF
- brasl %r14,preempt_schedule_irq
- j .Lio_return
-#else
- j .Lio_restore
-#endif
-
-#
-# Need to do work before returning to userspace, switch to kernel stack
-#
-.Lio_work_user:
- lg %r1,__LC_KERNEL_STACK
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
-
-#
-# One of the work bits is on. Find out which one.
-#
-.Lio_work_tif:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jo .Lio_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
- jo .Lio_reschedule
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
- jo .Lio_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
- jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
- jo .Lio_uaccess
- j .Lio_return # beware of critical section cleanup
-
-#
-# _CIF_MCCK_PENDING is set, call handler
-#
-.Lio_mcck_pending:
- # TRACE_IRQS_ON already done at .Lio_return
- brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _CIF_ASCE is set, load user space asce
-#
-.Lio_uaccess:
- ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- j .Lio_return
-
-#
-# _TIF_NEED_RESCHED is set, call schedule
-#
-.Lio_reschedule:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- brasl %r14,schedule # call scheduler
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_SIGPENDING or is set, call do_signal
-#
-.Lio_sigpending:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_signal
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-#
-# _TIF_NOTIFY_RESUME or is set, call do_notify_resume
-#
-.Lio_notify_resume:
- # TRACE_IRQS_ON already done at .Lio_return
- ssm __LC_SVC_NEW_PSW # reenable interrupts
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_notify_resume
- ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
- TRACE_IRQS_OFF
- j .Lio_return
-
-/*
- * External interrupt handler routine
- */
-ENTRY(ext_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_EXT_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,3
- SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
- tmhh %r8,0x0001 # interrupting from user ?
- jz .Lext_skip
- UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
- LAST_BREAK %r14
-.Lext_skip:
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
- stmg %r8,%r9,__PT_PSW(%r11)
- lghi %r1,__LC_EXT_PARAMS2
- mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
- mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
- mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- TRACE_IRQS_OFF
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- lghi %r3,EXT_INTERRUPT
- brasl %r14,do_IRQ
- j .Lio_return
-
-/*
- * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
- */
-ENTRY(psw_idle)
- stg %r3,__SF_EMPTY(%r15)
- larl %r1,.Lpsw_idle_lpsw+4
- stg %r1,__SF_EMPTY+8(%r15)
- STCK __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
-.Lpsw_idle_lpsw:
- lpswe __SF_EMPTY(%r15)
- br %r14
-.Lpsw_idle_end:
-
-.L__critical_end:
-
-/*
- * Machine check handler routines
- */
-ENTRY(mcck_int_handler)
- STCK __LC_MCCK_CLOCK
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_THREAD_INFO
- larl %r13,system_call
- lmg %r8,%r9,__LC_MCK_OLD_PSW
- HANDLE_SIE_INTERCEPT %r14,4
- tm __LC_MCCK_CODE,0x80 # system damage?
- jo .Lmcck_panic # yes -> rest of mcck code invalid
- lghi %r14,__LC_CPU_TIMER_SAVE_AREA
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
- jo 3f
- la %r14,__LC_SYNC_ENTER_TIMER
- clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
- jl 0f
- la %r14,__LC_ASYNC_ENTER_TIMER
-0: clc 0(8,%r14),__LC_EXIT_TIMER
- jl 1f
- la %r14,__LC_EXIT_TIMER
-1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
- jl 2f
- la %r14,__LC_LAST_UPDATE_TIMER
-2: spt 0(%r14)
- mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
- jno .Lmcck_panic # no -> skip cleanup critical
- SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
- tm %r8,0x0001 # interrupting from user ?
- jz .Lmcck_skip
- UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
- LAST_BREAK %r14
-.Lmcck_skip:
- lghi %r14,__LC_GPREGS_SAVE_AREA+64
- stmg %r0,%r7,__PT_R0(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stmg %r8,%r9,__PT_PSW(%r11)
- xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,s390_do_machine_check
- tm __PT_PSW+1(%r11),0x01 # returning to user ?
- jno .Lmcck_return
- lg %r1,__LC_KERNEL_STACK # switch to kernel stack
- mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
- xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
- la %r11,STACK_FRAME_OVERHEAD(%r1)
- lgr %r15,%r1
- ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
- jno .Lmcck_return
- TRACE_IRQS_OFF
- brasl %r14,s390_handle_mcck
- TRACE_IRQS_ON
-.Lmcck_return:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
- tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
- jno 0f
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-0: lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_MCCK_PSW
-
-.Lmcck_panic:
- lg %r14,__LC_PANIC_STACK
- slgr %r14,%r15
- srag %r14,%r14,PAGE_SHIFT
- jz 0f
- lg %r15,__LC_PANIC_STACK
-0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j .Lmcck_skip
-
-#
-# PSW restart interrupt handler
-#
-ENTRY(restart_int_handler)
- stg %r15,__LC_SAVE_AREA_RESTART
- lg %r15,__LC_RESTART_STACK
- aghi %r15,-__PT_SIZE # create pt_regs on stack
- xc 0(__PT_SIZE,%r15),0(%r15)
- stmg %r0,%r14,__PT_R0(%r15)
- mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
- mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
- aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
- xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
- lg %r2,__LC_RESTART_DATA
- lg %r3,__LC_RESTART_SOURCE
- ltgr %r3,%r3 # test source cpu address
- jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
- brc 10,0b # wait for status stored
-1: basr %r14,%r1 # call function
- stap __SF_EMPTY(%r15) # store cpu address
- llgh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
- brc 2,2b
-3: j 3b
-
- .section .kprobes.text, "ax"
-
-#ifdef CONFIG_CHECK_STACK
-/*
- * The synchronous or the asynchronous stack overflowed. We are dead.
- * No need to properly save the registers, we are going to panic anyway.
- * Setup a pt_regs so that show_trace can provide a good call trace.
- */
-stack_overflow:
- lg %r15,__LC_PANIC_STACK # change to panic stack
- la %r11,STACK_FRAME_OVERHEAD(%r15)
- stmg %r0,%r7,__PT_R0(%r11)
- stmg %r8,%r9,__PT_PSW(%r11)
- mvc __PT_R8(64,%r11),0(%r14)
- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
- lgr %r2,%r11 # pass pointer to pt_regs
- jg kernel_stack_overflow
-#endif
-
- .align 8
-.Lcleanup_table:
- .quad system_call
- .quad .Lsysc_do_svc
- .quad .Lsysc_tif
- .quad .Lsysc_restore
- .quad .Lsysc_done
- .quad .Lio_tif
- .quad .Lio_restore
- .quad .Lio_done
- .quad psw_idle
- .quad .Lpsw_idle_end
-
-cleanup_critical:
- clg %r9,BASED(.Lcleanup_table) # system_call
- jl 0f
- clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
- jl .Lcleanup_system_call
- clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
- jl .Lcleanup_sysc_tif
- clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
- jl .Lcleanup_sysc_restore
- clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
- jl 0f
- clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
- jl .Lcleanup_io_tif
- clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
- jl .Lcleanup_io_restore
- clg %r9,BASED(.Lcleanup_table+64) # psw_idle
- jl 0f
- clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
- jl .Lcleanup_idle
-0: br %r14
-
-
-.Lcleanup_system_call:
- # check if stpt has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn)
- jh 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
-0: # check if stmg has been executed
- clg %r9,BASED(.Lcleanup_system_call_insn+8)
- jh 0f
- mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
-0: # check if base register setup + TIF bit load has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+16)
- jhe 0f
- # set up saved registers r10 and r12
- stg %r10,16(%r11) # r10 last break
- stg %r12,32(%r11) # r12 thread-info pointer
-0: # check if the user time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+24)
- jh 0f
- lg %r15,__LC_EXIT_TIMER
- slg %r15,__LC_SYNC_ENTER_TIMER
- alg %r15,__LC_USER_TIMER
- stg %r15,__LC_USER_TIMER
-0: # check if the system time update has been done
- clg %r9,BASED(.Lcleanup_system_call_insn+32)
- jh 0f
- lg %r15,__LC_LAST_UPDATE_TIMER
- slg %r15,__LC_EXIT_TIMER
- alg %r15,__LC_SYSTEM_TIMER
- stg %r15,__LC_SYSTEM_TIMER
-0: # update accounting time stamp
- mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
- # do LAST_BREAK
- lg %r9,16(%r11)
- srag %r9,%r9,23
- jz 0f
- mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
- lg %r15,__LC_KERNEL_STACK
- la %r9,STACK_FRAME_OVERHEAD(%r15)
- stg %r9,24(%r11) # r11 pt_regs pointer
- # fill pt_regs
- mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
- stmg %r0,%r7,__PT_R0(%r9)
- mvc __PT_PSW(16,%r9),__LC_SVC_OLD_PSW
- mvc __PT_INT_CODE(4,%r9),__LC_SVC_ILC
- xc __PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
- mvi __PT_FLAGS+7(%r9),_PIF_SYSCALL
- # setup saved register r15
- stg %r15,56(%r11) # r15 stack pointer
- # set new psw address and exit
- larl %r9,.Lsysc_do_svc
- br %r14
-.Lcleanup_system_call_insn:
- .quad system_call
- .quad .Lsysc_stmg
- .quad .Lsysc_per
- .quad .Lsysc_vtime+18
- .quad .Lsysc_vtime+42
-
-.Lcleanup_sysc_tif:
- larl %r9,.Lsysc_tif
- br %r14
-
-.Lcleanup_sysc_restore:
- clg %r9,BASED(.Lcleanup_sysc_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_sysc_restore_insn:
- .quad .Lsysc_done - 4
-
-.Lcleanup_io_tif:
- larl %r9,.Lio_tif
- br %r14
-
-.Lcleanup_io_restore:
- clg %r9,BASED(.Lcleanup_io_restore_insn)
- je 0f
- lg %r9,24(%r11) # get saved r11 pointer to pt_regs
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
- mvc 0(64,%r11),__PT_R8(%r9)
- lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
-.Lcleanup_io_restore_insn:
- .quad .Lio_done - 4
-
-.Lcleanup_idle:
- # copy interrupt clock & cpu timer
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
- cghi %r11,__LC_SAVE_AREA_ASYNC
- je 0f
- mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
-0: # check if stck & stpt have been executed
- clg %r9,BASED(.Lcleanup_idle_insn)
- jhe 1f
- mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
- mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1: # account system time going idle
- lg %r9,__LC_STEAL_TIMER
- alg %r9,__CLOCK_IDLE_ENTER(%r2)
- slg %r9,__LC_LAST_UPDATE_CLOCK
- stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
- lg %r9,__LC_SYSTEM_TIMER
- alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__TIMER_IDLE_ENTER(%r2)
- stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
- # prepare return psw
- nihh %r8,0xfcfd # clear irq & wait state bits
- lg %r9,48(%r11) # return from psw_idle
- br %r14
-.Lcleanup_idle_insn:
- .quad .Lpsw_idle_lpsw
-
-/*
- * Integer constants
- */
- .align 8
-.Lcritical_start:
- .quad .L__critical_start
-.Lcritical_length:
- .quad .L__critical_end - .L__critical_start
-
-
-#if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
- stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
- stg %r2,__SF_EMPTY(%r15) # save control block pointer
- stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
- xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
- lmg %r0,%r13,0(%r3) # load guest gprs 0-13
- lg %r14,__LC_GMAP # get gmap pointer
- ltgr %r14,%r14
- jz .Lsie_gmap
- lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
-.Lsie_gmap:
- lg %r14,__SF_EMPTY(%r15) # get control block pointer
- oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
- tm __SIE_PROG20+3(%r14),1 # last exit...
- jnz .Lsie_done
- LPP __SF_EMPTY(%r15) # set guest id
- sie 0(%r14)
-.Lsie_done:
- LPP __SF_EMPTY+16(%r15) # set host id
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
- nop 0
- .globl sie_exit
-sie_exit:
- lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
- stmg %r0,%r13,0(%r14) # save guest gprs 0-13
- lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
- lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
- br %r14
-.Lsie_fault:
- lghi %r14,-EFAULT
- stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
- j sie_exit
-
- .align 8
-.Lsie_critical:
- .quad .Lsie_gmap
-.Lsie_critical_length:
- .quad .Lsie_done - .Lsie_gmap
-
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
- EX_TABLE(sie_exit,.Lsie_fault)
-#endif
-
- .section .rodata, "a"
-#define SYSCALL(esa,esame,emu) .long esame
- .globl sys_call_table
-sys_call_table:
-#include "syscalls.S"
-#undef SYSCALL
-
-#ifdef CONFIG_COMPAT
-
-#define SYSCALL(esa,esame,emu) .long emu
- .globl sys_call_table_emu
-sys_call_table_emu:
-#include "syscalls.S"
-#undef SYSCALL
-#endif
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 82c19899574f..e0eaf11134b4 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -57,6 +57,44 @@
unsigned long ftrace_plt;
+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
+ /* stg r14,8(r15) */
+ insn->opc = 0xe3e0;
+ insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ if (insn->opc == BREAKPOINT_INSTRUCTION)
+ return 1;
+#endif
+ return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+ insn->opc = BREAKPOINT_INSTRUCTION;
+ insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
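All of the helpers added here fill in the same two-field view of the 6-byte instruction at a patch site: a 16-bit opcode part and a 32-bit remainder, so the stg, brcl 0,0 and breakpoint variants differ only in opc and disp. A self-contained sketch of that layout, assuming a packed pair similar to the kernel's struct ftrace_insn (field types here are illustrative):

#include <stdint.h>

struct ftrace_insn_sketch {
	uint16_t opc;	/* first two bytes of the 6-byte instruction */
	uint32_t disp;	/* remaining four bytes                      */
} __attribute__((packed));	/* 6 bytes total */

/* Non-hotpatch "original" instruction: stg %r14,8(%r15). */
static void generate_orig_insn(struct ftrace_insn_sketch *insn)
{
	insn->opc  = 0xe3e0;
	insn->disp = 0xf0080024;
}

/* With CC_USING_HOTPATCH the original is simply brcl 0,0. */
static void generate_hotpatch_nop(struct ftrace_insn_sketch *insn)
{
	insn->opc  = 0xc004;
	insn->disp = 0;
}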
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EFAULT;
if (addr == MCOUNT_ADDR) {
/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
- /* We expect to see brcl 0,0 */
- ftrace_generate_nop_insn(&orig);
-#else
- /* We expect to see stg r14,8(r15) */
- orig.opc = 0xe3e0;
- orig.disp = 0xf0080024;
-#endif
+ ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
- } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+ } else if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
* bytes of the original instruction so that the kprobes
* handler can execute a nop, if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_CALL;
- new.disp = KPROBE_ON_FTRACE_NOP;
+ ftrace_generate_kprobe_call_insn(&orig);
+ ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@@ -100,8 +130,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -111,7 +140,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (old.opc == BREAKPOINT_INSTRUCTION) {
+ if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@@ -119,9 +148,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* bytes of the original instruction so that the kprobes
* handler can execute a brasl if it reaches this breakpoint.
*/
- new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
- orig.disp = KPROBE_ON_FTRACE_NOP;
- new.disp = KPROBE_ON_FTRACE_CALL;
+ ftrace_generate_kprobe_nop_insn(&orig);
+ ftrace_generate_kprobe_call_insn(&new);
} else {
/* Replace nop with an ftrace call. */
ftrace_generate_nop_insn(&orig);
@@ -130,8 +158,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
- if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
- return -EPERM;
+ s390_kernel_write((void *) rec->ip, &new, sizeof(new));
return 0;
}
@@ -202,14 +229,16 @@ int ftrace_enable_ftrace_graph_caller(void)
{
u8 op = 0x04; /* set mask field to zero */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
int ftrace_disable_ftrace_graph_caller(void)
{
u8 op = 0xf4; /* set mask field to all ones */
- return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
+ return 0;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
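The switch from probe_kernel_write() to s390_kernel_write() also changes the error contract of these patching paths: the old call could fail on write-protected kernel text and its result was propagated as -EPERM, while the new helper is expected to always succeed, which is why the callers now return 0 unconditionally. A minimal sketch of the resulting pattern; the s390_kernel_write() signature shown is an assumption, only its cannot-fail property matters here.

#include <stddef.h>

/* Assumed signature; the key property is that the write cannot fail. */
void *s390_kernel_write(void *dst, const void *src, size_t size);

static int patch_kernel_text(void *ip, const void *insn, size_t len)
{
	s390_kernel_write(ip, insn, len);	/* no -EPERM path any more */
	return 0;
}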
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 132f4c9ade60..59b7c6470567 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -27,11 +27,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
-#ifdef CONFIG_64BIT
#define ARCH_OFFSET 4
-#else
-#define ARCH_OFFSET 0
-#endif
__HEAD
@@ -67,7 +63,6 @@ __HEAD
# subroutine to set architecture mode
#
.Lsetmode:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -76,16 +71,12 @@ __HEAD
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
br %r14
#
# subroutine to wait for end I/O
#
.Lirqwait:
-#ifdef CONFIG_64BIT
mvc 0x1f0(16),.Lnewpsw # set up IO interrupt psw
lpsw .Lwaitpsw
.Lioint:
@@ -93,15 +84,6 @@ __HEAD
.align 8
.Lnewpsw:
.quad 0x0000000080000000,.Lioint
-#else
- mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
-.Lioint:
- br %r14
- .align 8
-.Lnewpsw:
- .long 0x00080000,0x80000000+.Lioint
-#endif
.Lwaitpsw:
.long 0x020a0000,0x80000000+.Lioint
@@ -375,7 +357,6 @@ ENTRY(startup)
ENTRY(startup_kdump)
j .Lep_startup_kdump
.Lep_startup_normal:
-#ifdef CONFIG_64BIT
mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
lhi %r1,2 # mode 2 = esame (dump)
@@ -384,9 +365,6 @@ ENTRY(startup_kdump)
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
sam31 # switch to 31 bit addressing mode
-#else
- mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
-#endif
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
@@ -396,7 +374,6 @@ ENTRY(startup_kdump)
spt 6f-.LPG0(%r13)
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
-#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
.insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
@@ -435,7 +412,6 @@ ENTRY(startup_kdump)
# the kernel will crash. Format is number of facility words with bits set,
# followed by the facility words.
-#if defined(CONFIG_64BIT)
#if defined(CONFIG_MARCH_Z13)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_ZEC12)
@@ -451,35 +427,10 @@ ENTRY(startup_kdump)
#elif defined(CONFIG_MARCH_Z900)
.long 1, 0xc0000000
#endif
-#else
-#if defined(CONFIG_MARCH_ZEC12)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z196)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z10)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z9_109)
- .long 1, 0x8100c880
-#elif defined(CONFIG_MARCH_Z990)
- .long 1, 0x80002000
-#elif defined(CONFIG_MARCH_Z900)
- .long 1, 0x80000000
-#endif
-#endif
4:
-#endif
-
-#ifdef CONFIG_64BIT
/* Continue with 64bit startup code in head64.S */
sam64 # switch to 64 bit mode
jg startup_continue
-#else
- /* Continue with 31bit startup code in head31.S */
- l %r13,5f-.LPG0(%r13)
- b 0(%r13)
- .align 8
-5: .long startup_continue
-#endif
.align 8
6: .long 0x7fffffff,0xffffffff
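The machine-type block that remains in head.S encodes, per CONFIG_MARCH_* level, how many facility words to check and which bits must be present (for example .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 for z13). Conceptually the verification against the stored facility list at __LC_STFL_FAC_LIST looks like the C sketch below; the real check is done in assembly and the function name here is invented.

#include <stdbool.h>
#include <stdint.h>

/* required[0] holds the word count N, required[1..N] the needed bits. */
static bool cpu_has_required_facilities(const uint32_t *required,
					const uint32_t *stored_facilities)
{
	uint32_t n = required[0];

	for (uint32_t i = 0; i < n; i++) {
		/* every required bit must be set in the stored word */
		if ((stored_facilities[i] & required[i + 1]) != required[i + 1])
			return false;
	}
	return true;
}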
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
deleted file mode 100644
index 6dbe80983a24..000000000000
--- a/arch/s390/kernel/head31.S
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright IBM Corp. 2005, 2010
- *
- * Author(s): Hartmut Penner <hp@de.ibm.com>
- * Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Rob van der Heij <rvdhei@iae.nl>
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-
-__HEAD
-ENTRY(startup_continue)
- basr %r13,0 # get base
-.LPG1:
-
- l %r1,.Lbase_cc-.LPG1(%r13)
- mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
- lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
-#
-# Setup stack
-#
- l %r15,.Linittu-.LPG1(%r13)
- st %r15,__LC_THREAD_INFO # cache thread info in lowcore
- mvc __LC_CURRENT(4),__TI_task(%r15)
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
- st %r15,__LC_KERNEL_STACK # set end of kernel stack
- ahi %r15,-96
-#
-# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
-# and create a kernel NSS if the SAVESYS= parm is defined
-#
- l %r14,.Lstartup_init-.LPG1(%r13)
- basr %r14,%r14
- lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
- .align 8
-.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50000 # cr0: various things
- .long 0 # cr1: primary space segment table
- .long .Lduct # cr2: dispatchable unit control table
- .long 0 # cr3: instruction authorization
- .long 0 # cr4: instruction authorization
- .long .Lduct # cr5: primary-aste origin
- .long 0 # cr6: I/O interrupts
- .long 0 # cr7: secondary space segment table
- .long 0 # cr8: access registers translation
- .long 0 # cr9: tracing off
- .long 0 # cr10: tracing off
- .long 0 # cr11: tracing off
- .long 0 # cr12: tracing off
- .long 0 # cr13: home space segment table
- .long 0xc0000000 # cr14: machine check handling off
- .long 0 # cr15: linkage stack operations
-.Lbss_bgn: .long __bss_start
-.Lbss_end: .long _end
-.Lparmaddr: .long PARMAREA
-.Linittu: .long init_thread_union
-.Lstartup_init:
- .long startup_init
- .align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
- .long 0,0,0,0,0,0,0,0
- .align 128
-.Lduald:.rept 8
- .long 0x80000000,0,0,0 # invalid access-list entries
- .endr
-.Lbase_cc:
- .long sched_clock_base_cc
-
-ENTRY(_ehead)
-
- .org 0x100000 - 0x11000 # head.o ends at 0x11000
-#
-# startup-code, running in absolute addressing mode
-#
-ENTRY(_stext)
- basr %r13,0 # get base
-.LPG3:
-# check control registers
- stctl %c0,%c15,0(%r15)
- oi 2(%r15),0x60 # enable sigp emergency & external call
- oi 0(%r15),0x10 # switch on low address protection
- lctl %c0,%c15,0(%r15)
-
-#
- lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
- l %r14,.Lstart-.LPG3(%r13)
- basr %r14,%r14 # call start_kernel
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpsw .Ldw-.(%r13) # load disabled wait psw
-#
- .align 8
-.Ldw: .long 0x000a0000,0x00000000
-.Lstart:.long start_kernel
-.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 085a95eb315f..d05950f02c34 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -92,17 +92,9 @@ startup_kdump_relocated:
#else
.align 2
.Lep_startup_kdump:
-#ifdef CONFIG_64BIT
larl %r13,startup_kdump_crash
lpswe 0(%r13)
.align 8
startup_kdump_crash:
.quad 0x0002000080000000,0x0000000000000000 + startup_kdump_crash
-#else
- basr %r13,0
-0: lpsw startup_kdump_crash-0b(%r13)
-.align 8
-startup_kdump_crash:
- .long 0x000a0000,0x00000000 + startup_kdump_crash
-#endif /* CONFIG_64BIT */
#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 5c8651f36509..52fbef91d1d9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -182,24 +182,21 @@ EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
-#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+#define IPL_ATTR_SHOW_FN(_prefix, _name, _format, args...) \
static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *page) \
{ \
- return sprintf(page, _format, _value); \
-} \
+ return snprintf(page, PAGE_SIZE, _format, ##args); \
+}
+
+#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
- __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
+ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL)
#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, \
- (unsigned long long) _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -213,15 +210,10 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
-static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *page) \
-{ \
- return sprintf(page, _fmt_out, _value); \
-} \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value) \
static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t len) \
@@ -233,7 +225,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
__ATTR(_name,(S_IRUGO | S_IWUSR), \
sys_##_prefix##_##_name##_show, \
- sys_##_prefix##_##_name##_store);
+ sys_##_prefix##_##_name##_store)
static void make_attrs_ro(struct attribute **attrs)
{
@@ -415,15 +407,9 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
IPL_PARMBLOCK_SIZE);
}
-
-static struct bin_attribute ipl_parameter_attr = {
- .attr = {
- .name = "binary_parameter",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = &ipl_parameter_read,
-};
+static struct bin_attribute ipl_parameter_attr =
+ __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL,
+ PAGE_SIZE);
static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
@@ -434,14 +420,13 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
+static struct bin_attribute ipl_scp_data_attr =
+ __BIN_ATTR(scp_data, S_IRUGO, ipl_scp_data_read, NULL, PAGE_SIZE);
-static struct bin_attribute ipl_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO,
- },
- .size = PAGE_SIZE,
- .read = ipl_scp_data_read,
+static struct bin_attribute *ipl_fcp_bin_attrs[] = {
+ &ipl_parameter_attr,
+ &ipl_scp_data_attr,
+ NULL,
};
/* FCP ipl device attributes */
@@ -484,6 +469,7 @@ static struct attribute *ipl_fcp_attrs[] = {
static struct attribute_group ipl_fcp_attr_group = {
.attrs = ipl_fcp_attrs,
+ .bin_attrs = ipl_fcp_bin_attrs,
};
/* CCW ipl device attributes */
@@ -540,28 +526,6 @@ static struct attribute_group ipl_unknown_attr_group = {
static struct kset *ipl_kset;
-static int __init ipl_register_fcp_files(void)
-{
- int rc;
-
- rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
- if (rc)
- goto out;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
- if (rc)
- goto out_ipl_parm;
- rc = sysfs_create_bin_file(&ipl_kset->kobj, &ipl_scp_data_attr);
- if (!rc)
- goto out;
-
- sysfs_remove_bin_file(&ipl_kset->kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
- sysfs_remove_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
-out:
- return rc;
-}
-
static void __ipl_run(void *unused)
{
diag308(DIAG308_IPL, NULL);
@@ -596,7 +560,7 @@ static int __init ipl_init(void)
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- rc = ipl_register_fcp_files();
+ rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NSS:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
@@ -744,15 +708,13 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
return count;
}
+static struct bin_attribute sys_reipl_fcp_scp_data_attr =
+ __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
+ reipl_fcp_scpdata_write, PAGE_SIZE);
-static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = PAGE_SIZE,
- .read = reipl_fcp_scpdata_read,
- .write = reipl_fcp_scpdata_write,
+static struct bin_attribute *reipl_fcp_bin_attrs[] = {
+ &sys_reipl_fcp_scp_data_attr,
+ NULL,
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
@@ -841,6 +803,7 @@ static struct attribute *reipl_fcp_attrs[] = {
static struct attribute_group reipl_fcp_attr_group = {
.attrs = reipl_fcp_attrs,
+ .bin_attrs = reipl_fcp_bin_attrs,
};
/* CCW reipl device attributes */
@@ -1261,15 +1224,6 @@ static int __init reipl_fcp_init(void)
return rc;
}
- rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj,
- &sys_reipl_fcp_scp_data_attr);
- if (rc) {
- sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group);
- kset_unregister(reipl_fcp_kset);
- free_page((unsigned long) reipl_block_fcp);
- return rc;
- }
-
if (ipl_info.type == IPL_TYPE_FCP) {
memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
/*
@@ -1713,9 +1667,7 @@ static ssize_t on_reboot_store(struct kobject *kobj,
{
return set_trigger(buf, &on_reboot_trigger, len);
}
-
-static struct kobj_attribute on_reboot_attr =
- __ATTR(on_reboot, 0644, on_reboot_show, on_reboot_store);
+static struct kobj_attribute on_reboot_attr = __ATTR_RW(on_reboot);
static void do_machine_restart(char *__unused)
{
@@ -1741,9 +1693,7 @@ static ssize_t on_panic_store(struct kobject *kobj,
{
return set_trigger(buf, &on_panic_trigger, len);
}
-
-static struct kobj_attribute on_panic_attr =
- __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+static struct kobj_attribute on_panic_attr = __ATTR_RW(on_panic);
static void do_panic(void)
{
@@ -1769,9 +1719,7 @@ static ssize_t on_restart_store(struct kobject *kobj,
{
return set_trigger(buf, &on_restart_trigger, len);
}
-
-static struct kobj_attribute on_restart_attr =
- __ATTR(on_restart, 0644, on_restart_show, on_restart_store);
+static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
static void __do_restart(void *ignore)
{
@@ -1808,10 +1756,7 @@ static ssize_t on_halt_store(struct kobject *kobj,
{
return set_trigger(buf, &on_halt_trigger, len);
}
-
-static struct kobj_attribute on_halt_attr =
- __ATTR(on_halt, 0644, on_halt_show, on_halt_store);
-
+static struct kobj_attribute on_halt_attr = __ATTR_RW(on_halt);
static void do_machine_halt(void)
{
@@ -1837,10 +1782,7 @@ static ssize_t on_poff_store(struct kobject *kobj,
{
return set_trigger(buf, &on_poff_trigger, len);
}
-
-static struct kobj_attribute on_poff_attr =
- __ATTR(on_poff, 0644, on_poff_show, on_poff_store);
-
+static struct kobj_attribute on_poff_attr = __ATTR_RW(on_poff);
static void do_machine_power_off(void)
{
@@ -1850,26 +1792,27 @@ static void do_machine_power_off(void)
}
void (*_machine_power_off)(void) = do_machine_power_off;
+static struct attribute *shutdown_action_attrs[] = {
+ &on_restart_attr.attr,
+ &on_reboot_attr.attr,
+ &on_panic_attr.attr,
+ &on_halt_attr.attr,
+ &on_poff_attr.attr,
+ NULL,
+};
+
+static struct attribute_group shutdown_action_attr_group = {
+ .attrs = shutdown_action_attrs,
+};
+
static void __init shutdown_triggers_init(void)
{
shutdown_actions_kset = kset_create_and_add("shutdown_actions", NULL,
firmware_kobj);
if (!shutdown_actions_kset)
goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_reboot_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_panic_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_halt_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_poff_attr.attr))
- goto fail;
- if (sysfs_create_file(&shutdown_actions_kset->kobj,
- &on_restart_attr.attr))
+ if (sysfs_create_group(&shutdown_actions_kset->kobj,
+ &shutdown_action_attr_group))
goto fail;
return;
fail:
@@ -2062,12 +2005,10 @@ static void do_reset_calls(void)
{
struct reset_call *reset;
-#ifdef CONFIG_64BIT
if (diag308_set_works) {
diag308_reset();
return;
}
-#endif
list_for_each_entry(reset, &rcall, list)
reset->fn();
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f238720690f3..e9d9addfaa44 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -56,7 +56,7 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
* /proc/interrupts.
* In addition this list contains non external / I/O events like NMIs.
*/
-static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
+static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
@@ -79,7 +79,6 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
- {.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
@@ -94,6 +93,7 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
void __init init_IRQ(void)
{
+ BUILD_BUG_ON(ARRAY_SIZE(irqclass_sub_desc) != NR_ARCH_IRQS);
init_cio_interrupts();
init_airq_interrupts();
init_ext_interrupts();
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index cb2d51e779df..a90299600483 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
insn->offset = (entry->target - entry->code) >> 1;
}
-static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
+ struct insn *new)
{
unsigned char *ipc = (unsigned char *)entry->code;
- unsigned char *ipe = (unsigned char *)insn;
+ unsigned char *ipe = (unsigned char *)expected;
+ unsigned char *ipn = (unsigned char *)new;
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ pr_emerg("New: %02x %02x %02x %02x %02x %02x\n",
+ ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
panic("Corrupted kernel text");
}
@@ -69,12 +73,12 @@ static void __jump_label_transform(struct jump_entry *entry,
}
if (init) {
if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &orignop, &new);
} else {
if (memcmp((void *)entry->code, &old, sizeof(old)))
- jump_label_bug(entry, &old);
+ jump_label_bug(entry, &old, &new);
}
- probe_kernel_write((void *)entry->code, &new, sizeof(new));
+ s390_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index f516edc1fbe3..389db56a2208 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -178,7 +178,7 @@ static int swap_instruction(void *data)
}
skip_ftrace:
kcb->kprobe_status = KPROBE_SWAP_INST;
- probe_kernel_write(p->addr, &new_insn, len);
+ s390_kernel_write(p->addr, &new_insn, len);
kcb->kprobe_status = status;
return 0;
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 36154a2f1814..0c1a679314dd 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -38,13 +38,8 @@
#define DEBUGP(fmt , ...)
#endif
-#ifndef CONFIG_64BIT
-#define PLT_ENTRY_SIZE 12
-#else /* CONFIG_64BIT */
#define PLT_ENTRY_SIZE 20
-#endif /* CONFIG_64BIT */
-#ifdef CONFIG_64BIT
void *module_alloc(unsigned long size)
{
if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -53,7 +48,6 @@ void *module_alloc(unsigned long size)
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
-#endif
void module_arch_freeing_init(struct module *mod)
{
@@ -323,17 +317,11 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
-#ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
- ip[1] = 0x100607f1;
- ip[2] = val;
-#else /* CONFIG_64BIT */
ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
ip[1] = 0x100a0004;
ip[2] = 0x07f10000;
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
-#endif /* CONFIG_64BIT */
info->plt_initialized = 1;
}
if (r_type == R_390_PLTOFF16 ||
@@ -436,6 +424,7 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
return 0;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 3f51cf4e8f02..505c17c0ae1a 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -117,55 +117,36 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
kill_task = 1;
}
-#ifndef CONFIG_64BIT
+ fpt_save_area = &S390_lowcore.floating_pt_save_area;
+ fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
+ if (!mci->fc) {
+ /*
+ * Floating point control register can't be restored.
+ * Task will be terminated.
+ */
+ asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
+ kill_task = 1;
+ } else
+ asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+
asm volatile(
" ld 0,0(%0)\n"
- " ld 2,8(%0)\n"
- " ld 4,16(%0)\n"
- " ld 6,24(%0)"
- : : "a" (&S390_lowcore.floating_pt_save_area));
-#endif
-
- if (MACHINE_HAS_IEEE) {
-#ifdef CONFIG_64BIT
- fpt_save_area = &S390_lowcore.floating_pt_save_area;
- fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
-#else
- fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
- fpt_creg_save_area = fpt_save_area + 128;
-#endif
- if (!mci->fc) {
- /*
- * Floating point control register can't be restored.
- * Task will be terminated.
- */
- asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
-
- } else
- asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
-
- asm volatile(
- " ld 0,0(%0)\n"
- " ld 1,8(%0)\n"
- " ld 2,16(%0)\n"
- " ld 3,24(%0)\n"
- " ld 4,32(%0)\n"
- " ld 5,40(%0)\n"
- " ld 6,48(%0)\n"
- " ld 7,56(%0)\n"
- " ld 8,64(%0)\n"
- " ld 9,72(%0)\n"
- " ld 10,80(%0)\n"
- " ld 11,88(%0)\n"
- " ld 12,96(%0)\n"
- " ld 13,104(%0)\n"
- " ld 14,112(%0)\n"
- " ld 15,120(%0)\n"
- : : "a" (fpt_save_area));
- }
-
-#ifdef CONFIG_64BIT
+ " ld 1,8(%0)\n"
+ " ld 2,16(%0)\n"
+ " ld 3,24(%0)\n"
+ " ld 4,32(%0)\n"
+ " ld 5,40(%0)\n"
+ " ld 6,48(%0)\n"
+ " ld 7,56(%0)\n"
+ " ld 8,64(%0)\n"
+ " ld 9,72(%0)\n"
+ " ld 10,80(%0)\n"
+ " ld 11,88(%0)\n"
+ " ld 12,96(%0)\n"
+ " ld 13,104(%0)\n"
+ " ld 14,112(%0)\n"
+ " ld 15,120(%0)\n"
+ : : "a" (fpt_save_area));
/* Revalidate vector registers */
if (MACHINE_HAS_VX && current->thread.vxrs) {
if (!mci->vr) {
@@ -178,7 +159,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
restore_vx_regs((__vector128 *)
S390_lowcore.vector_save_area_addr);
}
-#endif
/* Revalidate access registers */
asm volatile(
" lam 0,15,0(%0)"
@@ -198,21 +178,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
*/
s390_handle_damage("invalid control registers.");
} else {
-#ifdef CONFIG_64BIT
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
-#else
- asm volatile(
- " lctl 0,15,0(%0)"
- : : "a" (&S390_lowcore.cregs_save_area));
-#endif
}
/*
* We don't even try to revalidate the TOD register, since we simply
* can't write something sensible into that register.
*/
-#ifdef CONFIG_64BIT
/*
* See if we can revalidate the TOD programmable register with its
* old contents (should be zero) otherwise set it to zero.
@@ -228,7 +201,6 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area)
: "0", "cc");
-#endif
/* Revalidate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
@@ -280,19 +252,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
if (mci->b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
-#ifdef CONFIG_64BIT
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
1ULL<<16);
-#else
- z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
- 1ULL<<29);
- o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
- 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
- 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
-#endif
t_mcic = *(u64 *)mci;
if (((t_mcic & z_mcic) != 0) ||
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index c3f8d157cb0d..e6a1578fc000 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+ NULL,
NULL,
};
@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
- if (si.ad)
+ if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+ cpumsf_pmu_events_attr[1] =
+ CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+ }
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index f6f8886399f6..036aa01d06a9 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -6,19 +6,13 @@
#include <linux/linkage.h>
-#ifdef CONFIG_32BIT
-#define PGM_CHECK_64BIT(handler) .long default_trap_handler
-#else
-#define PGM_CHECK_64BIT(handler) .long handler
-#endif
-
#define PGM_CHECK(handler) .long handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
* The program check table contains exactly 128 (0x00-0x7f) entries. Each
- * line defines the 31 and/or 64 bit function to be called corresponding
- * to the program check interruption code.
+ * line defines the function to be called corresponding to the program check
+ * interruption code.
*/
.section .rodata, "a"
ENTRY(pgm_check_table)
@@ -46,10 +40,10 @@ PGM_CHECK_DEFAULT /* 14 */
PGM_CHECK(operand_exception) /* 15 */
PGM_CHECK_DEFAULT /* 16 */
PGM_CHECK_DEFAULT /* 17 */
-PGM_CHECK_64BIT(transaction_exception) /* 18 */
+PGM_CHECK(transaction_exception) /* 18 */
PGM_CHECK_DEFAULT /* 19 */
PGM_CHECK_DEFAULT /* 1a */
-PGM_CHECK_64BIT(vector_exception) /* 1b */
+PGM_CHECK(vector_exception) /* 1b */
PGM_CHECK(space_switch_exception) /* 1c */
PGM_CHECK(hfp_sqrt_exception) /* 1d */
PGM_CHECK_DEFAULT /* 1e */
@@ -78,10 +72,10 @@ PGM_CHECK_DEFAULT /* 34 */
PGM_CHECK_DEFAULT /* 35 */
PGM_CHECK_DEFAULT /* 36 */
PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK_64BIT(do_dat_exception) /* 38 */
-PGM_CHECK_64BIT(do_dat_exception) /* 39 */
-PGM_CHECK_64BIT(do_dat_exception) /* 3a */
-PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+PGM_CHECK(do_dat_exception) /* 38 */
+PGM_CHECK(do_dat_exception) /* 39 */
+PGM_CHECK(do_dat_exception) /* 3a */
+PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK_DEFAULT /* 3d */
PGM_CHECK_DEFAULT /* 3e */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 13fc0978ca7e..dc5edc29b73a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,13 +79,11 @@ void release_thread(struct task_struct *dead_task)
{
}
-#ifdef CONFIG_64BIT
void arch_release_task_struct(struct task_struct *tsk)
{
if (tsk->thread.vxrs)
kfree(tsk->thread.vxrs);
}
-#endif
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
@@ -144,19 +142,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.ri_signum = 0;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the child.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
- /* Set a new TLS ? */
- if (clone_flags & CLONE_SETTLS)
- p->thread.acrs[0] = frame->childregs.gprs[6];
-#else /* CONFIG_64BIT */
/* Save the fpu registers to new thread structure. */
save_fp_ctl(&p->thread.fp_regs.fpc);
save_fp_regs(p->thread.fp_regs.fprs);
@@ -172,15 +157,13 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.acrs[1] = (unsigned int)tls;
}
}
-#endif /* CONFIG_64BIT */
return 0;
}
asmlinkage void execve_tail(void)
{
current->thread.fp_regs.fpc = 0;
- if (MACHINE_HAS_IEEE)
- asm volatile("sfpc %0,%0" : : "d" (0));
+ asm volatile("sfpc %0,%0" : : "d" (0));
}
/*
@@ -188,18 +171,8 @@ asmlinkage void execve_tail(void)
*/
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
-#ifndef CONFIG_64BIT
- /*
- * save fprs to current->thread.fp_regs to merge them with
- * the emulated registers and then copy the result to the dump.
- */
- save_fp_ctl(&current->thread.fp_regs.fpc);
- save_fp_regs(current->thread.fp_regs.fprs);
- memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
-#else /* CONFIG_64BIT */
save_fp_ctl(&fpregs->fpc);
save_fp_regs(fpregs->fprs);
-#endif /* CONFIG_64BIT */
return 1;
}
EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 26108232fcaa..dc488e13b7e3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,7 +18,7 @@
static DEFINE_PER_CPU(struct cpuid, cpu_id);
-void cpu_relax(void)
+void notrace cpu_relax(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
asm volatile("diag 0,0,0x44");
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index eabfb4594517..d363c9c322a1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -44,7 +44,6 @@ void update_cr_regs(struct task_struct *task)
struct thread_struct *thread = &task->thread;
struct per_regs old, new;
-#ifdef CONFIG_64BIT
/* Take care of the enable/disable of transactional execution. */
if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
unsigned long cr, cr_new;
@@ -80,7 +79,6 @@ void update_cr_regs(struct task_struct *task)
__ctl_load(cr_new, 2, 2);
}
}
-#endif
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -93,10 +91,8 @@ void update_cr_regs(struct task_struct *task)
new.control |= PER_EVENT_BRANCH;
else
new.control |= PER_EVENT_IFETCH;
-#ifdef CONFIG_64BIT
new.control |= PER_CONTROL_SUSPENSION;
new.control |= PER_EVENT_TRANSACTION_END;
-#endif
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
new.control |= PER_EVENT_IFETCH;
new.start = 0;
@@ -146,11 +142,7 @@ void ptrace_disable(struct task_struct *task)
task->thread.per_flags = 0;
}
-#ifndef CONFIG_64BIT
-# define __ADDR_MASK 3
-#else
-# define __ADDR_MASK 7
-#endif
+#define __ADDR_MASK 7
static inline unsigned long __peek_user_per(struct task_struct *child,
addr_t addr)
@@ -223,7 +215,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb reading
* from acrs[15]. Result is a 64 bit value. Read the
@@ -232,8 +223,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
if (addr == (addr_t) &dummy->regs.acrs[15])
tmp = ((unsigned long) child->thread.acrs[15]) << 32;
else
-#endif
- tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
+ tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -261,12 +251,10 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(addr_t *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(addr_t *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -293,11 +281,9 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -370,7 +356,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* access registers are stored in the thread structure
*/
offset = addr - (addr_t) &dummy->regs.acrs;
-#ifdef CONFIG_64BIT
/*
* Very special case: old & broken 64 bit gdb writing
* to acrs[15] with a 64 bit value. Ignore the lower
@@ -380,8 +365,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
if (addr == (addr_t) &dummy->regs.acrs[15])
child->thread.acrs[15] = (unsigned int) (data >> 32);
else
-#endif
- *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
+ *(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
/*
@@ -411,12 +395,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(addr_t *)((addr_t)
child->thread.vxrs + 2*offset) = data;
else
-#endif
*(addr_t *)((addr_t)
&child->thread.fp_regs.fprs + offset) = data;
@@ -441,11 +423,9 @@ static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
* an alignment of 4. Programmers from hell indeed...
*/
mask = __ADDR_MASK;
-#ifdef CONFIG_64BIT
if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
mask = 3;
-#endif
if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
return -EIO;
@@ -649,12 +629,10 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
tmp = *(__u32 *)
((addr_t) child->thread.vxrs + 2*offset);
else
-#endif
tmp = *(__u32 *)
((addr_t) &child->thread.fp_regs.fprs + offset);
@@ -776,12 +754,10 @@ static int __poke_user_compat(struct task_struct *child,
* or the child->thread.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-#ifdef CONFIG_64BIT
if (child->thread.vxrs)
*(__u32 *)((addr_t)
child->thread.vxrs + 2*offset) = tmp;
else
-#endif
*(__u32 *)((addr_t)
&child->thread.fp_regs.fprs + offset) = tmp;
@@ -979,16 +955,13 @@ static int s390_fpregs_get(struct task_struct *target,
if (target == current) {
save_fp_ctl(&target->thread.fp_regs.fpc);
save_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
target->thread.fp_regs.fprs[i] =
*(freg_t *)(target->thread.vxrs + i);
}
-#endif
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_regs, 0, -1);
}
@@ -1026,23 +999,18 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current) {
restore_fp_ctl(&target->thread.fp_regs.fpc);
restore_fp_regs(target->thread.fp_regs.fprs);
- }
-#ifdef CONFIG_64BIT
- else if (target->thread.vxrs) {
+ } else if (target->thread.vxrs) {
int i;
for (i = 0; i < __NUM_VXRS_LOW; i++)
*(freg_t *)(target->thread.vxrs + i) =
target->thread.fp_regs.fprs[i];
}
-#endif
}
return rc;
}
-#ifdef CONFIG_64BIT
-
static int s390_last_break_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1182,8 +1150,6 @@ static int s390_vxrs_high_set(struct task_struct *target,
return rc;
}
-#endif
-
static int s390_system_call_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
@@ -1229,7 +1195,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_system_call_get,
.set = s390_system_call_set,
},
-#ifdef CONFIG_64BIT
{
.core_note_type = NT_S390_LAST_BREAK,
.n = 1,
@@ -1262,7 +1227,6 @@ static const struct user_regset s390_regsets[] = {
.get = s390_vxrs_high_get,
.set = s390_vxrs_high_set,
},
-#endif
};
static const struct user_regset_view user_s390_view = {
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index dd8016b0477e..52aab0bd84f8 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,7 +1,7 @@
/*
- * S390 version
- * Copyright IBM Corp. 2000
- * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
+ * Copyright IBM Corp 2000, 2011
+ * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ * Denis Joseph Barrow,
*/
#include <linux/linkage.h>
@@ -9,43 +9,90 @@
#include <asm/sigp.h>
#
-# store_status: Empty implementation until kdump is supported on 31 bit
+# store_status
+#
+# Prerequisites to run this function:
+# - Prefix register is set to zero
+# - Original prefix register is stored in "dump_prefix_page"
+# - Lowcore protection is off
#
ENTRY(store_status)
- br %r14
+ /* Save register one and load save area base */
+ stg %r1,__LC_SAVE_AREA_RESTART
+ lghi %r1,SAVE_AREA_BASE
+ /* General purpose registers */
+ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ lg %r2,__LC_SAVE_AREA_RESTART
+ stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+ /* Control registers */
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Access registers */
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point registers */
+ std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Floating point control register */
+ stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* CPU timer */
+ stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
+ /* Saved prefix register */
+ larl %r2,dump_prefix_page
+ mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+ /* Clock comparator - seven bytes */
+ larl %r2,.Lclkcmp
+ stckc 0(%r2)
+ mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+ /* Program status word */
+ epsw %r2,%r3
+ st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
+ st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
+ larl %r2,store_status
+ stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
+ br %r14
+
+ .section .bss
+ .align 8
+.Lclkcmp: .quad 0x0000000000000000
+ .previous
#
# do_reipl_asm
# Parameter: r2 = schid of reipl device
#
+
ENTRY(do_reipl_asm)
basr %r13,0
-.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
-.Lpg1: # do store status of all registers
+.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
+.Lpg1: brasl %r14,store_status
- stm %r0,%r15,__LC_GPREGS_SAVE_AREA
- stctl %c0,%c15,__LC_CREGS_SAVE_AREA
- stam %a0,%a15,__LC_AREGS_SAVE_AREA
- l %r10,.Ldump_pfx-.Lpg0(%r13)
- mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
- stckc .Lclkcmp-.Lpg0(%r13)
- mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
- stpt __LC_CPU_TIMER_SAVE_AREA
- st %r13, __LC_PSW_SAVE_AREA+4
- lctl %c6,%c6,.Lall-.Lpg0(%r13)
- lr %r1,%r2
- mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
+ lctlg %c6,%c6,.Lall-.Lpg0(%r13)
+ lgr %r1,%r2
+ mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
+.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
msch .Lschib-.Lpg0(%r13)
- lhi %r0,5
+ lghi %r0,5
.Lssch: ssch .Liplorb-.Lpg0(%r13)
jz .L001
brct %r0,.Lssch
bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
-.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
+.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
+.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
.Lcont: c %r1,__LC_SUBCHANNEL_ID
jnz .Ltpi
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
@@ -58,20 +105,36 @@ ENTRY(do_reipl_asm)
jz .L003
bas %r14,.Ldisab-.Lpg0(%r13)
.L003: st %r1,__LC_SUBCHANNEL_ID
+ lhi %r1,0 # mode 0 = esa
+ slr %r0,%r0 # set cpuid to zero
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
lpsw 0
- sigp 0,0,SIGP_RESTART
-.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
- lpsw .Ldispsw-.Lpg0(%r13)
+.Ldisab: sll %r14,1
+ srl %r14,1 # need to kill hi bit to avoid specification exceptions.
+ st %r14,.Ldispsw+12-.Lpg0(%r13)
+ lpswe .Ldispsw-.Lpg0(%r13)
.align 8
-.Lclkcmp: .quad 0x0000000000000000
-.Lall: .long 0xff000000
-.Ldump_pfx: .long dump_prefix_page
- .align 8
-.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
-.Lpcnew: .long 0x00080000,0x80000000+.Lecs
-.Lionew: .long 0x00080000,0x80000000+.Lcont
-.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
-.Ldispsw: .long 0x000a0000,0x00000000
+.Lall: .quad 0x00000000ff000000
+ .align 16
+/*
+ * These addresses have to be 31 bit, otherwise
+ * the sigp will throw a specification exception
+ * when switching to ESA mode, as bit 31 will be set
+ * in the ESA psw.
+ * Bit 31 of the addresses has to be 0 for the
+ * 31-bit lpswe instruction, a fact that appears to
+ * have been omitted from the POP.
+ */
+.Lnewpsw: .quad 0x0000000080000000
+ .quad .Lpg1
+.Lpcnew: .quad 0x0000000080000000
+ .quad .Lecs
+.Lionew: .quad 0x0000000080000000
+ .quad .Lcont
+.Lwaitpsw: .quad 0x0202000080000000
+ .quad .Ltpi
+.Ldispsw: .quad 0x0002000080000000
+ .quad 0x0000000000000000
.Liplccws: .long 0x02000000,0x60000018
.long 0x08000008,0x20000001
.Liplorb: .long 0x0049504c,0x0040ff80
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
deleted file mode 100644
index dc3b1273c4dc..000000000000
--- a/arch/s390/kernel/reipl64.S
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright IBM Corp 2000, 2011
- * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- * Denis Joseph Barrow,
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/sigp.h>
-
-#
-# store_status
-#
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
-#
-ENTRY(store_status)
- /* Save register one and load save area base */
- stg %r1,__LC_SAVE_AREA_RESTART
- lghi %r1,SAVE_AREA_BASE
- /* General purpose registers */
- stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- lg %r2,__LC_SAVE_AREA_RESTART
- stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
- /* Control registers */
- stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Access registers */
- stam %a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point registers */
- std %f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- std %f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Floating point control register */
- stfpc __LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* CPU timer */
- stpt __LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
- /* Saved prefix register */
- larl %r2,dump_prefix_page
- mvc __LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
- /* Clock comparator - seven bytes */
- larl %r2,.Lclkcmp
- stckc 0(%r2)
- mvc __LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
- /* Program status word */
- epsw %r2,%r3
- st %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
- st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
- larl %r2,store_status
- stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
- br %r14
-
- .section .bss
- .align 8
-.Lclkcmp: .quad 0x0000000000000000
- .previous
-
-#
-# do_reipl_asm
-# Parameter: r2 = schid of reipl device
-#
-
-ENTRY(do_reipl_asm)
- basr %r13,0
-.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
-.Lpg1: brasl %r14,store_status
-
- lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- lgr %r1,%r2
- mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
- stsch .Lschib-.Lpg0(%r13)
- oi .Lschib+5-.Lpg0(%r13),0x84
-.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
- msch .Lschib-.Lpg0(%r13)
- lghi %r0,5
-.Lssch: ssch .Liplorb-.Lpg0(%r13)
- jz .L001
- brct %r0,.Lssch
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
-.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
-.Lcont: c %r1,__LC_SUBCHANNEL_ID
- jnz .Ltpi
- clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
- jnz .Ltpi
- tsch .Liplirb-.Lpg0(%r13)
- tm .Liplirb+9-.Lpg0(%r13),0xbf
- jz .L002
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
- jz .L003
- bas %r14,.Ldisab-.Lpg0(%r13)
-.L003: st %r1,__LC_SUBCHANNEL_ID
- lhi %r1,0 # mode 0 = esa
- slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
- lpsw 0
-.Ldisab: sll %r14,1
- srl %r14,1 # need to kill hi bit to avoid specification exceptions.
- st %r14,.Ldispsw+12-.Lpg0(%r13)
- lpswe .Ldispsw-.Lpg0(%r13)
- .align 8
-.Lall: .quad 0x00000000ff000000
- .align 16
-/*
- * These addresses have to be 31 bit otherwise
- * the sigp will throw a specifcation exception
- * when switching to ESA mode as bit 31 be set
- * in the ESA psw.
- * Bit 31 of the addresses has to be 0 for the
- * 31bit lpswe instruction a fact they appear to have
- * omitted from the pop.
- */
-.Lnewpsw: .quad 0x0000000080000000
- .quad .Lpg1
-.Lpcnew: .quad 0x0000000080000000
- .quad .Lecs
-.Lionew: .quad 0x0000000080000000
- .quad .Lcont
-.Lwaitpsw: .quad 0x0202000080000000
- .quad .Ltpi
-.Ldispsw: .quad 0x0002000080000000
- .quad 0x0000000000000000
-.Liplccws: .long 0x02000000,0x60000018
- .long 0x08000008,0x20000001
-.Liplorb: .long 0x0049504c,0x0040ff80
- .long 0x00000000+.Liplccws
-.Lschib: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
-.Liplirb: .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index f4e6f20e117a..cfac28330b03 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -19,7 +19,8 @@
* %r7 = PAGE_SIZE
* %r8 holds the source address
* %r9 = PAGE_SIZE
- * %r10 is a page mask
+ *
+ * 0xf000 is a page_mask
*/
.text
@@ -27,46 +28,47 @@ ENTRY(relocate_kernel)
basr %r13,0 # base address
.base:
stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctl %c0,%c15,ctlregs-.base(%r13)
- stm %r0,%r15,gprregs-.base(%r13)
+ stctg %c0,%c15,ctlregs-.base(%r13)
+ stmg %r0,%r15,gprregs-.base(%r13)
+ lghi %r0,3
+ sllg %r0,%r0,31
+ stg %r0,0x1d0(%r0)
+ la %r0,.back_pgm-.base(%r13)
+ stg %r0,0x1d8(%r0)
la %r1,load_psw-.base(%r13)
mvc 0(8,%r0),0(%r1)
la %r0,.back-.base(%r13)
st %r0,4(%r0)
oi 4(%r0),0x80
- mvc 0x68(8,%r0),0(%r1)
- la %r0,.back_pgm-.base(%r13)
- st %r0,0x6c(%r0)
- oi 0x6c(%r0),0x80
- lhi %r0,0
+ lghi %r0,0
diag %r0,%r0,0x308
.back:
+ lhi %r1,1 # mode 1 = esame
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0
.back_base:
oi have_diag308-.back_base(%r13),0x01
- lctl %c0,%c15,ctlregs-.back_base(%r13)
- lm %r0,%r15,gprregs-.back_base(%r13)
- j .start_reloc
+ lctlg %c0,%c15,ctlregs-.back_base(%r13)
+ lmg %r0,%r15,gprregs-.back_base(%r13)
+ j .top
.back_pgm:
- lm %r0,%r15,gprregs-.base(%r13)
- .start_reloc:
- lhi %r10,-1 # preparing the mask
- sll %r10,12 # shift it such that it becomes 0xf000
+ lmg %r0,%r15,gprregs-.base(%r13)
.top:
- lhi %r7,4096 # load PAGE_SIZE in r7
- lhi %r9,4096 # load PAGE_SIZE in r9
- l %r5,0(%r2) # read another word for indirection page
- ahi %r2,4 # increment pointer
+ lghi %r7,4096 # load PAGE_SIZE in r7
+ lghi %r9,4096 # load PAGE_SIZE in r9
+ lg %r5,0(%r2) # read another word for indirection page
+ aghi %r2,8 # increment pointer
tml %r5,0x1 # is it a destination page?
je .indir_check # NO, goto "indir_check"
- lr %r6,%r5 # r6 = r5
- nr %r6,%r10 # mask it out and...
+ lgr %r6,%r5 # r6 = r5
+ nill %r6,0xf000 # mask it out and...
j .top # ...next iteration
.indir_check:
tml %r5,0x2 # is it a indirection page?
je .done_test # NO, goto "done_test"
- nr %r5,%r10 # YES, mask out,
- lr %r2,%r5 # move it into the right register,
+ nill %r5,0xf000 # YES, mask out,
+ lgr %r2,%r5 # move it into the right register,
j .top # and read next...
.done_test:
tml %r5,0x4 # is it the done indicator?
@@ -75,13 +77,13 @@ ENTRY(relocate_kernel)
.source_test:
tml %r5,0x8 # it should be a source indicator...
je .top # NO, ignore it...
- lr %r8,%r5 # r8 = r5
- nr %r8,%r10 # masking
+ lgr %r8,%r5 # r8 = r5
+ nill %r8,0xf000 # masking
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
jo 0b
j .top
.done:
- sr %r0,%r0 # clear register r0
+ sgr %r0,%r0 # clear register r0
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
@@ -90,8 +92,9 @@ ENTRY(relocate_kernel)
jno .no_diag308
diag %r0,%r0,0x308
.no_diag308:
- sr %r1,%r1 # clear %r1
- sr %r2,%r2 # clear %r2
+ sam31 # 31 bit mode
+ sr %r1,%r1 # erase register r1
+ sr %r2,%r2 # erase register r2
sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
lpsw 0 # hopefully start new kernel...
@@ -102,11 +105,11 @@ ENTRY(relocate_kernel)
.quad 0
ctlregs:
.rept 16
- .long 0
+ .quad 0
.endr
gprregs:
.rept 16
- .long 0
+ .quad 0
.endr
have_diag308:
.byte 0
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
deleted file mode 100644
index cfac28330b03..000000000000
--- a/arch/s390/kernel/relocate_kernel64.S
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright IBM Corp. 2005
- *
- * Author(s): Rolf Adelsberger,
- * Heiko Carstens <heiko.carstens@de.ibm.com>
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/sigp.h>
-
-/*
- * moves the new kernel to its destination...
- * %r2 = pointer to first kimage_entry_t
- * %r3 = start address - where to jump to after the job is done...
- *
- * %r5 will be used as temp. storage
- * %r6 holds the destination address
- * %r7 = PAGE_SIZE
- * %r8 holds the source address
- * %r9 = PAGE_SIZE
- *
- * 0xf000 is a page_mask
- */
-
- .text
-ENTRY(relocate_kernel)
- basr %r13,0 # base address
- .base:
- stnsm sys_msk-.base(%r13),0xfb # disable DAT
- stctg %c0,%c15,ctlregs-.base(%r13)
- stmg %r0,%r15,gprregs-.base(%r13)
- lghi %r0,3
- sllg %r0,%r0,31
- stg %r0,0x1d0(%r0)
- la %r0,.back_pgm-.base(%r13)
- stg %r0,0x1d8(%r0)
- la %r1,load_psw-.base(%r13)
- mvc 0(8,%r0),0(%r1)
- la %r0,.back-.base(%r13)
- st %r0,4(%r0)
- oi 4(%r0),0x80
- lghi %r0,0
- diag %r0,%r0,0x308
- .back:
- lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
- sam64 # switch to 64 bit addressing mode
- basr %r13,0
- .back_base:
- oi have_diag308-.back_base(%r13),0x01
- lctlg %c0,%c15,ctlregs-.back_base(%r13)
- lmg %r0,%r15,gprregs-.back_base(%r13)
- j .top
- .back_pgm:
- lmg %r0,%r15,gprregs-.base(%r13)
- .top:
- lghi %r7,4096 # load PAGE_SIZE in r7
- lghi %r9,4096 # load PAGE_SIZE in r9
- lg %r5,0(%r2) # read another word for indirection page
- aghi %r2,8 # increment pointer
- tml %r5,0x1 # is it a destination page?
- je .indir_check # NO, goto "indir_check"
- lgr %r6,%r5 # r6 = r5
- nill %r6,0xf000 # mask it out and...
- j .top # ...next iteration
- .indir_check:
- tml %r5,0x2 # is it a indirection page?
- je .done_test # NO, goto "done_test"
- nill %r5,0xf000 # YES, mask out,
- lgr %r2,%r5 # move it into the right register,
- j .top # and read next...
- .done_test:
- tml %r5,0x4 # is it the done indicator?
- je .source_test # NO! Well, then it should be the source indicator...
- j .done # ok, lets finish it here...
- .source_test:
- tml %r5,0x8 # it should be a source indicator...
- je .top # NO, ignore it...
- lgr %r8,%r5 # r8 = r5
- nill %r8,0xf000 # masking
- 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
- jo 0b
- j .top
- .done:
- sgr %r0,%r0 # clear register r0
- la %r4,load_psw-.base(%r13) # load psw-address into the register
- o %r3,4(%r4) # or load address into psw
- st %r3,4(%r4)
- mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
- tm have_diag308-.base(%r13),0x01
- jno .no_diag308
- diag %r0,%r0,0x308
- .no_diag308:
- sam31 # 31 bit mode
- sr %r1,%r1 # erase register r1
- sr %r2,%r2 # erase register r2
- sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
- lpsw 0 # hopefully start new kernel...
-
- .align 8
- load_psw:
- .long 0x00080000,0x80000000
- sys_msk:
- .quad 0
- ctlregs:
- .rept 16
- .quad 0
- .endr
- gprregs:
- .rept 16
- .quad 0
- .endr
- have_diag308:
- .byte 0
- .align 8
- relocate_kernel_end:
- .align 8
- .globl relocate_kernel_len
- relocate_kernel_len:
- .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 7e77e03378f3..43c3169ea49c 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -36,21 +36,17 @@ _sclp_wait_int:
ahi %r15,-96 # create stack frame
la %r8,LC_EXT_NEW_PSW # register int handler
la %r9,.LextpswS1-.LbaseS1(%r13)
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa1
la %r8,LC_EXT_NEW_PSW_64 # register int handler 64 bit
la %r9,.LextpswS1_64-.LbaseS1(%r13)
.Lesa1:
-#endif
mvc .LoldpswS1-.LbaseS1(16,%r13),0(%r8)
mvc 0(16,%r8),0(%r9)
-#ifdef CONFIG_64BIT
epsw %r6,%r7 # set current addressing mode
nill %r6,0x1 # in new psw (31 or 64 bit mode)
nilh %r7,0x8000
stm %r6,%r7,0(%r8)
-#endif
lhi %r6,0x0200 # cr mask for ext int (cr0.54)
ltr %r2,%r2
jz .LsetctS1
@@ -92,10 +88,8 @@ _sclp_wait_int:
.long 0, 0, 0, 0 # old ext int PSW
.LextpswS1:
.long 0x00080000, 0x80000000+.LwaitS1 # PSW to handle ext int
-#ifdef CONFIG_64BIT
.LextpswS1_64:
.quad 0, .LwaitS1 # PSW to handle ext int, 64 bit
-#endif
.LwaitpswS1:
.long 0x010a0000, 0x00000000+.LloopS1 # PSW to wait for ext int
.LtimeS1:
@@ -272,13 +266,11 @@ _sclp_print:
ENTRY(_sclp_print_early)
stm %r6,%r15,24(%r15) # save registers
ahi %r15,-96 # create stack frame
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa2
ahi %r15,-80
stmh %r6,%r15,96(%r15) # store upper register halves
.Lesa2:
-#endif
lr %r10,%r2 # save string pointer
lhi %r2,0
bras %r14,_sclp_setup # enable console
@@ -291,14 +283,12 @@ ENTRY(_sclp_print_early)
lhi %r2,1
bras %r14,_sclp_setup # disable console
.LendS5:
-#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
lgfr %r2,%r2 # sign extend return value
lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
-#endif
lm %r6,%r15,120(%r15) # restore registers
br %r14
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index a5ea8bc17cb3..7262fe438c99 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -92,10 +92,8 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
-#ifdef CONFIG_64BIT
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
-#endif
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
@@ -334,19 +332,10 @@ static void __init setup_lowcore(void)
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
MAX_FACILITY_BIT/8);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = (__u32)
- __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
- /* enable extended save area */
- __ctl_set_bit(14, 29);
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
-#endif
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
lc->async_enter_timer = S390_lowcore.async_enter_timer;
lc->exit_timer = S390_lowcore.exit_timer;
@@ -450,7 +439,6 @@ static void __init setup_memory_end(void)
unsigned long vmax, vmalloc_size, tmp;
/* Choose kernel address space layout: 2, 3, or 4 levels. */
-#ifdef CONFIG_64BIT
vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
@@ -462,12 +450,6 @@ static void __init setup_memory_end(void)
MODULES_END = vmax;
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
-#else
- vmalloc_size = VMALLOC_END ?: 96UL << 20;
- vmax = 1UL << 31; /* 2-level kernel page table */
- /* vmalloc area is at the end of the kernel address space. */
- VMALLOC_END = vmax;
-#endif
VMALLOC_START = vmax - vmalloc_size;
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -754,7 +736,6 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
-#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
@@ -772,22 +753,15 @@ static void __init setup_hwcaps(void)
*/
if (test_facility(129))
elf_hwcap |= HWCAP_S390_VXRS;
-#endif
-
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
case 0x9672:
-#if !defined(CONFIG_64BIT)
- default: /* Use "g5" as default for 31 bit kernels. */
-#endif
strcpy(elf_platform, "g5");
break;
case 0x2064:
case 0x2066:
-#if defined(CONFIG_64BIT)
default: /* Use "z900" as default for 64 bit kernels. */
-#endif
strcpy(elf_platform, "z900");
break;
case 0x2084:
@@ -839,19 +813,6 @@ void __init setup_arch(char **cmdline_p)
/*
* print what head.S has found out about the machine
*/
-#ifndef CONFIG_64BIT
- if (MACHINE_IS_VM)
- pr_info("Linux is running as a z/VM "
- "guest operating system in 31-bit mode\n");
- else if (MACHINE_IS_LPAR)
- pr_info("Linux is running natively in 31-bit mode\n");
- if (MACHINE_HAS_IEEE)
- pr_info("The hardware system has IEEE compatible "
- "floating point units\n");
- else
- pr_info("The hardware system has no IEEE compatible "
- "floating point units\n");
-#else /* CONFIG_64BIT */
if (MACHINE_IS_VM)
pr_info("Linux is running as a z/VM "
"guest operating system in 64-bit mode\n");
@@ -859,7 +820,6 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
-#endif /* CONFIG_64BIT */
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
@@ -930,35 +890,3 @@ void __init setup_arch(char **cmdline_p)
/* Add system specific data to the random pool */
setup_randomness();
}
-
-#ifdef CONFIG_32BIT
-static int no_removal_warning __initdata;
-
-static int __init parse_no_removal_warning(char *str)
-{
- no_removal_warning = 1;
- return 0;
-}
-__setup("no_removal_warning", parse_no_removal_warning);
-
-static int __init removal_warning(void)
-{
- if (no_removal_warning)
- return 0;
- printk(KERN_ALERT "\n\n");
- printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
- printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
- printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
- printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
- printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
- printk(KERN_CONT "please let us know. Please write to:\n");
- printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
- printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
- printk(KERN_CONT "Thank you!\n\n");
- printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
- printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
- schedule_timeout_uninterruptible(300 * HZ);
- return 0;
-}
-early_initcall(removal_warning);
-#endif
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index b3ae6f70c6d6..c551f22ce066 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -106,7 +106,6 @@ static void store_sigregs(void)
{
save_access_regs(current->thread.acrs);
save_fp_ctl(&current->thread.fp_regs.fpc);
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -115,7 +114,6 @@ static void store_sigregs(void)
current->thread.fp_regs.fprs[i] =
*(freg_t *)(current->thread.vxrs + i);
} else
-#endif
save_fp_regs(current->thread.fp_regs.fprs);
}
@@ -124,7 +122,6 @@ static void load_sigregs(void)
{
restore_access_regs(current->thread.acrs);
/* restore_fp_ctl is done in restore_sigregs */
-#ifdef CONFIG_64BIT
if (current->thread.vxrs) {
int i;
@@ -133,7 +130,6 @@ static void load_sigregs(void)
current->thread.fp_regs.fprs[i];
restore_vx_regs(current->thread.vxrs);
} else
-#endif
restore_fp_regs(current->thread.fp_regs.fprs);
}
@@ -200,7 +196,6 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
static int save_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -215,14 +210,12 @@ static int save_sigregs_ext(struct pt_regs *regs,
sizeof(sregs_ext->vxrs_high)))
return -EFAULT;
}
-#endif
return 0;
}
static int restore_sigregs_ext(struct pt_regs *regs,
_sigregs_ext __user *sregs_ext)
{
-#ifdef CONFIG_64BIT
__u64 vxrs[__NUM_VXRS_LOW];
int i;
@@ -237,7 +230,6 @@ static int restore_sigregs_ext(struct pt_regs *regs,
for (i = 0; i < __NUM_VXRS_LOW; i++)
*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
}
-#endif
return 0;
}
@@ -309,16 +301,6 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
-static inline int map_signal(int sig)
-{
- if (current_thread_info()->exec_domain
- && current_thread_info()->exec_domain->signal_invmap
- && sig < 32)
- return current_thread_info()->exec_domain->signal_invmap[sig];
- else
- return sig;
-}
-
static int setup_frame(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
{
@@ -386,7 +368,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
- regs->gprs[2] = map_signal(sig);
+ regs->gprs[2] = sig;
regs->gprs[3] = (unsigned long) &frame->sc;
/* We forgot to include these in the sigcontext.
@@ -416,13 +398,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* included in the signal frame on a 31-bit system.
*/
uc_flags = 0;
-#ifdef CONFIG_64BIT
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
if (current->thread.vxrs)
uc_flags |= UC_VXRS;
}
-#endif
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
return -EFAULT;
@@ -468,7 +448,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE;
- regs->gprs[2] = map_signal(ksig->sig);
+ regs->gprs[2] = ksig->sig;
regs->gprs[3] = (unsigned long) &frame->info;
regs->gprs[4] = (unsigned long) &frame->uc;
regs->gprs[5] = task_thread_info(current)->last_break;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index db8f1115a3bf..efd2c1968000 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -198,19 +198,11 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
- if (!lc->extended_save_area_addr)
- goto out;
- }
-#else
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
if (vdso_alloc_per_cpu(lc))
goto out;
-#endif
lowcore_ptr[cpu] = lc;
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
@@ -229,16 +221,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
{
pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
-#ifndef CONFIG_64BIT
- if (MACHINE_HAS_IEEE) {
- struct _lowcore *lc = pcpu->lowcore;
-
- free_page((unsigned long) lc->extended_save_area_addr);
- lc->extended_save_area_addr = 0;
- }
-#else
vdso_free_per_cpu(pcpu->lowcore);
-#endif
if (pcpu == &pcpu_devices[0])
return;
free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
@@ -492,22 +475,6 @@ void arch_send_call_function_single_ipi(int cpu)
pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
-#ifndef CONFIG_64BIT
-/*
- * this function sends a 'purge tlb' signal to another CPU.
- */
-static void smp_ptlb_callback(void *info)
-{
- __tlb_flush_local();
-}
-
-void smp_ptlb_all(void)
-{
- on_each_cpu(smp_ptlb_callback, NULL, 1);
-}
-EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_64BIT */
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
@@ -851,7 +818,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
- while (!cpu_online(cpu))
+ /* Wait until cpu puts itself in the online & active maps */
+ while (!cpu_online(cpu) || !cpu_active(cpu))
cpu_relax();
return 0;
}
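
Editor's note: the __cpu_up() hunk makes the boot CPU wait until the new CPU shows up in both the online and the active mask before returning. A minimal model of that two-condition spin, using C11 atomics as stand-ins for the kernel's cpumask accessors:

/* Minimal model of the two-condition wait added to __cpu_up(): the boot
 * CPU spins until the secondary has set BOTH flags. Not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool online = false, active = false;

static void wait_for_secondary(void)
{
	while (!atomic_load(&online) || !atomic_load(&active))
		;	/* cpu_relax() equivalent: just spin */
}

int main(void)
{
	/* In a real system the secondary CPU sets these; here we set them up
	 * front so the demo terminates immediately. */
	atomic_store(&online, true);
	atomic_store(&active, true);
	wait_for_secondary();
	puts("secondary is online and active");
	return 0;
}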
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 1c4c5accd220..d3236c9e226b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
/* Always save lowcore pages (LC protection might be enabled). */
if (pfn <= LC_PAGES)
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
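
Editor's note: the suspend.c hunks teach pfn_is_nosave() to also skip the shared kernel text (from _stext up to and including the page before _eshared) when the system was IPLed from a named saved system. A standalone sketch of the resulting decision order; all bounds are made-up stand-ins, and the real function's final read-only/tprot() check is reduced here to "save":

/* Illustrative sketch of the pfn_is_nosave() decision order after this
 * patch; symbols and bounds are stand-ins, not the kernel's values. */
#include <stdbool.h>
#include <stdio.h>

enum ipl_type { IPL_TYPE_CCW, IPL_TYPE_NSS };

struct layout {
	unsigned long lc_pages;			/* lowcore pages, always saved */
	unsigned long nosave_begin_pfn, nosave_end_pfn;
	unsigned long stext_pfn, eshared_pfn;	/* kernel text .. end of shared text */
	enum ipl_type ipl;
};

/* Returns true if the pfn must NOT be saved in the hibernation image. */
static bool pfn_is_nosave(const struct layout *l, unsigned long pfn)
{
	if (pfn <= l->lc_pages)
		return false;			/* always save lowcore */
	if (pfn >= l->nosave_begin_pfn && pfn < l->nosave_end_pfn)
		return true;			/* explicit nosave section */
	if (pfn >= l->stext_pfn && pfn <= l->eshared_pfn)
		return l->ipl == IPL_TYPE_NSS;	/* shared text only skipped for NSS */
	return false;				/* everything else: save (tprot() omitted) */
}

int main(void)
{
	struct layout l = { 1, 100, 110, 2, 50, IPL_TYPE_NSS };

	printf("%d %d %d\n",
	       pfn_is_nosave(&l, 10),	/* kernel text on NSS -> 1 */
	       pfn_is_nosave(&l, 105),	/* nosave section      -> 1 */
	       pfn_is_nosave(&l, 60));	/* ordinary memory     -> 0 */
	return 0;
}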
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp.S
index 6b09fdffbd2f..ca6294645dd3 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp.S
@@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
+#ifdef CONFIG_SMP
+ larl %r1,smp_cpu_mt_shift
+ icm %r1,15,0(%r1)
+ jz smt_done
+ llgfr %r1,%r1
+smt_loop:
+ sigp %r1,%r0,SIGP_SET_MULTI_THREADING
+ brc 8,smt_done /* accepted */
+ brc 2,smt_loop /* busy, try again */
+smt_done:
+#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:
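
Editor's note: the new restart-path code re-enables multithreading with a SIGP SET_MULTI_THREADING order and retries while the order is rejected as busy (condition code 2), skipping the sequence entirely when smp_cpu_mt_shift is zero. A standalone C model of that retry loop; sigp_set_mt() and the condition codes are stand-ins for the instruction, not a kernel API:

/* Standalone model of the "retry while busy" pattern used for the
 * SIGP SET_MULTI_THREADING order above. Not real kernel code. */
#include <stdio.h>

enum cc { CC_ACCEPTED = 0, CC_STATUS_STORED = 1, CC_BUSY = 2, CC_NOT_OPERATIONAL = 3 };

static int busy_left = 3;		/* pretend the order is busy three times */

static enum cc sigp_set_mt(unsigned int mt_shift)
{
	(void)mt_shift;
	return busy_left-- > 0 ? CC_BUSY : CC_ACCEPTED;
}

int main(void)
{
	unsigned int smp_cpu_mt_shift = 1;	/* zero would mean: no SMT, skip */
	enum cc cc;

	if (smp_cpu_mt_shift != 0) {
		do {
			cc = sigp_set_mt(smp_cpu_mt_shift);
		} while (cc == CC_BUSY);	/* brc 2,smt_loop */
	}
	puts("accepted");			/* brc 8,smt_done */
	return 0;
}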
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 23eb222c1658..f145490cce54 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -76,7 +76,6 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
return sys_ipc(call, first, second, third, ptr, third);
}
-#ifdef CONFIG_64BIT
SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
@@ -90,51 +89,3 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
return ret;
}
-#endif /* CONFIG_64BIT */
-
-/*
- * Wrapper function for sys_fadvise64/fadvise64_64
- */
-#ifndef CONFIG_64BIT
-
-SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
- size_t, len, int, advice)
-{
- return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
- len, advice);
-}
-
-struct fadvise64_64_args {
- int fd;
- long long offset;
- long long len;
- int advice;
-};
-
-SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
-{
- struct fadvise64_64_args a;
-
- if ( copy_from_user(&a, args, sizeof(a)) )
- return -EFAULT;
- return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
-}
-
-/*
- * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
- * 64 bit argument "len" is split into the upper and lower 32 bits. The
- * system call wrapper in the user space loads the value to %r6/%r7.
- * The code in entry.S keeps the values in %r2 - %r6 where they are and
- * stores %r7 to 96(%r15). But the standard C linkage requires that
- * the whole 64 bit value for len is stored on the stack and doesn't
- * use %r6 at all. So s390_fallocate has to convert the arguments from
- * %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
- * to
- * %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
- */
-SYSCALL_DEFINE5(s390_fallocate, int, fd, int, mode, loff_t, offset,
- u32, len_high, u32, len_low)
-{
- return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
-}
-#endif
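
Editor's note: the deleted 31-bit wrappers existed only to reassemble a 64-bit offset or length that user space had to pass as two 32-bit halves. The same ((u64)high << 32 | low) pattern still appears in the remaining compat wrappers, so a tiny worked example of the reassembly (values arbitrary):

/* Demonstrates the ((u64)high << 32 | low) reassembly that the deleted
 * 31-bit fadvise/fallocate wrappers performed; the values are arbitrary. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t len_high = 0x00000001;	/* upper 32 bits, e.g. from a register */
	uint32_t len_low  = 0x80000000;	/* lower 32 bits, e.g. from the stack */
	uint64_t len = ((uint64_t)len_high << 32) | len_low;

	printf("len = 0x%016" PRIx64 "\n", len);	/* 0x0000000180000000 */
	return 0;
}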
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 939ec474b1dd..1acad02681c4 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -1,365 +1,365 @@
/*
* definitions for sys_call_table, each line represents an
- * entry in the table in the form
- * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
+ * entry in the table in the form
+ * SYSCALL(64 bit syscall, 31 bit emulated syscall)
*
- * this file is meant to be included from entry.S and entry64.S
+ * this file is meant to be included from entry.S
*/
-#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
+#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall)
-NI_SYSCALL /* 0 */
-SYSCALL(sys_exit,sys_exit,compat_sys_exit)
-SYSCALL(sys_fork,sys_fork,sys_fork)
-SYSCALL(sys_read,sys_read,compat_sys_s390_read)
-SYSCALL(sys_write,sys_write,compat_sys_s390_write)
-SYSCALL(sys_open,sys_open,compat_sys_open) /* 5 */
-SYSCALL(sys_close,sys_close,compat_sys_close)
-SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,sys_creat,compat_sys_creat)
-SYSCALL(sys_link,sys_link,compat_sys_link)
-SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink) /* 10 */
-SYSCALL(sys_execve,sys_execve,compat_sys_execve)
-SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir)
-SYSCALL(sys_time,sys_ni_syscall,compat_sys_time) /* old time syscall */
-SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod)
-SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod) /* 15 */
-SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
-NI_SYSCALL /* old break syscall holder */
-NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
-SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
-SYSCALL(sys_mount,sys_mount,compat_sys_mount)
-SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount)
-SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
-SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
-SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
-SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace)
-SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm)
-NI_SYSCALL /* old fstat syscall */
-SYSCALL(sys_pause,sys_pause,sys_pause)
-SYSCALL(sys_utime,sys_utime,compat_sys_utime) /* 30 */
-NI_SYSCALL /* old stty syscall */
-NI_SYSCALL /* old gtty syscall */
-SYSCALL(sys_access,sys_access,compat_sys_access)
-SYSCALL(sys_nice,sys_nice,compat_sys_nice)
-NI_SYSCALL /* 35 old ftime syscall */
-SYSCALL(sys_sync,sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill,compat_sys_kill)
-SYSCALL(sys_rename,sys_rename,compat_sys_rename)
-SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir)
-SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir) /* 40 */
-SYSCALL(sys_dup,sys_dup,compat_sys_dup)
-SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe)
-SYSCALL(sys_times,sys_times,compat_sys_times)
-NI_SYSCALL /* old prof syscall */
-SYSCALL(sys_brk,sys_brk,compat_sys_brk) /* 45 */
-SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
-SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
-SYSCALL(sys_signal,sys_signal,compat_sys_signal)
-SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
-SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,sys_acct,compat_sys_acct)
-SYSCALL(sys_umount,sys_umount,compat_sys_umount)
-NI_SYSCALL /* old lock syscall */
-SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl)
-SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl) /* 55 */
-NI_SYSCALL /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid)
-NI_SYSCALL /* old ulimit syscall */
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_umask,sys_umask,compat_sys_umask) /* 60 */
-SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot)
-SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat)
-SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2)
-SYSCALL(sys_getppid,sys_getppid,sys_getppid)
-SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
-SYSCALL(sys_setsid,sys_setsid,sys_setsid)
-SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
-NI_SYSCALL /* old sgetmask syscall*/
-NI_SYSCALL /* old ssetmask syscall*/
-SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
-SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend)
-SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending)
-SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname)
-SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit) /* 75 */
-SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit)
-SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday)
-SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday)
-SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
-SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
-NI_SYSCALL /* old select syscall */
-SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink)
-NI_SYSCALL /* old lstat syscall */
-SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink) /* 85 */
-SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
-SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
-SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
-SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
-SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
-SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
-SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
-SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
-SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
-SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
-NI_SYSCALL /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs) /* 100 */
-NI_SYSCALL /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
-SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
-SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
-SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer) /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
-NI_SYSCALL /* old uname syscall */
-SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
-SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
-NI_SYSCALL /* old "idle" system call */
-NI_SYSCALL /* vm86old for i386 */
-SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
-SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
-SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
-SYSCALL(sys_clone,sys_clone,compat_sys_clone) /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
-SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
-NI_SYSCALL /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
-SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect) /* 125 */
-SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
-NI_SYSCALL /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
-SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
-NI_SYSCALL /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
-SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
-SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
-SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
-SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs) /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
-NI_SYSCALL /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek) /* 140 */
-SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
-SYSCALL(sys_select,sys_select,compat_sys_select)
-SYSCALL(sys_flock,sys_flock,compat_sys_flock)
-SYSCALL(sys_msync,sys_msync,compat_sys_msync)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv) /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev)
-SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
-SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
-SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock) /* 150 */
-SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
-SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
-SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
-SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
-SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
-SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
-SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
-NI_SYSCALL /* for vm86 */
-NI_SYSCALL /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,compat_sys_poll)
-NI_SYSCALL /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
-SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
-SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
-SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
-SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
-SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
-SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64) /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
-SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
-SYSCALL(sys_capget,sys_capget,compat_sys_capget)
-SYSCALL(sys_capset,sys_capset,compat_sys_capset) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
-SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
-NI_SYSCALL /* streams1 */
-NI_SYSCALL /* streams2 */
-SYSCALL(sys_vfork,sys_vfork,sys_vfork) /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
-SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
-SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
-SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
-SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
-SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
-SYSCALL(sys_getuid,sys_getuid,sys_getuid)
-SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
-SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
-SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
-SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
-SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups) /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
-SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
-SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
-SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
-SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid) /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
-SYSCALL(sys_chown,sys_chown,compat_sys_chown)
-SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
-SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
-SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
-SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
-SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
-SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
-SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64) /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
-SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
-SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
-SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
-SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr) /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
-SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
-SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
-SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
-SYSCALL(sys_futex,sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
-NI_SYSCALL /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
-SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
-SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents) /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
-SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
-SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
-SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
-SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
-SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime) /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
-SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
-SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
-NI_SYSCALL /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
-NI_SYSCALL /* 268 sys_mbind */
-NI_SYSCALL /* 269 sys_get_mempolicy */
-NI_SYSCALL /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
-SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
-SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
-NI_SYSCALL /* 287 sys_migrate_pages */
-SYSCALL(sys_openat,sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
-SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat) /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
-SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
-SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
-SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
-SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
-SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
-SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
-SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
-SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat) /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
-SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
-SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
-SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,sys_splice,compat_sys_splice)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
-SYSCALL(sys_tee,sys_tee,compat_sys_tee)
-SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
-NI_SYSCALL /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
-SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
-SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat) /* 315 */
-SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
+NI_SYSCALL /* 0 */
+SYSCALL(sys_exit,compat_sys_exit)
+SYSCALL(sys_fork,sys_fork)
+SYSCALL(sys_read,compat_sys_s390_read)
+SYSCALL(sys_write,compat_sys_s390_write)
+SYSCALL(sys_open,compat_sys_open) /* 5 */
+SYSCALL(sys_close,compat_sys_close)
+SYSCALL(sys_restart_syscall,sys_restart_syscall)
+SYSCALL(sys_creat,compat_sys_creat)
+SYSCALL(sys_link,compat_sys_link)
+SYSCALL(sys_unlink,compat_sys_unlink) /* 10 */
+SYSCALL(sys_execve,compat_sys_execve)
+SYSCALL(sys_chdir,compat_sys_chdir)
+SYSCALL(sys_ni_syscall,compat_sys_time) /* old time syscall */
+SYSCALL(sys_mknod,compat_sys_mknod)
+SYSCALL(sys_chmod,compat_sys_chmod) /* 15 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lchown16) /* old lchown16 syscall*/
+NI_SYSCALL /* old break syscall holder */
+NI_SYSCALL /* old stat syscall holder */
+SYSCALL(sys_lseek,compat_sys_lseek)
+SYSCALL(sys_getpid,sys_getpid) /* 20 */
+SYSCALL(sys_mount,compat_sys_mount)
+SYSCALL(sys_oldumount,compat_sys_oldumount)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setuid16) /* old setuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getuid16) /* old getuid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_stime) /* 25 old stime syscall */
+SYSCALL(sys_ptrace,compat_sys_ptrace)
+SYSCALL(sys_alarm,compat_sys_alarm)
+NI_SYSCALL /* old fstat syscall */
+SYSCALL(sys_pause,sys_pause)
+SYSCALL(sys_utime,compat_sys_utime) /* 30 */
+NI_SYSCALL /* old stty syscall */
+NI_SYSCALL /* old gtty syscall */
+SYSCALL(sys_access,compat_sys_access)
+SYSCALL(sys_nice,compat_sys_nice)
+NI_SYSCALL /* 35 old ftime syscall */
+SYSCALL(sys_sync,sys_sync)
+SYSCALL(sys_kill,compat_sys_kill)
+SYSCALL(sys_rename,compat_sys_rename)
+SYSCALL(sys_mkdir,compat_sys_mkdir)
+SYSCALL(sys_rmdir,compat_sys_rmdir) /* 40 */
+SYSCALL(sys_dup,compat_sys_dup)
+SYSCALL(sys_pipe,compat_sys_pipe)
+SYSCALL(sys_times,compat_sys_times)
+NI_SYSCALL /* old prof syscall */
+SYSCALL(sys_brk,compat_sys_brk) /* 45 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgid16) /* old setgid16 syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgid16) /* old getgid16 syscall*/
+SYSCALL(sys_signal,compat_sys_signal)
+SYSCALL(sys_ni_syscall,compat_sys_s390_geteuid16) /* old geteuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getegid16) /* 50 old getegid16 syscall */
+SYSCALL(sys_acct,compat_sys_acct)
+SYSCALL(sys_umount,compat_sys_umount)
+NI_SYSCALL /* old lock syscall */
+SYSCALL(sys_ioctl,compat_sys_ioctl)
+SYSCALL(sys_fcntl,compat_sys_fcntl) /* 55 */
+NI_SYSCALL /* intel mpx syscall */
+SYSCALL(sys_setpgid,compat_sys_setpgid)
+NI_SYSCALL /* old ulimit syscall */
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_umask,compat_sys_umask) /* 60 */
+SYSCALL(sys_chroot,compat_sys_chroot)
+SYSCALL(sys_ustat,compat_sys_ustat)
+SYSCALL(sys_dup2,compat_sys_dup2)
+SYSCALL(sys_getppid,sys_getppid)
+SYSCALL(sys_getpgrp,sys_getpgrp) /* 65 */
+SYSCALL(sys_setsid,sys_setsid)
+SYSCALL(sys_sigaction,compat_sys_sigaction)
+NI_SYSCALL /* old sgetmask syscall*/
+NI_SYSCALL /* old ssetmask syscall*/
+SYSCALL(sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
+SYSCALL(sys_sigsuspend,compat_sys_sigsuspend)
+SYSCALL(sys_sigpending,compat_sys_sigpending)
+SYSCALL(sys_sethostname,compat_sys_sethostname)
+SYSCALL(sys_setrlimit,compat_sys_setrlimit) /* 75 */
+SYSCALL(sys_getrlimit,compat_sys_old_getrlimit)
+SYSCALL(sys_getrusage,compat_sys_getrusage)
+SYSCALL(sys_gettimeofday,compat_sys_gettimeofday)
+SYSCALL(sys_settimeofday,compat_sys_settimeofday)
+SYSCALL(sys_ni_syscall,compat_sys_s390_getgroups16) /* 80 old getgroups16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setgroups16) /* old setgroups16 syscall */
+NI_SYSCALL /* old select syscall */
+SYSCALL(sys_symlink,compat_sys_symlink)
+NI_SYSCALL /* old lstat syscall */
+SYSCALL(sys_readlink,compat_sys_readlink) /* 85 */
+SYSCALL(sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,compat_sys_old_readdir) /* old readdir syscall */
+SYSCALL(sys_old_mmap,compat_sys_s390_old_mmap) /* 90 */
+SYSCALL(sys_munmap,compat_sys_munmap)
+SYSCALL(sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,compat_sys_ftruncate)
+SYSCALL(sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fchown16) /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,compat_sys_setpriority)
+NI_SYSCALL /* old profil syscall */
+SYSCALL(sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,compat_sys_fstatfs) /* 100 */
+NI_SYSCALL /* ioperm for i386 */
+SYSCALL(sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,compat_sys_syslog)
+SYSCALL(sys_setitimer,compat_sys_setitimer)
+SYSCALL(sys_getitimer,compat_sys_getitimer) /* 105 */
+SYSCALL(sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,compat_sys_newfstat)
+NI_SYSCALL /* old uname syscall */
+SYSCALL(sys_lookup_dcookie,compat_sys_lookup_dcookie) /* 110 */
+SYSCALL(sys_vhangup,sys_vhangup)
+NI_SYSCALL /* old "idle" system call */
+NI_SYSCALL /* vm86old for i386 */
+SYSCALL(sys_wait4,compat_sys_wait4)
+SYSCALL(sys_swapoff,compat_sys_swapoff) /* 115 */
+SYSCALL(sys_sysinfo,compat_sys_sysinfo)
+SYSCALL(sys_s390_ipc,compat_sys_s390_ipc)
+SYSCALL(sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,compat_sys_clone) /* 120 */
+SYSCALL(sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,compat_sys_newuname)
+NI_SYSCALL /* modify_ldt for i386 */
+SYSCALL(sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,compat_sys_mprotect) /* 125 */
+SYSCALL(sys_sigprocmask,compat_sys_sigprocmask)
+NI_SYSCALL /* old "create module" */
+SYSCALL(sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,compat_sys_delete_module)
+NI_SYSCALL /* 130: old get_kernel_syms */
+SYSCALL(sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,compat_sys_sysfs) /* 135 */
+SYSCALL(sys_s390_personality,compat_sys_s390_personality)
+NI_SYSCALL /* for afs_syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsuid16) /* old setfsuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setfsgid16) /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,compat_sys_llseek) /* 140 */
+SYSCALL(sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,compat_sys_select)
+SYSCALL(sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,compat_sys_readv) /* 145 */
+SYSCALL(sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,compat_sys_fdatasync)
+SYSCALL(sys_sysctl,compat_sys_sysctl)
+SYSCALL(sys_mlock,compat_sys_mlock) /* 150 */
+SYSCALL(sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,compat_sys_mlockall)
+SYSCALL(sys_munlockall,sys_munlockall)
+SYSCALL(sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,compat_sys_sched_getparam) /* 155 */
+SYSCALL(sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,compat_sys_sched_getscheduler)
+SYSCALL(sys_sched_yield,sys_sched_yield)
+SYSCALL(sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,compat_sys_sched_get_priority_min) /* 160 */
+SYSCALL(sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
+SYSCALL(sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,compat_sys_mremap)
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresuid16) /* old setresuid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresuid16) /* 165 old getresuid16 syscall */
+NI_SYSCALL /* for vm86 */
+NI_SYSCALL /* old sys_query_module */
+SYSCALL(sys_poll,compat_sys_poll)
+NI_SYSCALL /* old nfsservctl */
+SYSCALL(sys_ni_syscall,compat_sys_s390_setresgid16) /* 170 old setresgid16 syscall */
+SYSCALL(sys_ni_syscall,compat_sys_s390_getresgid16) /* old getresgid16 syscall */
+SYSCALL(sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,compat_sys_rt_sigreturn)
+SYSCALL(sys_rt_sigaction,compat_sys_rt_sigaction)
+SYSCALL(sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
+SYSCALL(sys_rt_sigpending,compat_sys_rt_sigpending)
+SYSCALL(sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
+SYSCALL(sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
+SYSCALL(sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
+SYSCALL(sys_pread64,compat_sys_s390_pread64) /* 180 */
+SYSCALL(sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_chown16) /* old chown16 syscall */
+SYSCALL(sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,compat_sys_capset) /* 185 */
+SYSCALL(sys_sigaltstack,compat_sys_sigaltstack)
+SYSCALL(sys_sendfile64,compat_sys_sendfile)
+NI_SYSCALL /* streams1 */
+NI_SYSCALL /* streams2 */
+SYSCALL(sys_vfork,sys_vfork) /* 190 */
+SYSCALL(sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_stat64) /* 195 */
+SYSCALL(sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,compat_sys_lchown)
+SYSCALL(sys_getuid,sys_getuid)
+SYSCALL(sys_getgid,sys_getgid) /* 200 */
+SYSCALL(sys_geteuid,sys_geteuid)
+SYSCALL(sys_getegid,sys_getegid)
+SYSCALL(sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,compat_sys_getgroups) /* 205 */
+SYSCALL(sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,compat_sys_setresgid) /* 210 */
+SYSCALL(sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,compat_sys_getdents64) /* 220 */
+SYSCALL(sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,compat_sys_s390_readahead)
+SYSCALL(sys_ni_syscall,compat_sys_sendfile64)
+SYSCALL(sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,compat_sys_lsetxattr) /* 225 */
+SYSCALL(sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,compat_sys_listxattr) /* 230 */
+SYSCALL(sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,compat_sys_fremovexattr) /* 235 */
+SYSCALL(sys_gettid,sys_gettid)
+SYSCALL(sys_tkill,compat_sys_tkill)
+SYSCALL(sys_futex,compat_sys_futex)
+SYSCALL(sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,compat_sys_sched_getaffinity) /* 240 */
+SYSCALL(sys_tgkill,compat_sys_tgkill)
+NI_SYSCALL /* reserved for TUX */
+SYSCALL(sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,compat_sys_io_getevents) /* 245 */
+SYSCALL(sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,compat_sys_epoll_ctl) /* 250 */
+SYSCALL(sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,compat_sys_timer_settime) /* 255 */
+SYSCALL(sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,compat_sys_clock_gettime) /* 260 */
+SYSCALL(sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,compat_sys_clock_nanosleep)
+NI_SYSCALL /* reserved for vserver */
+SYSCALL(sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages)
+NI_SYSCALL /* 268 sys_mbind */
+NI_SYSCALL /* 269 sys_get_mempolicy */
+NI_SYSCALL /* 270 sys_set_mempolicy */
+SYSCALL(sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,compat_sys_keyctl) /* 280 */
+SYSCALL(sys_waitid,compat_sys_waitid)
+SYSCALL(sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,compat_sys_ioprio_get)
+SYSCALL(sys_inotify_init,sys_inotify_init)
+SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch) /* 285 */
+SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
+NI_SYSCALL /* 287 sys_migrate_pages */
+SYSCALL(sys_openat,compat_sys_openat)
+SYSCALL(sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,compat_sys_mknodat) /* 290 */
+SYSCALL(sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,compat_sys_faccessat) /* 300 */
+SYSCALL(sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,compat_sys_unshare)
+SYSCALL(sys_set_robust_list,compat_sys_set_robust_list)
+SYSCALL(sys_get_robust_list,compat_sys_get_robust_list)
+SYSCALL(sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,compat_sys_tee)
+SYSCALL(sys_vmsplice,compat_sys_vmsplice)
+NI_SYSCALL /* 310 sys_move_pages */
+SYSCALL(sys_getcpu,compat_sys_getcpu)
+SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait)
+SYSCALL(sys_utimes,compat_sys_utimes)
+SYSCALL(sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,compat_sys_utimensat) /* 315 */
+SYSCALL(sys_signalfd,compat_sys_signalfd)
NI_SYSCALL /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
-SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
-SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
-SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
-SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
-SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
-SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
-SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
-SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
-SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
-SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
-SYSCALL(sys_setns,sys_setns,compat_sys_setns)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
-SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
-SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
-SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
-SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
-SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
-SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
-SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
-SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
-SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
-SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
-SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
-SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
+SYSCALL(sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,compat_sys_timerfd_create)
+SYSCALL(sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
+SYSCALL(sys_timerfd_gettime,compat_sys_timerfd_gettime)
+SYSCALL(sys_signalfd4,compat_sys_signalfd4)
+SYSCALL(sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,compat_sys_epoll_create1)
+SYSCALL(sys_preadv,compat_sys_preadv)
+SYSCALL(sys_pwritev,compat_sys_pwritev)
+SYSCALL(sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
+SYSCALL(sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,compat_sys_fanotify_init)
+SYSCALL(sys_fanotify_mark,compat_sys_fanotify_mark)
+SYSCALL(sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
+SYSCALL(sys_open_by_handle_at,compat_sys_open_by_handle_at)
+SYSCALL(sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,compat_sys_sched_getattr)
+SYSCALL(sys_renameat2,compat_sys_renameat2)
+SYSCALL(sys_seccomp,compat_sys_seccomp)
+SYSCALL(sys_getrandom,compat_sys_getrandom)
+SYSCALL(sys_memfd_create,compat_sys_memfd_create) /* 350 */
+SYSCALL(sys_bpf,compat_sys_bpf)
+SYSCALL(sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
+SYSCALL(sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,compat_sys_execveat)
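
Editor's note: after this rewrite every table entry carries two columns, and syscalls.S only becomes a table once entry.S defines SYSCALL()/NI_SYSCALL before including it. Those definitions are not part of this diff, so the following is an assumption-laden, standalone C illustration of the underlying technique (expanding one list twice with different macro definitions), not the kernel's actual entry.S code:

/* Standalone illustration of building two parallel tables from one
 * SYSCALL() list, similar in spirit to how entry.S consumes syscalls.S.
 * Only the macro names mirror the diff; everything else is made up. */
#include <stdio.h>

static long sys_ni_syscall(void)       { return -38; }	/* -ENOSYS stand-in */
static long sys_exit_demo(void)        { return 0; }
static long compat_sys_exit_demo(void) { return 0; }

typedef long (*syscall_fn)(void);

/* The shared list: one entry per line, two columns per entry. */
#define NI_SYSCALL SYSCALL(sys_ni_syscall, sys_ni_syscall)
#define SYSCALL_LIST \
	NI_SYSCALL                                   /* 0 */ \
	SYSCALL(sys_exit_demo, compat_sys_exit_demo) /* 1 */

/* First expansion: native 64-bit table (first column). */
#define SYSCALL(native, compat) (syscall_fn)native,
static const syscall_fn sys_call_table[] = { SYSCALL_LIST };
#undef SYSCALL

/* Second expansion: 31-bit emulation table (second column). */
#define SYSCALL(native, compat) (syscall_fn)compat,
static const syscall_fn sys_call_table_emu[] = { SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	printf("%ld %ld\n", sys_call_table[1](), sys_call_table_emu[1]());
	return 0;
}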
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 20660dddb2d6..170ddd2018b3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -215,20 +215,20 @@ void update_vsyscall(struct timekeeper *tk)
{
u64 nsecps;
- if (tk->tkr.clock != &clocksource_tod)
+ if (tk->tkr_mono.clock != &clocksource_tod)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_wmb();
- vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+ vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+ vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
vdso_data->wtom_clock_sec =
tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
- + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
- nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
+ vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
+ + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+ nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
while (vdso_data->wtom_clock_nsec >= nsecps) {
vdso_data->wtom_clock_nsec -= nsecps;
vdso_data->wtom_clock_sec++;
@@ -236,7 +236,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec =
- (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
+ (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
vdso_data->wtom_coarse_sec =
vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_coarse_nsec =
@@ -246,8 +246,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_coarse_sec++;
}
- vdso_data->tk_mult = tk->tkr.mult;
- vdso_data->tk_shift = tk->tkr.shift;
+ vdso_data->tk_mult = tk->tkr_mono.mult;
+ vdso_data->tk_shift = tk->tkr_mono.shift;
smp_wmb();
++vdso_data->tb_update_count;
}
@@ -283,7 +283,7 @@ void __init time_init(void)
if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
- if (clocksource_register(&clocksource_tod) != 0)
+ if (__clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
/* Enable TOD clock interrupts on the boot cpu. */
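
Editor's note: besides the mechanical tkr -> tkr_mono rename, the surrounding update_vsyscall() code keeps the wall-to-monotonic nanoseconds in the clocksource's shifted representation and normalizes them against NSEC_PER_SEC << shift. A small standalone sketch of that normalization with made-up values:

/* Standalone sketch of the shifted-nanosecond normalization done in
 * update_vsyscall(); the shift and input values are made up. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned int shift = 8;				/* stand-in for tk->tkr_mono.shift */
	uint64_t nsecps = NSEC_PER_SEC << shift;	/* one second, left-shifted */
	uint64_t wtom_sec = 0;
	uint64_t wtom_nsec = (NSEC_PER_SEC * 5 / 2) << shift;	/* 2.5 s, shifted */

	while (wtom_nsec >= nsecps) {			/* same loop as the kernel code */
		wtom_nsec -= nsecps;
		wtom_sec++;
	}
	printf("sec=%" PRIu64 " nsec=%" PRIu64 "\n", wtom_sec, wtom_nsec >> shift);
	return 0;					/* prints: sec=2 nsec=500000000 */
}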
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 14da43b801d9..5728c5bd44a8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -421,7 +421,7 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
-const struct cpumask *cpu_thread_mask(int cpu)
+static const struct cpumask *cpu_thread_mask(int cpu)
{
return &per_cpu(cpu_topology, cpu).thread_mask;
}
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index f081cf1157c3..4d96c9f53455 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -26,7 +26,6 @@ int show_unhandled_signals = 1;
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
-#ifdef CONFIG_64BIT
unsigned long address;
if (regs->int_code & 0x200)
@@ -35,10 +34,6 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
address = regs->psw.addr;
return (void __user *)
((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#else
- return (void __user *)
- ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
-#endif
}
static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -153,11 +148,8 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
"privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
"special operation exception")
-
-#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
"transaction constraint exception")
-#endif
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
@@ -182,7 +174,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
void translation_exception(struct pt_regs *regs)
{
/* May never happen. */
- die(regs, "Translation exception");
+ panic("Translation exception");
}
void illegal_op(struct pt_regs *regs)
@@ -211,29 +203,6 @@ void illegal_op(struct pt_regs *regs)
} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
is_uprobe_insn = 1;
#endif
-#ifdef CONFIG_MATHEMU
- } else if (opcode[0] == 0xb3) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_b3(opcode, regs);
- } else if (opcode[0] == 0xed) {
- if (get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1)))
- return;
- signal = math_emu_ed(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb299) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_srnm(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29c) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_stfpc(opcode, regs);
- } else if (*((__u16 *) opcode) == 0xb29d) {
- if (get_user(*((__u16 *) (opcode+2)), location+1))
- return;
- signal = math_emu_lfpc(opcode, regs);
-#endif
} else
signal = SIGILL;
}
@@ -247,71 +216,14 @@ void illegal_op(struct pt_regs *regs)
3, SIGTRAP) != NOTIFY_STOP)
signal = SIGILL;
}
-
-#ifdef CONFIG_MATHEMU
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal == SIGSEGV)
- do_trap(regs, signal, SEGV_MAPERR, "user address fault");
- else
-#endif
if (signal)
do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
-#ifdef CONFIG_MATHEMU
-void specification_exception(struct pt_regs *regs)
-{
- __u8 opcode[6];
- __u16 __user *location = NULL;
- int signal = 0;
-
- location = (__u16 __user *) get_trap_ip(regs);
-
- if (user_mode(regs)) {
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- default:
- signal = SIGILL;
- break;
- }
- } else
- signal = SIGILL;
-
- if (signal == SIGFPE)
- do_fp_trap(regs, current->thread.fp_regs.fpc);
- else if (signal)
- do_trap(regs, signal, ILL_ILLOPN, "specification exception");
-}
-#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
-#endif
-#ifdef CONFIG_64BIT
int alloc_vector_registers(struct task_struct *tsk)
{
__vector128 *vxrs;
@@ -377,7 +289,6 @@ static int __init disable_vector_extension(char *str)
return 1;
}
__setup("novx", disable_vector_extension);
-#endif
void data_exception(struct pt_regs *regs)
{
@@ -386,65 +297,7 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
- if (MACHINE_HAS_IEEE)
- asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
-
-#ifdef CONFIG_MATHEMU
- else if (user_mode(regs)) {
- __u8 opcode[6];
- get_user(*((__u16 *) opcode), location);
- switch (opcode[0]) {
- case 0x28: /* LDR Rx,Ry */
- signal = math_emu_ldr(opcode);
- break;
- case 0x38: /* LER Rx,Ry */
- signal = math_emu_ler(opcode);
- break;
- case 0x60: /* STD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_std(opcode, regs);
- break;
- case 0x68: /* LD R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ld(opcode, regs);
- break;
- case 0x70: /* STE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_ste(opcode, regs);
- break;
- case 0x78: /* LE R,D(X,B) */
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_le(opcode, regs);
- break;
- case 0xb3:
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_b3(opcode, regs);
- break;
- case 0xed:
- get_user(*((__u32 *) (opcode+2)),
- (__u32 __user *)(location+1));
- signal = math_emu_ed(opcode, regs);
- break;
- case 0xb2:
- if (opcode[1] == 0x99) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_srnm(opcode, regs);
- } else if (opcode[1] == 0x9c) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_stfpc(opcode, regs);
- } else if (opcode[1] == 0x9d) {
- get_user(*((__u16 *) (opcode+2)), location+1);
- signal = math_emu_lfpc(opcode, regs);
- } else
- signal = SIGILL;
- break;
- default:
- signal = SIGILL;
- break;
- }
- }
-#endif
-#ifdef CONFIG_64BIT
+ asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
/* Check for vector register enablement */
if (MACHINE_HAS_VX && !current->thread.vxrs &&
(current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
@@ -454,13 +307,11 @@ void data_exception(struct pt_regs *regs)
clear_pt_regs_flag(regs, PIF_PER_TRAP);
return;
}
-#endif
-
if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
signal = SIGILL;
- if (signal == SIGFPE)
+ if (signal == SIGFPE)
do_fp_trap(regs, current->thread.fp_regs.fpc);
else if (signal)
do_trap(regs, signal, ILL_ILLOPN, "data exception");
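
Editor's note: with the 31-bit math emulation gone, data_exception() always stores the FPC and then branches on the data-exception code: DXC 0xfe with vector registers not yet allocated triggers lazy allocation and a restart, any other non-zero DXC raises SIGFPE, and a zero DXC raises SIGILL. A standalone model of that dispatch; the FPC_DXC_MASK value below is an assumption, while the 0xfe00 comparison is taken from the diff:

/* Standalone model of the data-exception dispatch after this patch.
 * FPC_DXC_MASK is assumed to select the DXC field of the FPC. */
#include <stdbool.h>
#include <stdio.h>

#define FPC_DXC_MASK 0xff00u	/* assumption, not copied from the kernel */

enum action { RETRY_WITH_VX, SEND_SIGFPE, SEND_SIGILL };

static enum action dispatch(unsigned int fpc, bool machine_has_vx, bool has_vxrs)
{
	if (machine_has_vx && !has_vxrs && (fpc & FPC_DXC_MASK) == 0xfe00)
		return RETRY_WITH_VX;	/* allocate vector regs, restart insn */
	if (fpc & FPC_DXC_MASK)
		return SEND_SIGFPE;	/* IEEE exception: deliver SIGFPE */
	return SEND_SIGILL;
}

int main(void)
{
	printf("%d %d %d\n",
	       dispatch(0xfe00, true, false),	/* 0: enable vector registers */
	       dispatch(0x0800, true, true),	/* 1: SIGFPE */
	       dispatch(0x0000, true, true));	/* 2: SIGILL */
	return 0;
}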
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index cc7328080b60..66956c09d5bf 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -188,7 +188,9 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
if (__rc == 0) \
- sim_stor_event(regs, __ptr, mask + 1); \
+ sim_stor_event(regs, \
+ (void __force *)__ptr, \
+ mask + 1); \
__rc; \
})
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0bbb7e027c5a..0d58269ff425 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -32,19 +32,17 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif
-#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
-#endif /* CONFIG_64BIT */
/*
* Should the kernel map a VDSO page into processes and pass its
@@ -87,7 +85,6 @@ static void vdso_init_data(struct vdso_data *vd)
vd->ectg_available = test_facility(31);
}
-#ifdef CONFIG_64BIT
/*
* Allocate/free per cpu vdso data.
*/
@@ -169,7 +166,6 @@ static void vdso_init_cr5(void)
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
}
-#endif /* CONFIG_64BIT */
/*
* This is called from binfmt_elf, we create the special vma for the
@@ -191,7 +187,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (!uses_interp)
return 0;
-#ifdef CONFIG_64BIT
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
@@ -200,11 +195,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_pages = vdso32_pages;
}
#endif
-#else
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
-#endif
-
/*
* vDSO has a problem and was disabled, just don't "enable" it for
* the process
@@ -268,7 +258,7 @@ static int __init vdso_init(void)
if (!vdso_enabled)
return 0;
vdso_init_data(vdso_data);
-#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
+#ifdef CONFIG_COMPAT
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -287,7 +277,6 @@ static int __init vdso_init(void)
vdso32_pagelist[vdso32_pages] = NULL;
#endif
-#ifdef CONFIG_64BIT
/* Calculate the size of the 64 bit vDSO */
vdso64_pages = ((&vdso64_end - &vdso64_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
@@ -307,7 +296,6 @@ static int __init vdso_init(void)
if (vdso_alloc_per_cpu(&S390_lowcore))
BUG();
vdso_init_cr5();
-#endif /* CONFIG_64BIT */
get_page(virt_to_page(vdso_data));
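
Editor's note: the vDSO setup retained above sizes each image as its byte length rounded up to whole pages, plus one extra page for the shared vdso_data. A worked example of that arithmetic under the usual 4 KiB page assumption:

/* Worked example of the page-count arithmetic used for the vDSO image. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long image_bytes = 6000;	/* arbitrary vDSO image size */
	unsigned long pages = ((image_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	printf("%lu bytes -> %lu pages (incl. data page)\n", image_bytes, pages);
	return 0;	/* 6000 bytes -> 2 code pages + 1 data page = 3 */
}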
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed0af5f..445657fe658c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -6,17 +6,10 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
-#ifndef CONFIG_64BIT
-OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
-OUTPUT_ARCH(s390:31-bit)
-ENTRY(startup)
-jiffies = jiffies_64 + 4;
-#else
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(startup)
jiffies = jiffies_64;
-#endif
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */