author | Heiko Carstens <hca@linux.ibm.com> | 2023-09-11 22:40:04 +0300
committer | Vasily Gorbik <gor@linux.ibm.com> | 2023-09-19 14:26:56 +0300
commit | 527618abb92793b9d4dba548d55822dcebd95317 (patch)
tree | a33943001b1f0134ffc09e4cf0ecf1d267e44779
parent | ecc53818f60447177e24ea11b7f136c405150976 (diff)
download | linux-527618abb92793b9d4dba548d55822dcebd95317.tar.xz
s390/ctlreg: add struct ctlreg
Add struct ctlreg to enforce strict type checking / usage for control
register functions.
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
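
The change applies the classic C "strong typedef" idiom: wrapping a bare unsigned long in a one-member struct so that the compiler rejects code that passes a raw integer where a control-register value is expected. A minimal user-space sketch of the idea (not the kernel code itself; demo_ctl_load() and the constants are illustrative stand-ins for the lctlg/stctg based helpers):

#include <stdio.h>

/* Strong typedef: once wrapped in a struct, a control-register value
 * is no longer interchangeable with a bare unsigned long. */
struct ctlreg {
	unsigned long val;
};

/* Stand-in for local_ctl_load(): only accepts struct ctlreg *. */
static void demo_ctl_load(unsigned int cr, struct ctlreg *reg)
{
	printf("load cr%u <- %#lx\n", cr, reg->val);
}

int main(void)
{
	struct ctlreg cr0 = { .val = 0x10060000UL };
	unsigned long raw = 0x10060000UL;

	demo_ctl_load(0, &cr0);	/* OK: typed argument */
	/* demo_ctl_load(0, &raw); would fail to compile with an
	 * incompatible-pointer-type error - exactly the kind of
	 * mistake the patch is meant to catch. */
	(void)raw;
	return 0;
}

Because the struct has a single unsigned long member, it has the same size and layout as the raw value it replaces, so the wrapper costs nothing at run time; the only effect is the additional compile-time check.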
-rw-r--r-- | arch/s390/boot/vmem.c | 8
-rw-r--r-- | arch/s390/include/asm/ctlreg.h | 28
-rw-r--r-- | arch/s390/include/asm/kprobes.h | 3
-rw-r--r-- | arch/s390/include/asm/lowcore.h | 7
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 2
-rw-r--r-- | arch/s390/include/asm/pgtable.h | 3
-rw-r--r-- | arch/s390/kernel/ctlreg.c | 10
-rw-r--r-- | arch/s390/kernel/kprobes.c | 14
-rw-r--r-- | arch/s390/kernel/machine_kexec.c | 6
-rw-r--r-- | arch/s390/kernel/nmi.c | 12
-rw-r--r-- | arch/s390/kernel/ptrace.c | 38
-rw-r--r-- | arch/s390/kernel/setup.c | 12
-rw-r--r-- | arch/s390/kernel/smp.c | 8
-rw-r--r-- | arch/s390/lib/uaccess.c | 8
-rw-r--r-- | arch/s390/mm/dump_pagetables.c | 2
-rw-r--r-- | arch/s390/mm/fault.c | 4
-rw-r--r-- | arch/s390/mm/init.c | 2
-rw-r--r-- | arch/s390/mm/pageattr.c | 2
-rw-r--r-- | arch/s390/mm/pgalloc.c | 2
-rw-r--r-- | drivers/s390/char/sclp.c | 6
-rw-r--r-- | drivers/s390/char/sclp_early_core.c | 6
21 files changed, 97 insertions, 86 deletions
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 44278ae2512f..bdbfee86d1ac 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -12,7 +12,7 @@
#include "decompressor.h"
#include "boot.h"
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
#ifdef CONFIG_PROC_FS
atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
@@ -422,7 +422,7 @@ void setup_vmem(unsigned long asce_limit)
asce_type = _REGION3_ENTRY_EMPTY;
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
}
- s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
@@ -443,12 +443,12 @@ void setup_vmem(unsigned long asce_limit)
kasan_populate_shadow();
- S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
+ S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
local_ctl_load(1, &S390_lowcore.kernel_asce);
local_ctl_load(7, &S390_lowcore.user_asce);
local_ctl_load(13, &S390_lowcore.kernel_asce);
- init_mm.context.asce = S390_lowcore.kernel_asce;
+ init_mm.context.asce = S390_lowcore.kernel_asce.val;
}
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index a49459adba9d..57cc610dd997 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -35,6 +35,10 @@
#include <linux/bug.h>
+struct ctlreg {
+ unsigned long val;
+};
+
#define __local_ctl_load(low, high, array) do { \
struct addrtype { \
char _[sizeof(array)]; \
@@ -43,9 +47,9 @@
int _low = low; \
int _esize; \
\
- _esize = (_high - _low + 1) * sizeof(unsigned long); \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
- typecheck(unsigned long, array[0]); \
+ typecheck(struct ctlreg, array[0]); \
asm volatile( \
" lctlg %[_low],%[_high],%[_arr]\n" \
: \
@@ -62,16 +66,16 @@
int _low = low; \
int _esize; \
\
- _esize = (_high - _low + 1) * sizeof(unsigned long); \
+ _esize = (_high - _low + 1) * sizeof(struct ctlreg); \
BUILD_BUG_ON(sizeof(struct addrtype) != _esize); \
- typecheck(unsigned long, array[0]); \
+ typecheck(struct ctlreg, array[0]); \
asm volatile( \
" stctg %[_low],%[_high],%[_arr]\n" \
: [_arr] "=Q" (*(struct addrtype *)(&array)) \
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)
-static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
+static __always_inline void local_ctl_load(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" lctlg %[cr],%[cr],%[reg]\n"
@@ -80,7 +84,7 @@ static __always_inline void local_ctl_load(unsigned int cr, unsigned long *reg)
: "memory");
}
-static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg)
+static __always_inline void local_ctl_store(unsigned int cr, struct ctlreg *reg)
{
asm volatile(
" stctg %[cr],%[cr],%[reg]\n"
@@ -90,19 +94,19 @@ static __always_inline void local_ctl_store(unsigned int cr, unsigned long *reg
static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
{
- unsigned long reg;
+ struct ctlreg reg;
local_ctl_store(cr, &reg);
- reg |= 1UL << bit;
+ reg.val |= 1UL << bit;
local_ctl_load(cr, &reg);
}
static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
- unsigned long reg;
+ struct ctlreg reg;
local_ctl_store(cr, &reg);
- reg &= ~(1UL << bit);
+ reg.val &= ~(1UL << bit);
local_ctl_load(cr, &reg);
}
@@ -122,6 +126,7 @@ static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
union ctlreg0 {
unsigned long val;
+ struct ctlreg reg;
struct {
unsigned long : 8;
unsigned long tcx : 1; /* Transactional-Execution control */
@@ -148,6 +153,7 @@ union ctlreg0 {
union ctlreg2 {
unsigned long val;
+ struct ctlreg reg;
struct {
unsigned long : 33;
unsigned long ducto : 25;
@@ -161,6 +167,7 @@ union ctlreg2 {
union ctlreg5 {
unsigned long val;
+ struct ctlreg reg;
struct {
unsigned long : 33;
unsigned long pasteo: 25;
@@ -170,6 +177,7 @@ union ctlreg5 {
union ctlreg15 {
unsigned long val;
+ struct ctlreg reg;
struct {
unsigned long lsea : 61;
unsigned long : 3;
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 83f732ca3af4..21b9e5290c04 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -15,6 +15,7 @@
* <grundym@us.ibm.com>
*/
#include <linux/types.h>
+#include <asm/ctlreg.h>
#include <asm-generic/kprobes.h>
#define BREAKPOINT_INSTRUCTION 0x0002
@@ -65,7 +66,7 @@ struct prev_kprobe {
struct kprobe_ctlblk {
unsigned long kprobe_status;
unsigned long kprobe_saved_imask;
- unsigned long kprobe_saved_ctl[3];
+ struct ctlreg kprobe_saved_ctl[3];
struct prev_kprobe prev_kprobe;
};
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 2174f00e188b..3366431dcad5 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/ctlreg.h>
#include <asm/cpu.h>
#include <asm/types.h>
@@ -139,8 +140,8 @@ struct lowcore {
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
- unsigned long kernel_asce; /* 0x0388 */
- unsigned long user_asce; /* 0x0390 */
+ struct ctlreg kernel_asce; /* 0x0388 */
+ struct ctlreg user_asce; /* 0x0390 */
/*
* The lpp and current_pid fields form a
@@ -199,7 +200,7 @@ struct lowcore {
__u32 clock_comp_save_area[2]; /* 0x1330 */
__u64 last_break_save_area; /* 0x1338 */
__u32 access_regs_save_area[16]; /* 0x1340 */
- unsigned long cregs_save_area[16]; /* 0x1380 */
+ struct ctlreg cregs_save_area[16]; /* 0x1380 */
__u8 pad_0x1400[0x1500-0x1400]; /* 0x1400 */
/* Cryptography-counter designation */
__u64 ccd; /* 0x1500 */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 0e93275f80f0..757fe6f0d802 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -78,7 +78,7 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
if (next == &init_mm)
S390_lowcore.user_asce = s390_invalid_asce;
else
- S390_lowcore.user_asce = next->context.asce;
+ S390_lowcore.user_asce.val = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
local_ctl_load(7, &s390_invalid_asce);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fb3ee7758b76..601e87fa8a9a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -18,6 +18,7 @@
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
+#include <asm/ctlreg.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>
@@ -25,7 +26,7 @@
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
extern void paging_init(void);
-extern unsigned long s390_invalid_asce;
+extern struct ctlreg s390_invalid_asce;
enum {
PG_DIRECT_MAP_4K = 0,
diff --git a/arch/s390/kernel/ctlreg.c b/arch/s390/kernel/ctlreg.c
index 1c9fee1ca4a3..14f715bc5f73 100644
--- a/arch/s390/kernel/ctlreg.c
+++ b/arch/s390/kernel/ctlreg.c
@@ -36,11 +36,11 @@ struct ctl_bit_parms {
static void ctl_bit_callback(void *info)
{
struct ctl_bit_parms *pp = info;
- unsigned long regs[16];
+ struct ctlreg regs[16];
__local_ctl_store(0, 15, regs);
- regs[pp->cr] &= pp->andval;
- regs[pp->cr] |= pp->orval;
+ regs[pp->cr].val &= pp->andval;
+ regs[pp->cr].val |= pp->orval;
__local_ctl_load(0, 15, regs);
}
@@ -53,8 +53,8 @@ void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set)
pp.andval = set ? -1UL : ~(1UL << bit);
system_ctlreg_lock();
abs_lc = get_abs_lowcore();
- abs_lc->cregs_save_area[cr] &= pp.andval;
- abs_lc->cregs_save_area[cr] |= pp.orval;
+ abs_lc->cregs_save_area[cr].val &= pp.andval;
+ abs_lc->cregs_save_area[cr].val |= pp.orval;
put_abs_lowcore(abs_lc);
on_each_cpu(ctl_bit_callback, &pp, 1);
system_ctlreg_unlock();
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d4c2ece4f839..f0cf20d4b3c5 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -225,18 +225,18 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb, unsigned long ip)
{
union {
- unsigned long regs[3];
+ struct ctlreg regs[3];
struct {
- unsigned long control;
- unsigned long start;
- unsigned long end;
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
};
} per_kprobe;
/* Set up the PER control registers %cr9-%cr11 */
- per_kprobe.control = PER_EVENT_IFETCH;
- per_kprobe.start = ip;
- per_kprobe.end = ip;
+ per_kprobe.control.val = PER_EVENT_IFETCH;
+ per_kprobe.start.val = ip;
+ per_kprobe.end.val = ip;
/* Save control regs and psw mask */
__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index fe9d65060fa4..bb0d4d68fcbe 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -94,12 +94,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- local_ctl_store(2, &cr2_old.val);
+ local_ctl_store(2, &cr2_old.reg);
cr2_new = cr2_old;
cr2_new.gse = 1;
- local_ctl_load(2, &cr2_new.val);
+ local_ctl_load(2, &cr2_new.reg);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- local_ctl_load(2, &cr2_old.val);
+ local_ctl_load(2, &cr2_old.reg);
}
/*
* To create a good backchain for this CPU in the dump store_status
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 7880a42896a3..579cebc58d8c 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -131,10 +131,10 @@ static notrace void s390_handle_damage(void)
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- local_ctl_store(0, &cr0.val);
+ local_ctl_store(0, &cr0.reg);
cr0_new = cr0;
cr0_new.lap = 0;
- local_ctl_load(0, &cr0_new.val);
+ local_ctl_load(0, &cr0_new.reg);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +146,7 @@ static notrace void s390_handle_damage(void)
* values. This makes possible system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- local_ctl_load(0, &cr0.val);
+ local_ctl_load(0, &cr0.reg);
disabled_wait();
while (1);
}
@@ -269,9 +269,9 @@ static int notrace s390_validate_registers(union mci mci)
*/
if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST))
kill_task = 1;
- cr0.val = S390_lowcore.cregs_save_area[0];
+ cr0.reg = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- local_ctl_load(0, &cr0.val);
+ local_ctl_load(0, &cr0.reg);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
@@ -290,7 +290,7 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.ar)
kill_task = 1;
/* Validate guarded storage registers */
- cr2.val = S390_lowcore.cregs_save_area[2];
+ cr2.reg = S390_lowcore.cregs_save_area[2];
if (cr2.gse) {
if (!mci.gs) {
/*
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 1e1de907f24d..6a825351ff41 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -45,16 +45,16 @@ void update_cr_regs(struct task_struct *task)
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
union {
- unsigned long regs[3];
+ struct ctlreg regs[3];
struct {
- unsigned long control;
- unsigned long start;
- unsigned long end;
+ struct ctlreg control;
+ struct ctlreg start;
+ struct ctlreg end;
};
} old, new;
- local_ctl_store(0, &cr0_old.val);
- local_ctl_store(2, &cr2_old.val);
+ local_ctl_store(0, &cr0_old.reg);
+ local_ctl_store(2, &cr2_old.reg);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
@@ -82,31 +82,31 @@ void update_cr_regs(struct task_struct *task)
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- local_ctl_load(0, &cr0_new.val);
+ local_ctl_load(0, &cr0_new.reg);
if (cr2_changed)
- local_ctl_load(2, &cr2_new.val);
+ local_ctl_load(2, &cr2_new.reg);
/* Copy user specified PER registers */
- new.control = thread->per_user.control;
- new.start = thread->per_user.start;
- new.end = thread->per_user.end;
+ new.control.val = thread->per_user.control;
+ new.start.val = thread->per_user.start;
+ new.end.val = thread->per_user.end;
/* merge TIF_SINGLE_STEP into user specified PER registers. */
if (test_tsk_thread_flag(task, TIF_SINGLE_STEP) ||
test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) {
if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
- new.control |= PER_EVENT_BRANCH;
+ new.control.val |= PER_EVENT_BRANCH;
else
- new.control |= PER_EVENT_IFETCH;
- new.control |= PER_CONTROL_SUSPENSION;
- new.control |= PER_EVENT_TRANSACTION_END;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.control.val |= PER_CONTROL_SUSPENSION;
+ new.control.val |= PER_EVENT_TRANSACTION_END;
if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
- new.control |= PER_EVENT_IFETCH;
- new.start = 0;
- new.end = -1UL;
+ new.control.val |= PER_EVENT_IFETCH;
+ new.start.val = 0;
+ new.end.val = -1UL;
}
/* Take care of the PER enablement bit in the PSW. */
- if (!(new.control & PER_EVENT_MASK)) {
+ if (!(new.control.val & PER_EVENT_MASK)) {
regs->psw.mask &= ~PSW_MASK_PER;
return;
}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2c4bfe41d284..1c049a65c769 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -791,15 +791,15 @@ static void __init setup_cr(void)
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- local_ctl_store(2, &cr2.val);
- local_ctl_store(5, &cr5.val);
- local_ctl_store(15, &cr15.val);
+ local_ctl_store(2, &cr2.reg);
+ local_ctl_store(5, &cr5.reg);
+ local_ctl_store(15, &cr15.reg);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- local_ctl_load(2, &cr2.val);
- local_ctl_load(5, &cr5.val);
- local_ctl_load(15, &cr15.val);
+ local_ctl_load(2, &cr2.reg);
+ local_ctl_load(5, &cr5.reg);
+ local_ctl_load(15, &cr15.reg);
}
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 90d3502379aa..fbd2801c7061 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -910,7 +910,7 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void)
{
- unsigned long cregs[16];
+ struct ctlreg cregs[16];
int cpu;
/* Handle possible pending IPIs */
@@ -923,9 +923,9 @@ int __cpu_disable(void)
pfault_fini();
/* Disable interrupt sources via control register. */
__local_ctl_store(0, 15, cregs);
- cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
- cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
- cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
+ cregs[0].val &= ~0x0000ee70UL; /* disable all external interrupts */
+ cregs[6].val &= ~0xff000000UL; /* disable all I/O interrupts */
+ cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
__local_ctl_load(0, 15, cregs);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 56f7eb90b44d..61d8dcd95bbc 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -17,17 +17,17 @@
#ifdef CONFIG_DEBUG_ENTRY
void debug_user_asce(int exit)
{
- unsigned long cr1, cr7;
+ struct ctlreg cr1, cr7;
local_ctl_store(1, &cr1);
local_ctl_store(7, &cr7);
- if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
+ if (cr1.val == S390_lowcore.kernel_asce.val && cr7.val == S390_lowcore.user_asce.val)
return;
panic("incorrect ASCE on kernel %s\n"
"cr1: %016lx cr7: %016lx\n"
"kernel: %016lx user: %016lx\n",
- exit ? "exit" : "entry", cr1, cr7,
- S390_lowcore.kernel_asce, S390_lowcore.user_asce);
+ exit ? "exit" : "entry", cr1.val, cr7.val,
+ S390_lowcore.kernel_asce.val, S390_lowcore.user_asce.val);
}
#endif /*CONFIG_DEBUG_ENTRY */
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index b51666967aa1..d37a8f607b71 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -287,7 +287,7 @@ static int pt_dump_init(void)
* kernel ASCE. We need this to keep the page table walker functions
* from accessing non-existent entries.
*/
- max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
+ max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index b678295931c3..587b6b64185f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -196,7 +196,7 @@ static void dump_fault_info(struct pt_regs *regs)
pr_cont("mode while using ");
switch (get_fault_type(regs)) {
case USER_FAULT:
- asce = S390_lowcore.user_asce;
+ asce = S390_lowcore.user_asce.val;
pr_cont("user ");
break;
case GMAP_FAULT:
@@ -204,7 +204,7 @@ static void dump_fault_info(struct pt_regs *regs)
pr_cont("gmap ");
break;
case KERNEL_FAULT:
- asce = S390_lowcore.kernel_asce;
+ asce = S390_lowcore.kernel_asce.val;
pr_cont("kernel ");
break;
default:
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c6ab6f2707d7..7eca10c32caa 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -54,7 +54,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b87e96c64b61..631e3a4ee2de 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
break;
}
table = (unsigned long *)((unsigned long)old & mask);
- crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
+ crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val);
} else if (MACHINE_HAS_IDTE) {
cspg(old, *old, new);
} else {
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 27720a687022..61fb157029c8 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -62,7 +62,7 @@ static void __crst_table_upgrade(void *arg)
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
- S390_lowcore.user_asce = mm->context.asce;
+ S390_lowcore.user_asce.val = mm->context.asce;
local_ctl_load(7, &S390_lowcore.user_asce);
}
__tlb_flush_local();
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 30a7cd9748fe..ba9b202c5dee 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -706,8 +706,8 @@ void sclp_sync_wait(void)
{
unsigned long long old_tick;
+ struct ctlreg cr0, cr0_sync;
unsigned long flags;
- unsigned long cr0, cr0_sync;
static u64 sync_count;
u64 timeout;
int irq_context;
@@ -733,8 +733,8 @@ sclp_sync_wait(void)
old_tick = local_tick_disable();
trace_hardirqs_on();
local_ctl_store(0, &cr0);
- cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
- cr0_sync |= 1UL << (63 - 54);
+ cr0_sync.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
+ cr0_sync.val |= 1UL << (63 - 54);
local_ctl_load(0, &cr0_sync);
__arch_local_irq_stosm(0x01);
/* Loop until driver state indicates finished request */
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index 8cb9fb7098e2..9f6165cafdc3 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -32,11 +32,11 @@ void sclp_early_wait_irq(void)
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
- local_ctl_store(0, &cr0.val);
+ local_ctl_store(0, &cr0.reg);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
- local_ctl_load(0, &cr0_new.val);
+ local_ctl_load(0, &cr0_new.reg);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
@@ -59,7 +59,7 @@ void sclp_early_wait_irq(void)
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
- local_ctl_load(0, &cr0.val);
+ local_ctl_load(0, &cr0.reg);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
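
The bit-field unions (ctlreg0, ctlreg2, ctlreg5, ctlreg15) additionally gain a struct ctlreg member next to val, so code can keep manipulating named bits while handing the typed member to the load/store helpers; that is why call sites change from &cr0.val to &cr0.reg. A condensed user-space sketch of that layout (union and bit names modeled on the patch; demo_ctl_load() is again a hypothetical stand-in for local_ctl_load()):

#include <stdio.h>

struct ctlreg {
	unsigned long val;
};

/* Same idea as the kernel's union ctlreg0: one storage location,
 * three views - raw value, typed register, named bit fields.
 * Assumes a 64-bit unsigned long, as on s390x. */
union demo_ctlreg0 {
	unsigned long val;
	struct ctlreg reg;
	struct {
		unsigned long : 8;
		unsigned long tcx : 1;	/* Transactional-Execution control */
		unsigned long : 55;
	};
};

static void demo_ctl_load(unsigned int cr, struct ctlreg *reg)
{
	printf("load cr%u <- %#lx\n", cr, reg->val);
}

int main(void)
{
	union demo_ctlreg0 cr0 = { .val = 0 };

	cr0.tcx = 1;			/* flip a named bit ...          */
	demo_ctl_load(0, &cr0.reg);	/* ... then pass the typed view  */
	return 0;
}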