Diffstat (limited to 'arch/s390/include/asm')
27 files changed, 539 insertions, 274 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 1d4706114a45..fa934fe080c1 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -412,9 +412,4 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 #endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 578680f6207a..19ff956b752b 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -27,8 +27,9 @@
 #define smp_rmb()			rmb()
 #define smp_wmb()			wmb()
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+
+#define smp_mb__before_atomic()		smp_mb()
+#define smp_mb__after_atomic()		smp_mb()
 
 #define set_mb(var, value)		do { var = value; mb(); } while (0)
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index a9c2c0686177..b80e456d6428 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -229,5 +229,5 @@ int ccw_device_siosl(struct ccw_device *);
 
 extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
 
-extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index ebc2913f9ee0..057ce0ca6377 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -10,6 +10,8 @@ struct ccw_driver;
  * @count: number of attached slave devices
  * @dev: embedded device structure
  * @cdev: variable number of slave devices, allocated as needed
+ * @ungroup_work: work to be done when a ccwgroup notifier has action
+ *	type %BUS_NOTIFY_UNBIND_DRIVER
  */
 struct ccwgroup_device {
 	enum {
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index 38c405ef89ce..7298eec98541 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -8,6 +8,17 @@
 #include <uapi/asm/chpid.h>
 #include <asm/cio.h>
 
+struct channel_path_desc {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u8 swla;
+	u8 zeroes;
+	u8 chla;
+	u8 chpp;
+} __packed;
+
 static inline void chp_id_init(struct chp_id *chpid)
 {
 	memset(chpid, 0, sizeof(struct chp_id));
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 4e63f1a13600..31ab9f346d7e 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -57,6 +57,20 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
 void smp_ctl_set_bit(int cr, int bit);
 void smp_ctl_clear_bit(int cr, int bit);
 
+union ctlreg0 {
+	unsigned long val;
+	struct {
+#ifdef CONFIG_64BIT
+		unsigned long	   : 32;
+#endif
+		unsigned long	   : 3;
+		unsigned long lap  : 1; /* Low-address-protection control */
+		unsigned long	   : 4;
+		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+		unsigned long	   : 23;
+	};
+};
+
 #ifdef CONFIG_SMP
 # define ctl_set_bit(cr, bit)	smp_ctl_set_bit(cr, bit)
 # define ctl_clear_bit(cr, bit)	smp_ctl_clear_bit(cr, bit)
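The new union ctlreg0 gives control register 0 a named bit-field view. As a hedged illustration (not part of this diff), a kernel-context caller could combine it with the existing __ctl_store() accessor; lap_enabled() is a made-up name:

/*
 * Illustration only: read CR0 and test the low-address-protection
 * bit through the new bit-field view instead of open-coded masks.
 */
static inline bool lap_enabled(void)
{
	union ctlreg0 cr0;

	__ctl_store(cr0.val, 0, 0);	/* fetch control register 0 */
	return cr0.lap;			/* low-address-protection control */
}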
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 69cf5b5eddc9..a4811aa0304d 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -29,7 +29,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	int cmparg = (encoded_op << 20) >> 20;
 	int oldval = 0, newval, ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1 << oparg;
 
@@ -79,7 +79,7 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
 	int ret;
 
-	update_primary_asce(current);
+	load_kernel_asce();
 	asm volatile(
 		"   sacf 256\n"
 		"0: cs   %1,%4,0(%5)\n"
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 154b60089be9..4181d7baabba 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -32,16 +32,26 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096
 
+#define SIGP_CTRL_C	0x00800000
+
 struct sca_entry {
-	atomic_t scn;
+	atomic_t ctrl;
 	__u32	reserved;
 	__u64	sda;
 	__u64	reserved2[2];
 } __attribute__((packed));
 
+union ipte_control {
+	unsigned long val;
+	struct {
+		unsigned long k  : 1;
+		unsigned long kh : 31;
+		unsigned long kg : 32;
+	};
+};
+
 struct sca_block {
-	__u64	ipte_control;
+	union ipte_control ipte_control;
 	__u64	reserved[5];
 	__u64	mcn;
 	__u64	reserved2;
@@ -64,6 +74,7 @@ struct sca_block {
 #define CPUSTAT_ZARCH      0x00000800
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
+#define CPUSTAT_IBS        0x00000040
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -71,7 +82,9 @@ struct sca_block {
 
 struct kvm_s390_sie_block {
 	atomic_t cpuflags;		/* 0x0000 */
-	__u32	prefix;			/* 0x0004 */
+	__u32 : 1;			/* 0x0004 */
+	__u32 prefix : 18;
+	__u32 : 13;
 	__u8	reserved08[4];		/* 0x0008 */
#define PROG_IN_SIE (1<<0)
 	__u32	prog0c;			/* 0x000c */
@@ -85,12 +98,27 @@ struct kvm_s390_sie_block {
 	__u8	reserved40[4];		/* 0x0040 */
#define LCTL_CR0	0x8000
#define LCTL_CR6	0x0200
+#define LCTL_CR9	0x0040
+#define LCTL_CR10	0x0020
+#define LCTL_CR11	0x0010
#define LCTL_CR14	0x0002
 	__u16	lctl;			/* 0x0044 */
 	__s16	icpua;			/* 0x0046 */
-#define ICTL_LPSW 0x00400000
+#define ICTL_PINT	0x20000000
+#define ICTL_LPSW	0x00400000
+#define ICTL_STCTL	0x00040000
+#define ICTL_ISKE	0x00004000
+#define ICTL_SSKE	0x00002000
+#define ICTL_RRBE	0x00001000
+#define ICTL_TPROT	0x00000200
 	__u32	ictl;			/* 0x0048 */
 	__u32	eca;			/* 0x004c */
+#define ICPT_INST	0x04
+#define ICPT_PROGI	0x08
+#define ICPT_INSTPROGI	0x0C
+#define ICPT_OPEREXC	0x2C
+#define ICPT_PARTEXEC	0x38
+#define ICPT_IOINST	0x40
 	__u8	icptcode;		/* 0x0050 */
 	__u8	reserved51;		/* 0x0051 */
 	__u16	ihcpu;			/* 0x0052 */
@@ -109,9 +137,24 @@ struct kvm_s390_sie_block {
 	psw_t	gpsw;			/* 0x0090 */
 	__u64	gg14;			/* 0x00a0 */
 	__u64	gg15;			/* 0x00a8 */
-	__u8	reservedb0[30];		/* 0x00b0 */
-	__u16	iprcc;			/* 0x00ce */
-	__u8	reservedd0[48];		/* 0x00d0 */
+	__u8	reservedb0[20];		/* 0x00b0 */
+	__u16	extcpuaddr;		/* 0x00c4 */
+	__u16	eic;			/* 0x00c6 */
+	__u32	reservedc8;		/* 0x00c8 */
+	__u16	pgmilc;			/* 0x00cc */
+	__u16	iprcc;			/* 0x00ce */
+	__u32	dxc;			/* 0x00d0 */
+	__u16	mcn;			/* 0x00d4 */
+	__u8	perc;			/* 0x00d6 */
+	__u8	peratmid;		/* 0x00d7 */
+	__u64	peraddr;		/* 0x00d8 */
+	__u8	eai;			/* 0x00e0 */
+	__u8	peraid;			/* 0x00e1 */
+	__u8	oai;			/* 0x00e2 */
+	__u8	armid;			/* 0x00e3 */
+	__u8	reservede4[4];		/* 0x00e4 */
+	__u64	tecmc;			/* 0x00e8 */
+	__u8	reservedf0[16];		/* 0x00f0 */
 	__u64	gcr[16];		/* 0x0100 */
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
@@ -146,6 +189,8 @@ struct kvm_vcpu_stat {
 	u32 exit_instruction;
 	u32 instruction_lctl;
 	u32 instruction_lctlg;
+	u32 instruction_stctl;
+	u32 instruction_stctg;
 	u32 exit_program_interruption;
 	u32 exit_instr_and_program;
 	u32 deliver_external_call;
@@ -164,6 +209,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stpx;
 	u32 instruction_stap;
 	u32 instruction_storage_key;
+	u32 instruction_ipte_interlock;
 	u32 instruction_stsch;
 	u32 instruction_chsc;
 	u32 instruction_stsi;
@@ -183,13 +229,58 @@ struct kvm_vcpu_stat {
 	u32 diagnose_9c;
 };
 
-#define PGM_OPERATION            0x01
-#define PGM_PRIVILEGED_OP        0x02
-#define PGM_EXECUTE              0x03
-#define PGM_PROTECTION           0x04
-#define PGM_ADDRESSING           0x05
-#define PGM_SPECIFICATION        0x06
-#define PGM_DATA                 0x07
+#define PGM_OPERATION			0x01
+#define PGM_PRIVILEGED_OP		0x02
+#define PGM_EXECUTE			0x03
+#define PGM_PROTECTION			0x04
+#define PGM_ADDRESSING			0x05
+#define PGM_SPECIFICATION		0x06
+#define PGM_DATA			0x07
+#define PGM_FIXED_POINT_OVERFLOW	0x08
+#define PGM_FIXED_POINT_DIVIDE		0x09
+#define PGM_DECIMAL_OVERFLOW		0x0a
+#define PGM_DECIMAL_DIVIDE		0x0b
+#define PGM_HFP_EXPONENT_OVERFLOW	0x0c
+#define PGM_HFP_EXPONENT_UNDERFLOW	0x0d
+#define PGM_HFP_SIGNIFICANCE		0x0e
+#define PGM_HFP_DIVIDE			0x0f
+#define PGM_SEGMENT_TRANSLATION		0x10
+#define PGM_PAGE_TRANSLATION		0x11
+#define PGM_TRANSLATION_SPEC		0x12
+#define PGM_SPECIAL_OPERATION		0x13
+#define PGM_OPERAND			0x15
+#define PGM_TRACE_TABEL			0x16
+#define PGM_SPACE_SWITCH		0x1c
+#define PGM_HFP_SQUARE_ROOT		0x1d
+#define PGM_PC_TRANSLATION_SPEC		0x1f
+#define PGM_AFX_TRANSLATION		0x20
+#define PGM_ASX_TRANSLATION		0x21
+#define PGM_LX_TRANSLATION		0x22
+#define PGM_EX_TRANSLATION		0x23
+#define PGM_PRIMARY_AUTHORITY		0x24
+#define PGM_SECONDARY_AUTHORITY		0x25
+#define PGM_LFX_TRANSLATION		0x26
+#define PGM_LSX_TRANSLATION		0x27
+#define PGM_ALET_SPECIFICATION		0x28
+#define PGM_ALEN_TRANSLATION		0x29
+#define PGM_ALE_SEQUENCE		0x2a
+#define PGM_ASTE_VALIDITY		0x2b
+#define PGM_ASTE_SEQUENCE		0x2c
+#define PGM_EXTENDED_AUTHORITY		0x2d
+#define PGM_LSTE_SEQUENCE		0x2e
+#define PGM_ASTE_INSTANCE		0x2f
+#define PGM_STACK_FULL			0x30
+#define PGM_STACK_EMPTY			0x31
+#define PGM_STACK_SPECIFICATION		0x32
+#define PGM_STACK_TYPE			0x33
+#define PGM_STACK_OPERATION		0x34
+#define PGM_ASCE_TYPE			0x38
+#define PGM_REGION_FIRST_TRANS		0x39
+#define PGM_REGION_SECOND_TRANS		0x3a
+#define PGM_REGION_THIRD_TRANS		0x3b
+#define PGM_MONITOR			0x40
+#define PGM_PER				0x80
+#define PGM_CRYPTO_OPERATION		0x119
 
 struct kvm_s390_interrupt_info {
 	struct list_head list;
@@ -229,6 +320,45 @@ struct kvm_s390_float_interrupt {
 	unsigned int irq_count;
 };
 
+struct kvm_hw_wp_info_arch {
+	unsigned long addr;
+	unsigned long phys_addr;
+	int len;
+	char *old_data;
+};
+
+struct kvm_hw_bp_info_arch {
+	unsigned long addr;
+	int len;
+};
+
+/*
+ * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
+ * Further KVM_GUESTDBG flags which can be used from userspace can be found in
+ * arch/s390/include/uapi/asm/kvm.h
+ */
+#define KVM_GUESTDBG_EXIT_PENDING	0x10000000
+
+#define guestdbg_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
+#define guestdbg_sstep_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+#define guestdbg_hw_bp_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
+		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+
+struct kvm_guestdbg_info_arch {
+	unsigned long cr0;
+	unsigned long cr9;
+	unsigned long cr10;
+	unsigned long cr11;
+	struct kvm_hw_bp_info_arch *hw_bp_info;
+	struct kvm_hw_wp_info_arch *hw_wp_info;
+	int nr_hw_bp;
+	int nr_hw_wp;
+	unsigned long last_bp;
+};
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
@@ -238,11 +368,13 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct tasklet_struct tasklet;
+	struct kvm_s390_pgm_info pgm;
 	union  {
 		struct cpuid	cpu_id;
 		u64		stidp_data;
 	};
 	struct gmap *gmap;
+	struct kvm_guestdbg_info_arch guestdbg;
#define KVM_S390_PFAULT_TOKEN_INVALID	(-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
@@ -285,7 +417,10 @@ struct kvm_arch{
 	struct gmap *gmap;
 	int css_support;
 	int use_irqchip;
+	int use_cmma;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
+	wait_queue_head_t ipte_wq;
+	spinlock_t start_stop_lock;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
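For context, a hedged sketch of how the new guest-debug helpers are meant to be consumed (the handler name and return convention below are invented, not taken from this diff):

/*
 * Hypothetical illustration: an instruction-intercept handler checks
 * whether a debug exit to userspace is pending before resuming SIE.
 */
static int handle_some_intercept(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	/* ... emulate the intercepted instruction here ... */
	if (guestdbg_exit_pending(vcpu))
		rc = 1;		/* caller bounces out to userspace */
	return rc;
}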
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..4349197ab9df 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -56,13 +56,14 @@ struct _lowcore {
 	__u16	pgm_code;		/* 0x008e */
 	__u32	trans_exc_code;		/* 0x0090 */
 	__u16	mon_class_num;		/* 0x0094 */
-	__u16	per_perc_atmid;		/* 0x0096 */
+	__u8	per_code;		/* 0x0096 */
+	__u8	per_atmid;		/* 0x0097 */
 	__u32	per_address;		/* 0x0098 */
 	__u32	monitor_code;		/* 0x009c */
 	__u8	exc_access_id;		/* 0x00a0 */
 	__u8	per_access_id;		/* 0x00a1 */
 	__u8	op_access_id;		/* 0x00a2 */
-	__u8	ar_access_id;		/* 0x00a3 */
+	__u8	ar_mode_id;		/* 0x00a3 */
 	__u8	pad_0x00a4[0x00b8-0x00a4];	/* 0x00a4 */
 	__u16	subchannel_id;		/* 0x00b8 */
 	__u16	subchannel_nr;		/* 0x00ba */
@@ -93,7 +94,9 @@ struct _lowcore {
 	__u32	save_area_sync[8];	/* 0x0200 */
 	__u32	save_area_async[8];	/* 0x0220 */
 	__u32	save_area_restart[1];	/* 0x0240 */
-	__u8	pad_0x0244[0x0248-0x0244];	/* 0x0244 */
+
+	/* CPU flags. */
+	__u32	cpu_flags;		/* 0x0244 */
 
 	/* Return psws. */
 	psw_t	return_psw;		/* 0x0248 */
@@ -139,12 +142,9 @@ struct _lowcore {
 	__u32	percpu_offset;		/* 0x02f0 */
 	__u32	machine_flags;		/* 0x02f4 */
 	__u32	ftrace_func;		/* 0x02f8 */
-	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
-
-	/* Interrupt response block */
-	__u8	irb[64];		/* 0x0300 */
+	__u32	spinlock_lockval;	/* 0x02fc */
 
-	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
+	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -196,12 +196,13 @@ struct _lowcore {
 	__u16	pgm_code;		/* 0x008e */
 	__u32	data_exc_code;		/* 0x0090 */
 	__u16	mon_class_num;		/* 0x0094 */
-	__u16	per_perc_atmid;		/* 0x0096 */
+	__u8	per_code;		/* 0x0096 */
+	__u8	per_atmid;		/* 0x0097 */
 	__u64	per_address;		/* 0x0098 */
 	__u8	exc_access_id;		/* 0x00a0 */
 	__u8	per_access_id;		/* 0x00a1 */
 	__u8	op_access_id;		/* 0x00a2 */
-	__u8	ar_access_id;		/* 0x00a3 */
+	__u8	ar_mode_id;		/* 0x00a3 */
 	__u8	pad_0x00a4[0x00a8-0x00a4];	/* 0x00a4 */
 	__u64	trans_exc_code;		/* 0x00a8 */
 	__u64	monitor_code;		/* 0x00b0 */
@@ -237,7 +238,9 @@ struct _lowcore {
 	__u64	save_area_sync[8];	/* 0x0200 */
 	__u64	save_area_async[8];	/* 0x0240 */
 	__u64	save_area_restart[1];	/* 0x0280 */
-	__u8	pad_0x0288[0x0290-0x0288];	/* 0x0288 */
+
+	/* CPU flags. */
+	__u64	cpu_flags;		/* 0x0288 */
 
 	/* Return psws. */
 	psw_t	return_psw;		/* 0x0290 */
@@ -285,15 +288,13 @@ struct _lowcore {
 	__u64	machine_flags;		/* 0x0388 */
 	__u64	ftrace_func;		/* 0x0390 */
 	__u64	gmap;			/* 0x0398 */
-	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
-
-	/* Interrupt response block. */
-	__u8	irb[64];		/* 0x0400 */
+	__u32	spinlock_lockval;	/* 0x03a0 */
+	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
 
 	/* Per cpu primary space access list */
-	__u32	paste[16];		/* 0x0440 */
+	__u32	paste[16];		/* 0x0400 */
 
-	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
+	__u8	pad_0x04c0[0x0e00-0x0440];	/* 0x0440 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f77695a82f64..a5e656260a70 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,6 +16,8 @@ typedef struct {
 	unsigned long vdso_base;
 	/* The mmu context has extended page tables. */
 	unsigned int has_pgste:1;
+	/* The mmu context uses storage keys. */
+	unsigned int use_skey:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 71be346d0e3c..c28f32a45af5 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
 	mm->context.has_pgste = 0;
+	mm->context.use_skey = 0;
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
@@ -30,33 +31,31 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-static inline void update_user_asce(struct mm_struct *mm, int load_primary)
+static inline void set_user_asce(struct mm_struct *mm)
 {
 	pgd_t *pgd = mm->pgd;
 
 	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
 	set_fs(current->thread.mm_segment);
+	set_cpu_flag(CIF_ASCE);
 }
 
-static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+static inline void clear_user_asce(void)
 {
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
 
-	if (load_primary)
-		__ctl_load(S390_lowcore.user_asce, 1, 1);
+	__ctl_load(S390_lowcore.user_asce, 1, 1);
 	__ctl_load(S390_lowcore.user_asce, 7, 7);
 }
 
-static inline void update_primary_asce(struct task_struct *tsk)
+static inline void load_kernel_asce(void)
 {
 	unsigned long asce;
 
 	__ctl_store(asce, 1, 1);
 	if (asce != S390_lowcore.kernel_asce)
 		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	set_tsk_thread_flag(tsk, TIF_ASCE);
+	set_cpu_flag(CIF_ASCE);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -64,25 +63,17 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	int cpu = smp_processor_id();
 
-	update_primary_asce(tsk);
 	if (prev == next)
 		return;
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-	if (atomic_inc_return(&next->context.attach_count) >> 16) {
-		/* Delay update_user_asce until all TLB flushes are done. */
-		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
-		/* Clear old ASCE by loading the kernel ASCE. */
-		clear_user_asce(next, 0);
-	} else {
-		cpumask_set_cpu(cpu, mm_cpumask(next));
-		update_user_asce(next, 0);
-		if (next->context.flush_mm)
-			/* Flush pending TLBs */
-			__tlb_flush_mm(next);
-	}
+	/* Clear old ASCE by loading the kernel ASCE. */
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	/* Delay loading of the new ASCE to control registers CR1 & CR7 */
+	set_cpu_flag(CIF_ASCE);
+	atomic_inc(&next->context.attach_count);
 	atomic_dec(&prev->context.attach_count);
-	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
@@ -93,15 +84,14 @@ static inline void finish_arch_post_lock_switch(void)
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+	if (!mm)
 		return;
 	preempt_disable();
-	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
 	while (atomic_read(&mm->context.attach_count) >> 16)
 		cpu_relax();
 
 	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-	update_user_asce(mm, 0);
+	set_user_asce(mm);
 	if (mm->context.flush_mm)
 		__tlb_flush_mm(mm);
 	preempt_enable();
@@ -113,7 +103,9 @@ static inline void finish_arch_post_lock_switch(void)
 static inline void activate_mm(struct mm_struct *prev,
                                struct mm_struct *next)
 {
-	switch_mm(prev, next, current);
+	switch_mm(prev, next, current);
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+	set_user_asce(next);
 }
 
 static inline void arch_dup_mmap(struct mm_struct *oldmm,
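The switch_mm() rework defers the expensive CR1/CR7 loads: switch_mm() only installs the kernel ASCE and raises CIF_ASCE, and the user ASCE is loaded on the way back to user space. A simplified sketch of that idea, under the assumption of a C-level exit path (the real check lives in the entry code, not in C):

/*
 * Hypothetical sketch of the CIF_ASCE deferral; the function name is
 * invented for illustration.
 */
static void return_to_user_fixup(void)
{
	if (test_cpu_flag(CIF_ASCE)) {
		clear_cpu_flag(CIF_ASCE);
		/* install the user ASCE in CR1 and CR7 */
		__ctl_load(S390_lowcore.user_asce, 1, 1);
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
}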
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 2583466f576b..c030900320e0 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -78,10 +78,16 @@ struct zpci_dev {
 	enum zpci_state state;
 	u32		fid;		/* function ID, used by sclp */
 	u32		fh;		/* function handle, used by insn's */
+	u16		vfn;		/* virtual function number */
 	u16		pchid;		/* physical channel ID */
 	u8		pfgid;		/* function group ID */
+	u8		pft;		/* pci function type */
 	u16		domain;
 
+	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
+	u32 uid;			/* user defined id */
+	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
+
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
@@ -120,6 +126,8 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
 	return (zdev->fh & (1UL << 31)) ? true : false;
 }
 
+extern const struct attribute_group *zpci_attr_groups[];
+
 /* -----------------------------------------------------------------------------
   Prototypes
 ----------------------------------------------------------------------------- */
@@ -166,10 +174,6 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
 struct zpci_dev *get_zdev(struct pci_dev *);
 struct zpci_dev *get_zdev_by_fid(u32);
 
-/* sysfs */
-int zpci_sysfs_add_device(struct device *);
-void zpci_sysfs_remove_device(struct device *);
-
 /* DMA */
 int zpci_dma_init(void);
 void zpci_dma_exit(void);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index d31d739f8689..dd78f92f1cce 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -44,6 +44,7 @@ struct clp_fh_list_entry {
 #define CLP_SET_DISABLE_PCI_FN	1	/* Yes, 1 disables it */
 
 #define CLP_UTIL_STR_LEN	64
+#define CLP_PFIP_NR_SEGMENTS	4
 
 /* List PCI functions request */
 struct clp_req_list_pci {
@@ -85,7 +86,7 @@ struct clp_rsp_query_pci {
 	struct clp_rsp_hdr hdr;
 	u32 fmt			:  4;	/* cmd request block format */
 	u32			: 28;
-	u64 reserved1;
+	u64			: 64;
 	u16 vfn;			/* virtual fn number */
 	u16			:  7;
 	u16 util_str_avail	:  1;	/* utility string available? */
@@ -94,10 +95,13 @@ struct clp_rsp_query_pci {
 	u8 bar_size[PCI_BAR_COUNT];
 	u16 pchid;
 	u32 bar[PCI_BAR_COUNT];
-	u64 reserved2;
+	u8 pfip[CLP_PFIP_NR_SEGMENTS];	/* pci function internal path */
+	u32			: 24;
+	u8 pft;				/* pci function type */
 	u64 sdma;			/* start dma as */
 	u64 edma;			/* end dma as */
-	u64 reserved3[6];
+	u32 reserved[11];
+	u32 uid;			/* user defined id */
 	u8 util_str[CLP_UTIL_STR_LEN];	/* utility string */
 } __packed;
 
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 884017cbfa9f..9e18a61d3df3 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,7 +22,8 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
+			    bool init_skey);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 12f75313e086..fcba5e03839f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -309,7 +309,8 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT	0x00200000UL
 #define PGSTE_GR_BIT	0x00040000UL
 #define PGSTE_GC_BIT	0x00020000UL
-#define PGSTE_IN_BIT	0x00008000UL	/* IPTE notify bit */
+#define PGSTE_UC_BIT	0x00008000UL	/* user dirty (migration) */
+#define PGSTE_IN_BIT	0x00004000UL	/* IPTE notify bit */
 
 #else /* CONFIG_64BIT */
 
@@ -391,7 +392,8 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT	0x0020000000000000UL
 #define PGSTE_GR_BIT	0x0004000000000000UL
 #define PGSTE_GC_BIT	0x0002000000000000UL
-#define PGSTE_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */
+#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
+#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
 
 #endif /* CONFIG_64BIT */
 
@@ -466,6 +468,16 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 #endif
 	return 0;
 }
+
+static inline int mm_use_skey(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+	if (mm->context.use_skey)
+		return 1;
+#endif
+	return 0;
+}
+
 /*
  * pgd/pmd/pte query functions
  */
@@ -699,26 +711,17 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
-static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
+static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
+				       struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address, bits, skey;
 
-	if (pte_val(*ptep) & _PAGE_INVALID)
+	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
 	skey = (unsigned long) page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
-	if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
-		/* Transfer dirty + referenced bit to host bits in pgste */
-		pgste_val(pgste) |= bits << 52;
-		page_set_storage_key(address, skey ^ bits, 0);
-	} else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
-		   (bits & _PAGE_REFERENCED)) {
-		/* Transfer referenced bit to host bit in pgste */
-		pgste_val(pgste) |= PGSTE_HR_BIT;
-		page_reset_referenced(address);
-	}
 	/* Transfer page changed & referenced bit to guest bits in pgste */
 	pgste_val(pgste) |= bits << 48;	/* GR bit & GC bit */
 	/* Copy page access key and fetch protection bit to pgste */
@@ -729,25 +732,14 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 }
 
-static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
-	if (pte_val(*ptep) & _PAGE_INVALID)
-		return pgste;
-	/* Get referenced bit from storage key */
-	if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
-		pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
-#endif
-	return pgste;
-}
-
-static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
+static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
+				 struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
 	unsigned long address;
 	unsigned long nkey;
 
-	if (pte_val(entry) & _PAGE_INVALID)
+	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
 		return;
 	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 	address = pte_val(entry) & PAGE_MASK;
@@ -757,23 +749,30 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 	 * key C/R to 0.
 	 */
 	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
+	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 	page_set_storage_key(address, nkey, 0);
 #endif
 }
 
-static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
+static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP &&
-	    (pte_val(entry) & _PAGE_PRESENT) &&
-	    (pte_val(entry) & _PAGE_WRITE)) {
-		/*
-		 * Without enhanced suppression-on-protection force
-		 * the dirty bit on for all writable ptes.
-		 */
-		pte_val(entry) |= _PAGE_DIRTY;
-		pte_val(entry) &= ~_PAGE_PROTECT;
+	if ((pte_val(entry) & _PAGE_PRESENT) &&
+	    (pte_val(entry) & _PAGE_WRITE) &&
+	    !(pte_val(entry) & _PAGE_INVALID)) {
+		if (!MACHINE_HAS_ESOP) {
+			/*
+			 * Without enhanced suppression-on-protection force
+			 * the dirty bit on for all writable ptes.
+			 */
+			pte_val(entry) |= _PAGE_DIRTY;
+			pte_val(entry) &= ~_PAGE_PROTECT;
+		}
+		if (!(pte_val(entry) & _PAGE_PROTECT))
+			/* This pte allows write access, set user-dirty */
+			pgste_val(pgste) |= PGSTE_UC_BIT;
 	}
 	*ptep = entry;
+	return pgste;
 }
 
 /**
@@ -839,6 +838,8 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
 void __gmap_zap(unsigned long address, struct gmap *);
+bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);
+
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
@@ -870,8 +871,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
-		pgste_set_key(ptep, pgste, entry);
-		pgste_set_pte(ptep, entry);
+		pgste_set_key(ptep, pgste, entry, mm);
+		pgste = pgste_set_pte(ptep, pgste, entry);
 		pgste_set_unlock(ptep, pgste);
 	} else {
 		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
@@ -1017,45 +1018,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
 }
 #endif
 
-/*
- * Get (and clear) the user dirty bit for a pte.
- */
-static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
-						 pte_t *ptep)
-{
-	pgste_t pgste;
-	int dirty = 0;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_update_all(ptep, pgste);
-		dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
-		pgste_val(pgste) &= ~PGSTE_HC_BIT;
-		pgste_set_unlock(ptep, pgste);
-		return dirty;
-	}
-	return dirty;
-}
-
-/*
- * Get (and clear) the user referenced bit for a pte.
- */
-static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
-						 pte_t *ptep)
-{
-	pgste_t pgste;
-	int young = 0;
-
-	if (mm_has_pgste(mm)) {
-		pgste = pgste_get_lock(ptep);
-		pgste = pgste_update_young(ptep, pgste);
-		young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
-		pgste_val(pgste) &= ~PGSTE_HR_BIT;
-		pgste_set_unlock(ptep, pgste);
-	}
-	return young;
-}
-
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
 	unsigned long pto = (unsigned long) ptep;
@@ -1118,6 +1080,36 @@ static inline void ptep_flush_lazy(struct mm_struct *mm,
 	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
+/*
+ * Get (and clear) the user dirty bit for a pte.
+ */
+static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
+						 unsigned long addr,
+						 pte_t *ptep)
+{
+	pgste_t pgste;
+	pte_t pte;
+	int dirty;
+
+	if (!mm_has_pgste(mm))
+		return 0;
+	pgste = pgste_get_lock(ptep);
+	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
+	pgste_val(pgste) &= ~PGSTE_UC_BIT;
+	pte = *ptep;
+	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
+		__ptep_ipte(addr, ptep);
+		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
+			pte_val(pte) |= _PAGE_PROTECT;
+		else
+			pte_val(pte) |= _PAGE_INVALID;
+		*ptep = pte;
+	}
+	pgste_set_unlock(ptep, pgste);
+	return dirty;
+}
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
@@ -1137,7 +1129,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	pte = pte_mkold(pte);
 
 	if (mm_has_pgste(vma->vm_mm)) {
-		pgste_set_pte(ptep, pte);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1182,7 +1174,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1205,7 +1197,7 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	ptep_flush_lazy(mm, address, ptep);
 
 	if (mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set(ptep, pgste);
 	}
 	return pte;
@@ -1219,8 +1211,8 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get(ptep);
-		pgste_set_key(ptep, pgste, pte);
-		pgste_set_pte(ptep, pte);
+		pgste_set_key(ptep, pgste, pte, mm);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1246,7 +1238,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 		    _PGSTE_GPS_USAGE_UNUSED)
 			pte_val(pte) |= _PAGE_UNUSED;
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1278,7 +1270,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (!full && mm_has_pgste(mm)) {
-		pgste = pgste_update_all(&pte, pgste);
+		pgste = pgste_update_all(&pte, pgste, mm);
 		pgste_set_unlock(ptep, pgste);
 	}
 	return pte;
@@ -1301,7 +1293,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	pte = pte_wrprotect(pte);
 
 	if (mm_has_pgste(mm)) {
-		pgste_set_pte(ptep, pte);
+		pgste = pgste_set_pte(ptep, pgste, pte);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = pte;
@@ -1326,7 +1318,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 	ptep_flush_direct(vma->vm_mm, address, ptep);
 
 	if (mm_has_pgste(vma->vm_mm)) {
-		pgste_set_pte(ptep, entry);
+		pgste = pgste_set_pte(ptep, pgste, entry);
 		pgste_set_unlock(ptep, pgste);
 	} else
 		*ptep = entry;
@@ -1734,6 +1726,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
+extern void s390_enable_skey(void);
 
 /*
  * No page table caches to initialise
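The PGSTE_UC_BIT plus gmap_test_and_clear_dirty() form the software dirty-tracking path used for guest migration. A hedged sketch of how a dirty-log pass might consume it (the loop and bitmap helpers below are assumptions for illustration, not code from this diff):

/*
 * Hypothetical dirty-log sweep over a guest address range; one bit per
 * guest page is recorded in a caller-provided bitmap.
 */
static void sync_dirty_log(struct gmap *gmap, unsigned long *bitmap,
			   unsigned long start, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++)
		if (gmap_test_and_clear_dirty(start + i * PAGE_SIZE, gmap))
			set_bit(i, bitmap);	/* guest wrote this page */
}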
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index dc5fc4f90e52..6f02d452bbee 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -11,6 +11,13 @@
 #ifndef __ASM_S390_PROCESSOR_H
 #define __ASM_S390_PROCESSOR_H
 
+#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
+#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
+
+#define _CIF_MCCK_PENDING	(1<<CIF_MCCK_PENDING)
+#define _CIF_ASCE		(1<<CIF_ASCE)
+
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -21,6 +28,21 @@
 #include <asm/setup.h>
 #include <asm/runtime_instr.h>
 
+static inline void set_cpu_flag(int flag)
+{
+	S390_lowcore.cpu_flags |= (1U << flag);
+}
+
+static inline void clear_cpu_flag(int flag)
+{
+	S390_lowcore.cpu_flags &= ~(1U << flag);
+}
+
+static inline int test_cpu_flag(int flag)
+{
+	return !!(S390_lowcore.cpu_flags & (1U << flag));
+}
+
 /*
  * Default implementation of macro that returns current
  * instruction pointer ("program counter").
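These CIF bits replace per-task TIF flags with per-cpu state kept in the lowcore. As a small illustration (not from the patch; the wrapper names are invented), the former TIF_MCCK_PENDING pattern now reads:

/* Hypothetical wrappers around the new per-cpu flag helpers. */
static inline int mcck_pending(void)
{
	return test_cpu_flag(CIF_MCCK_PENDING);	/* reads lowcore.cpu_flags */
}

static inline void mcck_done(void)
{
	clear_cpu_flag(CIF_MCCK_PENDING);
}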
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f4783c0b7b43..55d69dd7473c 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -8,6 +8,12 @@
 
 #include <uapi/asm/ptrace.h>
 
+#define PIF_SYSCALL		0	/* inside a system call */
+#define PIF_PER_TRAP		1	/* deliver sigtrap on return to user */
+
+#define _PIF_SYSCALL		(1<<PIF_SYSCALL)
+#define _PIF_PER_TRAP		(1<<PIF_PER_TRAP)
+
 #ifndef __ASSEMBLY__
 
 #define PSW_KERNEL_BITS	(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
@@ -16,6 +22,50 @@
 			 PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
 			 PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
+struct psw_bits {
+	unsigned long long	: 1;
+	unsigned long long r	: 1; /* PER-Mask */
+	unsigned long long	: 3;
+	unsigned long long t	: 1; /* DAT Mode */
+	unsigned long long i	: 1; /* Input/Output Mask */
+	unsigned long long e	: 1; /* External Mask */
+	unsigned long long key	: 4; /* PSW Key */
+	unsigned long long	: 1;
+	unsigned long long m	: 1; /* Machine-Check Mask */
+	unsigned long long w	: 1; /* Wait State */
+	unsigned long long p	: 1; /* Problem State */
+	unsigned long long as	: 2; /* Address Space Control */
+	unsigned long long cc	: 2; /* Condition Code */
+	unsigned long long pm	: 4; /* Program Mask */
+	unsigned long long ri	: 1; /* Runtime Instrumentation */
+	unsigned long long	: 6;
+	unsigned long long eaba : 2; /* Addressing Mode */
+#ifdef CONFIG_64BIT
+	unsigned long long	: 31;
+	unsigned long long ia	: 64;/* Instruction Address */
+#else
+	unsigned long long ia	: 31;/* Instruction Address */
+#endif
+};
+
+enum {
+	PSW_AMODE_24BIT = 0,
+	PSW_AMODE_31BIT = 1,
+	PSW_AMODE_64BIT = 3
+};
+
+enum {
+	PSW_AS_PRIMARY	 = 0,
+	PSW_AS_ACCREG	 = 1,
+	PSW_AS_SECONDARY = 2,
+	PSW_AS_HOME	 = 3
+};
+
+#define psw_bits(__psw) (*({			\
+	typecheck(psw_t, __psw);		\
+	&(*(struct psw_bits *)(&(__psw)));	\
+}))
+
 /*
  * The pt_regs struct defines the way the registers are stored on
  * the stack during a system call.
@@ -29,6 +79,7 @@ struct pt_regs
 	unsigned int int_code;
 	unsigned int int_parm;
 	unsigned long int_parm_long;
+	unsigned long flags;
 };
 
 /*
@@ -79,6 +130,21 @@ struct per_struct_kernel {
 #define PER_CONTROL_SUSPENSION		0x00400000UL
 #define PER_CONTROL_ALTERATION		0x00200000UL
 
+static inline void set_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	regs->flags |= (1U << flag);
+}
+
+static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	regs->flags &= ~(1U << flag);
+}
+
+static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	return !!(regs->flags & (1U << flag));
+}
+
 /*
  * These are defined as per linux/ptrace.h, which see.
 */
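The psw_bits() macro reinterprets a psw_t as the struct psw_bits bit-field view, so PSW fields can be read and written by name. A minimal hedged sketch (function name invented for illustration):

/* Illustration only: query PSW state through psw_bits(). */
static inline int example_user_mode(struct pt_regs *regs)
{
	return psw_bits(regs->psw).p &&			/* problem state */
	       psw_bits(regs->psw).as == PSW_AS_PRIMARY;	/* primary space */
}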
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2f5e9932b4de..1aba89b53cb9 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -28,7 +28,11 @@ struct sclp_ipl_info {
 
 struct sclp_cpu_entry {
 	u8 address;
-	u8 reserved0[13];
+	u8 reserved0[2];
+	u8 : 3;
+	u8 siif : 1;
+	u8 : 4;
+	u8 reserved2[10];
 	u8 type;
 	u8 reserved1;
 } __attribute__((packed));
@@ -61,5 +65,7 @@ int sclp_pci_deconfigure(u32 fid);
 int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
+int sclp_has_siif(void);
+unsigned int sclp_get_ibc(void);
 
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b31b22dba948..089a49814c50 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -9,7 +9,6 @@
 
 #define PARMAREA		0x10400
-#define MEMORY_CHUNKS		256
 
 #ifndef __ASSEMBLY__
@@ -31,22 +30,11 @@
 #endif /* CONFIG_64BIT */
 #define COMMAND_LINE      ((char *)            (0x10480))
 
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-struct mem_chunk {
-	unsigned long addr;
-	unsigned long size;
-	int type;
-};
-
-extern struct mem_chunk memory_chunk[];
 extern int memory_end_set;
 extern unsigned long memory_end;
+extern unsigned long max_physmem_end;
 
-void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
-void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
-		     unsigned long size);
+extern void detect_memory_memblock(void);
 
 /*
  * Machine features detected in head.S
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 21703f85b48d..4f1307962a95 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -30,7 +30,6 @@ extern int smp_store_status(int cpu);
 extern int smp_vcpu_scheduled(int cpu);
 extern void smp_yield_cpu(int cpu);
 extern void smp_yield(void);
-extern void smp_stop_cpu(void);
 extern void smp_cpu_set_polarization(int cpu, int val);
 extern int smp_cpu_get_polarization(int cpu);
 extern void smp_fill_possible_mask(void);
@@ -54,6 +53,8 @@ static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
+#endif /* CONFIG_SMP */
+
 static inline void smp_stop_cpu(void)
 {
 	u16 pcpu = stap();
@@ -64,8 +65,6 @@ static inline void smp_stop_cpu(void)
 	}
 }
 
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_HOTPLUG_CPU
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 83e5d216105e..96879f7ad6da 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -11,18 +11,21 @@
 
 #include <linux/smp.h>
 
+#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+
 extern int spin_retry;
 
 static inline int
-_raw_compare_and_swap(volatile unsigned int *lock,
-		      unsigned int old, unsigned int new)
+_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
+	unsigned int old_expected = old;
+
 	asm volatile(
 		"	cs	%0,%3,%1"
 		: "=d" (old), "=Q" (*lock)
 		: "0" (old), "d" (new), "Q" (*lock)
 		: "cc", "memory" );
-	return old;
+	return old == old_expected;
 }
 
 /*
@@ -34,57 +37,69 @@ _raw_compare_and_swap(volatile unsigned int *lock,
 * (the type definitions are in asm/spinlock_types.h)
 */
 
-#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define arch_spin_unlock_wait(lock) \
-	do { while (arch_spin_is_locked(lock)) \
-		 arch_spin_relax(lock); } while (0)
+void arch_spin_lock_wait(arch_spinlock_t *);
+int arch_spin_trylock_retry(arch_spinlock_t *);
+void arch_spin_relax(arch_spinlock_t *);
+void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
-extern void arch_spin_lock_wait(arch_spinlock_t *);
-extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int arch_spin_trylock_retry(arch_spinlock_t *);
-extern void arch_spin_relax(arch_spinlock_t *lock);
+static inline u32 arch_spin_lockval(int cpu)
+{
+	return ~cpu;
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-	return lock.owner_cpu == 0;
+	return lock.lock == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *lp)
+static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 {
-	int old;
+	return ACCESS_ONCE(lp->lock) != 0;
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait(lp);
+static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
+{
+	barrier();
+	return likely(arch_spin_value_unlocked(*lp) &&
+		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
 }
 
-static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
-					unsigned long flags)
+static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
 {
-	int old;
+	return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return;
-	arch_spin_lock_wait_flags(lp, flags);
+static inline void arch_spin_lock(arch_spinlock_t *lp)
+{
+	if (!arch_spin_trylock_once(lp))
+		arch_spin_lock_wait(lp);
 }
 
-static inline int arch_spin_trylock(arch_spinlock_t *lp)
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
+					unsigned long flags)
 {
-	int old;
+	if (!arch_spin_trylock_once(lp))
+		arch_spin_lock_wait_flags(lp, flags);
+}
 
-	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0))
-		return 1;
-	return arch_spin_trylock_retry(lp);
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
+{
+	if (!arch_spin_trylock_once(lp))
+		return arch_spin_trylock_retry(lp);
+	return 1;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
+	arch_spin_tryrelease_once(lp);
+}
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	while (arch_spin_is_locked(lock))
+		arch_spin_relax(lock);
 }
- 
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -115,42 +130,50 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp);
 extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
 extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
 
+static inline int arch_read_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely((int) old >= 0 &&
+		      _raw_compare_and_swap(&rw->lock, old, old + 1));
+}
+
+static inline int arch_write_trylock_once(arch_rwlock_t *rw)
+{
+	unsigned int old = ACCESS_ONCE(rw->lock);
+	return likely(old == 0 &&
+		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+}
+
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!arch_read_trylock_once(rw))
 		_raw_read_lock_wait(rw);
 }
 
 static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
+	if (!arch_read_trylock_once(rw))
 		_raw_read_lock_wait_flags(rw, flags);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	unsigned int old, cmp;
+	unsigned int old;
 
-	old = rw->lock;
 	do {
-		cmp = old;
-		old = _raw_compare_and_swap(&rw->lock, old, old - 1);
-	} while (cmp != old);
+		old = ACCESS_ONCE(rw->lock);
+	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait(rw);
 }
 
 static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
 {
-	if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
+	if (!arch_write_trylock_once(rw))
 		_raw_write_lock_wait_flags(rw, flags);
 }
 
@@ -161,18 +184,16 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int old;
-	old = rw->lock & 0x7fffffffU;
-	if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
-		return 1;
-	return _raw_read_trylock_retry(rw);
+	if (!arch_read_trylock_once(rw))
+		return _raw_read_trylock_retry(rw);
+	return 1;
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
-		return 1;
-	return _raw_write_trylock_retry(rw);
+	if (!arch_write_trylock_once(rw))
+		return _raw_write_trylock_retry(rw);
+	return 1;
 }
 
 #define arch_read_relax(lock)	cpu_relax()
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index 9c76656a0af0..b2cd6ff7c2c5 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -6,13 +6,13 @@
 #endif
 
 typedef struct {
-	volatile unsigned int owner_cpu;
+	unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
-	volatile unsigned int lock;
+	unsigned int lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
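The reworked _raw_compare_and_swap() returns a success boolean instead of the old lock value, which lets every caller read as "try once, then take the slow path". A hedged restatement of the fast path (example_lock() is a made-up name; the real arch_spin_lock() is shown in the diff above):

/* Illustration only: the try-once-then-wait pattern the rework enables. */
static inline void example_lock(arch_spinlock_t *lp)
{
	if (_raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL))
		return;			/* uncontended fast path */
	arch_spin_lock_wait(lp);	/* contended slow path */
}

SPINLOCK_LOCKVAL is the per-cpu owner value cached in the new lowcore field spinlock_lockval, so the owning CPU can be identified without calling smp_processor_id() on every lock operation.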
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index e759181357fc..29c81f82705e 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,7 +132,6 @@ static inline void restore_access_regs(unsigned int *acrs)
 		update_cr_regs(next);					\
 	}								\
 	prev = __switch_to(prev,next);					\
-	update_primary_asce(current);					\
 } while (0)
 
 #define finish_arch_switch(prev) do {					     \
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 777687055e7b..abad78d5b10c 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -28,7 +28,7 @@ extern const unsigned int sys_call_table_emu[];
 static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
 {
-	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
+	return test_pt_regs_flag(regs, PIF_SYSCALL) ?
 		(regs->int_code & 0xffff) : -1;
 }
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 50630e6a35de..b833e9c0bfbf 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -77,32 +77,22 @@ static inline struct thread_info *current_thread_info(void)
 /*
  * thread information flags bit numbers
  */
-#define TIF_SYSCALL		0	/* inside a system call */
-#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
-#define TIF_SIGPENDING		2	/* signal pending */
-#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
-#define TIF_ASCE		5	/* primary asce needs fixup / uaccess */
-#define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
-#define TIF_MCCK_PENDING	7	/* machine check handling is pending */
-#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
-#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
-#define TIF_SECCOMP		10	/* secure computing */
-#define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
-#define TIF_31BIT		17	/* 32bit process */
-#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
-#define TIF_SINGLE_STEP		20	/* This task is single stepped */
-#define TIF_BLOCK_STEP		21	/* This task is block stepped */
+#define TIF_NOTIFY_RESUME	0	/* callback before returning to user */
+#define TIF_SIGPENDING		1	/* signal pending */
+#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
+#define TIF_SYSCALL_TRACE	3	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
+#define TIF_SECCOMP		5	/* secure computing */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_31BIT		16	/* 32bit process */
+#define TIF_MEMDIE		17	/* is terminating due to OOM killer */
+#define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal() */
+#define TIF_SINGLE_STEP		19	/* This task is single stepped */
+#define TIF_BLOCK_STEP		20	/* This task is block stepped */
 
-#define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
-#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
-#define _TIF_ASCE		(1<<TIF_ASCE)
-#define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
-#define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 05425b18c0aa..56af53093d24 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -26,21 +26,12 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 #define mc_capable() 1
 
-static inline const struct cpumask *cpu_coregroup_mask(int cpu)
-{
-	return &cpu_topology[cpu].core_mask;
-}
-
-static inline const struct cpumask *cpu_book_mask(int cpu)
-{
-	return &cpu_topology[cpu].book_mask;
-}
-
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
 void topology_expect_change(void);
+const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_BOOK */
 
@@ -64,8 +55,6 @@ static inline void s390_init_cpu_topology(void)
 };
 #endif
 
-#define SD_BOOK_INIT	SD_CPU_INIT
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 1be64a1506d0..cd4c68e0398d 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -132,6 +132,34 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+
+#define __put_get_user_asm(to, from, size, spec)		\
+({								\
+	register unsigned long __reg0 asm("0") = spec;		\
+	int __rc;						\
+								\
+	asm volatile(						\
+		"0:	mvcos	%1,%3,%2\n"			\
+		"1:	xr	%0,%0\n"			\
+		"2:\n"						\
+		".pushsection .fixup, \"ax\"\n"			\
+		"3:	lhi	%0,%5\n"			\
+		"	jg	2b\n"				\
+		".popsection\n"					\
+		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
+		: "=d" (__rc), "=Q" (*(to))			\
+		: "d" (size), "Q" (*(from)),			\
+		  "d" (__reg0), "K" (-EFAULT)			\
+		: "cc");					\
+	__rc;							\
+})
+
+#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
+#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
+
+#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
 	size = __copy_to_user(ptr, x, size);
@@ -144,6 +172,8 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
 	return size ? -EFAULT : 0;
 }
 
+#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
+
 /*
  * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
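Nothing changes for callers of the uaccess API: on z10 and newer machines get_user()/put_user() now compile down to the inline MVCOS sequence above rather than a call into the generic copy routines. A hedged usage sketch (the function below is invented for illustration):

/* Illustration only: ordinary get_user() use, unchanged by this patch. */
static int example_read_flag(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* may use mvcos under the hood */
		return -EFAULT;
	return val != 0;
}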