Diffstat (limited to 'arch/s390')
75 files changed, 2101 insertions, 865 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 68b68d755fdf..373cd5badf1c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -66,6 +66,7 @@ config S390 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_SG_CHAIN select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK select ARCH_INLINE_READ_LOCK_BH @@ -116,7 +117,6 @@ config S390 select HAVE_BPF_JIT if 64BIT && PACK_STACK select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL - select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DYNAMIC_FTRACE if 64BIT select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT @@ -151,7 +151,6 @@ config S390 select TTY select VIRT_CPU_ACCOUNTING select VIRT_TO_BUS - select ARCH_HAS_SG_CHAIN config SCHED_OMIT_FRAME_POINTER def_bool y @@ -185,6 +184,10 @@ config HAVE_MARCH_ZEC12_FEATURES def_bool n select HAVE_MARCH_Z196_FEATURES +config HAVE_MARCH_Z13_FEATURES + def_bool n + select HAVE_MARCH_ZEC12_FEATURES + choice prompt "Processor type" default MARCH_G5 @@ -244,6 +247,14 @@ config MARCH_ZEC12 2827 series). The kernel will be slightly faster but will not work on older machines. +config MARCH_Z13 + bool "IBM z13" + select HAVE_MARCH_Z13_FEATURES if 64BIT + help + Select this to enable optimizations for IBM z13 (2964 series). + The kernel will be slightly faster but will not work on older + machines. + endchoice config MARCH_G5_TUNE @@ -267,6 +278,9 @@ config MARCH_Z196_TUNE config MARCH_ZEC12_TUNE def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT +config MARCH_Z13_TUNE + def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT + choice prompt "Tune code generation" default TUNE_DEFAULT @@ -305,6 +319,9 @@ config TUNE_Z196 config TUNE_ZEC12 bool "IBM zBC12 and zEC12" +config TUNE_Z13 + bool "IBM z13" + endchoice config 64BIT @@ -356,14 +373,14 @@ config SMP Even if you don't know what to do here, say Y. config NR_CPUS - int "Maximum number of CPUs (2-256)" - range 2 256 + int "Maximum number of CPUs (2-512)" + range 2 512 depends on SMP default "32" if !64BIT default "64" if 64BIT help This allows you to specify the maximum number of CPUs which this - kernel will support. The maximum supported value is 256 and the + kernel will support. The maximum supported value is 512 and the minimum value which makes sense is 2. This is purely to save memory - each supported CPU adds @@ -378,17 +395,26 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. +config SCHED_SMT + def_bool n + config SCHED_MC def_bool n config SCHED_BOOK + def_bool n + +config SCHED_TOPOLOGY def_bool y - prompt "Book scheduler support" + prompt "Topology scheduler support" depends on SMP + select SCHED_SMT select SCHED_MC + select SCHED_BOOK help - Book scheduler support improves the CPU scheduler's decision making - when dealing with machines that have several books. + Topology scheduler support improves the CPU scheduler's decision + making when dealing with machines that have multi-threading, + multiple cores or multiple books. 
source kernel/Kconfig.preempt diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 878e67973151..acb6859c6a95 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109 mflags-$(CONFIG_MARCH_Z10) := -march=z10 mflags-$(CONFIG_MARCH_Z196) := -march=z196 mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12 +mflags-$(CONFIG_MARCH_Z13) := -march=z13 aflags-y += $(mflags-y) cflags-y += $(mflags-y) @@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109 cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10 cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196 cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12 +cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13 #KBUILD_IMAGE is necessary for make rpm KBUILD_IMAGE :=arch/s390/boot/image @@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y) cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack endif +ifdef CONFIG_FUNCTION_TRACER +# make use of hotpatch feature if the compiler supports it +cc_hotpatch := -mhotpatch=0,3 +ifeq ($(call cc-option-yn,$(cc_hotpatch)),y) +CC_FLAGS_FTRACE := $(cc_hotpatch) +KBUILD_AFLAGS += -DCC_USING_HOTPATCH +KBUILD_CFLAGS += -DCC_USING_HOTPATCH +endif +endif + KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y) KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare KBUILD_AFLAGS += $(aflags-y) diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 57cbaff1f397..42506b371b74 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -8,6 +8,7 @@ #include <asm/uaccess.h> #include <asm/page.h> +#include <asm/sclp.h> #include <asm/ipl.h> #include "sizes.h" @@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr; #include "../../../../lib/decompress_unxz.c" #endif -extern _sclp_print_early(const char *); - static int puts(const char *s) { _sclp_print_early(s); diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 9432d0f202ef..64707750c780 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y CONFIG_SLUB_DEBUG_ON=y CONFIG_SLUB_STATS=y -CONFIG_DEBUG_KMEMLEAK=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_VM=y CONFIG_DEBUG_VM_RB=y @@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y +CONFIG_PANIC_ON_OOPS=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 219dca6ea926..5c3097272cd8 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m +CONFIG_PANIC_ON_OOPS=y CONFIG_TIMER_STATS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 822c2f2e0c25..bda70f1ffd2c 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y +CONFIG_PANIC_ON_OOPS=y CONFIG_TIMER_STATS=y CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 
9d63051ebec4..1b0184a0f7f2 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y +CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 1f272b24fc0b..5566ce80abdb 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); if (unlikely(need_fallback(sctx->key_len))) { crypto_cipher_encrypt_one(sctx->fallback.cip, out, in); @@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); if (unlikely(need_fallback(sctx->key_len))) { crypto_cipher_decrypt_one(sctx->fallback.cip, out, in); diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 785c5f24d6f9..83ef702d2403 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y CONFIG_CGROUP_SCHED=y @@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y CONFIG_BLK_CGROUP=y CONFIG_NAMESPACES=y CONFIG_BLK_DEV_INITRD=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y CONFIG_EXPERT=y +CONFIG_BPF_SYSCALL=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y CONFIG_OPROFILE=y diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile index 06f8d95a16cd..2ee25ba252d6 100644 --- a/arch/s390/hypfs/Makefile +++ b/arch/s390/hypfs/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o +s390_hypfs-objs += hypfs_diag0c.o diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index b34b5ab90a31..eecde500ed49 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -37,6 +37,10 @@ extern int hypfs_vm_init(void); extern void hypfs_vm_exit(void); extern int hypfs_vm_create_files(struct dentry *root); +/* VM diagnose 0c */ +int hypfs_diag0c_init(void); +void hypfs_diag0c_exit(void); + /* Set Partition-Resource Parameter */ int hypfs_sprp_init(void); void hypfs_sprp_exit(void); @@ -49,7 +53,6 @@ struct hypfs_dbfs_data { void *buf_free_ptr; size_t size; struct hypfs_dbfs_file *dbfs_file; - struct kref kref; }; struct hypfs_dbfs_file { @@ -61,8 +64,6 @@ struct hypfs_dbfs_file { unsigned long); /* Private data for hypfs_dbfs.c */ - struct hypfs_dbfs_data *data; - struct delayed_work data_free_work; struct mutex lock; struct dentry *dentry; }; diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 47fe1055c714..752f6df3e697 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f) data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return NULL; - kref_init(&data->kref); data->dbfs_file = f; return data; } -static void hypfs_dbfs_data_free(struct kref *kref) +static void 
hypfs_dbfs_data_free(struct hypfs_dbfs_data *data) { - struct hypfs_dbfs_data *data; - - data = container_of(kref, struct hypfs_dbfs_data, kref); data->dbfs_file->data_free(data->buf_free_ptr); kfree(data); } -static void data_free_delayed(struct work_struct *work) -{ - struct hypfs_dbfs_data *data; - struct hypfs_dbfs_file *df; - - df = container_of(work, struct hypfs_dbfs_file, data_free_work.work); - mutex_lock(&df->lock); - data = df->data; - df->data = NULL; - mutex_unlock(&df->lock); - kref_put(&data->kref, hypfs_dbfs_data_free); -} - static ssize_t dbfs_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { @@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf, df = file_inode(file)->i_private; mutex_lock(&df->lock); - if (!df->data) { - data = hypfs_dbfs_data_alloc(df); - if (!data) { - mutex_unlock(&df->lock); - return -ENOMEM; - } - rc = df->data_create(&data->buf, &data->buf_free_ptr, - &data->size); - if (rc) { - mutex_unlock(&df->lock); - kfree(data); - return rc; - } - df->data = data; - schedule_delayed_work(&df->data_free_work, HZ); + data = hypfs_dbfs_data_alloc(df); + if (!data) { + mutex_unlock(&df->lock); + return -ENOMEM; + } + rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size); + if (rc) { + mutex_unlock(&df->lock); + kfree(data); + return rc; } - data = df->data; - kref_get(&data->kref); mutex_unlock(&df->lock); rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size); - kref_put(&data->kref, hypfs_dbfs_data_free); + hypfs_dbfs_data_free(data); return rc; } @@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) if (IS_ERR(df->dentry)) return PTR_ERR(df->dentry); mutex_init(&df->lock); - INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed); return 0; } diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c new file mode 100644 index 000000000000..d4c0d3717543 --- /dev/null +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -0,0 +1,139 @@ +/* + * Hypervisor filesystem for Linux on s390 + * + * Diag 0C implementation + * + * Copyright IBM Corp. 
2014 + */ + +#include <linux/slab.h> +#include <linux/cpu.h> +#include <asm/hypfs.h> +#include "hypfs.h" + +#define DBFS_D0C_HDR_VERSION 0 + +/* + * Execute diagnose 0c in 31 bit mode + */ +static void diag0c(struct hypfs_diag0c_entry *entry) +{ + asm volatile ( +#ifdef CONFIG_64BIT + " sam31\n" + " diag %0,%0,0x0c\n" + " sam64\n" +#else + " diag %0,%0,0x0c\n" +#endif + : /* no output register */ + : "a" (entry) + : "memory"); +} + +/* + * Get hypfs_diag0c_entry from CPU vector and store diag0c data + */ +static void diag0c_fn(void *data) +{ + diag0c(((void **) data)[smp_processor_id()]); +} + +/* + * Allocate buffer and store diag 0c data + */ +static void *diag0c_store(unsigned int *count) +{ + struct hypfs_diag0c_data *diag0c_data; + unsigned int cpu_count, cpu, i; + void **cpu_vec; + + get_online_cpus(); + cpu_count = num_online_cpus(); + cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL); + if (!cpu_vec) + goto fail_put_online_cpus; + /* Note: Diag 0c needs 8 byte alignment and real storage */ + diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) + + cpu_count * sizeof(struct hypfs_diag0c_entry), + GFP_KERNEL | GFP_DMA); + if (!diag0c_data) + goto fail_kfree_cpu_vec; + i = 0; + /* Fill CPU vector for each online CPU */ + for_each_online_cpu(cpu) { + diag0c_data->entry[i].cpu = cpu; + cpu_vec[cpu] = &diag0c_data->entry[i++]; + } + /* Collect data all CPUs */ + on_each_cpu(diag0c_fn, cpu_vec, 1); + *count = cpu_count; + kfree(cpu_vec); + put_online_cpus(); + return diag0c_data; + +fail_kfree_cpu_vec: + kfree(cpu_vec); +fail_put_online_cpus: + put_online_cpus(); + return ERR_PTR(-ENOMEM); +} + +/* + * Hypfs DBFS callback: Free diag 0c data + */ +static void dbfs_diag0c_free(const void *data) +{ + kfree(data); +} + +/* + * Hypfs DBFS callback: Create diag 0c data + */ +static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size) +{ + struct hypfs_diag0c_data *diag0c_data; + unsigned int count; + + diag0c_data = diag0c_store(&count); + if (IS_ERR(diag0c_data)) + return PTR_ERR(diag0c_data); + memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr)); + get_tod_clock_ext(diag0c_data->hdr.tod_ext); + diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry); + diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION; + diag0c_data->hdr.count = count; + *data = diag0c_data; + *data_free_ptr = diag0c_data; + *size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr); + return 0; +} + +/* + * Hypfs DBFS file structure + */ +static struct hypfs_dbfs_file dbfs_file_0c = { + .name = "diag_0c", + .data_create = dbfs_diag0c_create, + .data_free = dbfs_diag0c_free, +}; + +/* + * Initialize diag 0c interface for z/VM + */ +int __init hypfs_diag0c_init(void) +{ + if (!MACHINE_IS_VM) + return 0; + return hypfs_dbfs_create_file(&dbfs_file_0c); +} + +/* + * Shutdown diag 0c interface for z/VM + */ +void hypfs_diag0c_exit(void) +{ + if (!MACHINE_IS_VM) + return; + hypfs_dbfs_remove_file(&dbfs_file_0c); +} diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index c952b981e4f2..4c8008dd938e 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -482,10 +482,14 @@ static int __init hypfs_init(void) rc = -ENODATA; goto fail_hypfs_vm_exit; } + if (hypfs_diag0c_init()) { + rc = -ENODATA; + goto fail_hypfs_sprp_exit; + } s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); if (!s390_kobj) { rc = -ENOMEM; - goto fail_hypfs_sprp_exit; + goto fail_hypfs_diag0c_exit; } rc = register_filesystem(&hypfs_type); if (rc) @@ -494,6 +498,8 @@ static int 
__init hypfs_init(void) fail_filesystem: kobject_put(s390_kobj); +fail_hypfs_diag0c_exit: + hypfs_diag0c_exit(); fail_hypfs_sprp_exit: hypfs_sprp_exit(); fail_hypfs_vm_exit: @@ -510,6 +516,7 @@ static void __exit hypfs_exit(void) { unregister_filesystem(&hypfs_type); kobject_put(s390_kobj); + hypfs_diag0c_exit(); hypfs_sprp_exit(); hypfs_vm_exit(); hypfs_diag_exit(); diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index cb700d54bd83..5243a8679a1d 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } +/* Store CPU counter multiple for the MT utilization counter set */ +static inline int stcctm5(u64 num, u64 *val) +{ + typedef struct { u64 _[num]; } addrtype; + int cc; + + asm volatile ( + " .insn rsy,0xeb0000000017,%2,5,%1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc"); + return cc; +} + /* Query sampling information */ static inline int qsi(struct hws_qsi_info_block *info) { diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index f6e43d39e3d8..c9df40b5c0ac 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -163,8 +163,8 @@ extern unsigned int vdso_enabled; the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ -extern unsigned long randomize_et_dyn(unsigned long base); -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2)) +extern unsigned long randomize_et_dyn(void); +#define ELF_ET_DYN_BASE randomize_et_dyn() /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ @@ -209,7 +209,9 @@ do { \ } while (0) #endif /* CONFIG_COMPAT */ -#define STACK_RND_MASK 0x7ffUL +extern unsigned long mmap_rnd_mask; + +#define STACK_RND_MASK (mmap_rnd_mask) #define ARCH_DLINFO \ do { \ diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index abb618f1ead2..836c56290499 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -3,8 +3,12 @@ #define ARCH_SUPPORTS_FTRACE_OPS 1 +#ifdef CC_USING_HOTPATCH +#define MCOUNT_INSN_SIZE 6 +#else #define MCOUNT_INSN_SIZE 24 #define MCOUNT_RETURN_FIXUP 18 +#endif #ifndef __ASSEMBLY__ @@ -37,18 +41,29 @@ struct ftrace_insn { static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn) { #ifdef CONFIG_FUNCTION_TRACER +#ifdef CC_USING_HOTPATCH + /* brcl 0,0 */ + insn->opc = 0xc004; + insn->disp = 0; +#else /* jg .+24 */ insn->opc = 0xc0f4; insn->disp = MCOUNT_INSN_SIZE / 2; #endif +#endif } static inline int is_ftrace_nop(struct ftrace_insn *insn) { #ifdef CONFIG_FUNCTION_TRACER +#ifdef CC_USING_HOTPATCH + if (insn->disp == 0) + return 1; +#else if (insn->disp == MCOUNT_INSN_SIZE / 2) return 1; #endif +#endif return 0; } diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 346b1c85ffb4..58642fd29c87 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -4,6 +4,7 @@ #include <linux/types.h> #define JUMP_LABEL_NOP_SIZE 6 +#define JUMP_LABEL_NOP_OFFSET 2 #ifdef CONFIG_64BIT #define ASM_PTR ".quad" @@ -13,9 +14,13 @@ #define ASM_ALIGN ".balign 4" #endif +/* + * We use a brcl 0,2 instruction for jump labels at compile time so it + * can be easily distinguished from a hotpatch generated instruction. 
+ */ static __always_inline bool arch_static_branch(struct static_key *key) { - asm_volatile_goto("0: brcl 0,0\n" + asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n" ".pushsection __jump_table, \"aw\"\n" ASM_ALIGN "\n" ASM_PTR " 0b, %l[label], %0\n" diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 9cba74d5d853..d84559e31f32 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -35,11 +35,13 @@ #define KVM_NR_IRQCHIPS 1 #define KVM_IRQCHIP_NUM_PINS 4096 -#define SIGP_CTRL_C 0x00800000 +#define SIGP_CTRL_C 0x80 +#define SIGP_CTRL_SCN_MASK 0x3f struct sca_entry { - atomic_t ctrl; - __u32 reserved; + __u8 reserved0; + __u8 sigp_ctrl; + __u16 reserved[3]; __u64 sda; __u64 reserved2[2]; } __attribute__((packed)); @@ -87,7 +89,8 @@ struct kvm_s390_sie_block { atomic_t cpuflags; /* 0x0000 */ __u32 : 1; /* 0x0004 */ __u32 prefix : 18; - __u32 : 13; + __u32 : 1; + __u32 ibc : 12; __u8 reserved08[4]; /* 0x0008 */ #define PROG_IN_SIE (1<<0) __u32 prog0c; /* 0x000c */ @@ -132,7 +135,9 @@ struct kvm_s390_sie_block { __u8 reserved60; /* 0x0060 */ __u8 ecb; /* 0x0061 */ __u8 ecb2; /* 0x0062 */ - __u8 reserved63[1]; /* 0x0063 */ +#define ECB3_AES 0x04 +#define ECB3_DEA 0x08 + __u8 ecb3; /* 0x0063 */ __u32 scaol; /* 0x0064 */ __u8 reserved68[4]; /* 0x0068 */ __u32 todpr; /* 0x006c */ @@ -159,6 +164,7 @@ struct kvm_s390_sie_block { __u64 tecmc; /* 0x00e8 */ __u8 reservedf0[12]; /* 0x00f0 */ #define CRYCB_FORMAT1 0x00000001 +#define CRYCB_FORMAT2 0x00000003 __u32 crycbd; /* 0x00fc */ __u64 gcr[16]; /* 0x0100 */ __u64 gbea; /* 0x0180 */ @@ -192,6 +198,7 @@ struct kvm_vcpu_stat { u32 exit_stop_request; u32 exit_validity; u32 exit_instruction; + u32 halt_successful_poll; u32 halt_wakeup; u32 instruction_lctl; u32 instruction_lctlg; @@ -378,14 +385,11 @@ struct kvm_s390_interrupt_info { struct kvm_s390_emerg_info emerg; struct kvm_s390_extcall_info extcall; struct kvm_s390_prefix_info prefix; + struct kvm_s390_stop_info stop; struct kvm_s390_mchk_info mchk; }; }; -/* for local_interrupt.action_flags */ -#define ACTION_STORE_ON_STOP (1<<0) -#define ACTION_STOP_ON_STOP (1<<1) - struct kvm_s390_irq_payload { struct kvm_s390_io_info io; struct kvm_s390_ext_info ext; @@ -393,6 +397,7 @@ struct kvm_s390_irq_payload { struct kvm_s390_emerg_info emerg; struct kvm_s390_extcall_info extcall; struct kvm_s390_prefix_info prefix; + struct kvm_s390_stop_info stop; struct kvm_s390_mchk_info mchk; }; @@ -401,7 +406,6 @@ struct kvm_s390_local_interrupt { struct kvm_s390_float_interrupt *float_int; wait_queue_head_t *wq; atomic_t *cpuflags; - unsigned int action_bits; DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); struct kvm_s390_irq_payload irq; unsigned long pending_irqs; @@ -470,7 +474,6 @@ struct kvm_vcpu_arch { }; struct gmap *gmap; struct kvm_guestdbg_info_arch guestdbg; -#define KVM_S390_PFAULT_TOKEN_INVALID (-1UL) unsigned long pfault_token; unsigned long pfault_select; unsigned long pfault_compare; @@ -504,13 +507,39 @@ struct s390_io_adapter { #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8) #define MAX_S390_ADAPTER_MAPS 256 +/* maximum size of facilities and facility mask is 2k bytes */ +#define S390_ARCH_FAC_LIST_SIZE_BYTE (1<<11) +#define S390_ARCH_FAC_LIST_SIZE_U64 \ + (S390_ARCH_FAC_LIST_SIZE_BYTE / sizeof(u64)) +#define S390_ARCH_FAC_MASK_SIZE_BYTE S390_ARCH_FAC_LIST_SIZE_BYTE +#define S390_ARCH_FAC_MASK_SIZE_U64 \ + (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) + +struct s390_model_fac { + /* facilities used in SIE 
context */ + __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; + /* subset enabled by kvm */ + __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; +}; + +struct kvm_s390_cpu_model { + struct s390_model_fac *fac; + struct cpuid cpu_id; + unsigned short ibc; +}; + struct kvm_s390_crypto { struct kvm_s390_crypto_cb *crycb; __u32 crycbd; + __u8 aes_kw; + __u8 dea_kw; }; struct kvm_s390_crypto_cb { - __u8 reserved00[128]; /* 0x0000 */ + __u8 reserved00[72]; /* 0x0000 */ + __u8 dea_wrapping_key_mask[24]; /* 0x0048 */ + __u8 aes_wrapping_key_mask[32]; /* 0x0060 */ + __u8 reserved80[128]; /* 0x0080 */ }; struct kvm_arch{ @@ -523,12 +552,15 @@ struct kvm_arch{ int use_irqchip; int use_cmma; int user_cpu_state_ctrl; + int user_sigp; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; int ipte_lock_count; struct mutex ipte_mutex; spinlock_t start_stop_lock; + struct kvm_s390_cpu_model model; struct kvm_s390_crypto crypto; + u64 epoch; }; #define KVM_HVA_ERR_BAD (-1UL) diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index f664e96f48c7..1a9a98de5bde 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -16,6 +16,7 @@ struct zpci_iomap_entry { u32 fh; u8 bar; + u16 count; }; extern struct zpci_iomap_entry *zpci_iomap_start; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 5e102422c9ab..fbb5ee3ae57c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -99,7 +99,7 @@ extern unsigned long zero_page_mask; #endif /* CONFIG_64BIT */ #define PTRS_PER_PGD 2048 -#define FIRST_USER_ADDRESS 0 +#define FIRST_USER_ADDRESS 0UL #define pte_ERROR(e) \ printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e)) @@ -249,10 +249,10 @@ static inline int is_module_addr(void *addr) _PAGE_YOUNG) /* - * handle_pte_fault uses pte_present, pte_none and pte_file to find out the - * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit - * is used to distinguish present from not-present ptes. It is changed only - * with the page table lock held. + * handle_pte_fault uses pte_present and pte_none to find out the pte type + * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to + * distinguish present from not-present ptes. It is changed only with the page + * table lock held. 
* * The following table gives the different possible bit combinations for * the pte hardware and software bits in the last 12 bits of a pte: @@ -279,7 +279,6 @@ static inline int is_module_addr(void *addr) * * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 - * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600 * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 */ @@ -671,13 +670,6 @@ static inline int pte_swap(pte_t pte) == (_PAGE_INVALID | _PAGE_TYPE); } -static inline int pte_file(pte_t pte) -{ - /* Bit pattern: (pte & 0x601) == 0x600 */ - return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT)) - == (_PAGE_INVALID | _PAGE_PROTECT); -} - static inline int pte_special(pte_t pte) { return (pte_val(pte) & _PAGE_SPECIAL); @@ -1756,19 +1748,6 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) -#ifndef CONFIG_64BIT -# define PTE_FILE_MAX_BITS 26 -#else /* CONFIG_64BIT */ -# define PTE_FILE_MAX_BITS 59 -#endif /* CONFIG_64BIT */ - -#define pte_to_pgoff(__pte) \ - ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) - -#define pgoff_to_pte(__off) \ - ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ - | _PAGE_INVALID | _PAGE_PROTECT }) - #endif /* !__ASSEMBLY__ */ #define kern_addr_valid(addr) (1) @@ -1779,6 +1758,10 @@ extern int s390_enable_sie(void); extern int s390_enable_skey(void); extern void s390_reset_cmma(struct mm_struct *mm); +/* s390 has a private copy of get unmapped area to deal with cache synonyms */ +#define HAVE_ARCH_UNMAPPED_AREA +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN + /* * No page table caches to initialise */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index bed05ea7ec27..e7cbbdcdee13 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -215,10 +215,7 @@ static inline unsigned short stap(void) /* * Give up the time slice of the virtual PU. 
*/ -static inline void cpu_relax(void) -{ - barrier(); -} +void cpu_relax(void); #define cpu_relax_lowlatency() barrier() diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h index 804578587a7a..72786067b300 100644 --- a/arch/s390/include/asm/reset.h +++ b/arch/s390/include/asm/reset.h @@ -15,5 +15,6 @@ struct reset_call { extern void register_reset_call(struct reset_call *reset); extern void unregister_reset_call(struct reset_call *reset); -extern void s390_reset_system(void (*func)(void *), void *data); +extern void s390_reset_system(void (*fn_pre)(void), + void (*fn_post)(void *), void *data); #endif /* _ASM_S390_RESET_H */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 1aba89b53cb9..f1096bab5199 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -27,11 +27,12 @@ struct sclp_ipl_info { }; struct sclp_cpu_entry { - u8 address; + u8 core_id; u8 reserved0[2]; u8 : 3; u8 siif : 1; - u8 : 4; + u8 sigpif : 1; + u8 : 3; u8 reserved2[10]; u8 type; u8 reserved1; @@ -51,6 +52,9 @@ int sclp_cpu_deconfigure(u8 cpu); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); unsigned int sclp_get_max_cpu(void); +unsigned int sclp_get_mtid(u8 cpu_type); +unsigned int sclp_get_mtid_max(void); +unsigned int sclp_get_mtid_prev(void); int sclp_sdias_blk_count(void); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_chp_configure(struct chp_id chpid); @@ -66,6 +70,9 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); unsigned long sclp_get_hsa_size(void); void sclp_early_detect(void); int sclp_has_siif(void); +int sclp_has_sigpif(void); unsigned int sclp_get_ibc(void); +long _sclp_print_early(const char *); + #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 7736fdd72595..b8d1e54b4733 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -57,6 +57,7 @@ extern void detect_memory_memblock(void); #define MACHINE_FLAG_TE (1UL << 15) #define MACHINE_FLAG_TLB_LC (1UL << 17) #define MACHINE_FLAG_VX (1UL << 18) +#define MACHINE_FLAG_CAD (1UL << 19) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) @@ -80,6 +81,7 @@ extern void detect_memory_memblock(void); #define MACHINE_HAS_TE (0) #define MACHINE_HAS_TLB_LC (0) #define MACHINE_HAS_VX (0) +#define MACHINE_HAS_CAD (0) #else /* CONFIG_64BIT */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) @@ -93,6 +95,7 @@ extern void detect_memory_memblock(void); #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) #define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC) #define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX) +#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD) #endif /* CONFIG_64BIT */ /* diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index fad4ae23ece0..ec60cf7fa0a2 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -16,6 +16,7 @@ #define SIGP_SET_ARCHITECTURE 18 #define SIGP_COND_EMERGENCY_SIGNAL 19 #define SIGP_SENSE_RUNNING 21 +#define SIGP_SET_MULTI_THREADING 22 #define SIGP_STORE_ADDITIONAL_STATUS 23 /* SIGP condition codes */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 762d4f88af5a..b3bd0282dd98 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h 
@@ -16,6 +16,8 @@ #define raw_smp_processor_id() (S390_lowcore.cpu_nr) extern struct mutex smp_cpu_state_mutex; +extern unsigned int smp_cpu_mt_shift; +extern unsigned int smp_cpu_mtid; extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); @@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void); #else /* CONFIG_SMP */ +#define smp_cpu_mtid 0 + static inline void smp_call_ipl_cpu(void (*func)(void *), void *data) { func(data); diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 7e2dcd7c57ef..8662f5c8e17f 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -44,7 +44,6 @@ extern char *strstr(const char *, const char *); #undef __HAVE_ARCH_STRCHR #undef __HAVE_ARCH_STRNCHR #undef __HAVE_ARCH_STRNCMP -#undef __HAVE_ARCH_STRNICMP #undef __HAVE_ARCH_STRPBRK #undef __HAVE_ARCH_STRSEP #undef __HAVE_ARCH_STRSPN diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h index f92428e459f8..f7054a892d9e 100644 --- a/arch/s390/include/asm/sysinfo.h +++ b/arch/s390/include/asm/sysinfo.h @@ -15,6 +15,7 @@ #define __ASM_S390_SYSINFO_H #include <asm/bitsperlong.h> +#include <linux/uuid.h> struct sysinfo_1_1_1 { unsigned char p:1; @@ -90,7 +91,11 @@ struct sysinfo_2_2_2 { unsigned short cpus_reserved; char name[8]; unsigned int caf; - char reserved_2[16]; + char reserved_2[8]; + unsigned char mt_installed; + unsigned char mt_general; + unsigned char mt_psmtid; + char reserved_3[5]; unsigned short cpus_dedicated; unsigned short cpus_shared; }; @@ -112,34 +117,39 @@ struct sysinfo_3_2_2 { char name[8]; unsigned int caf; char cpi[16]; - char reserved_1[24]; - + char reserved_1[3]; + char ext_name_encoding; + unsigned int reserved_2; + uuid_be uuid; } vm[8]; - char reserved_544[3552]; + char reserved_3[1504]; + char ext_names[8][256]; }; extern int topology_max_mnest; -#define TOPOLOGY_CPU_BITS 64 +#define TOPOLOGY_CORE_BITS 64 #define TOPOLOGY_NR_MAG 6 -struct topology_cpu { - unsigned char reserved0[4]; +struct topology_core { + unsigned char nl; + unsigned char reserved0[3]; unsigned char :6; unsigned char pp:2; unsigned char reserved1; unsigned short origin; - unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG]; + unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG]; }; struct topology_container { - unsigned char reserved[7]; + unsigned char nl; + unsigned char reserved[6]; unsigned char id; }; union topology_entry { unsigned char nl; - struct topology_cpu cpu; + struct topology_core cpu; struct topology_container container; }; diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 4d62fd5b56e5..ef1df718642d 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -39,7 +39,6 @@ struct thread_info { unsigned long sys_call_table; /* System call table address */ unsigned int cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct restart_block restart_block; unsigned int system_call; __u64 user_timer; __u64 system_timer; @@ -56,9 +55,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 56af53093d24..c4fbb9527c5c 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -9,9 +9,11 @@ struct cpu; #ifdef CONFIG_SCHED_BOOK struct 
cpu_topology_s390 { + unsigned short thread_id; unsigned short core_id; unsigned short socket_id; unsigned short book_id; + cpumask_t thread_mask; cpumask_t core_mask; cpumask_t book_mask; }; @@ -19,6 +21,8 @@ struct cpu_topology_s390 { extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) +#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) +#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask) #define topology_core_id(cpu) (cpu_topology[cpu].core_id) #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) #define topology_book_id(cpu) (cpu_topology[cpu].book_id) diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h index 37998b449531..b3fe12d8dd87 100644 --- a/arch/s390/include/uapi/asm/hypfs.h +++ b/arch/s390/include/uapi/asm/hypfs.h @@ -1,16 +1,19 @@ /* - * IOCTL interface for hypfs + * Structures for hypfs interface * * Copyright IBM Corp. 2013 * * Author: Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#ifndef _ASM_HYPFS_CTL_H -#define _ASM_HYPFS_CTL_H +#ifndef _ASM_HYPFS_H +#define _ASM_HYPFS_H #include <linux/types.h> +/* + * IOCTL for binary interface /sys/kernel/debug/diag_304 + */ struct hypfs_diag304 { __u32 args[2]; __u64 data; @@ -22,4 +25,30 @@ struct hypfs_diag304 { #define HYPFS_DIAG304 \ _IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304) +/* + * Structures for binary interface /sys/kernel/debug/diag_0c + */ +struct hypfs_diag0c_hdr { + __u64 len; /* Length of diag0c buffer without header */ + __u16 version; /* Version of header */ + char reserved1[6]; /* Reserved */ + char tod_ext[16]; /* TOD clock for diag0c */ + __u64 count; /* Number of entries (CPUs) in diag0c array */ + char reserved2[24]; /* Reserved */ +}; + +struct hypfs_diag0c_entry { + char date[8]; /* MM/DD/YY in EBCDIC */ + char time[8]; /* HH:MM:SS in EBCDIC */ + __u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */ + __u64 totalproc; /* Total of virtual and simulation time (us) */ + __u32 cpu; /* Linux logical CPU number */ + __u32 reserved; /* Align to 8 byte */ +}; + +struct hypfs_diag0c_data { + struct hypfs_diag0c_hdr hdr; /* 64 byte header */ + struct hypfs_diag0c_entry entry[]; /* diag0c entry array */ +}; + #endif diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 48eda3ab4944..9c77e60b9a26 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -57,10 +57,44 @@ struct kvm_s390_io_adapter_req { /* kvm attr_group on vm fd */ #define KVM_S390_VM_MEM_CTRL 0 +#define KVM_S390_VM_TOD 1 +#define KVM_S390_VM_CRYPTO 2 +#define KVM_S390_VM_CPU_MODEL 3 /* kvm attributes for mem_ctrl */ #define KVM_S390_VM_MEM_ENABLE_CMMA 0 #define KVM_S390_VM_MEM_CLR_CMMA 1 +#define KVM_S390_VM_MEM_LIMIT_SIZE 2 + +/* kvm attributes for KVM_S390_VM_TOD */ +#define KVM_S390_VM_TOD_LOW 0 +#define KVM_S390_VM_TOD_HIGH 1 + +/* kvm attributes for KVM_S390_VM_CPU_MODEL */ +/* processor related attributes are r/w */ +#define KVM_S390_VM_CPU_PROCESSOR 0 +struct kvm_s390_vm_cpu_processor { + __u64 cpuid; + __u16 ibc; + __u8 pad[6]; + __u64 fac_list[256]; +}; + +/* machine related attributes are r/o */ +#define KVM_S390_VM_CPU_MACHINE 1 +struct kvm_s390_vm_cpu_machine { + __u64 cpuid; + __u32 ibc; + __u8 pad[4]; + __u64 fac_mask[256]; + __u64 fac_list[256]; +}; + +/* kvm attributes for crypto */ +#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 +#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 +#define 
KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2 +#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3 /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { @@ -107,6 +141,9 @@ struct kvm_guest_debug_arch { struct kvm_hw_breakpoint __user *hw_bp; }; +/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */ +#define KVM_S390_PFAULT_TOKEN_INVALID 0xffffffffffffffffULL + #define KVM_SYNC_PREFIX (1UL << 0) #define KVM_SYNC_GPRS (1UL << 1) #define KVM_SYNC_ACRS (1UL << 2) diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 204c43a4c245..31fab2676fe9 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -4,8 +4,8 @@ ifdef CONFIG_FUNCTION_TRACER # Don't trace early setup code and tracing code -CFLAGS_REMOVE_early.o = -pg -CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) endif # diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S index 797a823a2275..f74a53d339b0 100644 --- a/arch/s390/kernel/base.S +++ b/arch/s390/kernel/base.S @@ -97,7 +97,8 @@ ENTRY(diag308_reset) lg %r4,0(%r4) # Save PSW sturg %r4,%r3 # Use sturg, because of large pages lghi %r1,1 - diag %r1,%r1,0x308 + lghi %r0,0 + diag %r0,%r1,0x308 .Lrestart_part2: lhi %r0,0 # Load r0 with zero lhi %r1,2 # Use mode 2 = ESAME (dump) diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index c0b03c28d157..632fa06ea162 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -5,37 +5,11 @@ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ -#include <linux/notifier.h> #include <linux/seq_file.h> -#include <linux/init.h> -#include <linux/list.h> -#include <linux/slab.h> #include <linux/cpu.h> +#include <linux/cacheinfo.h> #include <asm/facility.h> -struct cache { - unsigned long size; - unsigned int line_size; - unsigned int associativity; - unsigned int nr_sets; - unsigned int level : 3; - unsigned int type : 2; - unsigned int private : 1; - struct list_head list; -}; - -struct cache_dir { - struct kobject *kobj; - struct cache_index_dir *index; -}; - -struct cache_index_dir { - struct kobject kobj; - int cpu; - struct cache *cache; - struct cache_index_dir *next; -}; - enum { CACHE_SCOPE_NOTEXISTS, CACHE_SCOPE_PRIVATE, @@ -44,10 +18,10 @@ enum { }; enum { - CACHE_TYPE_SEPARATE, - CACHE_TYPE_DATA, - CACHE_TYPE_INSTRUCTION, - CACHE_TYPE_UNIFIED, + CTYPE_SEPARATE, + CTYPE_DATA, + CTYPE_INSTRUCTION, + CTYPE_UNIFIED, }; enum { @@ -70,37 +44,60 @@ struct cache_info { }; #define CACHE_MAX_LEVEL 8 - union cache_topology { struct cache_info ci[CACHE_MAX_LEVEL]; unsigned long long raw; }; static const char * const cache_type_string[] = { - "Data", + "", "Instruction", + "Data", + "", "Unified", }; -static struct cache_dir *cache_dir_cpu[NR_CPUS]; -static LIST_HEAD(cache_list); +static const enum cache_type cache_type_map[] = { + [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE, + [CTYPE_DATA] = CACHE_TYPE_DATA, + [CTYPE_INSTRUCTION] = CACHE_TYPE_INST, + [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED, +}; void show_cacheinfo(struct seq_file *m) { - struct cache *cache; - int index = 0; + struct cpu_cacheinfo *this_cpu_ci; + struct cacheinfo *cache; + int idx; - list_for_each_entry(cache, &cache_list, list) { - seq_printf(m, "cache%-11d: ", index); + get_online_cpus(); + this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask)); + for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) { + cache = this_cpu_ci->info_list + idx; + seq_printf(m, "cache%-11d: ", idx); seq_printf(m, "level=%d ", cache->level); seq_printf(m, "type=%s ", 
cache_type_string[cache->type]); - seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared"); - seq_printf(m, "size=%luK ", cache->size >> 10); - seq_printf(m, "line_size=%u ", cache->line_size); - seq_printf(m, "associativity=%d", cache->associativity); + seq_printf(m, "scope=%s ", + cache->disable_sysfs ? "Shared" : "Private"); + seq_printf(m, "size=%dK ", cache->size >> 10); + seq_printf(m, "line_size=%u ", cache->coherency_line_size); + seq_printf(m, "associativity=%d", cache->ways_of_associativity); seq_puts(m, "\n"); - index++; } + put_online_cpus(); +} + +static inline enum cache_type get_cache_type(struct cache_info *ci, int level) +{ + if (level >= CACHE_MAX_LEVEL) + return CACHE_TYPE_NOCACHE; + + ci += level; + + if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE) + return CACHE_TYPE_NOCACHE; + + return cache_type_map[ci->type]; } static inline unsigned long ecag(int ai, int li, int ti) @@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti) return val; } -static int __init cache_add(int level, int private, int type) +static void ci_leaf_init(struct cacheinfo *this_leaf, int private, + enum cache_type type, unsigned int level) { - struct cache *cache; - int ti; + int ti, num_sets; + int cpu = smp_processor_id(); - cache = kzalloc(sizeof(*cache), GFP_KERNEL); - if (!cache) - return -ENOMEM; - if (type == CACHE_TYPE_INSTRUCTION) + if (type == CACHE_TYPE_INST) ti = CACHE_TI_INSTRUCTION; else ti = CACHE_TI_UNIFIED; - cache->size = ecag(EXTRACT_SIZE, level, ti); - cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti); - cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); - cache->nr_sets = cache->size / cache->associativity; - cache->nr_sets /= cache->line_size; - cache->private = private; - cache->level = level + 1; - cache->type = type - 1; - list_add_tail(&cache->list, &cache_list); - return 0; -} - -static void __init cache_build_info(void) -{ - struct cache *cache, *next; - union cache_topology ct; - int level, private, rc; - - ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); - for (level = 0; level < CACHE_MAX_LEVEL; level++) { - switch (ct.ci[level].scope) { - case CACHE_SCOPE_SHARED: - private = 0; - break; - case CACHE_SCOPE_PRIVATE: - private = 1; - break; - default: - return; - } - if (ct.ci[level].type == CACHE_TYPE_SEPARATE) { - rc = cache_add(level, private, CACHE_TYPE_DATA); - rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION); - } else { - rc = cache_add(level, private, ct.ci[level].type); - } - if (rc) - goto error; - } - return; -error: - list_for_each_entry_safe(cache, next, &cache_list, list) { - list_del(&cache->list); - kfree(cache); - } -} - -static struct cache_dir *cache_create_cache_dir(int cpu) -{ - struct cache_dir *cache_dir; - struct kobject *kobj = NULL; - struct device *dev; - - dev = get_cpu_device(cpu); - if (!dev) - goto out; - kobj = kobject_create_and_add("cache", &dev->kobj); - if (!kobj) - goto out; - cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL); - if (!cache_dir) - goto out; - cache_dir->kobj = kobj; - cache_dir_cpu[cpu] = cache_dir; - return cache_dir; -out: - kobject_put(kobj); - return NULL; -} - -static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj) -{ - return container_of(kobj, struct cache_index_dir, kobj); -} - -static void cache_index_release(struct kobject *kobj) -{ - struct cache_index_dir *index; - - index = kobj_to_cache_index_dir(kobj); - kfree(index); -} - -static ssize_t cache_index_show(struct kobject *kobj, - struct attribute *attr, char 
*buf) -{ - struct kobj_attribute *kobj_attr; - - kobj_attr = container_of(attr, struct kobj_attribute, attr); - return kobj_attr->show(kobj, kobj_attr, buf); -} - -#define DEFINE_CACHE_ATTR(_name, _format, _value) \ -static ssize_t cache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, \ - char *buf) \ -{ \ - struct cache_index_dir *index; \ - \ - index = kobj_to_cache_index_dir(kobj); \ - return sprintf(buf, _format, _value); \ -} \ -static struct kobj_attribute cache_##_name##_attr = \ - __ATTR(_name, 0444, cache_##_name##_show, NULL); -DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10); -DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size); -DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets); -DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity); -DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]); -DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level); + this_leaf->level = level + 1; + this_leaf->type = type; + this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti); + this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, + level, ti); + this_leaf->size = ecag(EXTRACT_SIZE, level, ti); -static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf) -{ - struct cache_index_dir *index; - int len; - - index = kobj_to_cache_index_dir(kobj); - len = type ? - cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) : - cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)); - len += sprintf(&buf[len], "\n"); - return len; -} - -static ssize_t shared_cpu_map_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return shared_cpu_map_func(kobj, 0, buf); + num_sets = this_leaf->size / this_leaf->coherency_line_size; + num_sets /= this_leaf->ways_of_associativity; + this_leaf->number_of_sets = num_sets; + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + if (!private) + this_leaf->disable_sysfs = true; } -static struct kobj_attribute cache_shared_cpu_map_attr = - __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL); -static ssize_t shared_cpu_list_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +int init_cache_level(unsigned int cpu) { - return shared_cpu_map_func(kobj, 1, buf); -} -static struct kobj_attribute cache_shared_cpu_list_attr = - __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL); - -static struct attribute *cache_index_default_attrs[] = { - &cache_type_attr.attr, - &cache_size_attr.attr, - &cache_number_of_sets_attr.attr, - &cache_ways_of_associativity_attr.attr, - &cache_level_attr.attr, - &cache_coherency_line_size_attr.attr, - &cache_shared_cpu_map_attr.attr, - &cache_shared_cpu_list_attr.attr, - NULL, -}; - -static const struct sysfs_ops cache_index_ops = { - .show = cache_index_show, -}; - -static struct kobj_type cache_index_type = { - .sysfs_ops = &cache_index_ops, - .release = cache_index_release, - .default_attrs = cache_index_default_attrs, -}; - -static int cache_create_index_dir(struct cache_dir *cache_dir, - struct cache *cache, int index, int cpu) -{ - struct cache_index_dir *index_dir; - int rc; - - index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL); - if (!index_dir) - return -ENOMEM; - index_dir->cache = cache; - index_dir->cpu = cpu; - rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type, - cache_dir->kobj, "index%d", index); - if (rc) - goto out; - index_dir->next = cache_dir->index; - cache_dir->index = index_dir; - return 0; -out: - 
kfree(index_dir); - return rc; -} + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + unsigned int level = 0, leaves = 0; + union cache_topology ct; + enum cache_type ctype; -static int cache_add_cpu(int cpu) -{ - struct cache_dir *cache_dir; - struct cache *cache; - int rc, index = 0; + if (!this_cpu_ci) + return -EINVAL; - if (list_empty(&cache_list)) - return 0; - cache_dir = cache_create_cache_dir(cpu); - if (!cache_dir) - return -ENOMEM; - list_for_each_entry(cache, &cache_list, list) { - if (!cache->private) + ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); + do { + ctype = get_cache_type(&ct.ci[0], level); + if (ctype == CACHE_TYPE_NOCACHE) break; - rc = cache_create_index_dir(cache_dir, cache, index, cpu); - if (rc) - return rc; - index++; - } - return 0; -} + /* Separate instruction and data caches */ + leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; + } while (++level < CACHE_MAX_LEVEL); -static void cache_remove_cpu(int cpu) -{ - struct cache_index_dir *index, *next; - struct cache_dir *cache_dir; + this_cpu_ci->num_levels = level; + this_cpu_ci->num_leaves = leaves; - cache_dir = cache_dir_cpu[cpu]; - if (!cache_dir) - return; - index = cache_dir->index; - while (index) { - next = index->next; - kobject_put(&index->kobj); - index = next; - } - kobject_put(cache_dir->kobj); - kfree(cache_dir); - cache_dir_cpu[cpu] = NULL; + return 0; } -static int cache_hotplug(struct notifier_block *nfb, unsigned long action, - void *hcpu) +int populate_cache_leaves(unsigned int cpu) { - int cpu = (long)hcpu; - int rc = 0; + unsigned int level, idx, pvt; + union cache_topology ct; + enum cache_type ctype; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - rc = cache_add_cpu(cpu); - if (rc) - cache_remove_cpu(cpu); - break; - case CPU_DEAD: - cache_remove_cpu(cpu); - break; + ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); + for (idx = 0, level = 0; level < this_cpu_ci->num_levels && + idx < this_cpu_ci->num_leaves; idx++, level++) { + if (!this_leaf) + return -EINVAL; + + pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; + ctype = get_cache_type(&ct.ci[0], level); + if (ctype == CACHE_TYPE_SEPARATE) { + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level); + } else { + ci_leaf_init(this_leaf++, pvt, ctype, level); + } } - return rc ? 
NOTIFY_BAD : NOTIFY_OK; -} - -static int __init cache_init(void) -{ - int cpu; - - if (!test_facility(34)) - return 0; - cache_build_info(); - - cpu_notifier_register_begin(); - for_each_online_cpu(cpu) - cache_add_cpu(cpu); - __hotcpu_notifier(cache_hotplug, 0); - cpu_notifier_register_done(); return 0; } -device_initcall(cache_init); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 34d5fa7b01b5..bc1df12dd4f8 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -209,7 +209,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) int i; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs))) return -EFAULT; diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index f3762937dd82..533430307da8 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -137,7 +137,7 @@ enum { INSTR_RSI_RRP, INSTR_RSL_LRDFU, INSTR_RSL_R0RD, INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, - INSTR_RSY_RDRM, + INSTR_RSY_RDRM, INSTR_RSY_RMRD, INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, INSTR_RS_RURD, INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM, @@ -226,7 +226,6 @@ static const struct s390_operand operands[] = [U16_32] = { 16, 32, 0 }, [J16_16] = { 16, 16, OPERAND_PCREL }, [J16_32] = { 16, 32, OPERAND_PCREL }, - [I16_32] = { 16, 32, OPERAND_SIGNED }, [I24_24] = { 24, 24, OPERAND_SIGNED }, [J32_16] = { 32, 16, OPERAND_PCREL }, [I32_16] = { 32, 16, OPERAND_SIGNED }, @@ -308,6 +307,7 @@ static const unsigned char formats[][7] = { [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 }, + [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, @@ -451,7 +451,8 @@ enum { LONG_INSN_VERLLV, LONG_INSN_VESRAV, LONG_INSN_VESRLV, - LONG_INSN_VSBCBI + LONG_INSN_VSBCBI, + LONG_INSN_STCCTM }; static char *long_insn_name[] = { @@ -531,6 +532,7 @@ static char *long_insn_name[] = { [LONG_INSN_VESRAV] = "vesrav", [LONG_INSN_VESRLV] = "vesrlv", [LONG_INSN_VSBCBI] = "vsbcbi", + [LONG_INSN_STCCTM] = "stcctm", }; static struct s390_insn opcode[] = { @@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = { { "lric", 0x60, INSTR_RSY_RDRM }, { "stric", 0x61, INSTR_RSY_RDRM }, { "mric", 0x62, INSTR_RSY_RDRM }, + { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD }, #endif { "rll", 0x1d, INSTR_RSY_RRRD }, { "mvclu", 0x8e, INSTR_RSY_RRRD }, diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 302ac1f7f8e7..70a329450901 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) S390_lowcore.machine_flags |= MACHINE_FLAG_VX; + if (test_facility(128)) + S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; #endif } +static int __init nocad_setup(char *str) +{ + S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD; + return 0; +} +early_param("nocad", nocad_setup); + +static int __init cad_init(void) +{ + if (MACHINE_HAS_CAD) + /* Enable problem state CAD. 
*/ + __ctl_set_bit(2, 3); + return 0; +} +early_initcall(cad_init); + static __init void rescue_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 8e61393c8275..834df047d35f 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -71,9 +71,11 @@ struct s390_mmap_arg_struct; struct fadvise64_64_args; struct old_sigaction; +long sys_rt_sigreturn(void); +long sys_sigreturn(void); + long sys_s390_personality(unsigned int personality); long sys_s390_runtime_instr(int command, int signum); - long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t); long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t); #endif /* _ENTRY_H */ diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index b86bb8823f15..82c19899574f 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -46,6 +46,13 @@ * lg %r14,8(%r15) # offset 18 * The jg instruction branches to offset 24 to skip as many instructions * as possible. + * In case we use gcc's hotpatch feature the original and also the disabled + * function prologue contains only a single six byte instruction and looks + * like this: + * > brcl 0,0 # offset 0 + * To enable ftrace the code gets patched like above and afterwards looks + * like this: + * > brasl %r0,ftrace_caller # offset 0 */ unsigned long ftrace_plt; @@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { - struct ftrace_insn insn; - unsigned short op; - void *from, *to; - size_t size; - - ftrace_generate_nop_insn(&insn); - size = sizeof(insn); - from = &insn; - to = (void *) rec->ip; - if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + struct ftrace_insn orig, new, old; + + if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - /* - * If we find a breakpoint instruction, a kprobe has been placed - * at the beginning of the function. We write the constant - * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original - * instruction so that the kprobes handler can execute a nop, if it - * reaches this breakpoint. - */ - if (op == BREAKPOINT_INSTRUCTION) { - size -= 2; - from += 2; - to += 2; - insn.disp = KPROBE_ON_FTRACE_NOP; + if (addr == MCOUNT_ADDR) { + /* Initial code replacement */ +#ifdef CC_USING_HOTPATCH + /* We expect to see brcl 0,0 */ + ftrace_generate_nop_insn(&orig); +#else + /* We expect to see stg r14,8(r15) */ + orig.opc = 0xe3e0; + orig.disp = 0xf0080024; +#endif + ftrace_generate_nop_insn(&new); + } else if (old.opc == BREAKPOINT_INSTRUCTION) { + /* + * If we find a breakpoint instruction, a kprobe has been + * placed at the beginning of the function. We write the + * constant KPROBE_ON_FTRACE_NOP into the remaining four + * bytes of the original instruction so that the kprobes + * handler can execute a nop, if it reaches this breakpoint. + */ + new.opc = orig.opc = BREAKPOINT_INSTRUCTION; + orig.disp = KPROBE_ON_FTRACE_CALL; + new.disp = KPROBE_ON_FTRACE_NOP; + } else { + /* Replace ftrace call with a nop. */ + ftrace_generate_call_insn(&orig, rec->ip); + ftrace_generate_nop_insn(&new); } - if (probe_kernel_write(to, from, size)) + /* Verify that the to be replaced code matches what we expect. 
*/ + if (memcmp(&orig, &old, sizeof(old))) + return -EINVAL; + if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) return -EPERM; return 0; } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { - struct ftrace_insn insn; - unsigned short op; - void *from, *to; - size_t size; - - ftrace_generate_call_insn(&insn, rec->ip); - size = sizeof(insn); - from = &insn; - to = (void *) rec->ip; - if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op))) + struct ftrace_insn orig, new, old; + + if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old))) return -EFAULT; - /* - * If we find a breakpoint instruction, a kprobe has been placed - * at the beginning of the function. We write the constant - * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original - * instruction so that the kprobes handler can execute a brasl if it - * reaches this breakpoint. - */ - if (op == BREAKPOINT_INSTRUCTION) { - size -= 2; - from += 2; - to += 2; - insn.disp = KPROBE_ON_FTRACE_CALL; + if (old.opc == BREAKPOINT_INSTRUCTION) { + /* + * If we find a breakpoint instruction, a kprobe has been + * placed at the beginning of the function. We write the + * constant KPROBE_ON_FTRACE_CALL into the remaining four + * bytes of the original instruction so that the kprobes + * handler can execute a brasl if it reaches this breakpoint. + */ + new.opc = orig.opc = BREAKPOINT_INSTRUCTION; + orig.disp = KPROBE_ON_FTRACE_NOP; + new.disp = KPROBE_ON_FTRACE_CALL; + } else { + /* Replace nop with an ftrace call. */ + ftrace_generate_nop_insn(&orig); + ftrace_generate_call_insn(&new, rec->ip); } - if (probe_kernel_write(to, from, size)) + /* Verify that the to be replaced code matches what we expect. */ + if (memcmp(&orig, &old, sizeof(old))) + return -EINVAL; + if (probe_kernel_write((void *) rec->ip, &new, sizeof(new))) return -EPERM; return 0; } diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index d62eee11f0b5..132f4c9ade60 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -436,7 +436,9 @@ ENTRY(startup_kdump) # followed by the facility words. 
#if defined(CONFIG_64BIT) -#if defined(CONFIG_MARCH_ZEC12) +#if defined(CONFIG_MARCH_Z13) + .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 +#elif defined(CONFIG_MARCH_ZEC12) .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 #elif defined(CONFIG_MARCH_Z196) .long 2, 0xc100eff2, 0xf46c0000 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 39badb9ca0b3..5c8651f36509 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2074,7 +2074,8 @@ static void do_reset_calls(void) u32 dump_prefix_page; -void s390_reset_system(void (*func)(void *), void *data) +void s390_reset_system(void (*fn_pre)(void), + void (*fn_post)(void *), void *data) { struct _lowcore *lc; @@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data) /* Store status at absolute zero */ store_status(); + /* Call function before reset */ + if (fn_pre) + fn_pre(); do_reset_calls(); - if (func) - func(data); + /* Call function after reset */ + if (fn_post) + fn_post(data); } diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index b987ab2c1541..cb2d51e779df 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -22,31 +22,66 @@ struct insn_args { enum jump_label_type type; }; +static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn) +{ + /* brcl 0,0 */ + insn->opcode = 0xc004; + insn->offset = 0; +} + +static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) +{ + /* brcl 15,offset */ + insn->opcode = 0xc0f4; + insn->offset = (entry->target - entry->code) >> 1; +} + +static void jump_label_bug(struct jump_entry *entry, struct insn *insn) +{ + unsigned char *ipc = (unsigned char *)entry->code; + unsigned char *ipe = (unsigned char *)insn; + + pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); + pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", + ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); + pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", + ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); + panic("Corrupted kernel text"); +} + +static struct insn orignop = { + .opcode = 0xc004, + .offset = JUMP_LABEL_NOP_OFFSET >> 1, +}; + static void __jump_label_transform(struct jump_entry *entry, - enum jump_label_type type) + enum jump_label_type type, + int init) { - struct insn insn; - int rc; + struct insn old, new; if (type == JUMP_LABEL_ENABLE) { - /* brcl 15,offset */ - insn.opcode = 0xc0f4; - insn.offset = (entry->target - entry->code) >> 1; + jump_label_make_nop(entry, &old); + jump_label_make_branch(entry, &new); } else { - /* brcl 0,0 */ - insn.opcode = 0xc004; - insn.offset = 0; + jump_label_make_branch(entry, &old); + jump_label_make_nop(entry, &new); } - - rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE); - WARN_ON_ONCE(rc < 0); + if (init) { + if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) + jump_label_bug(entry, &old); + } else { + if (memcmp((void *)entry->code, &old, sizeof(old))) + jump_label_bug(entry, &old); + } + probe_kernel_write((void *)entry->code, &new, sizeof(new)); } static int __sm_arch_jump_label_transform(void *data) { struct insn_args *args = data; - __jump_label_transform(args->entry, args->type); + __jump_label_transform(args->entry, args->type, 0); return 0; } @@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry, void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { - __jump_label_transform(entry, type); + __jump_label_transform(entry, type, 1); } #endif diff 
--git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 1e4c710dfb92..f516edc1fbe3 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p) /* * If kprobes patches the instruction that is morphed by * ftrace make sure that kprobes always sees the branch - * "jg .+24" that skips the mcount block + * "jg .+24" that skips the mcount block or the "brcl 0,0" + * in case of hotpatch. */ ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn); p->ainsn.is_ftrace_insn = 1; diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 4685337fa7c6..fb0901ec4306 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void) return 0; } arch_initcall(machine_kdump_pm_init); -#endif /* * Start kdump: We expect here that a store status has been done on our CPU */ static void __do_machine_kdump(void *image) { -#ifdef CONFIG_CRASH_DUMP int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; - setup_regs(); __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); start_kdump(1); -#endif } +#endif /* * Check if kdump checksums are valid: We call purgatory with parameter "0" @@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data) */ static void __machine_kexec(void *data) { - struct kimage *image = data; - __arch_local_irq_stosm(0x04); /* enable DAT */ pfault_fini(); tracing_off(); debug_locks_off(); - if (image->type == KEXEC_TYPE_CRASH) { +#ifdef CONFIG_CRASH_DUMP + if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) { + lgr_info_log(); - s390_reset_system(__do_machine_kdump, data); - } else { - s390_reset_system(__do_machine_kexec, data); - } + s390_reset_system(setup_regs, __do_machine_kdump, data); + } else +#endif + s390_reset_system(NULL, __do_machine_kexec, data); disabled_wait((unsigned long) __builtin_return_address(0)); } diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index b6dfc5bfcb89..e499370fbccb 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -27,7 +27,9 @@ ENTRY(ftrace_caller) .globl ftrace_regs_caller .set ftrace_regs_caller,ftrace_caller lgr %r1,%r15 +#ifndef CC_USING_HOTPATCH aghi %r0,MCOUNT_RETURN_FIXUP +#endif aghi %r15,-STACK_FRAME_SIZE stg %r1,__SF_BACKCHAIN(%r15) stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 409d152585be..36154a2f1814 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -50,7 +50,7 @@ void *module_alloc(unsigned long size) if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, - GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, + GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index aa7a83948c7b..13fc0978ca7e 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task) { } +#ifdef CONFIG_64BIT +void arch_release_task_struct(struct task_struct *tsk) +{ + if (tsk->thread.vxrs) + kfree(tsk->thread.vxrs); +} +#endif + int copy_thread(unsigned long clone_flags, unsigned long new_stackp, unsigned long arg, struct task_struct *p) { @@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) ret = PAGE_ALIGN(mm->brk + brk_rnd()); return (ret > 
mm->brk) ? ret : mm->brk; } - -unsigned long randomize_et_dyn(unsigned long base) -{ - unsigned long ret; - - if (!(current->flags & PF_RANDOMIZE)) - return base; - ret = PAGE_ALIGN(base + brk_rnd()); - return (ret > base) ? ret : base; -} diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index dbdd33ee0102..26108232fcaa 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -8,16 +8,24 @@ #include <linux/kernel.h> #include <linux/init.h> -#include <linux/smp.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/cpu.h> #include <asm/elf.h> #include <asm/lowcore.h> #include <asm/param.h> +#include <asm/smp.h> static DEFINE_PER_CPU(struct cpuid, cpu_id); +void cpu_relax(void) +{ + if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) + asm volatile("diag 0,0,0x44"); + barrier(); +} +EXPORT_SYMBOL(cpu_relax); + /* * cpu_init - initializes state that is per-CPU. */ diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index a41f2c99dcc8..7e77e03378f3 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S @@ -294,7 +294,8 @@ ENTRY(_sclp_print_early) #ifdef CONFIG_64BIT tm LC_AR_MODE_ID,1 jno .Lesa3 - lmh %r6,%r15,96(%r15) # store upper register halves + lgfr %r2,%r2 # sign extend return value + lmh %r6,%r15,96(%r15) # restore upper register halves ahi %r15,80 .Lesa3: #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4e532c67832f..bfac77ada4f2 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -810,6 +810,9 @@ static void __init setup_hwcaps(void) case 0x2828: strcpy(elf_platform, "zEC12"); break; + case 0x2964: + strcpy(elf_platform, "z13"); + break; } } diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6a2ac257d98f..b3ae6f70c6d6 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -162,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) _sigregs user_sregs; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs))) return -EFAULT; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 0b499f5cbe19..a668993ff577 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -71,9 +71,30 @@ struct pcpu { }; static u8 boot_cpu_type; -static u16 boot_cpu_address; static struct pcpu pcpu_devices[NR_CPUS]; +unsigned int smp_cpu_mt_shift; +EXPORT_SYMBOL(smp_cpu_mt_shift); + +unsigned int smp_cpu_mtid; +EXPORT_SYMBOL(smp_cpu_mtid); + +static unsigned int smp_max_threads __initdata = -1U; + +static int __init early_nosmt(char *s) +{ + smp_max_threads = 1; + return 0; +} +early_param("nosmt", early_nosmt); + +static int __init early_smt(char *s) +{ + get_option(&s, &smp_max_threads); + return 0; +} +early_param("smt", early_smt); + /* * The smp_cpu_state_mutex must be held when changing the state or polarization * member of a pcpu data structure within the pcpu_devices arreay. @@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu) /* * Find struct pcpu by cpu address. */ -static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address) +static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address) { int cpu; @@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), } /* + * Enable additional logical cpus for multi-threading. 
+ */ +static int pcpu_set_smt(unsigned int mtid) +{ + register unsigned long reg1 asm ("1") = (unsigned long) mtid; + int cc; + + if (smp_cpu_mtid == mtid) + return 0; + asm volatile( + " sigp %1,0,%2 # sigp set multi-threading\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING) + : "cc"); + if (cc == 0) { + smp_cpu_mtid = mtid; + smp_cpu_mt_shift = 0; + while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift)) + smp_cpu_mt_shift++; + pcpu_devices[0].address = stap(); + } + return cc; +} + +/* * Call function on an online CPU. */ void smp_call_online_cpu(void (*func)(void *), void *data) @@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); #ifdef CONFIG_CRASH_DUMP -static void __init smp_get_save_area(int cpu, u16 address) +static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu) { void *lc = pcpu_devices[0].lowcore; struct save_area_ext *sa_ext; unsigned long vx_sa; - if (is_kdump_kernel()) - return; - if (!OLDMEM_BASE && (address == boot_cpu_address || - ipl_info.type != IPL_TYPE_FCP_DUMP)) - return; sa_ext = dump_save_area_create(cpu); if (!sa_ext) panic("could not allocate memory for save area\n"); - if (address == boot_cpu_address) { - /* Copy the registers of the boot cpu. */ + if (is_boot_cpu) { + /* Copy the registers of the boot CPU. */ copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa), SAVE_AREA_BASE - PAGE_SIZE, 0); if (MACHINE_HAS_VX) @@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address) free_page(vx_sa); } +/* + * Collect CPU state of the previous, crashed system. + * There are four cases: + * 1) standard zfcp dump + * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The boot CPU state is located in + * the absolute lowcore of the memory stored in the HSA. The zcore code + * will allocate the save area and copy the boot CPU state from the HSA. + * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory) + * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The firmware or the boot-loader + * stored the registers of the boot CPU in the absolute lowcore in the + * memory of the old system. + * 3) kdump and the old kernel did not store the CPU state, + * or stand-alone kdump for DASD + * condition: OLDMEM_BASE != NULL && !is_kdump_kernel() + * The state for all CPUs except the boot CPU needs to be collected + * with sigp stop-and-store-status. The kexec code or the boot-loader + * stored the registers of the boot CPU in the memory of the old system. + * 4) kdump and the old kernel stored the CPU state + * condition: OLDMEM_BASE != NULL && is_kdump_kernel() + * The state of all CPUs is stored in ELF sections in the memory of the + * old system. The ELF sections are picked up by the crash_dump code + * via elfcorehdr_addr. + */ +static void __init smp_store_cpu_states(struct sclp_cpu_info *info) +{ + unsigned int cpu, address, i, j; + int is_boot_cpu; + + if (is_kdump_kernel()) + /* Previous system stored the CPU states. Nothing to do. */ + return; + if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP)) + /* No previous system present, normal boot. */ + return; + /* Set multi-threading state to the previous system. */ + pcpu_set_smt(sclp_get_mtid_prev()); + /* Collect CPU states. 
*/ + cpu = 0; + for (i = 0; i < info->configured; i++) { + /* Skip CPUs with different CPU type. */ + if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) + continue; + for (j = 0; j <= smp_cpu_mtid; j++, cpu++) { + address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j; + is_boot_cpu = (address == pcpu_devices[0].address); + if (is_boot_cpu && !OLDMEM_BASE) + /* Skip boot CPU for standard zfcp dump. */ + continue; + /* Get state for this CPu. */ + __smp_store_cpu_state(cpu, address, is_boot_cpu); + } + } +} + int smp_store_status(int cpu) { unsigned long vx_sa; @@ -565,10 +665,6 @@ int smp_store_status(int cpu) return 0; } -#else /* CONFIG_CRASH_DUMP */ - -static inline void smp_get_save_area(int cpu, u16 address) { } - #endif /* CONFIG_CRASH_DUMP */ void smp_cpu_set_polarization(int cpu, int val) @@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void) info = kzalloc(sizeof(*info), GFP_KERNEL); if (info && (use_sigp_detection || sclp_get_cpu_info(info))) { use_sigp_detection = 1; - for (address = 0; address <= MAX_CPU_ADDRESS; address++) { + for (address = 0; address <= MAX_CPU_ADDRESS; + address += (1U << smp_cpu_mt_shift)) { if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) == SIGP_CC_NOT_OPERATIONAL) continue; - info->cpu[info->configured].address = address; + info->cpu[info->configured].core_id = + address >> smp_cpu_mt_shift; info->configured++; } info->combined = info->configured; @@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) { struct pcpu *pcpu; cpumask_t avail; - int cpu, nr, i; + int cpu, nr, i, j; + u16 address; nr = 0; cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask); @@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) { if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type) continue; - if (pcpu_find_address(cpu_present_mask, info->cpu[i].address)) - continue; - pcpu = pcpu_devices + cpu; - pcpu->address = info->cpu[i].address; - pcpu->state = (i >= info->configured) ? - CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); - set_cpu_present(cpu, true); - if (sysfs_add && smp_add_present_cpu(cpu) != 0) - set_cpu_present(cpu, false); - else - nr++; - cpu = cpumask_next(cpu, &avail); + address = info->cpu[i].core_id << smp_cpu_mt_shift; + for (j = 0; j <= smp_cpu_mtid; j++) { + if (pcpu_find_address(cpu_present_mask, address + j)) + continue; + pcpu = pcpu_devices + cpu; + pcpu->address = address + j; + pcpu->state = + (cpu >= info->configured*(smp_cpu_mtid + 1)) ? + CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + set_cpu_present(cpu, true); + if (sysfs_add && smp_add_present_cpu(cpu) != 0) + set_cpu_present(cpu, false); + else + nr++; + cpu = cpumask_next(cpu, &avail); + if (cpu >= nr_cpu_ids) + break; + } } return nr; } static void __init smp_detect_cpus(void) { - unsigned int cpu, c_cpus, s_cpus; + unsigned int cpu, mtid, c_cpus, s_cpus; struct sclp_cpu_info *info; + u16 address; + /* Get CPU information */ info = smp_get_cpu_info(); if (!info) panic("smp_detect_cpus failed to allocate memory\n"); + + /* Find boot CPU type */ if (info->has_cpu_type) { - for (cpu = 0; cpu < info->combined; cpu++) { - if (info->cpu[cpu].address != boot_cpu_address) - continue; - /* The boot cpu dictates the cpu type. 
*/ - boot_cpu_type = info->cpu[cpu].type; - break; - } + address = stap(); + for (cpu = 0; cpu < info->combined; cpu++) + if (info->cpu[cpu].core_id == address) { + /* The boot cpu dictates the cpu type. */ + boot_cpu_type = info->cpu[cpu].type; + break; + } + if (cpu >= info->combined) + panic("Could not find boot CPU type"); } + +#ifdef CONFIG_CRASH_DUMP + /* Collect CPU state of previous system */ + smp_store_cpu_states(info); +#endif + + /* Set multi-threading state for the current system */ + mtid = sclp_get_mtid(boot_cpu_type); + mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1; + pcpu_set_smt(mtid); + + /* Print number of CPUs */ c_cpus = s_cpus = 0; for (cpu = 0; cpu < info->combined; cpu++) { if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type) continue; - if (cpu < info->configured) { - smp_get_save_area(c_cpus, info->cpu[cpu].address); - c_cpus++; - } else - s_cpus++; + if (cpu < info->configured) + c_cpus += smp_cpu_mtid + 1; + else + s_cpus += smp_cpu_mtid + 1; } pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); + + /* Add CPUs present at boot */ get_online_cpus(); __smp_rescan_cpus(info, 0); put_online_cpus(); @@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { struct pcpu *pcpu; - int rc; + int base, i, rc; pcpu = pcpu_devices + cpu; if (pcpu->state != CPU_STATE_CONFIGURED) return -EIO; - if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) != + base = cpu - (cpu % (smp_cpu_mtid + 1)); + for (i = 0; i <= smp_cpu_mtid; i++) { + if (base + i < nr_cpu_ids) + if (cpu_online(base + i)) + break; + } + /* + * If this is the first CPU of the core to get online + * do an initial CPU reset. + */ + if (i > smp_cpu_mtid && + pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) != SIGP_CC_ORDER_CODE_ACCEPTED) return -EIO; @@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void) { unsigned int possible, sclp, cpu; - sclp = sclp_get_max_cpu() ?: nr_cpu_ids; + sclp = min(smp_max_threads, sclp_get_mtid_max() + 1); + sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids; possible = setup_possible_cpus ?: nr_cpu_ids; possible = min(possible, sclp); for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) @@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void) { struct pcpu *pcpu = pcpu_devices; - boot_cpu_address = stap(); pcpu->state = CPU_STATE_CONFIGURED; - pcpu->address = boot_cpu_address; + pcpu->address = stap(); pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); @@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev, const char *buf, size_t count) { struct pcpu *pcpu; - int cpu, val, rc; + int cpu, val, rc, i; char delim; if (sscanf(buf, "%d %c", &val, &delim) != 1) @@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev, rc = -EBUSY; /* disallow configuration changes of online cpus and cpu 0 */ cpu = dev->id; - if (cpu_online(cpu) || cpu == 0) + cpu -= cpu % (smp_cpu_mtid + 1); + if (cpu == 0) goto out; + for (i = 0; i <= smp_cpu_mtid; i++) + if (cpu_online(cpu + i)) + goto out; pcpu = pcpu_devices + cpu; rc = 0; switch (val) { case 0: if (pcpu->state != CPU_STATE_CONFIGURED) break; - rc = sclp_cpu_deconfigure(pcpu->address); + rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift); if (rc) break; - pcpu->state = CPU_STATE_STANDBY; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + for (i = 
0; i <= smp_cpu_mtid; i++) { + if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) + continue; + pcpu[i].state = CPU_STATE_STANDBY; + smp_cpu_set_polarization(cpu + i, + POLARIZATION_UNKNOWN); + } topology_expect_change(); break; case 1: if (pcpu->state != CPU_STATE_STANDBY) break; - rc = sclp_cpu_configure(pcpu->address); + rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift); if (rc) break; - pcpu->state = CPU_STATE_CONFIGURED; - smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + for (i = 0; i <= smp_cpu_mtid; i++) { + if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i)) + continue; + pcpu[i].state = CPU_STATE_CONFIGURED; + smp_cpu_set_polarization(cpu + i, + POLARIZATION_UNKNOWN); + } topology_expect_change(); break; default: diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 811f542b8ed4..99babea026ca 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -194,6 +194,41 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info) seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved); seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated); seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared); + if (info->mt_installed & 0x80) { + seq_printf(m, "LPAR CPUs G-MTID: %d\n", + info->mt_general & 0x1f); + seq_printf(m, "LPAR CPUs S-MTID: %d\n", + info->mt_installed & 0x1f); + seq_printf(m, "LPAR CPUs PS-MTID: %d\n", + info->mt_psmtid & 0x1f); + } +} + +static void print_ext_name(struct seq_file *m, int lvl, + struct sysinfo_3_2_2 *info) +{ + if (info->vm[lvl].ext_name_encoding == 0) + return; + if (info->ext_names[lvl][0] == 0) + return; + switch (info->vm[lvl].ext_name_encoding) { + case 1: /* EBCDIC */ + EBCASC(info->ext_names[lvl], sizeof(info->ext_names[lvl])); + break; + case 2: /* UTF-8 */ + break; + default: + return; + } + seq_printf(m, "VM%02d Extended Name: %-.256s\n", lvl, + info->ext_names[lvl]); +} + +static void print_uuid(struct seq_file *m, int i, struct sysinfo_3_2_2 *info) +{ + if (!memcmp(&info->vm[i].uuid, &NULL_UUID_BE, sizeof(uuid_be))) + return; + seq_printf(m, "VM%02d UUID: %pUb\n", i, &info->vm[i].uuid); } static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) @@ -213,6 +248,8 @@ static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info) seq_printf(m, "VM%02d CPUs Configured: %d\n", i, info->vm[i].cpus_configured); seq_printf(m, "VM%02d CPUs Standby: %d\n", i, info->vm[i].cpus_standby); seq_printf(m, "VM%02d CPUs Reserved: %d\n", i, info->vm[i].cpus_reserved); + print_ext_name(m, i, info); + print_uuid(m, i, info); } } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index b93bed76ea94..24ee33f1af24 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) return mask; } -static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, +static cpumask_t cpu_thread_map(unsigned int cpu) +{ + cpumask_t mask; + int i; + + cpumask_copy(&mask, cpumask_of(cpu)); + if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) + return mask; + cpu -= cpu % (smp_cpu_mtid + 1); + for (i = 0; i <= smp_cpu_mtid; i++) + if (cpu_present(cpu + i)) + cpumask_set_cpu(cpu + i, &mask); + return mask; +} + +static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, struct mask_info *book, struct mask_info *socket, int one_socket_per_cpu) { - unsigned int cpu; + unsigned int core; - for_each_set_bit(cpu, &tl_cpu->mask[0], 
TOPOLOGY_CPU_BITS) { - unsigned int rcpu; - int lcpu; + for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) { + unsigned int rcore; + int lcpu, i; - rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; - lcpu = smp_find_processor_id(rcpu); + rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin; + lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); if (lcpu < 0) continue; - cpumask_set_cpu(lcpu, &book->mask); - cpu_topology[lcpu].book_id = book->id; - cpumask_set_cpu(lcpu, &socket->mask); - cpu_topology[lcpu].core_id = rcpu; - if (one_socket_per_cpu) { - cpu_topology[lcpu].socket_id = rcpu; - socket = socket->next; - } else { - cpu_topology[lcpu].socket_id = socket->id; + for (i = 0; i <= smp_cpu_mtid; i++) { + cpu_topology[lcpu + i].book_id = book->id; + cpu_topology[lcpu + i].core_id = rcore; + cpu_topology[lcpu + i].thread_id = lcpu + i; + cpumask_set_cpu(lcpu + i, &book->mask); + cpumask_set_cpu(lcpu + i, &socket->mask); + if (one_socket_per_cpu) + cpu_topology[lcpu + i].socket_id = rcore; + else + cpu_topology[lcpu + i].socket_id = socket->id; + smp_cpu_set_polarization(lcpu + i, tl_core->pp); } - smp_cpu_set_polarization(lcpu, tl_cpu->pp); + if (one_socket_per_cpu) + socket = socket->next; } return socket; } @@ -108,7 +126,7 @@ static void clear_masks(void) static union topology_entry *next_tle(union topology_entry *tle) { if (!tle->nl) - return (union topology_entry *)((struct topology_cpu *)tle + 1); + return (union topology_entry *)((struct topology_core *)tle + 1); return (union topology_entry *)((struct topology_container *)tle + 1); } @@ -231,9 +249,11 @@ static void update_cpu_masks(void) spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { + cpu_topology[cpu].thread_mask = cpu_thread_map(cpu); cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); if (!MACHINE_HAS_TOPOLOGY) { + cpu_topology[cpu].thread_id = cpu; cpu_topology[cpu].core_id = cpu; cpu_topology[cpu].socket_id = cpu; cpu_topology[cpu].book_id = cpu; @@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu) return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); } +const struct cpumask *cpu_thread_mask(int cpu) +{ + return &cpu_topology[cpu].thread_mask; +} + + const struct cpumask *cpu_coregroup_mask(int cpu) { return &cpu_topology[cpu].core_mask; @@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu) } static struct sched_domain_topology_level s390_topology[] = { + { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, { cpu_book_mask, SD_INIT_NAME(BOOK) }, { cpu_cpu_mask, SD_INIT_NAME(DIE) }, diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index e34122e539a1..e53d3595a7c8 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -15,6 +15,8 @@ #include <asm/cputime.h> #include <asm/vtimer.h> #include <asm/vtime.h> +#include <asm/cpu_mf.h> +#include <asm/smp.h> static void virt_timer_expire(void); @@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock); static atomic64_t virt_timer_current; static atomic64_t virt_timer_elapsed; +static DEFINE_PER_CPU(u64, mt_cycles[32]); +static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; +static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; + static inline u64 get_vtimer(void) { u64 timer; @@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) { struct thread_info *ti = task_thread_info(tsk); u64 timer, clock, user, 
system, steal; + u64 user_scaled, system_scaled; + int i; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; + /* Do MT utilization calculation */ + if (smp_cpu_mtid) { + u64 cycles_new[32], *cycles_old; + u64 delta, mult, div; + + cycles_old = this_cpu_ptr(mt_cycles); + if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { + mult = div = 0; + for (i = 0; i <= smp_cpu_mtid; i++) { + delta = cycles_new[i] - cycles_old[i]; + mult += delta; + div += (i + 1) * delta; + } + if (mult > 0) { + /* Update scaling factor */ + __this_cpu_write(mt_scaling_mult, mult); + __this_cpu_write(mt_scaling_div, div); + memcpy(cycles_old, cycles_new, + sizeof(u64) * (smp_cpu_mtid + 1)); + } + } + } + user = S390_lowcore.user_timer - ti->user_timer; S390_lowcore.steal_timer -= user; ti->user_timer = S390_lowcore.user_timer; - account_user_time(tsk, user, user); system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; - account_system_time(tsk, hardirq_offset, system, system); + + user_scaled = user; + system_scaled = system; + /* Do MT utilization scaling */ + if (smp_cpu_mtid) { + u64 mult = __this_cpu_read(mt_scaling_mult); + u64 div = __this_cpu_read(mt_scaling_div); + + user_scaled = (user_scaled * mult) / div; + system_scaled = (system_scaled * mult) / div; + } + account_user_time(tsk, user, user_scaled); + account_system_time(tsk, hardirq_offset, system, system_scaled); steal = S390_lowcore.steal_timer; if ((s64) steal > 0) { @@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk) void vtime_account_irq_enter(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - u64 timer, system; + u64 timer, system, system_scaled; timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); @@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk) system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; - account_system_time(tsk, 0, system, system); + system_scaled = system; + /* Do MT utilization scaling */ + if (smp_cpu_mtid) { + u64 mult = __this_cpu_read(mt_scaling_mult); + u64 div = __this_cpu_read(mt_scaling_div); + + system_scaled = (system_scaled * mult) / div; + } + account_system_time(tsk, 0, system, system_scaled); virt_timer_forward(system); } diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 646db9c467d1..5fce52cf0e57 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -28,6 +28,7 @@ config KVM select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING + select SRCU ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. 
This should work diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 8a1be9017730..267523cac6de 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -357,8 +357,8 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, union asce asce; ctlreg0.val = vcpu->arch.sie_block->gcr[0]; - edat1 = ctlreg0.edat && test_vfacility(8); - edat2 = edat1 && test_vfacility(78); + edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); + edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); asce.val = get_vcpu_asce(vcpu); if (asce.r) goto real_address; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 81c77ab8102e..bebd2157edd0 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu) static int handle_stop(struct kvm_vcpu *vcpu) { + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; int rc = 0; - unsigned int action_bits; + uint8_t flags, stop_pending; vcpu->stat.exit_stop_request++; - trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); - action_bits = vcpu->arch.local_int.action_bits; + /* delay the stop if any non-stop irq is pending */ + if (kvm_s390_vcpu_has_irq(vcpu, 1)) + return 0; + + /* avoid races with the injection/SIGP STOP code */ + spin_lock(&li->lock); + flags = li->irq.stop.flags; + stop_pending = kvm_s390_is_stop_irq_pending(vcpu); + spin_unlock(&li->lock); - if (!(action_bits & ACTION_STOP_ON_STOP)) + trace_kvm_s390_stop_request(stop_pending, flags); + if (!stop_pending) return 0; - if (action_bits & ACTION_STORE_ON_STOP) { + if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) { rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_NOADDR); if (rc) @@ -279,11 +288,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu) irq.type = KVM_S390_INT_CPU_TIMER; break; case EXT_IRQ_EXTERNAL_CALL: - if (kvm_s390_si_ext_call_pending(vcpu)) - return 0; irq.type = KVM_S390_INT_EXTERNAL_CALL; irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr; - break; + rc = kvm_s390_inject_vcpu(vcpu, &irq); + /* ignore if another external call is already pending */ + if (rc == -EBUSY) + return 0; + return rc; default: return -EOPNOTSUPP; } @@ -307,17 +318,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, ®1, ®2); /* Make sure that the source is paged-in */ - srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]); - if (kvm_is_error_gpa(vcpu->kvm, srcaddr)) - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], + &srcaddr, 0); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); if (rc != 0) return rc; /* Make sure that the destination is paged-in */ - dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]); - if (kvm_is_error_gpa(vcpu->kvm, dstaddr)) - return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); + rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], + &dstaddr, 1); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); if (rc != 0) return rc; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index f00f31e66cd8..073b5f387d1d 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -19,6 +19,7 @@ #include <linux/bitmap.h> #include <asm/asm-offsets.h> #include <asm/uaccess.h> +#include <asm/sclp.h> #include "kvm-s390.h" #include "gaccess.h" #include 
"trace-s390.h" @@ -159,6 +160,12 @@ static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu) if (psw_mchk_disabled(vcpu)) active_mask &= ~IRQ_PEND_MCHK_MASK; + /* + * STOP irqs will never be actively delivered. They are triggered via + * intercept requests and cleared when the stop intercept is performed. + */ + __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask); + return active_mask; } @@ -186,9 +193,6 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) LCTL_CR10 | LCTL_CR11); vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); } - - if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) - atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); } static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) @@ -216,11 +220,18 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->lctl |= LCTL_CR14; } +static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu) +{ + if (kvm_s390_is_stop_irq_pending(vcpu)) + __set_cpuflag(vcpu, CPUSTAT_STOP_INT); +} + /* Set interception request for non-deliverable local interrupts */ static void set_intercept_indicators_local(struct kvm_vcpu *vcpu) { set_intercept_indicators_ext(vcpu); set_intercept_indicators_mchk(vcpu); + set_intercept_indicators_stop(vcpu); } static void __set_intercept_indicator(struct kvm_vcpu *vcpu, @@ -392,18 +403,6 @@ static int __must_check __deliver_restart(struct kvm_vcpu *vcpu) return rc ? -EFAULT : 0; } -static int __must_check __deliver_stop(struct kvm_vcpu *vcpu) -{ - VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop"); - vcpu->stat.deliver_stop_signal++; - trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP, - 0, 0); - - __set_cpuflag(vcpu, CPUSTAT_STOP_INT); - clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs); - return 0; -} - static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -705,7 +704,6 @@ static const deliver_irq_t deliver_irq_funcs[] = { [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc, [IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer, [IRQ_PEND_RESTART] = __deliver_restart, - [IRQ_PEND_SIGP_STOP] = __deliver_stop, [IRQ_PEND_SET_PREFIX] = __deliver_set_prefix, [IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init, }; @@ -738,21 +736,20 @@ static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu, return rc; } -/* Check whether SIGP interpretation facility has an external call pending */ -int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu) +/* Check whether an external call is pending (deliverable or not) */ +int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) { - atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl; + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; - if (!psw_extint_disabled(vcpu) && - (vcpu->arch.sie_block->gcr[0] & 0x2000ul) && - (atomic_read(sigp_ctrl) & SIGP_CTRL_C) && - (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND)) - return 1; + if (!sclp_has_sigpif()) + return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); - return 0; + return (sigp_ctrl & SIGP_CTRL_C) && + (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND); } -int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) +int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) { struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int; struct kvm_s390_interrupt_info *inti; @@ -773,7 +770,13 @@ int 
kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) if (!rc && kvm_cpu_has_pending_timer(vcpu)) rc = 1; - if (!rc && kvm_s390_si_ext_call_pending(vcpu)) + /* external call pending and deliverable */ + if (!rc && kvm_s390_ext_call_pending(vcpu) && + !psw_extint_disabled(vcpu) && + (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) + rc = 1; + + if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) rc = 1; return rc; @@ -804,14 +807,20 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; /* disabled wait */ } - __set_cpu_idle(vcpu); if (!ckc_interrupts_enabled(vcpu)) { VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); + __set_cpu_idle(vcpu); goto no_timer; } now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); + + /* underflow */ + if (vcpu->arch.sie_block->ckc < now) + return 0; + + __set_cpu_idle(vcpu); hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); no_timer: @@ -820,7 +829,7 @@ no_timer: __unset_cpu_idle(vcpu); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); - hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); + hrtimer_cancel(&vcpu->arch.ckc_timer); return 0; } @@ -840,10 +849,20 @@ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; + u64 now, sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); - kvm_s390_vcpu_wakeup(vcpu); + now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; + sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); + /* + * If the monotonic clock runs faster than the tod clock we might be + * woken up too early and have to go back to sleep to avoid deadlocks. 
+ */ + if (vcpu->arch.sie_block->ckc > now && + hrtimer_forward_now(timer, ns_to_ktime(sltime))) + return HRTIMER_RESTART; + kvm_s390_vcpu_wakeup(vcpu); return HRTIMER_NORESTART; } @@ -859,8 +878,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) /* clear pending external calls set by sigp interpretation facility */ atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags); - atomic_clear_mask(SIGP_CTRL_C, - &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl); + vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0; } int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) @@ -984,18 +1002,43 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) return 0; } -int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) +static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id) +{ + unsigned char new_val, old_val; + uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl; + + new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK); + old_val = *sigp_ctrl & ~SIGP_CTRL_C; + if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) { + /* another external call is pending */ + return -EBUSY; + } + atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); + return 0; +} + +static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_extcall_info *extcall = &li->irq.extcall; + uint16_t src_id = irq->u.extcall.code; VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", - irq->u.extcall.code); + src_id); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL, - irq->u.extcall.code, 0, 2); + src_id, 0, 2); + + /* sending vcpu invalid */ + if (src_id >= KVM_MAX_VCPUS || + kvm_get_vcpu(vcpu->kvm, src_id) == NULL) + return -EINVAL; + if (sclp_has_sigpif()) + return __inject_extcall_sigpif(vcpu, src_id); + + if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs)) + return -EBUSY; *extcall = irq->u.extcall; - set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; } @@ -1006,23 +1049,41 @@ static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_prefix_info *prefix = &li->irq.prefix; VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", - prefix->address); + irq->u.prefix.address); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX, - prefix->address, 0, 2); + irq->u.prefix.address, 0, 2); + + if (!is_vcpu_stopped(vcpu)) + return -EBUSY; *prefix = irq->u.prefix; set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); return 0; } +#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS) static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + struct kvm_s390_stop_info *stop = &li->irq.stop; + int rc = 0; trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2); - li->action_bits |= ACTION_STOP_ON_STOP; - set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); + if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS) + return -EINVAL; + + if (is_vcpu_stopped(vcpu)) { + if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS) + rc = kvm_s390_store_status_unloaded(vcpu, + KVM_S390_STORE_STATUS_NOADDR); + return rc; + } + + if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs)) + return -EBUSY; + stop->flags = irq->u.stop.flags; + __set_cpuflag(vcpu, CPUSTAT_STOP_INT); return 0; } @@ 
-1042,14 +1103,13 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_emerg_info *emerg = &li->irq.emerg; VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", irq->u.emerg.code); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, - emerg->code, 0, 2); + irq->u.emerg.code, 0, 2); - set_bit(emerg->code, li->sigp_emerg_pending); + set_bit(irq->u.emerg.code, li->sigp_emerg_pending); set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); return 0; @@ -1061,9 +1121,9 @@ static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) struct kvm_s390_mchk_info *mchk = &li->irq.mchk; VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", - mchk->mcic); + irq->u.mchk.mcic); trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0, - mchk->mcic, 2); + irq->u.mchk.mcic, 2); /* * Because repressible machine checks can be indicated along with @@ -1121,7 +1181,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, if ((!schid && !cr6) || (schid && cr6)) return NULL; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); inti = NULL; @@ -1149,7 +1208,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, if (list_empty(&fi->list)) atomic_set(&fi->active, 0); spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); return inti; } @@ -1162,7 +1220,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) int sigcpu; int rc = 0; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) { @@ -1187,6 +1244,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) list_add_tail(&inti->list, &iter->list); } atomic_set(&fi->active, 1); + if (atomic_read(&kvm->online_vcpus) == 0) + goto unlock_fi; sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS); if (sigcpu == KVM_MAX_VCPUS) { do { @@ -1213,7 +1272,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); unlock_fi: spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); return rc; } @@ -1221,6 +1279,7 @@ int kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int) { struct kvm_s390_interrupt_info *inti; + int rc; inti = kzalloc(sizeof(*inti), GFP_KERNEL); if (!inti) @@ -1239,7 +1298,6 @@ int kvm_s390_inject_vm(struct kvm *kvm, inti->ext.ext_params = s390int->parm; break; case KVM_S390_INT_PFAULT_DONE: - inti->type = s390int->type; inti->ext.ext_params2 = s390int->parm64; break; case KVM_S390_MCHK: @@ -1268,7 +1326,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64, 2); - return __inject_vm(kvm, inti); + rc = __inject_vm(kvm, inti); + if (rc) + kfree(inti); + return rc; } void kvm_s390_reinject_io_int(struct kvm *kvm, @@ -1290,13 +1351,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, case KVM_S390_SIGP_SET_PREFIX: irq->u.prefix.address = s390int->parm; break; + case KVM_S390_SIGP_STOP: + irq->u.stop.flags = s390int->parm; + break; case KVM_S390_INT_EXTERNAL_CALL: - if (irq->u.extcall.code & 0xffff0000) + if (s390int->parm & 0xffff0000) return -EINVAL; irq->u.extcall.code = s390int->parm; break; case KVM_S390_INT_EMERGENCY: - if (irq->u.emerg.code & 0xffff0000) + if (s390int->parm & 0xffff0000) return -EINVAL; irq->u.emerg.code = s390int->parm; break; @@ 
-1307,6 +1371,23 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, return 0; } +int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); +} + +void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; + + spin_lock(&li->lock); + li->irq.stop.flags = 0; + clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); + spin_unlock(&li->lock); +} + int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -1363,7 +1444,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) struct kvm_s390_float_interrupt *fi; struct kvm_s390_interrupt_info *n, *inti = NULL; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); list_for_each_entry_safe(inti, n, &fi->list, list) { @@ -1373,7 +1453,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm) fi->irq_count = 0; atomic_set(&fi->active, 0); spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); } static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti, @@ -1413,7 +1492,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) int ret = 0; int n = 0; - mutex_lock(&kvm->lock); fi = &kvm->arch.float_int; spin_lock(&fi->lock); @@ -1432,7 +1510,6 @@ static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len) } spin_unlock(&fi->lock); - mutex_unlock(&kvm->lock); return ret < 0 ? ret : n; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 3e09801e3104..0c3623927563 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -22,6 +22,7 @@ #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/module.h> +#include <linux/random.h> #include <linux/slab.h> #include <linux/timer.h> #include <asm/asm-offsets.h> @@ -29,7 +30,6 @@ #include <asm/pgtable.h> #include <asm/nmi.h> #include <asm/switch_to.h> -#include <asm/facility.h> #include <asm/sclp.h> #include "kvm-s390.h" #include "gaccess.h" @@ -50,6 +50,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "exit_instruction", VCPU_STAT(exit_instruction) }, { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, + { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, { "instruction_lctl", VCPU_STAT(instruction_lctl) }, @@ -98,15 +99,20 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { NULL } }; -unsigned long *vfacilities; -static struct gmap_notifier gmap_notifier; +/* upper facilities limit for kvm */ +unsigned long kvm_s390_fac_list_mask[] = { + 0xff82fffbf4fc2000UL, + 0x005c000000000000UL, +}; -/* test availability of vfacility */ -int test_vfacility(unsigned long nr) +unsigned long kvm_s390_fac_list_mask_size(void) { - return __test_facility(nr, (void *) vfacilities); + BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); + return ARRAY_SIZE(kvm_s390_fac_list_mask); } +static struct gmap_notifier gmap_notifier; + /* Section: not file related */ int kvm_arch_hardware_enable(void) { @@ -166,6 +172,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_IRQCHIP: case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: + case KVM_CAP_S390_USER_SIGP: r = 1; break; case KVM_CAP_NR_VCPUS: @@ -254,6 +261,10 @@ 
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) kvm->arch.use_irqchip = 1; r = 0; break; + case KVM_CAP_S390_USER_SIGP: + kvm->arch.user_sigp = 1; + r = 0; + break; default: r = -EINVAL; break; @@ -261,7 +272,24 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) return r; } -static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) +static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) +{ + int ret; + + switch (attr->attr) { + case KVM_S390_VM_MEM_LIMIT_SIZE: + ret = 0; + if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) + ret = -EFAULT; + break; + default: + ret = -ENXIO; + break; + } + return ret; +} + +static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) { int ret; unsigned int idx; @@ -283,6 +311,36 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) mutex_unlock(&kvm->lock); ret = 0; break; + case KVM_S390_VM_MEM_LIMIT_SIZE: { + unsigned long new_limit; + + if (kvm_is_ucontrol(kvm)) + return -EINVAL; + + if (get_user(new_limit, (u64 __user *)attr->addr)) + return -EFAULT; + + if (new_limit > kvm->arch.gmap->asce_end) + return -E2BIG; + + ret = -EBUSY; + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus) == 0) { + /* gmap_alloc will round the limit up */ + struct gmap *new = gmap_alloc(current->mm, new_limit); + + if (!new) { + ret = -ENOMEM; + } else { + gmap_free(kvm->arch.gmap); + new->private = kvm; + kvm->arch.gmap = new; + ret = 0; + } + } + mutex_unlock(&kvm->lock); + break; + } default: ret = -ENXIO; break; @@ -290,13 +348,276 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } +static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); + +static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_vcpu *vcpu; + int i; + + if (!test_kvm_facility(kvm, 76)) + return -EINVAL; + + mutex_lock(&kvm->lock); + switch (attr->attr) { + case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: + get_random_bytes( + kvm->arch.crypto.crycb->aes_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + kvm->arch.crypto.aes_kw = 1; + break; + case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: + get_random_bytes( + kvm->arch.crypto.crycb->dea_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); + kvm->arch.crypto.dea_kw = 1; + break; + case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: + kvm->arch.crypto.aes_kw = 0; + memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, + sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + break; + case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: + kvm->arch.crypto.dea_kw = 0; + memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, + sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); + break; + default: + mutex_unlock(&kvm->lock); + return -ENXIO; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_s390_vcpu_crypto_setup(vcpu); + exit_sie(vcpu); + } + mutex_unlock(&kvm->lock); + return 0; +} + +static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) +{ + u8 gtod_high; + + if (copy_from_user(>od_high, (void __user *)attr->addr, + sizeof(gtod_high))) + return -EFAULT; + + if (gtod_high != 0) + return -EINVAL; + + return 0; +} + +static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_vcpu *cur_vcpu; + unsigned int vcpu_idx; + u64 host_tod, gtod; + int r; + + if (copy_from_user(>od, (void __user *)attr->addr, 
sizeof(gtod))) + return -EFAULT; + + r = store_tod_clock(&host_tod); + if (r) + return r; + + mutex_lock(&kvm->lock); + kvm->arch.epoch = gtod - host_tod; + kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { + cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; + exit_sie(cur_vcpu); + } + mutex_unlock(&kvm->lock); + return 0; +} + +static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) +{ + int ret; + + if (attr->flags) + return -EINVAL; + + switch (attr->attr) { + case KVM_S390_VM_TOD_HIGH: + ret = kvm_s390_set_tod_high(kvm, attr); + break; + case KVM_S390_VM_TOD_LOW: + ret = kvm_s390_set_tod_low(kvm, attr); + break; + default: + ret = -ENXIO; + break; + } + return ret; +} + +static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) +{ + u8 gtod_high = 0; + + if (copy_to_user((void __user *)attr->addr, >od_high, + sizeof(gtod_high))) + return -EFAULT; + + return 0; +} + +static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) +{ + u64 host_tod, gtod; + int r; + + r = store_tod_clock(&host_tod); + if (r) + return r; + + gtod = host_tod + kvm->arch.epoch; + if (copy_to_user((void __user *)attr->addr, >od, sizeof(gtod))) + return -EFAULT; + + return 0; +} + +static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) +{ + int ret; + + if (attr->flags) + return -EINVAL; + + switch (attr->attr) { + case KVM_S390_VM_TOD_HIGH: + ret = kvm_s390_get_tod_high(kvm, attr); + break; + case KVM_S390_VM_TOD_LOW: + ret = kvm_s390_get_tod_low(kvm, attr); + break; + default: + ret = -ENXIO; + break; + } + return ret; +} + +static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_processor *proc; + int ret = 0; + + mutex_lock(&kvm->lock); + if (atomic_read(&kvm->online_vcpus)) { + ret = -EBUSY; + goto out; + } + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (!proc) { + ret = -ENOMEM; + goto out; + } + if (!copy_from_user(proc, (void __user *)attr->addr, + sizeof(*proc))) { + memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, + sizeof(struct cpuid)); + kvm->arch.model.ibc = proc->ibc; + memcpy(kvm->arch.model.fac->kvm, proc->fac_list, + S390_ARCH_FAC_LIST_SIZE_BYTE); + } else + ret = -EFAULT; + kfree(proc); +out: + mutex_unlock(&kvm->lock); + return ret; +} + +static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->attr) { + case KVM_S390_VM_CPU_PROCESSOR: + ret = kvm_s390_set_processor(kvm, attr); + break; + } + return ret; +} + +static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_processor *proc; + int ret = 0; + + proc = kzalloc(sizeof(*proc), GFP_KERNEL); + if (!proc) { + ret = -ENOMEM; + goto out; + } + memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); + proc->ibc = kvm->arch.model.ibc; + memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); + if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) + ret = -EFAULT; + kfree(proc); +out: + return ret; +} + +static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) +{ + struct kvm_s390_vm_cpu_machine *mach; + int ret = 0; + + mach = kzalloc(sizeof(*mach), GFP_KERNEL); + if (!mach) { + ret = -ENOMEM; + goto out; + } + get_cpu_id((struct cpuid *) &mach->cpuid); + mach->ibc = sclp_get_ibc(); + memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, + kvm_s390_fac_list_mask_size() * sizeof(u64)); + memcpy((unsigned long *)&mach->fac_list, 
S390_lowcore.stfle_fac_list, + S390_ARCH_FAC_LIST_SIZE_U64); + if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) + ret = -EFAULT; + kfree(mach); +out: + return ret; +} + +static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->attr) { + case KVM_S390_VM_CPU_PROCESSOR: + ret = kvm_s390_get_processor(kvm, attr); + break; + case KVM_S390_VM_CPU_MACHINE: + ret = kvm_s390_get_machine(kvm, attr); + break; + } + return ret; +} + static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) { int ret; switch (attr->group) { case KVM_S390_VM_MEM_CTRL: - ret = kvm_s390_mem_control(kvm, attr); + ret = kvm_s390_set_mem_control(kvm, attr); + break; + case KVM_S390_VM_TOD: + ret = kvm_s390_set_tod(kvm, attr); + break; + case KVM_S390_VM_CPU_MODEL: + ret = kvm_s390_set_cpu_model(kvm, attr); + break; + case KVM_S390_VM_CRYPTO: + ret = kvm_s390_vm_set_crypto(kvm, attr); break; default: ret = -ENXIO; @@ -308,7 +629,24 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) { - return -ENXIO; + int ret; + + switch (attr->group) { + case KVM_S390_VM_MEM_CTRL: + ret = kvm_s390_get_mem_control(kvm, attr); + break; + case KVM_S390_VM_TOD: + ret = kvm_s390_get_tod(kvm, attr); + break; + case KVM_S390_VM_CPU_MODEL: + ret = kvm_s390_get_cpu_model(kvm, attr); + break; + default: + ret = -ENXIO; + break; + } + + return ret; } static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) @@ -320,6 +658,42 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) switch (attr->attr) { case KVM_S390_VM_MEM_ENABLE_CMMA: case KVM_S390_VM_MEM_CLR_CMMA: + case KVM_S390_VM_MEM_LIMIT_SIZE: + ret = 0; + break; + default: + ret = -ENXIO; + break; + } + break; + case KVM_S390_VM_TOD: + switch (attr->attr) { + case KVM_S390_VM_TOD_LOW: + case KVM_S390_VM_TOD_HIGH: + ret = 0; + break; + default: + ret = -ENXIO; + break; + } + break; + case KVM_S390_VM_CPU_MODEL: + switch (attr->attr) { + case KVM_S390_VM_CPU_PROCESSOR: + case KVM_S390_VM_CPU_MACHINE: + ret = 0; + break; + default: + ret = -ENXIO; + break; + } + break; + case KVM_S390_VM_CRYPTO: + switch (attr->attr) { + case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: + case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: + case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: + case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: ret = 0; break; default: @@ -401,9 +775,61 @@ long kvm_arch_vm_ioctl(struct file *filp, return r; } +static int kvm_s390_query_ap_config(u8 *config) +{ + u32 fcn_code = 0x04000000UL; + u32 cc; + + asm volatile( + "lgr 0,%1\n" + "lgr 2,%2\n" + ".long 0xb2af0000\n" /* PQAP(QCI) */ + "ipm %0\n" + "srl %0,28\n" + : "=r" (cc) + : "r" (fcn_code), "r" (config) + : "cc", "0", "2", "memory" + ); + + return cc; +} + +static int kvm_s390_apxa_installed(void) +{ + u8 config[128]; + int cc; + + if (test_facility(2) && test_facility(12)) { + cc = kvm_s390_query_ap_config(config); + + if (cc) + pr_err("PQAP(QCI) failed with cc=%d", cc); + else + return config[0] & 0x40; + } + + return 0; +} + +static void kvm_s390_set_crycb_format(struct kvm *kvm) +{ + kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; + + if (kvm_s390_apxa_installed()) + kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; + else + kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; +} + +static void kvm_s390_get_cpu_id(struct cpuid *cpu_id) +{ + get_cpu_id(cpu_id); + cpu_id->version = 0xff; +} + static int 
kvm_s390_crypto_init(struct kvm *kvm) { - if (!test_vfacility(76)) + if (!test_kvm_facility(kvm, 76)) return 0; kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), @@ -411,15 +837,18 @@ static int kvm_s390_crypto_init(struct kvm *kvm) if (!kvm->arch.crypto.crycb) return -ENOMEM; - kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb | - CRYCB_FORMAT1; + kvm_s390_set_crycb_format(kvm); + + /* Disable AES/DEA protected key functions by default */ + kvm->arch.crypto.aes_kw = 0; + kvm->arch.crypto.dea_kw = 0; return 0; } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { - int rc; + int i, rc; char debug_name[16]; static unsigned long sca_offset; @@ -454,6 +883,46 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.dbf) goto out_nodbf; + /* + * The architectural maximum amount of facilities is 16 kbit. To store + * this amount, 2 kbyte of memory is required. Thus we need a full + * page to hold the active copy (arch.model.fac->sie) and the current + * facilities set (arch.model.fac->kvm). Its address size has to be + * 31 bits and word aligned. + */ + kvm->arch.model.fac = + (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!kvm->arch.model.fac) + goto out_nofac; + + memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, + S390_ARCH_FAC_LIST_SIZE_U64); + + /* + * If this KVM host runs *not* in a LPAR, relax the facility bits + * of the kvm facility mask by all missing facilities. This will allow + * to determine the right CPU model by means of the remaining facilities. + * Live guest migration must prohibit the migration of KVMs running in + * a LPAR to non LPAR hosts. + */ + if (!MACHINE_IS_LPAR) + for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) + kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; + + /* + * Apply the kvm facility mask to limit the kvm supported/tolerated + * facility list. 
+ */ + for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { + if (i < kvm_s390_fac_list_mask_size()) + kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; + else + kvm->arch.model.fac->kvm[i] = 0UL; + } + + kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); + kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; + if (kvm_s390_crypto_init(kvm) < 0) goto out_crypto; @@ -477,6 +946,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.css_support = 0; kvm->arch.use_irqchip = 0; + kvm->arch.epoch = 0; spin_lock_init(&kvm->arch.start_stop_lock); @@ -484,6 +954,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) out_nogmap: kfree(kvm->arch.crypto.crycb); out_crypto: + free_page((unsigned long)kvm->arch.model.fac); +out_nofac: debug_unregister(kvm->arch.dbf); out_nodbf: free_page((unsigned long)(kvm->arch.sca)); @@ -536,6 +1008,7 @@ static void kvm_free_vcpus(struct kvm *kvm) void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_free_vcpus(kvm); + free_page((unsigned long)kvm->arch.model.fac); free_page((unsigned long)(kvm->arch.sca)); debug_unregister(kvm->arch.dbf); kfree(kvm->arch.crypto.crycb); @@ -546,25 +1019,30 @@ void kvm_arch_destroy_vm(struct kvm *kvm) } /* Section: vcpu related */ +static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) +{ + vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); + if (!vcpu->arch.gmap) + return -ENOMEM; + vcpu->arch.gmap->private = vcpu->kvm; + + return 0; +} + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; kvm_clear_async_pf_completion_queue(vcpu); - if (kvm_is_ucontrol(vcpu->kvm)) { - vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); - if (!vcpu->arch.gmap) - return -ENOMEM; - vcpu->arch.gmap->private = vcpu->kvm; - return 0; - } - - vcpu->arch.gmap = vcpu->kvm->arch.gmap; vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT; + + if (kvm_is_ucontrol(vcpu->kvm)) + return __kvm_ucontrol_vcpu_init(vcpu); + return 0; } @@ -615,16 +1093,27 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) kvm_s390_clear_local_irqs(vcpu); } -int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { - return 0; + mutex_lock(&vcpu->kvm->lock); + vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; + mutex_unlock(&vcpu->kvm->lock); + if (!kvm_is_ucontrol(vcpu->kvm)) + vcpu->arch.gmap = vcpu->kvm->arch.gmap; } static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) { - if (!test_vfacility(76)) + if (!test_kvm_facility(vcpu->kvm, 76)) return; + vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); + + if (vcpu->kvm->arch.crypto.aes_kw) + vcpu->arch.sie_block->ecb3 |= ECB3_AES; + if (vcpu->kvm->arch.crypto.dea_kw) + vcpu->arch.sie_block->ecb3 |= ECB3_DEA; + vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; } @@ -654,14 +1143,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED | CPUSTAT_GED); vcpu->arch.sie_block->ecb = 6; - if (test_vfacility(50) && test_vfacility(73)) + if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) vcpu->arch.sie_block->ecb |= 0x10; vcpu->arch.sie_block->ecb2 = 8; - vcpu->arch.sie_block->eca = 0xD1002000U; + vcpu->arch.sie_block->eca = 0xC1002000U; if (sclp_has_siif()) vcpu->arch.sie_block->eca |= 1; - vcpu->arch.sie_block->fac = (int) (long) vfacilities; + if (sclp_has_sigpif()) + vcpu->arch.sie_block->eca |= 0x10000000U; vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | 
ICTL_TPROT; @@ -670,10 +1160,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) if (rc) return rc; } - hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); + hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; - get_cpu_id(&vcpu->arch.cpu_id); - vcpu->arch.cpu_id.version = 0xff; + + mutex_lock(&vcpu->kvm->lock); + vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; + memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, + S390_ARCH_FAC_LIST_SIZE_BYTE); + vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; + mutex_unlock(&vcpu->kvm->lock); kvm_s390_vcpu_crypto_setup(vcpu); @@ -717,6 +1212,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); } + vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; spin_lock_init(&vcpu->arch.local_int.lock); vcpu->arch.local_int.float_int = &kvm->arch.float_int; @@ -741,7 +1237,7 @@ out: int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { - return kvm_cpu_has_interrupt(vcpu); + return kvm_s390_vcpu_has_irq(vcpu, 0); } void s390_vcpu_block(struct kvm_vcpu *vcpu) @@ -869,6 +1365,8 @@ static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, case KVM_REG_S390_PFTOKEN: r = get_user(vcpu->arch.pfault_token, (u64 __user *)reg->addr); + if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) + kvm_clear_async_pf_completion_queue(vcpu); break; case KVM_REG_S390_PFCOMPARE: r = get_user(vcpu->arch.pfault_compare, @@ -1176,7 +1674,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) return 0; if (psw_extint_disabled(vcpu)) return 0; - if (kvm_cpu_has_interrupt(vcpu)) + if (kvm_s390_vcpu_has_irq(vcpu, 0)) return 0; if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) return 0; @@ -1341,6 +1839,8 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->arch.pfault_token = kvm_run->s.regs.pft; vcpu->arch.pfault_select = kvm_run->s.regs.pfs; vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; + if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) + kvm_clear_async_pf_completion_queue(vcpu); } kvm_run->kvm_dirty_regs = 0; } @@ -1559,15 +2059,10 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) spin_lock(&vcpu->kvm->arch.start_stop_lock); online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); - /* Need to lock access to action_bits to avoid a SIGP race condition */ - spin_lock(&vcpu->arch.local_int.lock); - atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); - /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ - vcpu->arch.local_int.action_bits &= - ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); - spin_unlock(&vcpu->arch.local_int.lock); + kvm_s390_clear_stop_irq(vcpu); + atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); __disable_ibs_on_vcpu(vcpu); for (i = 0; i < online_vcpus; i++) { @@ -1783,30 +2278,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, static int __init kvm_s390_init(void) { - int ret; - ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); - if (ret) - return ret; - - /* - * guests can ask for up to 255+1 double words, we need a full page - * to hold the maximum amount of facilities. On the other hand, we - * only set facilities that are known to work in KVM. 
- */ - vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); - if (!vfacilities) { - kvm_exit(); - return -ENOMEM; - } - memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); - vfacilities[0] &= 0xff82fffbf47c2000UL; - vfacilities[1] &= 0x005c000000000000UL; - return 0; + return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); } static void __exit kvm_s390_exit(void) { - free_page((unsigned long) vfacilities); kvm_exit(); } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a8f3d9b71c11..985c2114d7ef 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -18,12 +18,10 @@ #include <linux/hrtimer.h> #include <linux/kvm.h> #include <linux/kvm_host.h> +#include <asm/facility.h> typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); -/* declare vfacilities extern */ -extern unsigned long *vfacilities; - /* Transactional Memory Execution related macros */ #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) #define TDB_FORMAT1 1 @@ -127,6 +125,12 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) vcpu->arch.sie_block->gpsw.mask |= cc << 44; } +/* test availability of facility in a kvm instance */ +static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) +{ + return __test_facility(nr, kvm->arch.model.fac->kvm); +} + /* are cpu states controlled by user space */ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) { @@ -183,7 +187,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu); /* is cmma enabled */ bool kvm_s390_cmma_enabled(struct kvm *kvm); -int test_vfacility(unsigned long nr); +unsigned long kvm_s390_fac_list_mask_size(void); +extern unsigned long kvm_s390_fac_list_mask[]; /* implemented in diag.c */ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); @@ -228,11 +233,13 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, struct kvm_s390_irq *s390irq); /* implemented in interrupt.c */ -int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); +int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop); int psw_extint_disabled(struct kvm_vcpu *vcpu); void kvm_s390_destroy_adapters(struct kvm *kvm); -int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu); +int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu); extern struct kvm_device_ops kvm_flic_ops; +int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu); +void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu); /* implemented in guestdbg.c */ void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 1be578d64dfc..bdd9b5b17e03 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -337,19 +337,24 @@ static int handle_io_inst(struct kvm_vcpu *vcpu) static int handle_stfl(struct kvm_vcpu *vcpu) { int rc; + unsigned int fac; vcpu->stat.instruction_stfl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); + /* + * We need to shift the lower 32 facility bits (bit 0-31) from a u64 + * into a u32 memory representation. They will remain bits 0-31. 
+ */ + fac = *vcpu->kvm->arch.model.fac->sie >> 32; rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), - vfacilities, 4); + &fac, sizeof(fac)); if (rc) return rc; - VCPU_EVENT(vcpu, 5, "store facility list value %x", - *(unsigned int *) vfacilities); - trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); + VCPU_EVENT(vcpu, 5, "store facility list value %x", fac); + trace_kvm_s390_handle_stfl(vcpu, fac); return 0; } diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 6651f9f73973..23b1e86b2122 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, struct kvm_s390_local_interrupt *li; int cpuflags; int rc; + int ext_call_pending; li = &dst_vcpu->arch.local_int; cpuflags = atomic_read(li->cpuflags); - if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED))) + ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu); + if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending) rc = SIGP_CC_ORDER_CODE_ACCEPTED; else { *reg &= 0xffffffff00000000UL; - if (cpuflags & CPUSTAT_ECALL_PEND) + if (ext_call_pending) *reg |= SIGP_STATUS_EXT_CALL_PENDING; if (cpuflags & CPUSTAT_STOPPED) *reg |= SIGP_STATUS_STOPPED; @@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, } static int __sigp_external_call(struct kvm_vcpu *vcpu, - struct kvm_vcpu *dst_vcpu) + struct kvm_vcpu *dst_vcpu, u64 *reg) { struct kvm_s390_irq irq = { .type = KVM_S390_INT_EXTERNAL_CALL, @@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, int rc; rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); - if (!rc) + if (rc == -EBUSY) { + *reg &= 0xffffffff00000000UL; + *reg |= SIGP_STATUS_EXT_CALL_PENDING; + return SIGP_CC_STATUS_STORED; + } else if (rc == 0) { VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", dst_vcpu->vcpu_id); - - return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; -} - -static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) -{ - struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; - int rc = SIGP_CC_ORDER_CODE_ACCEPTED; - - spin_lock(&li->lock); - if (li->action_bits & ACTION_STOP_ON_STOP) { - /* another SIGP STOP is pending */ - rc = SIGP_CC_BUSY; - goto out; } - if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { - if ((action & ACTION_STORE_ON_STOP) != 0) - rc = -ESHUTDOWN; - goto out; - } - set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs); - li->action_bits |= action; - atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); - kvm_s390_vcpu_wakeup(dst_vcpu); -out: - spin_unlock(&li->lock); - return rc; + return rc ? 
rc : SIGP_CC_ORDER_CODE_ACCEPTED; } static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) { + struct kvm_s390_irq irq = { + .type = KVM_S390_SIGP_STOP, + }; int rc; - rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP); - VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id); + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); + if (rc == -EBUSY) + rc = SIGP_CC_BUSY; + else if (rc == 0) + VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", + dst_vcpu->vcpu_id); return rc; } @@ -151,20 +139,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu) static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u64 *reg) { + struct kvm_s390_irq irq = { + .type = KVM_S390_SIGP_STOP, + .u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS, + }; int rc; - rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP | - ACTION_STORE_ON_STOP); - VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x", - dst_vcpu->vcpu_id); - - if (rc == -ESHUTDOWN) { - /* If the CPU has already been stopped, we still have - * to save the status when doing stop-and-store. This - * has to be done after unlocking all spinlocks. */ - rc = kvm_s390_store_status_unloaded(dst_vcpu, - KVM_S390_STORE_STATUS_NOADDR); - } + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); + if (rc == -EBUSY) + rc = SIGP_CC_BUSY; + else if (rc == 0) + VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x", + dst_vcpu->vcpu_id); return rc; } @@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter) static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu, u32 address, u64 *reg) { - struct kvm_s390_local_interrupt *li; + struct kvm_s390_irq irq = { + .type = KVM_S390_SIGP_SET_PREFIX, + .u.prefix.address = address & 0x7fffe000u, + }; int rc; - li = &dst_vcpu->arch.local_int; - /* * Make sure the new value is valid memory. We only need to check the * first page, since address is 8k aligned and memory pieces are always * at least 1MB aligned and have at least a size of 1MB. 
*/ - address &= 0x7fffe000u; - if (kvm_is_error_gpa(vcpu->kvm, address)) { + if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INVALID_PARAMETER; return SIGP_CC_STATUS_STORED; } - spin_lock(&li->lock); - /* cpu must be in stopped state */ - if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { + rc = kvm_s390_inject_vcpu(dst_vcpu, &irq); + if (rc == -EBUSY) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; - rc = SIGP_CC_STATUS_STORED; - goto out_li; + return SIGP_CC_STATUS_STORED; + } else if (rc == 0) { + VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", + dst_vcpu->vcpu_id, irq.u.prefix.address); } - li->irq.prefix.address = address; - set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs); - kvm_s390_vcpu_wakeup(dst_vcpu); - rc = SIGP_CC_ORDER_CODE_ACCEPTED; - - VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id, - address); -out_li: - spin_unlock(&li->lock); return rc; } @@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, int flags; int rc; - spin_lock(&dst_vcpu->arch.local_int.lock); flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); - spin_unlock(&dst_vcpu->arch.local_int.lock); if (!(flags & CPUSTAT_STOPPED)) { *reg &= 0xffffffff00000000UL; *reg |= SIGP_STATUS_INCORRECT_STATE; @@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu, /* handle (RE)START in user space */ int rc = -EOPNOTSUPP; + /* make sure we don't race with STOP irq injection */ spin_lock(&li->lock); - if (li->action_bits & ACTION_STOP_ON_STOP) + if (kvm_s390_is_stop_irq_pending(dst_vcpu)) rc = SIGP_CC_BUSY; spin_unlock(&li->lock); @@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, break; case SIGP_EXTERNAL_CALL: vcpu->stat.instruction_sigp_external_call++; - rc = __sigp_external_call(vcpu, dst_vcpu); + rc = __sigp_external_call(vcpu, dst_vcpu, status_reg); break; case SIGP_EMERGENCY_SIGNAL: vcpu->stat.instruction_sigp_emergency++; @@ -394,6 +371,53 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, return rc; } +static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) +{ + if (!vcpu->kvm->arch.user_sigp) + return 0; + + switch (order_code) { + case SIGP_SENSE: + case SIGP_EXTERNAL_CALL: + case SIGP_EMERGENCY_SIGNAL: + case SIGP_COND_EMERGENCY_SIGNAL: + case SIGP_SENSE_RUNNING: + return 0; + /* update counters as we're directly dropping to user space */ + case SIGP_STOP: + vcpu->stat.instruction_sigp_stop++; + break; + case SIGP_STOP_AND_STORE_STATUS: + vcpu->stat.instruction_sigp_stop_store_status++; + break; + case SIGP_STORE_STATUS_AT_ADDRESS: + vcpu->stat.instruction_sigp_store_status++; + break; + case SIGP_SET_PREFIX: + vcpu->stat.instruction_sigp_prefix++; + break; + case SIGP_START: + vcpu->stat.instruction_sigp_start++; + break; + case SIGP_RESTART: + vcpu->stat.instruction_sigp_restart++; + break; + case SIGP_INITIAL_CPU_RESET: + vcpu->stat.instruction_sigp_init_cpu_reset++; + break; + case SIGP_CPU_RESET: + vcpu->stat.instruction_sigp_cpu_reset++; + break; + default: + vcpu->stat.instruction_sigp_unknown++; + } + + VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space", + order_code); + + return 1; +} + int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) { int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; @@ -408,6 +432,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); order_code = 
kvm_s390_get_base_disp_rs(vcpu); + if (handle_sigp_order_in_user_space(vcpu, order_code)) + return -EOPNOTSUPP; if (r1 % 2) parameter = vcpu->run->s.regs.gprs[r1]; diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 647e9d6a4818..653a7ec09ef5 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets, * Trace point for a vcpu's stop requests. */ TRACE_EVENT(kvm_s390_stop_request, - TP_PROTO(unsigned int action_bits), - TP_ARGS(action_bits), + TP_PROTO(unsigned char stop_irq, unsigned char flags), + TP_ARGS(stop_irq, flags), TP_STRUCT__entry( - __field(unsigned int, action_bits) + __field(unsigned char, stop_irq) + __field(unsigned char, flags) ), TP_fast_assign( - __entry->action_bits = action_bits; + __entry->stop_irq = stop_irq; + __entry->flags = flags; ), - TP_printk("stop request, action_bits = %08x", - __entry->action_bits) + TP_printk("stop request, stop irq = %u, flags = %08x", + __entry->stop_irq, __entry->flags) ); diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index 034a35a3e9c1..d6c9991f7797 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -12,7 +12,15 @@ #include <linux/smp.h> #include <asm/io.h> -int spin_retry = 1000; +int spin_retry = -1; + +static int __init spin_retry_init(void) +{ + if (spin_retry < 0) + spin_retry = MACHINE_HAS_CAD ? 10 : 1000; + return 0; +} +early_initcall(spin_retry_init); /** * spin_retry= parameter @@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str) } __setup("spin_retry=", spin_retry_setup); +static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old) +{ + asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock)); +} + void arch_spin_lock_wait(arch_spinlock_t *lp) { unsigned int cpu = SPINLOCK_LOCKVAL; @@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp) /* Loop for a while on the lock value. */ count = spin_retry; do { + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&lp->lock, owner); owner = ACCESS_ONCE(lp->lock); } while (owner && count-- > 0); if (!owner) @@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) /* Loop for a while on the lock value. */ count = spin_retry; do { + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&lp->lock, owner); owner = ACCESS_ONCE(lp->lock); } while (owner && count-- > 0); if (!owner) @@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags); int arch_spin_trylock_retry(arch_spinlock_t *lp) { + unsigned int cpu = SPINLOCK_LOCKVAL; + unsigned int owner; int count; - for (count = spin_retry; count > 0; count--) - if (arch_spin_trylock_once(lp)) - return 1; + for (count = spin_retry; count > 0; count--) { + owner = ACCESS_ONCE(lp->lock); + /* Try to get the lock if it is free. 
*/ + if (!owner) { + if (_raw_compare_and_swap(&lp->lock, 0, cpu)) + return 1; + } else if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&lp->lock, owner); + } return 0; } EXPORT_SYMBOL(arch_spin_trylock_retry); @@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw) } old = ACCESS_ONCE(rw->lock); owner = ACCESS_ONCE(rw->owner); - if ((int) old < 0) + if ((int) old < 0) { + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&rw->lock, old); continue; + } if (_raw_compare_and_swap(&rw->lock, old, old + 1)) return; } @@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw) while (count-- > 0) { old = ACCESS_ONCE(rw->lock); - if ((int) old < 0) + if ((int) old < 0) { + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&rw->lock, old); continue; + } if (_raw_compare_and_swap(&rw->lock, old, old + 1)) return 1; } @@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev) } if ((old & 0x7fffffff) == 0 && (int) prev >= 0) break; + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&rw->lock, old); } } EXPORT_SYMBOL(_raw_write_lock_wait); @@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) smp_rmb(); if ((old & 0x7fffffff) == 0 && (int) prev >= 0) break; + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&rw->lock, old); } } EXPORT_SYMBOL(_raw_write_lock_wait); @@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw) while (count-- > 0) { old = ACCESS_ONCE(rw->lock); - if (old) + if (old) { + if (MACHINE_HAS_CAD) + _raw_compare_and_delay(&rw->lock, old); continue; + } if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000)) return 1; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 9065d5aa3932..3ff86533f7db 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address) table = table + ((address >> 20) & 0x7ff); if (bad_address(table)) goto bad; - pr_cont(KERN_CONT "S:%016lx ", *table); + pr_cont("S:%016lx ", *table); if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) goto out; table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); @@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr) return; if (!printk_ratelimit()) return; - printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d", + printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", regs->int_code & 0xffff, regs->int_code >> 17); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk(KERN_CONT "\n"); diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 639fce464008..5c586c78ca8d 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -235,10 +235,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; - down_read(&mm->mmap_sem); - ret = get_user_pages(current, mm, start, - nr_pages - nr, write, 0, pages, NULL); - up_read(&mm->mmap_sem); + ret = get_user_pages_unlocked(current, mm, start, + nr_pages - nr, write, 0, pages); /* Have to be a bit careful with return values */ if (nr > 0) ret = (ret < 0) ? 
nr : ret + nr; diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3c80d2e38f03..210ffede0153 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) return 0; } -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, - int write) -{ - return ERR_PTR(-EINVAL); -} - int pmd_huge(pmd_t pmd) { if (!MACHINE_HAS_HPAGE) @@ -210,17 +204,3 @@ int pud_huge(pud_t pud) { return 0; } - -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, - pmd_t *pmdp, int write) -{ - struct page *page; - - if (!MACHINE_HAS_HPAGE) - return NULL; - - page = pmd_page(*pmdp); - if (page) - page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT); - return page; -} diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c7235e01fd67..d35b15113b17 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -71,13 +71,16 @@ static void __init setup_zero_pages(void) break; case 0x2827: /* zEC12 */ case 0x2828: /* zEC12 */ - default: order = 5; break; + case 0x2964: /* z13 */ + default: + order = 7; + break; } /* Limit number of empty zero pages for small memory sizes */ - if (order > 2 && totalram_pages <= 16384) - order = 2; + while (order > 2 && (totalram_pages >> 10) < (1UL << order)) + order--; empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!empty_zero_page) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 9b436c21195e..d008f638b2cd 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -28,8 +28,12 @@ #include <linux/module.h> #include <linux/random.h> #include <linux/compat.h> +#include <linux/security.h> #include <asm/pgalloc.h> +unsigned long mmap_rnd_mask; +unsigned long mmap_align_mask; + static unsigned long stack_maxrandom_size(void) { if (!(current->flags & PF_RANDOMIZE)) @@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void) { if (!(current->flags & PF_RANDOMIZE)) return 0; - /* 8MB randomization for mmap_base */ - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; + if (is_32bit_task()) + return (get_random_int() & 0x7ff) << PAGE_SHIFT; + else + return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; } static unsigned long mmap_base_legacy(void) @@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void) return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap; } +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + int do_color_align; + + if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = !is_32bit_task(); + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_color_align ? 
(mmap_align_mask << PAGE_SHIFT) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); +} + +unsigned long +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + struct vm_unmapped_area_info info; + int do_color_align; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) + return addr; + + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = !is_32bit_task(); + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); + info.high_limit = mm->mmap_base; + info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long randomize_et_dyn(void) +{ + unsigned long base; + + base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT); + return base + mmap_rnd(); +} + #ifndef CONFIG_64BIT /* @@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm) } } +static int __init setup_mmap_rnd(void) +{ + struct cpuid cpu_id; + + get_cpu_id(&cpu_id); + switch (cpu_id.machine) { + case 0x9672: + case 0x2064: + case 0x2066: + case 0x2084: + case 0x2086: + case 0x2094: + case 0x2096: + case 0x2097: + case 0x2098: + case 0x2817: + case 0x2818: + case 0x2827: + case 0x2828: + mmap_rnd_mask = 0x7ffUL; + mmap_align_mask = 0UL; + break; + case 0x2964: /* z13 */ + default: + mmap_rnd_mask = 0x3ff80UL; + mmap_align_mask = 0x7fUL; + break; + } + return 0; +} +early_initcall(setup_mmap_rnd); + #endif diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 3cf8cc03fff6..b2c1542f2ba2 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -527,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) table += (gaddr >> 53) & 0x7ff; if ((*table & _REGION_ENTRY_INVALID) && gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY, - gaddr & 0xffe0000000000000)) + gaddr & 0xffe0000000000000UL)) return -ENOMEM; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); } @@ -535,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) table += (gaddr >> 42) & 0x7ff; if ((*table & _REGION_ENTRY_INVALID) && gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY, - gaddr & 0xfffffc0000000000)) + gaddr & 0xfffffc0000000000UL)) return -ENOMEM; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); } @@ -543,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) table += (gaddr >> 31) & 0x7ff; if ((*table & _REGION_ENTRY_INVALID) && gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY, 
- gaddr & 0xffffffff80000000)) + gaddr & 0xffffffff80000000UL)) return -ENOMEM; table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 3290f11ae1d9..753a56731951 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) } /* Create a virtual mapping cookie for a PCI BAR */ -void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) +void __iomem *pci_iomap_range(struct pci_dev *pdev, + int bar, + unsigned long offset, + unsigned long max) { struct zpci_dev *zdev = get_zdev(pdev); u64 addr; @@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) idx = zdev->bars[bar].map_idx; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = zdev->fh; - zpci_iomap_start[idx].bar = bar; + if (zpci_iomap_start[idx].count++) { + BUG_ON(zpci_iomap_start[idx].fh != zdev->fh || + zpci_iomap_start[idx].bar != bar); + } else { + zpci_iomap_start[idx].fh = zdev->fh; + zpci_iomap_start[idx].bar = bar; + } + /* Detect overrun */ + BUG_ON(!zpci_iomap_start[idx].count); spin_unlock(&zpci_iomap_lock); addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); - return (void __iomem *) addr; + return (void __iomem *) addr + offset; } -EXPORT_SYMBOL_GPL(pci_iomap); +EXPORT_SYMBOL_GPL(pci_iomap_range); + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL(pci_iomap); void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) { @@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = 0; - zpci_iomap_start[idx].bar = 0; + /* Detect underrun */ + BUG_ON(!zpci_iomap_start[idx].count); + if (!--zpci_iomap_start[idx].count) { + zpci_iomap_start[idx].fh = 0; + zpci_iomap_start[idx].bar = 0; + } spin_unlock(&zpci_iomap_lock); } EXPORT_SYMBOL_GPL(pci_iounmap); diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 62c5ea6d8682..8aa271b3d1ad 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, ret = get_pfn(mmio_addr, VM_WRITE, &pfn); if (ret) goto out; - io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); + io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); ret = -EFAULT; if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) @@ -96,7 +96,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, ret = get_pfn(mmio_addr, VM_READ, &pfn); if (ret) goto out; - io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); + io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); ret = -EFAULT; if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) |
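For reference, the new VM attribute groups wired up in this series (KVM_S390_VM_MEM_CTRL, KVM_S390_VM_TOD, KVM_S390_VM_CPU_MODEL and KVM_S390_VM_CRYPTO) are reached from userspace through the generic KVM device-attribute ioctls issued on the VM file descriptor. The following minimal sketch is not part of the patch; it assumes a VM fd obtained via KVM_CREATE_VM and the s390 uapi constants added by this series, and shows how the guest TOD epoch handled by kvm_s390_set_tod_low() above could be programmed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: set the low word of the guest TOD clock on an s390 VM fd. */
static int set_guest_tod_low(int vm_fd, uint64_t gtod)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_S390_VM_TOD;            /* group handled by kvm_s390_set_tod() */
	attr.attr  = KVM_S390_VM_TOD_LOW;        /* dispatched to kvm_s390_set_tod_low() */
	attr.addr  = (uint64_t)(uintptr_t)&gtod; /* kernel copies the u64 from this address */

	/* 0 on success; -1 with errno (e.g. ENXIO) if the attribute is unsupported. */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Availability of an attribute can be probed beforehand with KVM_HAS_DEVICE_ATTR on the same structure, which lands in kvm_s390_vm_has_attr() above.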