Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    |   2
-rw-r--r--  arch/s390/crypto/aes_s390.c          |  65
-rw-r--r--  arch/s390/crypto/des_s390.c          |  95
-rw-r--r--  arch/s390/hypfs/Makefile             |   2
-rw-r--r--  arch/s390/hypfs/hypfs.h              |   7
-rw-r--r--  arch/s390/hypfs/hypfs_dbfs.c         |  16
-rw-r--r--  arch/s390/hypfs/hypfs_sprp.c         | 141
-rw-r--r--  arch/s390/hypfs/inode.c              |  15
-rw-r--r--  arch/s390/include/asm/cmpxchg.h      |   5
-rw-r--r--  arch/s390/include/asm/kvm_host.h     |  15
-rw-r--r--  arch/s390/include/asm/sclp.h         |   1
-rw-r--r--  arch/s390/include/uapi/asm/hypfs.h   |  25
-rw-r--r--  arch/s390/include/uapi/asm/statfs.h  |  10
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h  |   2
-rw-r--r--  arch/s390/kernel/compat_linux.c      |   4
-rw-r--r--  arch/s390/kernel/compat_wrapper.S    |  11
-rw-r--r--  arch/s390/kernel/syscalls.S          |   2
-rw-r--r--  arch/s390/kvm/intercept.c            |  11
-rw-r--r--  arch/s390/kvm/kvm-s390.c             |  17
-rw-r--r--  arch/s390/kvm/kvm-s390.h             |   6
-rw-r--r--  arch/s390/lib/uaccess.h              |   9
-rw-r--r--  arch/s390/lib/uaccess_pt.c           |  14
22 files changed, 390 insertions, 85 deletions
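The largest crypto change in the diff below reworks CTR mode in aes_s390.c and des_s390.c: the shared ctrblk page is now guarded by ctrblk_lock, and a request that cannot take the lock falls back to a single on-stack counter block instead of waiting. The following stand-alone sketch shows only that trylock/fallback pattern under simplified names (the real code calls crypt_s390_kmctr() where the comment marks the encryption step, and ctrblk is allocated at module init); it is an illustration, not the driver code itself.

#include <crypto/algapi.h>	/* crypto_inc() */
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#define BLK 16				/* AES_BLOCK_SIZE */

static u8 *ctrblk;			/* shared PAGE_SIZE counter buffer */
static DEFINE_SPINLOCK(ctrblk_lock);	/* serializes users of ctrblk */

static void ctr_crypt_sketch(u8 *out, const u8 *in, unsigned int nbytes,
			     u8 *iv)
{
	u8 ctrbuf[BLK];			/* private fallback counter block */
	u8 *ctrptr = ctrbuf;

	/* Use the large shared buffer only if nobody else holds it. */
	if (spin_trylock(&ctrblk_lock))
		ctrptr = ctrblk;

	memcpy(ctrptr, iv, BLK);
	while (nbytes >= BLK) {
		/*
		 * The real code encrypts here via crypt_s390_kmctr(),
		 * batching up to PAGE_SIZE of counter blocks per call
		 * when ctrptr == ctrblk, one block at a time otherwise.
		 */
		crypto_inc(ctrptr, BLK);	/* advance the counter */
		out += BLK;
		in += BLK;
		nbytes -= BLK;
	}
	memcpy(iv, ctrptr, BLK);		/* hand the next counter back */

	if (ctrptr == ctrblk)
		spin_unlock(&ctrblk_lock);
}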
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 4f858f77d870..65a07750f4f9 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -596,7 +596,7 @@ config CRASH_DUMP config ZFCPDUMP def_bool n prompt "zfcpdump support" - depends on SMP + depends on 64BIT && SMP help Select this option if you want to build an zfcpdump enabled kernel. Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this. diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index b3feabd39f31..cf3c0089bef2 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -25,6 +25,7 @@ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/spinlock.h> #include "crypt_s390.h" #define AES_KEYLEN_128 1 @@ -32,6 +33,7 @@ #define AES_KEYLEN_256 4 static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); static char keylen_flag; struct s390_aes_ctx { @@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return aes_set_key(tfm, in_key, key_len); } +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(ctrptr + i, AES_BLOCK_SIZE); + } + return n; +} + static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, struct s390_aes_ctx *sctx, struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[AES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; if (!walk->nbytes) return ret; - memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= AES_BLOCK_SIZE) { - /* only use complete blocks, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : - nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(ctrblk + i, AES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = AES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, sctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > AES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrptr, AES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* * final block may be < AES_BLOCK_SIZE, copy only nbytes */ @@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, sctx->key, buf, in, - AES_BLOCK_SIZE, ctrblk); + AES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != AES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, AES_BLOCK_SIZE); + crypto_inc(ctrbuf, AES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE); + return ret; } diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 200f2a1b599d..0a5aac8a9412 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -25,6 +25,7 @@ #define DES3_KEY_SIZE (3 * DES_KEY_SIZE) static u8 *ctrblk; +static DEFINE_SPINLOCK(ctrblk_lock); struct s390_des_ctx { u8 iv[DES_BLOCK_SIZE]; @@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, } static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, - u8 *iv, struct blkcipher_walk *walk) + struct blkcipher_walk *walk) { + struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); int ret = blkcipher_walk_virt(desc, walk); unsigned int nbytes = walk->nbytes; + struct { + u8 iv[DES_BLOCK_SIZE]; + u8 key[DES3_KEY_SIZE]; + } param; if (!nbytes) goto out; - memcpy(iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.iv, walk->iv, DES_BLOCK_SIZE); + memcpy(param.key, ctx->key, DES3_KEY_SIZE); do { /* only use complete blocks */ unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); u8 *out = walk->dst.virt.addr; u8 *in = walk->src.virt.addr; - ret = crypt_s390_kmc(func, iv, out, in, n); + ret = crypt_s390_kmc(func, ¶m, out, in, n); if (ret < 0 || ret != n) return -EIO; nbytes &= DES_BLOCK_SIZE - 1; ret = blkcipher_walk_done(desc, walk, nbytes); } while ((nbytes = walk->nbytes)); - memcpy(walk->iv, iv, DES_BLOCK_SIZE); + memcpy(walk->iv, param.iv, DES_BLOCK_SIZE); out: return ret; @@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); } static int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, 
struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); } static struct crypto_alg cbc_des_alg = { @@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); } static int cbc_des3_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk walk; blkcipher_walk_init(&walk, dst, src, nbytes); - return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk); + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); } static struct crypto_alg cbc_des3_alg = { @@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = { } }; +static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes) +{ + unsigned int i, n; + + /* align to block size, max. PAGE_SIZE */ + n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1); + for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { + memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE); + crypto_inc(ctrptr + i, DES_BLOCK_SIZE); + } + return n; +} + static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, - struct s390_des_ctx *ctx, struct blkcipher_walk *walk) + struct s390_des_ctx *ctx, + struct blkcipher_walk *walk) { int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); - unsigned int i, n, nbytes; - u8 buf[DES_BLOCK_SIZE]; - u8 *out, *in; + unsigned int n, nbytes; + u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE]; + u8 *out, *in, *ctrptr = ctrbuf; + + if (!walk->nbytes) + return ret; - memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE); + if (spin_trylock(&ctrblk_lock)) + ctrptr = ctrblk; + + memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE); while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) { out = walk->dst.virt.addr; in = walk->src.virt.addr; while (nbytes >= DES_BLOCK_SIZE) { - /* align to block size, max. PAGE_SIZE */ - n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : - nbytes & ~(DES_BLOCK_SIZE - 1); - for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) { - memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE, - DES_BLOCK_SIZE); - crypto_inc(ctrblk + i, DES_BLOCK_SIZE); - } - ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk); - if (ret < 0 || ret != n) + if (ctrptr == ctrblk) + n = __ctrblk_init(ctrptr, nbytes); + else + n = DES_BLOCK_SIZE; + ret = crypt_s390_kmctr(func, ctx->key, out, in, + n, ctrptr); + if (ret < 0 || ret != n) { + if (ctrptr == ctrblk) + spin_unlock(&ctrblk_lock); return -EIO; + } if (n > DES_BLOCK_SIZE) - memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE, + memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE, DES_BLOCK_SIZE); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrptr, DES_BLOCK_SIZE); out += n; in += n; nbytes -= n; } ret = blkcipher_walk_done(desc, walk, nbytes); } - + if (ctrptr == ctrblk) { + if (nbytes) + memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE); + else + memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE); + spin_unlock(&ctrblk_lock); + } /* final block may be < DES_BLOCK_SIZE, copy only nbytes */ if (nbytes) { out = walk->dst.virt.addr; in = walk->src.virt.addr; ret = crypt_s390_kmctr(func, ctx->key, buf, in, - DES_BLOCK_SIZE, ctrblk); + DES_BLOCK_SIZE, ctrbuf); if (ret < 0 || ret != DES_BLOCK_SIZE) return -EIO; memcpy(out, buf, nbytes); - crypto_inc(ctrblk, DES_BLOCK_SIZE); + crypto_inc(ctrbuf, DES_BLOCK_SIZE); ret = blkcipher_walk_done(desc, walk, 0); + memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE); } - memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE); return ret; } diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile index 2e671d5004ca..06f8d95a16cd 100644 --- a/arch/s390/hypfs/Makefile +++ b/arch/s390/hypfs/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o -s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o +s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index 79f2ac55253f..b34b5ab90a31 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -13,6 +13,7 @@ #include <linux/debugfs.h> #include <linux/workqueue.h> #include <linux/kref.h> +#include <asm/hypfs.h> #define REG_FILE_MODE 0440 #define UPDATE_FILE_MODE 0220 @@ -36,6 +37,10 @@ extern int hypfs_vm_init(void); extern void hypfs_vm_exit(void); extern int hypfs_vm_create_files(struct dentry *root); +/* Set Partition-Resource Parameter */ +int hypfs_sprp_init(void); +void hypfs_sprp_exit(void); + /* debugfs interface */ struct hypfs_dbfs_file; @@ -52,6 +57,8 @@ struct hypfs_dbfs_file { int (*data_create)(void **data, void **data_free_ptr, size_t *size); void (*data_free)(const void *buf_free_ptr); + long (*unlocked_ioctl) (struct file *, unsigned int, + unsigned long); /* Private data for hypfs_dbfs.c */ struct hypfs_dbfs_data *data; diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index 17ab8b7b53cc..2badf2bf9cd7 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -81,9 +81,25 @@ static ssize_t dbfs_read(struct file *file, char __user *buf, return rc; } +static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hypfs_dbfs_file *df; + long rc; + + df = file->f_path.dentry->d_inode->i_private; + mutex_lock(&df->lock); + if (df->unlocked_ioctl) + rc = df->unlocked_ioctl(file, cmd, arg); + else + rc = -ENOTTY; + mutex_unlock(&df->lock); + return rc; +} + static const struct file_operations dbfs_ops = { .read = dbfs_read, .llseek = no_llseek, + 
.unlocked_ioctl = dbfs_ioctl, }; int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c new file mode 100644 index 000000000000..f043c3c7e73c --- /dev/null +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -0,0 +1,141 @@ +/* + * Hypervisor filesystem for Linux on s390. + * Set Partition-Resource Parameter interface. + * + * Copyright IBM Corp. 2013 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/compat.h> +#include <linux/errno.h> +#include <linux/gfp.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <asm/compat.h> +#include <asm/sclp.h> +#include "hypfs.h" + +#define DIAG304_SET_WEIGHTS 0 +#define DIAG304_QUERY_PRP 1 +#define DIAG304_SET_CAPPING 2 + +#define DIAG304_CMD_MAX 2 + +static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) +{ + register unsigned long _data asm("2") = (unsigned long) data; + register unsigned long _rc asm("3"); + register unsigned long _cmd asm("4") = cmd; + + asm volatile("diag %1,%2,0x304\n" + : "=d" (_rc) : "d" (_data), "d" (_cmd) : "memory"); + + return _rc; +} + +static void hypfs_sprp_free(const void *data) +{ + free_page((unsigned long) data); +} + +static int hypfs_sprp_create(void **data_ptr, void **free_ptr, size_t *size) +{ + unsigned long rc; + void *data; + + data = (void *) get_zeroed_page(GFP_KERNEL); + if (!data) + return -ENOMEM; + rc = hypfs_sprp_diag304(data, DIAG304_QUERY_PRP); + if (rc != 1) { + *data_ptr = *free_ptr = NULL; + *size = 0; + free_page((unsigned long) data); + return -EIO; + } + *data_ptr = *free_ptr = data; + *size = PAGE_SIZE; + return 0; +} + +static int __hypfs_sprp_ioctl(void __user *user_area) +{ + struct hypfs_diag304 diag304; + unsigned long cmd; + void __user *udata; + void *data; + int rc; + + if (copy_from_user(&diag304, user_area, sizeof(diag304))) + return -EFAULT; + if ((diag304.args[0] >> 8) != 0 || diag304.args[1] > DIAG304_CMD_MAX) + return -EINVAL; + + data = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!data) + return -ENOMEM; + + udata = (void __user *)(unsigned long) diag304.data; + if (diag304.args[1] == DIAG304_SET_WEIGHTS || + diag304.args[1] == DIAG304_SET_CAPPING) + if (copy_from_user(data, udata, PAGE_SIZE)) { + rc = -EFAULT; + goto out; + } + + cmd = *(unsigned long *) &diag304.args[0]; + diag304.rc = hypfs_sprp_diag304(data, cmd); + + if (diag304.args[1] == DIAG304_QUERY_PRP) + if (copy_to_user(udata, data, PAGE_SIZE)) { + rc = -EFAULT; + goto out; + } + + rc = copy_to_user(user_area, &diag304, sizeof(diag304)) ? 
-EFAULT : 0; +out: + free_page((unsigned long) data); + return rc; +} + +static long hypfs_sprp_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (is_compat_task()) + argp = compat_ptr(arg); + else + argp = (void __user *) arg; + switch (cmd) { + case HYPFS_DIAG304: + return __hypfs_sprp_ioctl(argp); + default: /* unknown ioctl number */ + return -ENOTTY; + } + return 0; +} + +static struct hypfs_dbfs_file hypfs_sprp_file = { + .name = "diag_304", + .data_create = hypfs_sprp_create, + .data_free = hypfs_sprp_free, + .unlocked_ioctl = hypfs_sprp_ioctl, +}; + +int hypfs_sprp_init(void) +{ + if (!sclp_has_sprp()) + return 0; + return hypfs_dbfs_create_file(&hypfs_sprp_file); +} + +void hypfs_sprp_exit(void) +{ + if (!sclp_has_sprp()) + return; + hypfs_dbfs_remove_file(&hypfs_sprp_file); +} diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index ddfe09b45134..c952b981e4f2 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -478,10 +478,14 @@ static int __init hypfs_init(void) rc = -ENODATA; goto fail_hypfs_diag_exit; } + if (hypfs_sprp_init()) { + rc = -ENODATA; + goto fail_hypfs_vm_exit; + } s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); if (!s390_kobj) { rc = -ENOMEM; - goto fail_hypfs_vm_exit; + goto fail_hypfs_sprp_exit; } rc = register_filesystem(&hypfs_type); if (rc) @@ -490,6 +494,8 @@ static int __init hypfs_init(void) fail_filesystem: kobject_put(s390_kobj); +fail_hypfs_sprp_exit: + hypfs_sprp_exit(); fail_hypfs_vm_exit: hypfs_vm_exit(); fail_hypfs_diag_exit: @@ -502,11 +508,12 @@ fail_dbfs_exit: static void __exit hypfs_exit(void) { - hypfs_diag_exit(); - hypfs_vm_exit(); - hypfs_dbfs_exit(); unregister_filesystem(&hypfs_type); kobject_put(s390_kobj); + hypfs_sprp_exit(); + hypfs_vm_exit(); + hypfs_diag_exit(); + hypfs_dbfs_exit(); } module_init(hypfs_init) diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index 0f636cbdf342..4236408070e5 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h @@ -185,11 +185,12 @@ static inline unsigned long long __cmpxchg64(void *ptr, { register_pair rp_old = {.pair = old}; register_pair rp_new = {.pair = new}; + unsigned long long *ullptr = ptr; asm volatile( " cds %0,%2,%1" - : "+&d" (rp_old), "=Q" (ptr) - : "d" (rp_new), "Q" (ptr) + : "+d" (rp_old), "+Q" (*ullptr) + : "d" (rp_new) : "memory", "cc"); return rp_old.pair; } diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d5bc3750616e..eef3dd3fd9a9 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -106,9 +106,22 @@ struct kvm_s390_sie_block { __u64 gbea; /* 0x0180 */ __u8 reserved188[24]; /* 0x0188 */ __u32 fac; /* 0x01a0 */ - __u8 reserved1a4[92]; /* 0x01a4 */ + __u8 reserved1a4[68]; /* 0x01a4 */ + __u64 itdba; /* 0x01e8 */ + __u8 reserved1f0[16]; /* 0x01f0 */ } __attribute__((packed)); +struct kvm_s390_itdb { + __u8 data[256]; +} __packed; + +struct sie_page { + struct kvm_s390_sie_block sie_block; + __u8 reserved200[1024]; /* 0x0200 */ + struct kvm_s390_itdb itdb; /* 0x0600 */ + __u8 reserved700[2304]; /* 0x0700 */ +} __packed; + struct kvm_vcpu_stat { u32 exit_userspace; u32 exit_null; diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 220e171413f8..abaca2275c7a 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -54,6 +54,7 @@ int sclp_chp_read_info(struct sclp_chp_info 
*info); void sclp_get_ipl_info(struct sclp_ipl_info *info); bool __init sclp_has_linemode(void); bool __init sclp_has_vt220(void); +bool sclp_has_sprp(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h new file mode 100644 index 000000000000..37998b449531 --- /dev/null +++ b/arch/s390/include/uapi/asm/hypfs.h @@ -0,0 +1,25 @@ +/* + * IOCTL interface for hypfs + * + * Copyright IBM Corp. 2013 + * + * Author: Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef _ASM_HYPFS_CTL_H +#define _ASM_HYPFS_CTL_H + +#include <linux/types.h> + +struct hypfs_diag304 { + __u32 args[2]; + __u64 data; + __u64 rc; +} __attribute__((packed)); + +#define HYPFS_IOCTL_MAGIC 0x10 + +#define HYPFS_DIAG304 \ + _IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304) + +#endif diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index a61d538756f2..471eb09184d4 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -35,11 +35,11 @@ struct statfs { struct statfs64 { unsigned int f_type; unsigned int f_bsize; - unsigned long f_blocks; - unsigned long f_bfree; - unsigned long f_bavail; - unsigned long f_files; - unsigned long f_ffree; + unsigned long long f_blocks; + unsigned long long f_bfree; + unsigned long long f_bavail; + unsigned long long f_files; + unsigned long long f_ffree; __kernel_fsid_t f_fsid; unsigned int f_namelen; unsigned int f_frsize; diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 864f693c237f..5eb5c9ddb120 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -280,6 +280,8 @@ #define __NR_s390_runtime_instr 342 #define __NR_kcmp 343 #define __NR_finit_module 344 +#define __NR_sched_setattr 345 +#define __NR_sched_getattr 346 #define NR_syscalls 345 /* diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index e030d2bdec1b..db02052bd137 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -286,8 +286,8 @@ asmlinkage long sys32_getegid16(void) } #ifdef CONFIG_SYSVIPC -COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, - unsigned long, third, compat_uptr_t, ptr) +COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second, + compat_ulong_t, third, compat_uptr_t, ptr) { if (call >> 16) /* hack for backward compatibility */ return -EINVAL; diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 9cb1b975b353..59c8efce1b99 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1412,3 +1412,14 @@ ENTRY(sys_finit_module_wrapper) llgtr %r3,%r3 # const char __user * lgfr %r4,%r4 # int jg sys_finit_module + +ENTRY(sys_sched_setattr_wrapper) + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # struct sched_attr __user * + jg sys_sched_setattr + +ENTRY(sys_sched_getattr_wrapper) + lgfr %r2,%r2 # pid_t + llgtr %r3,%r3 # const char __user * + llgfr %r3,%r3 # unsigned int + jg sys_sched_getattr diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 913410bd74a3..143992152ec9 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -353,3 +353,5 @@ SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev 
SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper) SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper) SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper) +SYSCALL(sys_sched_setattr,sys_sched_setattr,sys_sched_setattr_wrapper) /* 345 */ +SYSCALL(sys_sched_getattr,sys_sched_getattr,sys_sched_getattr_wrapper) diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 5ddbbde6f65c..eeb1ac7d8fa4 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -112,6 +112,17 @@ static int handle_instruction(struct kvm_vcpu *vcpu) static int handle_prog(struct kvm_vcpu *vcpu) { vcpu->stat.exit_program_interruption++; + + /* Restore ITDB to Program-Interruption TDB in guest memory */ + if (IS_TE_ENABLED(vcpu) && + !(current->thread.per_flags & PER_FLAG_NO_TE) && + IS_ITDB_VALID(vcpu)) { + copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba, + sizeof(struct kvm_s390_itdb)); + memset((void *) vcpu->arch.sie_block->itdba, 0, + sizeof(struct kvm_s390_itdb)); + } + trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7635c00a1479..e0676f390d57 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -395,6 +395,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED | CPUSTAT_GED); vcpu->arch.sie_block->ecb = 6; + if (test_vfacility(50) && test_vfacility(73)) + vcpu->arch.sie_block->ecb |= 0x10; + vcpu->arch.sie_block->ecb2 = 8; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) vfacilities; @@ -411,6 +414,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; + struct sie_page *sie_page; int rc = -EINVAL; if (id >= KVM_MAX_VCPUS) @@ -422,12 +426,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, if (!vcpu) goto out; - vcpu->arch.sie_block = (struct kvm_s390_sie_block *) - get_zeroed_page(GFP_KERNEL); - - if (!vcpu->arch.sie_block) + sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); + if (!sie_page) goto out_free_cpu; + vcpu->arch.sie_block = &sie_page->sie_block; + vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; + vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { if (!kvm->arch.sca) { @@ -1182,8 +1187,8 @@ static int __init kvm_s390_init(void) return -ENOMEM; } memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); - vfacilities[0] &= 0xff82fff3f47c0000UL; - vfacilities[1] &= 0x001c000000000000UL; + vfacilities[0] &= 0xff82fff3f4fc2000UL; + vfacilities[1] &= 0x005c000000000000UL; return 0; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 095cf51b16ec..f9559b0bd620 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -26,6 +26,12 @@ extern unsigned long *vfacilities; int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); +/* Transactional Memory Execution related macros */ +#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) +#define TDB_ADDR 0x1800UL +#define TDB_FORMAT1 1 +#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) + #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ do { \ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h index 315dbe09983e..b1a22173d027 100644 --- a/arch/s390/lib/uaccess.h +++ b/arch/s390/lib/uaccess.h @@ -6,15 +6,6 @@ #ifndef 
__ARCH_S390_LIB_UACCESS_H #define __ARCH_S390_LIB_UACCESS_H -extern size_t copy_from_user_std(size_t, const void __user *, void *); -extern size_t copy_to_user_std(size_t, void __user *, const void *); -extern size_t strnlen_user_std(size_t, const char __user *); -extern size_t strncpy_from_user_std(size_t, const char __user *, char *); -extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); -extern int futex_atomic_op_std(int, u32 __user *, int, int *); - -extern size_t copy_from_user_pt(size_t, const void __user *, void *); -extern size_t copy_to_user_pt(size_t, void __user *, const void *); extern int futex_atomic_op_pt(int, u32 __user *, int, int *); extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 0632dc50da78..61ebcc9ccb34 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -153,6 +153,8 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, unsigned long offset, done, size, kaddr; void *from, *to; + if (!mm) + return n; done = 0; retry: spin_lock(&mm->page_table_lock); @@ -209,7 +211,7 @@ fault: return 0; } -size_t copy_from_user_pt(size_t n, const void __user *from, void *to) +static size_t copy_from_user_pt(size_t n, const void __user *from, void *to) { size_t rc; @@ -221,7 +223,7 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to) return rc; } -size_t copy_to_user_pt(size_t n, void __user *to, const void *from) +static size_t copy_to_user_pt(size_t n, void __user *to, const void *from) { if (segment_eq(get_fs(), KERNEL_DS)) return copy_in_kernel(n, to, (void __user *) from); @@ -262,6 +264,8 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) return 0; if (segment_eq(get_fs(), KERNEL_DS)) return strnlen_kernel(count, src); + if (!mm) + return 0; done = 0; retry: spin_lock(&mm->page_table_lock); @@ -323,6 +327,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to, if (segment_eq(get_fs(), KERNEL_DS)) return copy_in_kernel(n, to, from); + if (!mm) + return n; done = 0; retry: spin_lock(&mm->page_table_lock); @@ -411,6 +417,8 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) if (segment_eq(get_fs(), KERNEL_DS)) return __futex_atomic_op_pt(op, uaddr, oparg, old); + if (unlikely(!current->mm)) + return -EFAULT; spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) __dat_user_addr((__force unsigned long) uaddr, 1); @@ -448,6 +456,8 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, if (segment_eq(get_fs(), KERNEL_DS)) return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); + if (unlikely(!current->mm)) + return -EFAULT; spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) __dat_user_addr((__force unsigned long) uaddr, 1); |
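The hypfs_sprp.c handler above is reached from user space through an ioctl on the diag_304 file that hypfs creates in debugfs. A minimal user-space sketch of a DIAG304_QUERY_PRP call is shown below; it assumes kernel headers installed from this series (for <asm/hypfs.h>), the conventional /sys/kernel/debug/s390_hypfs/diag_304 path, and a process with CAP_SYS_ADMIN. The DIAG304_QUERY_PRP value is copied from hypfs_sprp.c because the uapi header does not export it.

/* Hypothetical query of the partition-resource parameters via diag 0x304. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/hypfs.h>			/* struct hypfs_diag304, HYPFS_DIAG304 */

#define DIAG304_QUERY_PRP	1	/* from hypfs_sprp.c, not in the uapi header */
#define RESPONSE_SIZE		4096	/* the kernel copies one page back */

int main(void)
{
	struct hypfs_diag304 d304;
	void *buf;
	int fd, rc = 1;

	buf = calloc(1, RESPONSE_SIZE);
	if (!buf)
		return 1;

	memset(&d304, 0, sizeof(d304));
	d304.args[0] = 0;			/* upper bits must be clear */
	d304.args[1] = DIAG304_QUERY_PRP;	/* subcode 1: query */
	d304.data = (unsigned long) buf;	/* user buffer for the response */

	fd = open("/sys/kernel/debug/s390_hypfs/diag_304", O_RDONLY);
	if (fd < 0) {
		perror("open");
		goto out;
	}
	if (ioctl(fd, HYPFS_DIAG304, &d304) < 0) {
		perror("ioctl");
	} else {
		printf("diag 0x304 rc = %llu\n", (unsigned long long) d304.rc);
		rc = 0;
	}
	close(fd);
out:
	free(buf);
	return rc;
}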