Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                        |   3
-rw-r--r--  arch/s390/Makefile                       |   2
-rw-r--r--  arch/s390/appldata/appldata_base.c       |   2
-rw-r--r--  arch/s390/boot/compressed/Makefile       |   1
-rw-r--r--  arch/s390/configs/default_defconfig      |  11
-rw-r--r--  arch/s390/configs/gcov_defconfig         |  13
-rw-r--r--  arch/s390/configs/performance_defconfig  |  13
-rw-r--r--  arch/s390/crypto/aes_s390.c              | 590
-rw-r--r--  arch/s390/crypto/crc32-vx.c              |   2
-rw-r--r--  arch/s390/crypto/des_s390.c              | 330
-rw-r--r--  arch/s390/crypto/ghash_s390.c            |  20
-rw-r--r--  arch/s390/crypto/prng.c                  | 101
-rw-r--r--  arch/s390/crypto/sha1_s390.c             |   2
-rw-r--r--  arch/s390/crypto/sha256_s390.c           |   2
-rw-r--r--  arch/s390/crypto/sha512_s390.c           |   2
-rw-r--r--  arch/s390/crypto/sha_common.c            |  23
-rw-r--r--  arch/s390/include/asm/cpacf.h            | 178
-rw-r--r--  arch/s390/include/asm/facilities_src.h   |  24
-rw-r--r--  arch/s390/include/asm/fpu/api.h          |  32
-rw-r--r--  arch/s390/include/asm/kvm_host.h         | 136
-rw-r--r--  arch/s390/include/asm/lowcore.h          |   3
-rw-r--r--  arch/s390/include/asm/mmu.h              |   1
-rw-r--r--  arch/s390/include/asm/mmu_context.h      |   1
-rw-r--r--  arch/s390/include/asm/pci.h              |   5
-rw-r--r--  arch/s390/include/asm/pgtable.h          |  74
-rw-r--r--  arch/s390/include/asm/tlbflush.h         |  72
-rw-r--r--  arch/s390/include/asm/uaccess.h          |   8
-rw-r--r--  arch/s390/include/asm/vx-insn.h          | 148
-rw-r--r--  arch/s390/include/uapi/asm/Kbuild        |   1
-rw-r--r--  arch/s390/kernel/Makefile                |   3
-rw-r--r--  arch/s390/kernel/asm-offsets.c           |   1
-rw-r--r--  arch/s390/kernel/crash_dump.c            |   4
-rw-r--r--  arch/s390/kernel/early.c                 |   2
-rw-r--r--  arch/s390/kernel/fpu.c                   | 317
-rw-r--r--  arch/s390/kernel/ftrace.c                |   3
-rw-r--r--  arch/s390/kernel/kprobes.c               |   2
-rw-r--r--  arch/s390/kernel/nmi.c                   |  67
-rw-r--r--  arch/s390/kernel/sysinfo.c               |   2
-rw-r--r--  arch/s390/kernel/time.c                  |  86
-rw-r--r--  arch/s390/kernel/traps.c                 |   3
-rw-r--r--  arch/s390/kernel/vdso32/Makefile         |   3
-rw-r--r--  arch/s390/kernel/vdso64/Makefile         |   3
-rw-r--r--  arch/s390/kvm/gaccess.c                  |  37
-rw-r--r--  arch/s390/kvm/guestdbg.c                 |  59
-rw-r--r--  arch/s390/kvm/intercept.c                |   1
-rw-r--r--  arch/s390/kvm/interrupt.c                |  98
-rw-r--r--  arch/s390/kvm/kvm-s390.c                 | 123
-rw-r--r--  arch/s390/kvm/kvm-s390.h                 |  14
-rw-r--r--  arch/s390/kvm/priv.c                     |  21
-rw-r--r--  arch/s390/kvm/vsie.c                     |   2
-rw-r--r--  arch/s390/mm/fault.c                     |  32
-rw-r--r--  arch/s390/mm/gmap.c                      |  15
-rw-r--r--  arch/s390/mm/pageattr.c                  |   4
-rw-r--r--  arch/s390/mm/pgtable.c                   |  18
-rw-r--r--  arch/s390/pci/pci.c                      |   9
-rw-r--r--  arch/s390/pci/pci_dma.c                  | 246
56 files changed, 1445 insertions, 1530 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c109f073d454..deeadfa291ba 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -73,6 +73,7 @@ config S390
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -109,6 +110,7 @@ config S390
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_PROT_NUMA_PROT_NONE
+ select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
@@ -136,6 +138,7 @@ config S390
select HAVE_DMA_API_DEBUG
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 224b42734f0d..54e00526b8df 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -46,6 +46,8 @@ cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
+cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
+
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 15c94246b600..f587c4811faf 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -542,7 +542,7 @@ static int __init appldata_init(void)
rc = PTR_ERR(appldata_pdev);
goto out_driver;
}
- appldata_wq = create_singlethread_workqueue("appldata");
+ appldata_wq = alloc_ordered_workqueue("appldata", 0);
if (!appldata_wq) {
rc = -ENOMEM;
goto out_device;
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 33ba697c782d..0daa070d6c9d 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -17,6 +17,7 @@ KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 412b1bd21029..45968686f918 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -260,7 +260,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -269,6 +268,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -281,7 +282,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -299,6 +299,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -359,6 +361,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -409,6 +412,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -428,6 +432,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -453,7 +458,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -495,6 +499,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index bec279eb4b93..1dd05e345c4d 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -15,6 +15,8 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -487,6 +493,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 1751446a5bbb..29d1178666f0 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -16,6 +16,8 @@ CONFIG_NUMA_BALANCING=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
@@ -255,7 +257,6 @@ CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
-CONFIG_NF_NAT_IPV4=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@@ -264,6 +265,8 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -276,7 +279,6 @@ CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NF_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -294,6 +296,8 @@ CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
@@ -353,6 +357,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
@@ -403,6 +408,7 @@ CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -422,6 +428,7 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
+CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -447,7 +454,6 @@ CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
@@ -488,6 +494,7 @@ CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 2ea18b050309..303d28eb03a2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -31,69 +31,29 @@
#include <crypto/xts.h>
#include <asm/cpacf.h>
-#define AES_KEYLEN_128 1
-#define AES_KEYLEN_192 2
-#define AES_KEYLEN_256 4
-
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
-static char keylen_flag;
+
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
struct s390_aes_ctx {
u8 key[AES_MAX_KEY_SIZE];
- long enc;
- long dec;
int key_len;
+ unsigned long fc;
union {
struct crypto_skcipher *blk;
struct crypto_cipher *cip;
} fallback;
};
-struct pcc_param {
- u8 key[32];
- u8 tweak[16];
- u8 block[16];
- u8 bit[16];
- u8 xts[16];
-};
-
struct s390_xts_ctx {
u8 key[32];
u8 pcc_key[32];
- long enc;
- long dec;
int key_len;
+ unsigned long fc;
struct crypto_skcipher *fallback;
};
-/*
- * Check if the key_len is supported by the HW.
- * Returns 0 if it is, a positive number if it is not and software fallback is
- * required or a negative number in case the key size is not valid
- */
-static int need_fallback(unsigned int key_len)
-{
- switch (key_len) {
- case 16:
- if (!(keylen_flag & AES_KEYLEN_128))
- return 1;
- break;
- case 24:
- if (!(keylen_flag & AES_KEYLEN_192))
- return 1;
- break;
- case 32:
- if (!(keylen_flag & AES_KEYLEN_256))
- return 1;
- break;
- default:
- return -1;
- break;
- }
- return 0;
-}
-
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -117,72 +77,44 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret < 0) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KM_AES_128 :
+ (key_len == 24) ? CPACF_KM_AES_192 :
+ (key_len == 32) ? CPACF_KM_AES_256 : 0;
- sctx->key_len = key_len;
- if (!ret) {
- memcpy(sctx->key, in_key, key_len);
- return 0;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_cip(tfm, in_key, key_len);
- return setkey_fallback_cip(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- if (unlikely(need_fallback(sctx->key_len))) {
+ if (unlikely(!sctx->fc)) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
return;
}
-
- switch (sctx->key_len) {
- case 16:
- cpacf_km(CPACF_KM_AES_128_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 24:
- cpacf_km(CPACF_KM_AES_192_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 32:
- cpacf_km(CPACF_KM_AES_256_ENC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- }
+ cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- if (unlikely(need_fallback(sctx->key_len))) {
+ if (unlikely(!sctx->fc)) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
return;
}
-
- switch (sctx->key_len) {
- case 16:
- cpacf_km(CPACF_KM_AES_128_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 24:
- cpacf_km(CPACF_KM_AES_192_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- case 32:
- cpacf_km(CPACF_KM_AES_256_DEC, &sctx->key, out, in,
- AES_BLOCK_SIZE);
- break;
- }
+ cpacf_km(sctx->fc | CPACF_DECRYPT,
+ &sctx->key, out, in, AES_BLOCK_SIZE);
}
static int fallback_init_cip(struct crypto_tfm *tfm)
@@ -291,50 +223,37 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret > 0) {
- sctx->key_len = key_len;
- return setkey_fallback_blk(tfm, in_key, key_len);
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KM_AES_128 :
+ (key_len == 24) ? CPACF_KM_AES_192 :
+ (key_len == 32) ? CPACF_KM_AES_256 : 0;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KM_AES_128_ENC;
- sctx->dec = CPACF_KM_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KM_AES_192_ENC;
- sctx->dec = CPACF_KM_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KM_AES_256_ENC;
- sctx->dec = CPACF_KM_AES_256_DEC;
- break;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes;
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n;
+ int ret;
- while ((nbytes = walk->nbytes)) {
+ ret = blkcipher_walk_virt(desc, walk);
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_km(func, param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_km(sctx->fc | modifier, sctx->key,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
return ret;
@@ -347,11 +266,11 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
+ return ecb_aes_crypt(desc, 0, &walk);
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
@@ -361,11 +280,11 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
+ return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int fallback_init_blk(struct crypto_tfm *tfm)
@@ -420,64 +339,45 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- int ret;
+ unsigned long fc;
- ret = need_fallback(key_len);
- if (ret > 0) {
- sctx->key_len = key_len;
- return setkey_fallback_blk(tfm, in_key, key_len);
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KMC_AES_128 :
+ (key_len == 24) ? CPACF_KMC_AES_192 :
+ (key_len == 32) ? CPACF_KMC_AES_256 : 0;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KMC_AES_128_ENC;
- sctx->dec = CPACF_KMC_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KMC_AES_192_ENC;
- sctx->dec = CPACF_KMC_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KMC_AES_256_ENC;
- sctx->dec = CPACF_KMC_AES_256_DEC;
- break;
- }
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
+ unsigned int nbytes, n;
+ int ret;
struct {
u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
} param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
memcpy(param.key, sctx->key, sctx->key_len);
- do {
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_kmc(func, &param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
+ n = nbytes & ~(AES_BLOCK_SIZE - 1);
+ cpacf_kmc(sctx->fc | modifier, &param,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
-
-out:
return ret;
}
@@ -488,11 +388,11 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, &walk);
+ return cbc_aes_crypt(desc, 0, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -502,11 +402,11 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(need_fallback(sctx->key_len)))
+ if (unlikely(!sctx->fc))
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, &walk);
+ return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -594,83 +494,67 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
+ unsigned long fc;
int err;
err = xts_check_key(tfm, in_key, key_len);
if (err)
return err;
- switch (key_len) {
- case 32:
- xts_ctx->enc = CPACF_KM_XTS_128_ENC;
- xts_ctx->dec = CPACF_KM_XTS_128_DEC;
- memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
- break;
- case 48:
- xts_ctx->enc = 0;
- xts_ctx->dec = 0;
- xts_fallback_setkey(tfm, in_key, key_len);
- break;
- case 64:
- xts_ctx->enc = CPACF_KM_XTS_256_ENC;
- xts_ctx->dec = CPACF_KM_XTS_256_DEC;
- memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc_key, in_key + 32, 32);
- break;
- default:
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- return -EINVAL;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 32) ? CPACF_KM_XTS_128 :
+ (key_len == 64) ? CPACF_KM_XTS_256 : 0;
+
+ /* Check if the function code is available */
+ xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ if (!xts_ctx->fc)
+ return xts_fallback_setkey(tfm, in_key, key_len);
+
+ /* Split the XTS key into the two subkeys */
+ key_len = key_len / 2;
xts_ctx->key_len = key_len;
+ memcpy(xts_ctx->key, in_key, key_len);
+ memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
return 0;
}
-static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
- struct s390_xts_ctx *xts_ctx,
+static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
struct blkcipher_walk *walk)
{
- unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
- unsigned int n;
- u8 *in, *out;
- struct pcc_param pcc_param;
+ struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int offset, nbytes, n;
+ int ret;
+ struct {
+ u8 key[32];
+ u8 tweak[16];
+ u8 block[16];
+ u8 bit[16];
+ u8 xts[16];
+ } pcc_param;
struct {
u8 key[32];
u8 init[16];
} xts_param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
+ offset = xts_ctx->key_len & 0x10;
memset(pcc_param.block, 0, sizeof(pcc_param.block));
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
- memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
- /* remove decipher modifier bit from 'func' and call PCC */
- ret = cpacf_pcc(func & 0x7f, &pcc_param.key[offset]);
- if (ret < 0)
- return -EIO;
+ memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
+ cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
- memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
memcpy(xts_param.init, pcc_param.xts, 16);
- do {
+
+ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
-
- ret = cpacf_km(func, &xts_param.key[offset], out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= AES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
-out:
+ cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
+ walk->dst.virt.addr, walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
return ret;
}
@@ -681,11 +565,11 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(xts_ctx->key_len == 48))
+ if (unlikely(!xts_ctx->fc))
return xts_fallback_encrypt(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
+ return xts_aes_crypt(desc, 0, &walk);
}
static int xts_aes_decrypt(struct blkcipher_desc *desc,
@@ -695,11 +579,11 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- if (unlikely(xts_ctx->key_len == 48))
+ if (unlikely(!xts_ctx->fc))
return xts_fallback_decrypt(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
+ return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static int xts_fallback_init(struct crypto_tfm *tfm)
@@ -750,108 +634,79 @@ static struct crypto_alg xts_aes_alg = {
}
};
-static int xts_aes_alg_reg;
-
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ unsigned long fc;
- switch (key_len) {
- case 16:
- sctx->enc = CPACF_KMCTR_AES_128_ENC;
- sctx->dec = CPACF_KMCTR_AES_128_DEC;
- break;
- case 24:
- sctx->enc = CPACF_KMCTR_AES_192_ENC;
- sctx->dec = CPACF_KMCTR_AES_192_DEC;
- break;
- case 32:
- sctx->enc = CPACF_KMCTR_AES_256_ENC;
- sctx->dec = CPACF_KMCTR_AES_256_DEC;
- break;
- }
+ /* Pick the correct function code based on the key length */
+ fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
+ (key_len == 24) ? CPACF_KMCTR_AES_192 :
+ (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
+
+ /* Check if the function code is available */
+ sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
+ if (!sctx->fc)
+ return setkey_fallback_blk(tfm, in_key, key_len);
- return aes_set_key(tfm, in_key, key_len);
+ sctx->key_len = key_len;
+ memcpy(sctx->key, in_key, key_len);
+ return 0;
}
-static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* only use complete blocks, max. PAGE_SIZE */
+ memcpy(ctrptr, iv, AES_BLOCK_SIZE);
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
+ ctrptr += AES_BLOCK_SIZE;
}
return n;
}
-static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
- struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
+static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
+ struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 buf[AES_BLOCK_SIZE], *ctrptr;
unsigned int n, nbytes;
- u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
- u8 *out, *in, *ctrptr = ctrbuf;
-
- if (!walk->nbytes)
- return ret;
+ int ret, locked;
- if (spin_trylock(&ctrblk_lock))
- ctrptr = ctrblk;
+ locked = spin_trylock(&ctrblk_lock);
- memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- while (nbytes >= AES_BLOCK_SIZE) {
- if (ctrptr == ctrblk)
- n = __ctrblk_init(ctrptr, nbytes);
- else
- n = AES_BLOCK_SIZE;
- ret = cpacf_kmctr(func, sctx->key, out, in, n, ctrptr);
- if (ret < 0 || ret != n) {
- if (ctrptr == ctrblk)
- spin_unlock(&ctrblk_lock);
- return -EIO;
- }
- if (n > AES_BLOCK_SIZE)
- memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrptr, AES_BLOCK_SIZE);
- out += n;
- in += n;
- nbytes -= n;
- }
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = AES_BLOCK_SIZE;
+ if (nbytes >= 2*AES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ cpacf_kmctr(sctx->fc | modifier, sctx->key,
+ walk->dst.virt.addr, walk->src.virt.addr,
+ n, ctrptr);
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
- if (ctrptr == ctrblk) {
- if (nbytes)
- memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
- else
- memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ if (locked)
spin_unlock(&ctrblk_lock);
- } else {
- if (!nbytes)
- memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
- }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- ret = cpacf_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrbuf);
- if (ret < 0 || ret != AES_BLOCK_SIZE)
- return -EIO;
- memcpy(out, buf, nbytes);
- crypto_inc(ctrbuf, AES_BLOCK_SIZE);
+ cpacf_kmctr(sctx->fc | modifier, sctx->key,
+ buf, walk->src.virt.addr,
+ AES_BLOCK_SIZE, walk->iv);
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
- memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
return ret;
@@ -864,8 +719,11 @@ static int ctr_aes_encrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(!sctx->fc))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
+ return ctr_aes_crypt(desc, 0, &walk);
}
static int ctr_aes_decrypt(struct blkcipher_desc *desc,
@@ -875,19 +733,25 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
+ if (unlikely(!sctx->fc))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
+ return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
.cra_priority = 400, /* combo: aes + ctr */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+ CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
+ .cra_init = fallback_init_blk,
+ .cra_exit = fallback_exit_blk,
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -900,89 +764,79 @@ static struct crypto_alg ctr_aes_alg = {
}
};
-static int ctr_aes_alg_reg;
+static struct crypto_alg *aes_s390_algs_ptr[5];
+static int aes_s390_algs_num;
-static int __init aes_s390_init(void)
+static int aes_s390_register_alg(struct crypto_alg *alg)
{
int ret;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_128_ENC))
- keylen_flag |= AES_KEYLEN_128;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_192_ENC))
- keylen_flag |= AES_KEYLEN_192;
- if (cpacf_query(CPACF_KM, CPACF_KM_AES_256_ENC))
- keylen_flag |= AES_KEYLEN_256;
+ ret = crypto_register_alg(alg);
+ if (!ret)
+ aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
+ return ret;
+}
- if (!keylen_flag)
- return -EOPNOTSUPP;
+static void aes_s390_fini(void)
+{
+ while (aes_s390_algs_num--)
+ crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
+}
- /* z9 109 and z9 BC/EC only support 128 bit key length */
- if (keylen_flag == AES_KEYLEN_128)
- pr_info("AES hardware acceleration is only available for"
- " 128-bit keys\n");
+static int __init aes_s390_init(void)
+{
+ int ret;
- ret = crypto_register_alg(&aes_alg);
- if (ret)
- goto aes_err;
+ /* Query available functions for KM, KMC and KMCTR */
+ cpacf_query(CPACF_KM, &km_functions);
+ cpacf_query(CPACF_KMC, &kmc_functions);
+ cpacf_query(CPACF_KMCTR, &kmctr_functions);
- ret = crypto_register_alg(&ecb_aes_alg);
- if (ret)
- goto ecb_aes_err;
+ if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
+ cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
+ ret = aes_s390_register_alg(&aes_alg);
+ if (ret)
+ goto out_err;
+ ret = aes_s390_register_alg(&ecb_aes_alg);
+ if (ret)
+ goto out_err;
+ }
- ret = crypto_register_alg(&cbc_aes_alg);
- if (ret)
- goto cbc_aes_err;
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
+ cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
+ ret = aes_s390_register_alg(&cbc_aes_alg);
+ if (ret)
+ goto out_err;
+ }
- if (cpacf_query(CPACF_KM, CPACF_KM_XTS_128_ENC) &&
- cpacf_query(CPACF_KM, CPACF_KM_XTS_256_ENC)) {
- ret = crypto_register_alg(&xts_aes_alg);
+ if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
+ cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
+ ret = aes_s390_register_alg(&xts_aes_alg);
if (ret)
- goto xts_aes_err;
- xts_aes_alg_reg = 1;
+ goto out_err;
}
- if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_128_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_192_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_AES_256_ENC)) {
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
- goto ctr_aes_err;
+ goto out_err;
}
- ret = crypto_register_alg(&ctr_aes_alg);
- if (ret) {
- free_page((unsigned long) ctrblk);
- goto ctr_aes_err;
- }
- ctr_aes_alg_reg = 1;
+ ret = aes_s390_register_alg(&ctr_aes_alg);
+ if (ret)
+ goto out_err;
}
-out:
+ return 0;
+out_err:
+ aes_s390_fini();
return ret;
-
-ctr_aes_err:
- crypto_unregister_alg(&xts_aes_alg);
-xts_aes_err:
- crypto_unregister_alg(&cbc_aes_alg);
-cbc_aes_err:
- crypto_unregister_alg(&ecb_aes_alg);
-ecb_aes_err:
- crypto_unregister_alg(&aes_alg);
-aes_err:
- goto out;
-}
-
-static void __exit aes_s390_fini(void)
-{
- if (ctr_aes_alg_reg) {
- crypto_unregister_alg(&ctr_aes_alg);
- free_page((unsigned long) ctrblk);
- }
- if (xts_aes_alg_reg)
- crypto_unregister_alg(&xts_aes_alg);
- crypto_unregister_alg(&cbc_aes_alg);
- crypto_unregister_alg(&ecb_aes_alg);
- crypto_unregister_alg(&aes_alg);
}
module_cpu_feature_match(MSA, aes_s390_init);
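Note on the rewrite above: the old code kept separate encrypt/decrypt function codes per key size (CPACF_KM_AES_128_ENC/_DEC, ...) and re-issued a cpacf_query() for every availability check; the new code queries each instruction's full status mask once at module init and afterwards only tests single function-code bits, deriving decryption by ORing in the CPACF_DECRYPT modifier. A minimal userspace sketch of that query/test pattern, with a mocked mask standing in for the inline-assembly helpers from asm/cpacf.h (the 0x12 and 0x80 values are mirrored from that header):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Mock of cpacf_mask_t: a 128-bit status word, one bit per function code. */
	typedef struct { unsigned char bytes[16]; } cpacf_mask_t;

	#define CPACF_KM_AES_128	0x12	/* base code, no ENC/DEC variants */
	#define CPACF_DECRYPT		0x80	/* modifier bit ORed in to decrypt */

	/* Stand-in for cpacf_query(): the real helper executes the instruction
	 * with the query function code to fill the mask; here we fake one bit. */
	static void mock_query(cpacf_mask_t *mask)
	{
		memset(mask, 0, sizeof(*mask));
		mask->bytes[CPACF_KM_AES_128 >> 3] |= 0x80 >> (CPACF_KM_AES_128 & 7);
	}

	/* Same bit test that the kernel's cpacf_test_func() performs. */
	static bool test_func(cpacf_mask_t *mask, unsigned char func)
	{
		return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
	}

	int main(void)
	{
		cpacf_mask_t km;

		mock_query(&km);			/* once, at init time */
		if (test_func(&km, CPACF_KM_AES_128))	/* per setkey, no insn */
			printf("fc=0x%x, decrypt fc=0x%x\n",
			       CPACF_KM_AES_128,
			       CPACF_KM_AES_128 | CPACF_DECRYPT);
		return 0;
	}

Because the mask is cached, the setkey paths above can pick a function code and fall back to the software implementation without touching the crypto facility at all.
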
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 2bad9d837029..992e630c227b 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -67,7 +67,7 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
\
kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW); \
crc = ___crc32_vx(crc, data, aligned); \
- kernel_fpu_end(&vxstate); \
+ kernel_fpu_end(&vxstate, KERNEL_VXR_LOW); \
\
if (remaining) \
crc = ___crc32_sw(crc, data + aligned, remaining); \
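The one-line change above tracks the reworked s390 FPU API (see arch/s390/kernel/fpu.c in the diffstat): kernel_fpu_end() now takes the same flags as kernel_fpu_begin(), so only the register set that was actually saved gets restored. A hedged usage sketch, assuming KERNEL_VXR_LOW covers vector registers V0..V15 as in asm/fpu/api.h:

	struct kernel_fpu vxstate;

	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);	/* save V0..V15 only */
	/* ... kernel code clobbering no more than V0..V15 ... */
	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);	/* restore the same set */
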
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 697e71a75fc2..8b83144206eb 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -27,6 +27,8 @@
static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
@@ -36,12 +38,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
u32 tmp[DES_EXPKEY_WORDS];
/* check for weak keys */
- if (!des_ekey(tmp, key) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if (!des_ekey(tmp, key) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
@@ -53,14 +55,15 @@ static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_DEA_ENC, ctx->key, out, in, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
}
static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_DEA_DEC, ctx->key, out, in, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT,
+ ctx->key, out, in, DES_BLOCK_SIZE);
}
static struct crypto_alg des_alg = {
@@ -82,61 +85,46 @@ static struct crypto_alg des_alg = {
}
};
-static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *key, struct blkcipher_walk *walk)
+static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
+ struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes;
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ unsigned int nbytes, n;
+ int ret;
- while ((nbytes = walk->nbytes)) {
+ ret = blkcipher_walk_virt(desc, walk);
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_km(func, key, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= DES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ cpacf_km(fc, ctx->key, walk->dst.virt.addr,
+ walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
-
return ret;
}
-static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
+static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
struct blkcipher_walk *walk)
{
struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- int ret = blkcipher_walk_virt(desc, walk);
- unsigned int nbytes = walk->nbytes;
+ unsigned int nbytes, n;
+ int ret;
struct {
u8 iv[DES_BLOCK_SIZE];
u8 key[DES3_KEY_SIZE];
} param;
- if (!nbytes)
- goto out;
-
+ ret = blkcipher_walk_virt(desc, walk);
memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
- do {
+ while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
/* only use complete blocks */
- unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
- u8 *out = walk->dst.virt.addr;
- u8 *in = walk->src.virt.addr;
-
- ret = cpacf_kmc(func, &param, out, in, n);
- if (ret < 0 || ret != n)
- return -EIO;
-
- nbytes &= DES_BLOCK_SIZE - 1;
- ret = blkcipher_walk_done(desc, walk, nbytes);
- } while ((nbytes = walk->nbytes));
+ n = nbytes & ~(DES_BLOCK_SIZE - 1);
+ cpacf_kmc(fc, &param, walk->dst.virt.addr,
+ walk->src.virt.addr, n);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ }
memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
-
-out:
return ret;
}
@@ -144,22 +132,20 @@ static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA_ENC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA_DEC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg ecb_des_alg = {
@@ -189,7 +175,7 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA_ENC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
@@ -199,7 +185,7 @@ static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA_DEC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -240,13 +226,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 *flags = &tfm->crt_flags;
if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE)) &&
- (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
memcpy(ctx->key, key, key_len);
@@ -257,14 +242,15 @@ static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_TDEA_192_ENC, ctx->key, dst, src, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE);
}
static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- cpacf_km(CPACF_KM_TDEA_192_DEC, ctx->key, dst, src, DES_BLOCK_SIZE);
+ cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT,
+ ctx->key, dst, src, DES_BLOCK_SIZE);
}
static struct crypto_alg des3_alg = {
@@ -290,22 +276,21 @@ static int ecb_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_ENC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk);
}
static int ecb_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192_DEC, ctx->key, &walk);
+ return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg ecb_des3_alg = {
@@ -335,7 +320,7 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_ENC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
@@ -345,7 +330,8 @@ static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192_DEC, &walk);
+ return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -369,81 +355,54 @@ static struct crypto_alg cbc_des3_alg = {
}
};
-static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
unsigned int i, n;
/* align to block size, max. PAGE_SIZE */
n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
- crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ memcpy(ctrptr, iv, DES_BLOCK_SIZE);
+ for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
+ memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ ctrptr += DES_BLOCK_SIZE;
}
return n;
}
-static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx,
+static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
struct blkcipher_walk *walk)
{
- int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ u8 buf[DES_BLOCK_SIZE], *ctrptr;
unsigned int n, nbytes;
- u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
- u8 *out, *in, *ctrptr = ctrbuf;
-
- if (!walk->nbytes)
- return ret;
+ int ret, locked;
- if (spin_trylock(&ctrblk_lock))
- ctrptr = ctrblk;
+ locked = spin_trylock(&ctrblk_lock);
- memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- while (nbytes >= DES_BLOCK_SIZE) {
- if (ctrptr == ctrblk)
- n = __ctrblk_init(ctrptr, nbytes);
- else
- n = DES_BLOCK_SIZE;
- ret = cpacf_kmctr(func, ctx->key, out, in, n, ctrptr);
- if (ret < 0 || ret != n) {
- if (ctrptr == ctrblk)
- spin_unlock(&ctrblk_lock);
- return -EIO;
- }
- if (n > DES_BLOCK_SIZE)
- memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrptr, DES_BLOCK_SIZE);
- out += n;
- in += n;
- nbytes -= n;
- }
- ret = blkcipher_walk_done(desc, walk, nbytes);
+ n = DES_BLOCK_SIZE;
+ if (nbytes >= 2*DES_BLOCK_SIZE && locked)
+ n = __ctrblk_init(ctrblk, walk->iv, nbytes);
+ ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
+ cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
+ walk->src.virt.addr, n, ctrptr);
+ if (ctrptr == ctrblk)
+ memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
+ DES_BLOCK_SIZE);
+ crypto_inc(walk->iv, DES_BLOCK_SIZE);
+ ret = blkcipher_walk_done(desc, walk, nbytes - n);
}
- if (ctrptr == ctrblk) {
- if (nbytes)
- memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
- else
- memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ if (locked)
spin_unlock(&ctrblk_lock);
- } else {
- if (!nbytes)
- memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
- }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
- out = walk->dst.virt.addr;
- in = walk->src.virt.addr;
- ret = cpacf_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrbuf);
- if (ret < 0 || ret != DES_BLOCK_SIZE)
- return -EIO;
- memcpy(out, buf, nbytes);
- crypto_inc(ctrbuf, DES_BLOCK_SIZE);
+ cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
+ DES_BLOCK_SIZE, walk->iv);
+ memcpy(walk->dst.virt.addr, buf, nbytes);
+ crypto_inc(walk->iv, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
- memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
return ret;
}
@@ -452,22 +411,20 @@ static int ctr_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_ENC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk);
}
static int ctr_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA_DEC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk);
}
static struct crypto_alg ctr_des_alg = {
@@ -495,22 +452,21 @@ static int ctr_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_ENC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk);
}
static int ctr_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192_DEC, ctx, &walk);
+ return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT,
+ &walk);
}
static struct crypto_alg ctr_des3_alg = {
@@ -534,83 +490,87 @@ static struct crypto_alg ctr_des3_alg = {
}
};
+static struct crypto_alg *des_s390_algs_ptr[8];
+static int des_s390_algs_num;
+
+static int des_s390_register_alg(struct crypto_alg *alg)
+{
+ int ret;
+
+ ret = crypto_register_alg(alg);
+ if (!ret)
+ des_s390_algs_ptr[des_s390_algs_num++] = alg;
+ return ret;
+}
+
+static void des_s390_exit(void)
+{
+ while (des_s390_algs_num--)
+ crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
+}
+
static int __init des_s390_init(void)
{
int ret;
- if (!cpacf_query(CPACF_KM, CPACF_KM_DEA_ENC) ||
- !cpacf_query(CPACF_KM, CPACF_KM_TDEA_192_ENC))
- return -EOPNOTSUPP;
-
- ret = crypto_register_alg(&des_alg);
- if (ret)
- goto des_err;
- ret = crypto_register_alg(&ecb_des_alg);
- if (ret)
- goto ecb_des_err;
- ret = crypto_register_alg(&cbc_des_alg);
- if (ret)
- goto cbc_des_err;
- ret = crypto_register_alg(&des3_alg);
- if (ret)
- goto des3_err;
- ret = crypto_register_alg(&ecb_des3_alg);
- if (ret)
- goto ecb_des3_err;
- ret = crypto_register_alg(&cbc_des3_alg);
- if (ret)
- goto cbc_des3_err;
-
- if (cpacf_query(CPACF_KMCTR, CPACF_KMCTR_DEA_ENC) &&
- cpacf_query(CPACF_KMCTR, CPACF_KMCTR_TDEA_192_ENC)) {
- ret = crypto_register_alg(&ctr_des_alg);
+ /* Query available functions for KM, KMC and KMCTR */
+ cpacf_query(CPACF_KM, &km_functions);
+ cpacf_query(CPACF_KMC, &kmc_functions);
+ cpacf_query(CPACF_KMCTR, &kmctr_functions);
+
+ if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
+ ret = des_s390_register_alg(&des_alg);
+ if (ret)
+ goto out_err;
+ ret = des_s390_register_alg(&ecb_des_alg);
if (ret)
- goto ctr_des_err;
- ret = crypto_register_alg(&ctr_des3_alg);
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
+ ret = des_s390_register_alg(&cbc_des_alg);
if (ret)
- goto ctr_des3_err;
+ goto out_err;
+ }
+ if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
+ ret = des_s390_register_alg(&des3_alg);
+ if (ret)
+ goto out_err;
+ ret = des_s390_register_alg(&ecb_des3_alg);
+ if (ret)
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
+ ret = des_s390_register_alg(&cbc_des3_alg);
+ if (ret)
+ goto out_err;
+ }
+
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
+ cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
- goto ctr_mem_err;
+ goto out_err;
}
}
-out:
- return ret;
-ctr_mem_err:
- crypto_unregister_alg(&ctr_des3_alg);
-ctr_des3_err:
- crypto_unregister_alg(&ctr_des_alg);
-ctr_des_err:
- crypto_unregister_alg(&cbc_des3_alg);
-cbc_des3_err:
- crypto_unregister_alg(&ecb_des3_alg);
-ecb_des3_err:
- crypto_unregister_alg(&des3_alg);
-des3_err:
- crypto_unregister_alg(&cbc_des_alg);
-cbc_des_err:
- crypto_unregister_alg(&ecb_des_alg);
-ecb_des_err:
- crypto_unregister_alg(&des_alg);
-des_err:
- goto out;
-}
-
-static void __exit des_s390_exit(void)
-{
- if (ctrblk) {
- crypto_unregister_alg(&ctr_des_alg);
- crypto_unregister_alg(&ctr_des3_alg);
- free_page((unsigned long) ctrblk);
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
+ ret = des_s390_register_alg(&ctr_des_alg);
+ if (ret)
+ goto out_err;
+ }
+ if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
+ ret = des_s390_register_alg(&ctr_des3_alg);
+ if (ret)
+ goto out_err;
}
- crypto_unregister_alg(&cbc_des3_alg);
- crypto_unregister_alg(&ecb_des3_alg);
- crypto_unregister_alg(&des3_alg);
- crypto_unregister_alg(&cbc_des_alg);
- crypto_unregister_alg(&ecb_des_alg);
- crypto_unregister_alg(&des_alg);
+
+ return 0;
+out_err:
+ des_s390_exit();
+ return ret;
}
module_cpu_feature_match(MSA, des_s390_init);
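In both the AES and DES CTR paths, __ctrblk_init() now receives the IV explicitly and pre-fills up to a page of counter blocks by copying the previous block and incrementing it, so a single cpacf_kmctr() call can process many blocks per iteration. A standalone sketch of that fill loop, using an 8-byte block and a plain big-endian increment in place of crypto_inc():

	#include <stdio.h>
	#include <string.h>

	#define BLOCK_SIZE 8	/* DES_BLOCK_SIZE; the AES variant uses 16 */

	/* Equivalent of crypto_inc(): big-endian increment with carry. */
	static void block_inc(unsigned char *b, unsigned int size)
	{
		while (size--)
			if (++b[size] != 0)
				break;
	}

	/* Mirror of __ctrblk_init(): copy the IV into the buffer, then append
	 * incremented copies until nbytes (capped at the buffer) is covered. */
	static unsigned int ctrblk_init(unsigned char *ctrptr, unsigned char *iv,
					unsigned int nbytes, unsigned int bufsize)
	{
		unsigned int i, n;

		memcpy(ctrptr, iv, BLOCK_SIZE);
		n = (nbytes > bufsize) ? bufsize : nbytes & ~(BLOCK_SIZE - 1);
		for (i = (n / BLOCK_SIZE) - 1; i > 0; i--) {
			memcpy(ctrptr + BLOCK_SIZE, ctrptr, BLOCK_SIZE);
			block_inc(ctrptr + BLOCK_SIZE, BLOCK_SIZE);
			ctrptr += BLOCK_SIZE;
		}
		return n;
	}

	int main(void)
	{
		unsigned char iv[BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0xff };
		unsigned char buf[4 * BLOCK_SIZE];
		unsigned int n = ctrblk_init(buf, iv, sizeof(buf), sizeof(buf));

		/* counters wrap past the 0xff byte: ..00ff, ..0100, ..0101, ..0102 */
		printf("%u bytes filled, last counter ends %02x %02x\n",
		       n, buf[3 * BLOCK_SIZE + 6], buf[3 * BLOCK_SIZE + 7]);
		return 0;
	}

The callers pair this with spin_trylock(&ctrblk_lock): when the shared counter page is contended, they simply fall back to one block per iteration using walk->iv directly, instead of blocking as the old code effectively did.
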
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index ab68de72e795..564616d48d8b 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -58,7 +58,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int n;
u8 *buf = dctx->buffer;
- int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -71,18 +70,14 @@ static int ghash_update(struct shash_desc *desc,
src += n;
if (!dctx->bytes) {
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
- GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf,
+ GHASH_BLOCK_SIZE);
}
}
n = srclen & ~(GHASH_BLOCK_SIZE - 1);
if (n) {
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
- if (ret != n)
- return -EIO;
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, src, n);
src += n;
srclen -= n;
}
@@ -98,17 +93,12 @@ static int ghash_update(struct shash_desc *desc,
static int ghash_flush(struct ghash_desc_ctx *dctx)
{
u8 *buf = dctx->buffer;
- int ret;
if (dctx->bytes) {
u8 *pos = buf + (GHASH_BLOCK_SIZE - dctx->bytes);
memset(pos, 0, dctx->bytes);
-
- ret = cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
- if (ret != GHASH_BLOCK_SIZE)
- return -EIO;
-
+ cpacf_kimd(CPACF_KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
dctx->bytes = 0;
}
@@ -146,7 +136,7 @@ static struct shash_alg ghash_alg = {
static int __init ghash_mod_init(void)
{
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_GHASH))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_GHASH))
return -EOPNOTSUPP;
return crypto_register_shash(&ghash_alg);
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 41527b113f5a..9cc050f9536c 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -135,12 +135,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
else
h = ebuf;
/* generate sha256 from this page */
- if (cpacf_kimd(CPACF_KIMD_SHA_256, h,
- pg, PAGE_SIZE) != PAGE_SIZE) {
- prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
- ret = -EIO;
- goto out;
- }
+ cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@@ -148,7 +143,6 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
nbytes -= n;
}
-out:
free_page((unsigned long)pg);
return ret;
}
@@ -160,13 +154,11 @@ static void prng_tdes_add_entropy(void)
{
__u64 entropy[4];
unsigned int i;
- int ret;
for (i = 0; i < 16; i++) {
- ret = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
- (char *)entropy, (char *)entropy,
- sizeof(entropy));
- BUG_ON(ret < 0 || ret != sizeof(entropy));
+ cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+ (char *) entropy, (char *) entropy,
+ sizeof(entropy));
memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
}
@@ -303,21 +295,14 @@ static int __init prng_sha512_selftest(void)
0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
- int ret = 0;
u8 buf[sizeof(random)];
struct ppno_ws_s ws;
memset(&ws, 0, sizeof(ws));
/* initial seed */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED, &ws, NULL, 0,
- seed, sizeof(seed));
- if (ret < 0) {
- pr_err("The prng self test seed operation for the "
- "SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &ws, NULL, 0, seed, sizeof(seed));
/* check working states V and C */
if (memcmp(ws.V, V0, sizeof(V0)) != 0
@@ -329,22 +314,10 @@ static int __init prng_sha512_selftest(void)
}
/* generate random bytes */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &ws, buf, sizeof(buf), NULL, 0);
- if (ret < 0) {
- pr_err("The prng self test generate operation for "
- "the SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &ws, buf, sizeof(buf), NULL, 0);
- if (ret < 0) {
- pr_err("The prng self test generate operation for "
- "the SHA-512 mode failed with rc=%d\n", ret);
- prng_errorflag = PRNG_SELFTEST_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf), NULL, 0);
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf), NULL, 0);
/* check against expected data */
if (memcmp(buf, random, sizeof(random)) != 0) {
@@ -392,26 +365,16 @@ static int __init prng_sha512_instantiate(void)
get_tod_clock_ext(seed + 48);
/* initial seed of the ppno drng */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
- &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
- if (ret < 0) {
- prng_errorflag = PRNG_SEED_FAILED;
- ret = -EIO;
- goto outfree;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
/* if fips mode is enabled, generate a first block of random
bytes for the FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
prng_data->prev = prng_data->buf + prng_chunk_size;
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &prng_data->ppnows,
- prng_data->prev, prng_chunk_size, NULL, 0);
- if (ret < 0 || ret != prng_chunk_size) {
- prng_errorflag = PRNG_GEN_FAILED;
- ret = -EIO;
- goto outfree;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows,
+ prng_data->prev, prng_chunk_size, NULL, 0);
}
return 0;
@@ -440,12 +403,8 @@ static int prng_sha512_reseed(void)
return ret;
/* do a reseed of the ppno drng with this bytestring */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
- &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
- if (ret) {
- prng_errorflag = PRNG_RESEED_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0, seed, sizeof(seed));
return 0;
}
@@ -463,12 +422,8 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
}
/* PPNO generate */
- ret = cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
- &prng_data->ppnows, buf, nbytes, NULL, 0);
- if (ret < 0 || ret != nbytes) {
- prng_errorflag = PRNG_GEN_FAILED;
- return -EIO;
- }
+ cpacf_ppno(CPACF_PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows, buf, nbytes, NULL, 0);
/* FIPS 140-2 Conditional Self Test */
if (fips_enabled) {
@@ -479,7 +434,7 @@ static int prng_sha512_generate(u8 *buf, size_t nbytes)
memcpy(prng_data->prev, buf, nbytes);
}
- return ret;
+ return nbytes;
}
@@ -494,7 +449,7 @@ static int prng_open(struct inode *inode, struct file *file)
static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
size_t nbytes, loff_t *ppos)
{
- int chunk, n, tmp, ret = 0;
+ int chunk, n, ret = 0;
/* lock prng_data struct */
if (mutex_lock_interruptible(&prng_data->mutex))
@@ -545,13 +500,9 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
*
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
- */
- tmp = cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
- prng_data->buf, prng_data->buf, n);
- if (tmp < 0 || tmp != n) {
- ret = -EIO;
- break;
- }
+ */
+ cpacf_kmc(CPACF_KMC_PRNG, prng_data->prngws.parm_block,
+ prng_data->buf, prng_data->buf, n);
prng_data->prngws.byte_counter += n;
prng_data->prngws.reseed_counter += n;
@@ -806,13 +757,13 @@ static int __init prng_init(void)
int ret;
/* check if the CPU has a PRNG */
- if (!cpacf_query(CPACF_KMC, CPACF_KMC_PRNG))
+ if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PPNO operations */
- if (!cpacf_query(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
+ if (!cpacf_query_func(CPACF_PPNO, CPACF_PPNO_SHA512_DRNG_GEN)) {
if (prng_mode == PRNG_MODE_SHA512) {
pr_err("The prng module cannot "
"start in SHA-512 mode\n");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5fbf91bbb478..c7de53d8da75 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -91,7 +91,7 @@ static struct shash_alg alg = {
static int __init sha1_s390_init(void)
{
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_1))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_1))
return -EOPNOTSUPP;
return crypto_register_shash(&alg);
}
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 10aac0b11988..53c277999a28 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -123,7 +123,7 @@ static int __init sha256_s390_init(void)
{
int ret;
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_256))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_256))
return -EOPNOTSUPP;
ret = crypto_register_shash(&sha256_alg);
if (ret < 0)
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index ea85757be407..2f4caa1ef123 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -133,7 +133,7 @@ static int __init init(void)
{
int ret;
- if (!cpacf_query(CPACF_KIMD, CPACF_KIMD_SHA_512))
+ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA_512))
return -EOPNOTSUPP;
if ((ret = crypto_register_shash(&sha512_alg)) < 0)
goto out;
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 8e908166c3ee..c740f77285b2 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -22,8 +22,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
- unsigned int index;
- int ret;
+ unsigned int index, n;
/* how much is already in the buffer? */
index = ctx->count & (bsize - 1);
@@ -35,9 +34,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
- ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
- if (ret != bsize)
- return -EIO;
+ cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
data += bsize - index;
len -= bsize - index;
index = 0;
@@ -45,12 +42,10 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
/* process as many blocks as possible */
if (len >= bsize) {
- ret = cpacf_kimd(ctx->func, ctx->state, data,
- len & ~(bsize - 1));
- if (ret != (len & ~(bsize - 1)))
- return -EIO;
- data += ret;
- len -= ret;
+ n = len & ~(bsize - 1);
+ cpacf_kimd(ctx->func, ctx->state, data, n);
+ data += n;
+ len -= n;
}
store:
if (len)
@@ -66,7 +61,6 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
unsigned int index, end, plen;
- int ret;
/* SHA-512 uses 128 bit padding length */
plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
@@ -88,10 +82,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
*/
bits = ctx->count * 8;
memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
-
- ret = cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
- if (ret != end)
- return -EIO;
+ cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
/* copy digest to out */
memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index d28621de8e0b..2c680db7e5c1 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,67 +28,51 @@
#define CPACF_PPNO 0xb93c /* MSA5 */
/*
- * Function codes for the KM (CIPHER MESSAGE)
- * instruction (0x80 is the decipher modifier bit)
+ * Decryption modifier bit
+ */
+#define CPACF_DECRYPT 0x80
+
+/*
+ * Function codes for the KM (CIPHER MESSAGE) instruction
*/
#define CPACF_KM_QUERY 0x00
-#define CPACF_KM_DEA_ENC 0x01
-#define CPACF_KM_DEA_DEC 0x81
-#define CPACF_KM_TDEA_128_ENC 0x02
-#define CPACF_KM_TDEA_128_DEC 0x82
-#define CPACF_KM_TDEA_192_ENC 0x03
-#define CPACF_KM_TDEA_192_DEC 0x83
-#define CPACF_KM_AES_128_ENC 0x12
-#define CPACF_KM_AES_128_DEC 0x92
-#define CPACF_KM_AES_192_ENC 0x13
-#define CPACF_KM_AES_192_DEC 0x93
-#define CPACF_KM_AES_256_ENC 0x14
-#define CPACF_KM_AES_256_DEC 0x94
-#define CPACF_KM_XTS_128_ENC 0x32
-#define CPACF_KM_XTS_128_DEC 0xb2
-#define CPACF_KM_XTS_256_ENC 0x34
-#define CPACF_KM_XTS_256_DEC 0xb4
+#define CPACF_KM_DEA 0x01
+#define CPACF_KM_TDEA_128 0x02
+#define CPACF_KM_TDEA_192 0x03
+#define CPACF_KM_AES_128 0x12
+#define CPACF_KM_AES_192 0x13
+#define CPACF_KM_AES_256 0x14
+#define CPACF_KM_XTS_128 0x32
+#define CPACF_KM_XTS_256 0x34
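
With the per-direction defines gone, decryption is requested by OR-ing CPACF_DECRYPT into the cipher function code. A minimal sketch of the resulting call pattern (illustrative only; the converted crypto call sites are not part of this excerpt, and the parameter-block pointer is a placeholder):

	/* Sketch: AES-128 decryption with the consolidated function codes */
	cpacf_km(CPACF_KM_AES_128 | CPACF_DECRYPT, param_block,
		 out, in, nbytes);
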
/*
* Function codes for the KMC (CIPHER MESSAGE WITH CHAINING)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KMC_QUERY 0x00
-#define CPACF_KMC_DEA_ENC 0x01
-#define CPACF_KMC_DEA_DEC 0x81
-#define CPACF_KMC_TDEA_128_ENC 0x02
-#define CPACF_KMC_TDEA_128_DEC 0x82
-#define CPACF_KMC_TDEA_192_ENC 0x03
-#define CPACF_KMC_TDEA_192_DEC 0x83
-#define CPACF_KMC_AES_128_ENC 0x12
-#define CPACF_KMC_AES_128_DEC 0x92
-#define CPACF_KMC_AES_192_ENC 0x13
-#define CPACF_KMC_AES_192_DEC 0x93
-#define CPACF_KMC_AES_256_ENC 0x14
-#define CPACF_KMC_AES_256_DEC 0x94
+#define CPACF_KMC_DEA 0x01
+#define CPACF_KMC_TDEA_128 0x02
+#define CPACF_KMC_TDEA_192 0x03
+#define CPACF_KMC_AES_128 0x12
+#define CPACF_KMC_AES_192 0x13
+#define CPACF_KMC_AES_256 0x14
#define CPACF_KMC_PRNG 0x43
/*
* Function codes for the KMCTR (CIPHER MESSAGE WITH COUNTER)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
-#define CPACF_KMCTR_QUERY 0x00
-#define CPACF_KMCTR_DEA_ENC 0x01
-#define CPACF_KMCTR_DEA_DEC 0x81
-#define CPACF_KMCTR_TDEA_128_ENC 0x02
-#define CPACF_KMCTR_TDEA_128_DEC 0x82
-#define CPACF_KMCTR_TDEA_192_ENC 0x03
-#define CPACF_KMCTR_TDEA_192_DEC 0x83
-#define CPACF_KMCTR_AES_128_ENC 0x12
-#define CPACF_KMCTR_AES_128_DEC 0x92
-#define CPACF_KMCTR_AES_192_ENC 0x13
-#define CPACF_KMCTR_AES_192_DEC 0x93
-#define CPACF_KMCTR_AES_256_ENC 0x14
-#define CPACF_KMCTR_AES_256_DEC 0x94
+#define CPACF_KMCTR_QUERY 0x00
+#define CPACF_KMCTR_DEA 0x01
+#define CPACF_KMCTR_TDEA_128 0x02
+#define CPACF_KMCTR_TDEA_192 0x03
+#define CPACF_KMCTR_AES_128 0x12
+#define CPACF_KMCTR_AES_192 0x13
+#define CPACF_KMCTR_AES_256 0x14
/*
* Function codes for the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KIMD_QUERY 0x00
#define CPACF_KIMD_SHA_1 0x01
@@ -98,7 +82,7 @@
/*
* Function codes for the KLMD (COMPUTE LAST MESSAGE DIGEST)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KLMD_QUERY 0x00
#define CPACF_KLMD_SHA_1 0x01
@@ -107,7 +91,7 @@
/*
* function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_KMAC_QUERY 0x00
#define CPACF_KMAC_DEA 0x01
@@ -116,12 +100,14 @@
/*
* Function codes for the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
- * instruction (0x80 is the decipher modifier bit)
+ * instruction
*/
#define CPACF_PPNO_QUERY 0x00
#define CPACF_PPNO_SHA512_DRNG_GEN 0x03
#define CPACF_PPNO_SHA512_DRNG_SEED 0x83
+typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
+
/**
* cpacf_query() - check if a specific CPACF function is available
* @opcode: the opcode of the crypto instruction
@@ -132,55 +118,66 @@
*
* Returns 1 if @func is available for @opcode, 0 otherwise
*/
-static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
+static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
- typedef struct { unsigned char _[16]; } status_type;
register unsigned long r0 asm("0") = 0; /* query function */
- register unsigned long r1 asm("1") = (unsigned long) status;
+ register unsigned long r1 asm("1") = (unsigned long) mask;
asm volatile(
" spm 0\n" /* pckmo doesn't change the cc */
/* Parameter registers are ignored, but may not be 0 */
"0: .insn rrf,%[opc] << 16,2,2,2,0\n"
" brc 1,0b\n" /* handle partial completion */
- : "=m" (*(status_type *) status)
+ : "=m" (*mask)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
: "cc");
}
-static inline int cpacf_query(unsigned int opcode, unsigned int func)
+static inline int __cpacf_check_opcode(unsigned int opcode)
{
- unsigned char status[16];
-
switch (opcode) {
case CPACF_KMAC:
case CPACF_KM:
case CPACF_KMC:
case CPACF_KIMD:
case CPACF_KLMD:
- if (!test_facility(17)) /* check for MSA */
- return 0;
- break;
+ return test_facility(17); /* check for MSA */
case CPACF_PCKMO:
- if (!test_facility(76)) /* check for MSA3 */
- return 0;
- break;
+ return test_facility(76); /* check for MSA3 */
case CPACF_KMF:
case CPACF_KMO:
case CPACF_PCC:
case CPACF_KMCTR:
- if (!test_facility(77)) /* check for MSA4 */
- return 0;
- break;
+ return test_facility(77); /* check for MSA4 */
case CPACF_PPNO:
- if (!test_facility(57)) /* check for MSA5 */
- return 0;
- break;
+ return test_facility(57); /* check for MSA5 */
default:
BUG();
}
- __cpacf_query(opcode, status);
- return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+{
+ if (__cpacf_check_opcode(opcode)) {
+ __cpacf_query(opcode, mask);
+ return 1;
+ }
+ memset(mask, 0, sizeof(*mask));
+ return 0;
+}
+
+static inline int cpacf_test_func(cpacf_mask_t *mask, unsigned int func)
+{
+ return (mask->bytes[func >> 3] & (0x80 >> (func & 7))) != 0;
+}
+
+static inline int cpacf_query_func(unsigned int opcode, unsigned int func)
+{
+ cpacf_mask_t mask;
+
+ if (cpacf_query(opcode, &mask))
+ return cpacf_test_func(&mask, func);
+ return 0;
}
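
The query interface is now split so a driver can fetch the facility mask once and test several function codes against it. A hedged sketch of the intended usage (the function name and logic below are illustrative, not from the patch):

	static int example_km_aes_available(void)
	{
		cpacf_mask_t mask;

		if (!cpacf_query(CPACF_KM, &mask))
			return 0;	/* no MSA; mask has been zeroed */
		return cpacf_test_func(&mask, CPACF_KM_AES_128) &&
		       cpacf_test_func(&mask, CPACF_KM_AES_256);
	}

For a single function code, cpacf_query_func() remains the one-shot convenience wrapper, which is what the converted *_init() callers above use.
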
/**
@@ -194,7 +191,7 @@ static inline int cpacf_query(unsigned int opcode, unsigned int func)
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_km(long func, void *param,
+static inline int cpacf_km(unsigned long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -224,7 +221,7 @@ static inline int cpacf_km(long func, void *param,
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_kmc(long func, void *param,
+static inline int cpacf_kmc(unsigned long func, void *param,
u8 *dest, const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -250,11 +247,9 @@ static inline int cpacf_kmc(long func, void *param,
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
- *
- * Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kimd(long func, void *param,
- const u8 *src, long src_len)
+static inline void cpacf_kimd(unsigned long func, void *param,
+ const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -267,8 +262,6 @@ static inline int cpacf_kimd(long func, void *param,
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
: "cc", "memory");
-
- return src_len - r3;
}
/**
@@ -277,11 +270,9 @@ static inline int cpacf_kimd(long func, void *param,
* @param: address of parameter block; see POP for details on each func
* @src: address of source memory area
* @src_len: length of src operand in bytes
- *
- * Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_klmd(long func, void *param,
- const u8 *src, long src_len)
+static inline void cpacf_klmd(unsigned long func, void *param,
+ const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -294,8 +285,6 @@ static inline int cpacf_klmd(long func, void *param,
: [src] "+a" (r2), [len] "+d" (r3)
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
: "cc", "memory");
-
- return src_len - r3;
}
/**
@@ -308,7 +297,7 @@ static inline int cpacf_klmd(long func, void *param,
*
* Returns 0 for the query func, number of processed bytes for digest funcs
*/
-static inline int cpacf_kmac(long func, void *param,
+static inline int cpacf_kmac(unsigned long func, void *param,
const u8 *src, long src_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -338,7 +327,7 @@ static inline int cpacf_kmac(long func, void *param,
* Returns 0 for the query func, number of processed bytes for
* encryption/decryption funcs
*/
-static inline int cpacf_kmctr(long func, void *param, u8 *dest,
+static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
const u8 *src, long src_len, u8 *counter)
{
register unsigned long r0 asm("0") = (unsigned long) func;
@@ -368,13 +357,10 @@ static inline int cpacf_kmctr(long func, void *param, u8 *dest,
* @dest_len: size of destination memory area in bytes
* @seed: address of seed data
* @seed_len: size of seed data in bytes
- *
- * Returns 0 for the query func, number of random bytes stored in
- * dest buffer for generate function
*/
-static inline int cpacf_ppno(long func, void *param,
- u8 *dest, long dest_len,
- const u8 *seed, long seed_len)
+static inline void cpacf_ppno(unsigned long func, void *param,
+ u8 *dest, long dest_len,
+ const u8 *seed, long seed_len)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -390,8 +376,6 @@ static inline int cpacf_ppno(long func, void *param,
: [fc] "d" (r0), [pba] "a" (r1),
[seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PPNO)
: "cc", "memory");
-
- return dest_len - r3;
}
/**
@@ -399,10 +383,8 @@ static inline int cpacf_ppno(long func, void *param,
* instruction
* @func: the function code passed to PCC; see CPACF_KM_xxx defines
* @param: address of parameter block; see POP for details on each func
- *
- * Returns 0.
*/
-static inline int cpacf_pcc(long func, void *param)
+static inline void cpacf_pcc(unsigned long func, void *param)
{
register unsigned long r0 asm("0") = (unsigned long) func;
register unsigned long r1 asm("1") = (unsigned long) param;
@@ -413,8 +395,6 @@ static inline int cpacf_pcc(long func, void *param)
:
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
: "cc", "memory");
-
- return 0;
}
#endif /* _ASM_S390_CPACF_H */
diff --git a/arch/s390/include/asm/facilities_src.h b/arch/s390/include/asm/facilities_src.h
index 4917728e5828..3b758f66e48b 100644
--- a/arch/s390/include/asm/facilities_src.h
+++ b/arch/s390/include/asm/facilities_src.h
@@ -55,4 +55,28 @@ static struct facility_def facility_defs[] = {
-1 /* END */
}
},
+ {
+ .name = "FACILITIES_KVM",
+ .bits = (int[]){
+ 0, /* N3 instructions */
+ 1, /* z/Arch mode installed */
+ 2, /* z/Arch mode active */
+ 3, /* DAT-enhancement */
+ 4, /* idte segment table */
+ 5, /* idte region table */
+ 6, /* ASN-and-LX reuse */
+ 7, /* stfle */
+ 8, /* enhanced-DAT 1 */
+ 9, /* sense-running-status */
+ 10, /* conditional sske */
+ 13, /* ipte-range */
+ 14, /* nonquiescing key-setting */
+ 73, /* transactional execution */
+ 75, /* access-exception-fetch/store indication */
+ 76, /* msa extension 3 */
+ 77, /* msa extension 4 */
+ 78, /* enhanced-DAT 2 */
+ -1 /* END */
+ }
+ },
};
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 6aba6fc406ad..02124d66bfb5 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -64,18 +64,18 @@ static inline int test_fp_ctl(u32 fpc)
return rc;
}
-#define KERNEL_VXR_V0V7 1
-#define KERNEL_VXR_V8V15 2
-#define KERNEL_VXR_V16V23 4
-#define KERNEL_VXR_V24V31 8
-#define KERNEL_FPR 16
-#define KERNEL_FPC 256
+#define KERNEL_FPC 1
+#define KERNEL_VXR_V0V7 2
+#define KERNEL_VXR_V8V15 4
+#define KERNEL_VXR_V16V23 8
+#define KERNEL_VXR_V24V31 16
#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
-#define KERNEL_FPU_MASK (KERNEL_VXR_LOW|KERNEL_VXR_HIGH|KERNEL_FPR)
+#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
+#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
struct kernel_fpu;
@@ -87,18 +87,28 @@ struct kernel_fpu;
* Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
*/
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
-void __kernel_fpu_end(struct kernel_fpu *state);
+void __kernel_fpu_end(struct kernel_fpu *state, u32 flags);
static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
preempt_disable();
- __kernel_fpu_begin(state, flags);
+ state->mask = S390_lowcore.fpu_flags;
+ if (!test_cpu_flag(CIF_FPU))
+ /* Save user space FPU state and register contents */
+ save_fpu_regs();
+ else if (state->mask & flags)
+ /* Save FPU/vector register in-use by the kernel */
+ __kernel_fpu_begin(state, flags);
+ S390_lowcore.fpu_flags |= flags;
}
-static inline void kernel_fpu_end(struct kernel_fpu *state)
+static inline void kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
- __kernel_fpu_end(state);
+ S390_lowcore.fpu_flags = state->mask;
+ if (state->mask & flags)
+ /* Restore FPU/vector register in-use by the kernel */
+ __kernel_fpu_end(state, flags);
preempt_enable();
}
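
Both halves of the pair now take the flag set, so a caller names the register ranges it is about to clobber in begin and end alike. A minimal sketch under that assumption (the function is illustrative):

	static void example_vx_section(void)
	{
		struct kernel_fpu state;

		kernel_fpu_begin(&state, KERNEL_FPC | KERNEL_VXR_LOW);
		/* ... clobber only the FPC and %v0..%v15 here ... */
		kernel_fpu_end(&state, KERNEL_FPC | KERNEL_VXR_LOW);
	}
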
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 8e5daf7a76ce..a41faf34b034 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -28,7 +28,7 @@
#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
-#define KVM_MAX_VCPUS KVM_S390_ESCA_CPU_SLOTS
+#define KVM_MAX_VCPUS 255
#define KVM_USER_MEM_SLOTS 32
/*
@@ -245,72 +245,72 @@ struct sie_page {
} __packed;
struct kvm_vcpu_stat {
- u32 exit_userspace;
- u32 exit_null;
- u32 exit_external_request;
- u32 exit_external_interrupt;
- u32 exit_stop_request;
- u32 exit_validity;
- u32 exit_instruction;
- u32 exit_pei;
- u32 halt_successful_poll;
- u32 halt_attempted_poll;
- u32 halt_poll_invalid;
- u32 halt_wakeup;
- u32 instruction_lctl;
- u32 instruction_lctlg;
- u32 instruction_stctl;
- u32 instruction_stctg;
- u32 exit_program_interruption;
- u32 exit_instr_and_program;
- u32 exit_operation_exception;
- u32 deliver_external_call;
- u32 deliver_emergency_signal;
- u32 deliver_service_signal;
- u32 deliver_virtio_interrupt;
- u32 deliver_stop_signal;
- u32 deliver_prefix_signal;
- u32 deliver_restart_signal;
- u32 deliver_program_int;
- u32 deliver_io_int;
- u32 exit_wait_state;
- u32 instruction_pfmf;
- u32 instruction_stidp;
- u32 instruction_spx;
- u32 instruction_stpx;
- u32 instruction_stap;
- u32 instruction_storage_key;
- u32 instruction_ipte_interlock;
- u32 instruction_stsch;
- u32 instruction_chsc;
- u32 instruction_stsi;
- u32 instruction_stfl;
- u32 instruction_tprot;
- u32 instruction_sie;
- u32 instruction_essa;
- u32 instruction_sthyi;
- u32 instruction_sigp_sense;
- u32 instruction_sigp_sense_running;
- u32 instruction_sigp_external_call;
- u32 instruction_sigp_emergency;
- u32 instruction_sigp_cond_emergency;
- u32 instruction_sigp_start;
- u32 instruction_sigp_stop;
- u32 instruction_sigp_stop_store_status;
- u32 instruction_sigp_store_status;
- u32 instruction_sigp_store_adtl_status;
- u32 instruction_sigp_arch;
- u32 instruction_sigp_prefix;
- u32 instruction_sigp_restart;
- u32 instruction_sigp_init_cpu_reset;
- u32 instruction_sigp_cpu_reset;
- u32 instruction_sigp_unknown;
- u32 diagnose_10;
- u32 diagnose_44;
- u32 diagnose_9c;
- u32 diagnose_258;
- u32 diagnose_308;
- u32 diagnose_500;
+ u64 exit_userspace;
+ u64 exit_null;
+ u64 exit_external_request;
+ u64 exit_external_interrupt;
+ u64 exit_stop_request;
+ u64 exit_validity;
+ u64 exit_instruction;
+ u64 exit_pei;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 instruction_lctl;
+ u64 instruction_lctlg;
+ u64 instruction_stctl;
+ u64 instruction_stctg;
+ u64 exit_program_interruption;
+ u64 exit_instr_and_program;
+ u64 exit_operation_exception;
+ u64 deliver_external_call;
+ u64 deliver_emergency_signal;
+ u64 deliver_service_signal;
+ u64 deliver_virtio_interrupt;
+ u64 deliver_stop_signal;
+ u64 deliver_prefix_signal;
+ u64 deliver_restart_signal;
+ u64 deliver_program_int;
+ u64 deliver_io_int;
+ u64 exit_wait_state;
+ u64 instruction_pfmf;
+ u64 instruction_stidp;
+ u64 instruction_spx;
+ u64 instruction_stpx;
+ u64 instruction_stap;
+ u64 instruction_storage_key;
+ u64 instruction_ipte_interlock;
+ u64 instruction_stsch;
+ u64 instruction_chsc;
+ u64 instruction_stsi;
+ u64 instruction_stfl;
+ u64 instruction_tprot;
+ u64 instruction_sie;
+ u64 instruction_essa;
+ u64 instruction_sthyi;
+ u64 instruction_sigp_sense;
+ u64 instruction_sigp_sense_running;
+ u64 instruction_sigp_external_call;
+ u64 instruction_sigp_emergency;
+ u64 instruction_sigp_cond_emergency;
+ u64 instruction_sigp_start;
+ u64 instruction_sigp_stop;
+ u64 instruction_sigp_stop_store_status;
+ u64 instruction_sigp_store_status;
+ u64 instruction_sigp_store_adtl_status;
+ u64 instruction_sigp_arch;
+ u64 instruction_sigp_prefix;
+ u64 instruction_sigp_restart;
+ u64 instruction_sigp_init_cpu_reset;
+ u64 instruction_sigp_cpu_reset;
+ u64 instruction_sigp_unknown;
+ u64 diagnose_10;
+ u64 diagnose_44;
+ u64 diagnose_9c;
+ u64 diagnose_258;
+ u64 diagnose_308;
+ u64 diagnose_500;
};
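
The widening from u32 to u64 is easy to motivate with a rough overflow estimate (editor's arithmetic, not part of the patch): at 10^6 counted events per second a u32 wraps after 2^32 / 10^6 ≈ 4295 seconds, about 72 minutes, while a u64 at the same rate lasts on the order of 580,000 years.
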
#define PGM_OPERATION 0x01
@@ -577,7 +577,7 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
- u32 remote_tlb_flush;
+ ulong remote_tlb_flush;
};
struct kvm_arch_memory_slot {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index d79ba7cf75b0..7b93b78f423c 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -129,7 +129,8 @@ struct lowcore {
__u8 pad_0x0390[0x0398-0x0390]; /* 0x0390 */
__u64 gmap; /* 0x0398 */
__u32 spinlock_lockval; /* 0x03a0 */
- __u8 pad_0x03a0[0x0400-0x03a4]; /* 0x03a4 */
+ __u32 fpu_flags; /* 0x03a4 */
+ __u8 pad_0x03a8[0x0400-0x03a8]; /* 0x03a8 */
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 6d39329c894b..bea785d7f853 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
struct list_head pgtable_list;
spinlock_t gmap_lock;
struct list_head gmap_list;
+ unsigned long gmap_asce;
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c6a088c91aee..515fea5a3fc4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,6 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.flush_count, 0);
+ mm->context.gmap_asce = 0;
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 0da91c4d30fd..6611f798d2be 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -11,6 +11,7 @@
#include <asm-generic/pci.h>
#include <asm/pci_clp.h>
#include <asm/pci_debug.h>
+#include <asm/sclp.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
@@ -117,6 +118,7 @@ struct zpci_dev {
spinlock_t iommu_bitmap_lock;
unsigned long *iommu_bitmap;
+ unsigned long *lazy_bitmap;
unsigned long iommu_size;
unsigned long iommu_pages;
unsigned int next_bit;
@@ -216,6 +218,9 @@ void zpci_debug_init_device(struct zpci_dev *, const char *);
void zpci_debug_exit_device(struct zpci_dev *);
void zpci_debug_info(struct zpci_dev *, struct seq_file *);
+/* Error reporting */
+int zpci_report_error(struct pci_dev *, struct zpci_report_error_header *);
+
#ifdef CONFIG_NUMA
/* Returns the node based on PCI bus */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 72c7f60bfe83..0362cd5fa187 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -874,35 +874,31 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
- unsigned long pto = (unsigned long) ptep;
-
- /* Invalidation + global TLB flush for the pte */
- asm volatile(
- " ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
-}
+#define IPTE_GLOBAL 0
+#define IPTE_LOCAL 1
-static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep, int local)
{
unsigned long pto = (unsigned long) ptep;
- /* Invalidation + local TLB flush for the pte */
+ /* Invalidation + TLB flush for the pte */
asm volatile(
- " .insn rrf,0xb2210000,%2,%3,0,1"
- : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+ " .insn rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
+ [m4] "i" (local));
}
-static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
+static inline void __ptep_ipte_range(unsigned long address, int nr,
+ pte_t *ptep, int local)
{
unsigned long pto = (unsigned long) ptep;
- /* Invalidate a range of ptes + global TLB flush of the ptes */
+ /* Invalidate a range of ptes + TLB flush of the ptes */
do {
asm volatile(
- " .insn rrf,0xb2210000,%2,%0,%1,0"
- : "+a" (address), "+a" (nr) : "a" (pto) : "memory");
+ " .insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
+ : [r2] "+a" (address), [r3] "+a" (nr)
+ : [r1] "a" (pto), [m4] "i" (local) : "memory");
} while (nr != 255);
}
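
The local/global decision is now an explicit argument, so the CPU-mask check moves to the call sites. A sketch of the expected pattern (assumed; the converted callers are not part of this excerpt):

	static void example_ptep_invalidate(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
	{
		if (MACHINE_HAS_TLB_LC &&
		    cpumask_equal(mm_cpumask(mm),
				  cpumask_of(smp_processor_id())))
			__ptep_ipte(addr, ptep, IPTE_LOCAL);
		else
			__ptep_ipte(addr, ptep, IPTE_GLOBAL);
	}
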
@@ -1239,53 +1235,33 @@ static inline void __pmdp_csp(pmd_t *pmdp)
pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}
-static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
-{
- unsigned long sto;
-
- sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
- asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,0"
- : "=m" (*pmdp)
- : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
- : "cc" );
-}
-
-static inline void __pudp_idte(unsigned long address, pud_t *pudp)
-{
- unsigned long r3o;
-
- r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
- r3o |= _ASCE_TYPE_REGION3;
- asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,0"
- : "=m" (*pudp)
- : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
- : "cc");
-}
+#define IDTE_GLOBAL 0
+#define IDTE_LOCAL 1
-static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp, int local)
{
unsigned long sto;
sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,1"
- : "=m" (*pmdp)
- : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pmdp)
+ : [r1] "a" (sto), [r2] "a" ((address & HPAGE_MASK)),
+ [m4] "i" (local)
: "cc" );
}
-static inline void __pudp_idte_local(unsigned long address, pud_t *pudp)
+static inline void __pudp_idte(unsigned long address, pud_t *pudp, int local)
{
unsigned long r3o;
r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
r3o |= _ASCE_TYPE_REGION3;
asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,1"
- : "=m" (*pudp)
- : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ " .insn rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
+ : "+m" (*pudp)
+ : [r1] "a" (r3o), [r2] "a" ((address & PUD_MASK)),
+ [m4] "i" (local)
: "cc");
}
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1a691ef740cf..39846100682a 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -26,17 +26,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
: : "a" (2048), "a" (asce) : "cc");
}
-/*
- * Flush TLB entries for a specific ASCE on the local CPU
- */
-static inline void __tlb_flush_idte_local(unsigned long asce)
-{
- /* Local TLB flush for the mm */
- asm volatile(
- " .insn rrf,0xb98e0000,0,%0,%1,1"
- : : "a" (2048), "a" (asce) : "cc");
-}
-
#ifdef CONFIG_SMP
void smp_ptlb_all(void);
@@ -65,35 +54,33 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
/* Global TLB flush */
__tlb_flush_global();
/* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
}
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
{
+ unsigned long gmap_asce;
+
+ /*
+ * If the machine has IDTE we prefer to do a per mm flush
+ * on all cpus instead of doing a local flush if the mm
+ * only ran on the local cpu.
+ */
preempt_disable();
atomic_inc(&mm->context.flush_count);
- if (MACHINE_HAS_TLB_LC &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- __tlb_flush_idte_local(asce);
+ gmap_asce = READ_ONCE(mm->context.gmap_asce);
+ if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+ if (gmap_asce)
+ __tlb_flush_idte(gmap_asce);
+ __tlb_flush_idte(mm->context.asce);
} else {
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte(asce);
- else
- __tlb_flush_global();
- /* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
+ __tlb_flush_full(mm);
}
+ /* Reset TLB flush mask */
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
atomic_dec(&mm->context.flush_count);
preempt_enable();
}
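
The branch above implies an encoding of mm->context.gmap_asce that the excerpt never states outright; spelled out as an assumption:

	/*
	 * Assumed encoding of mm->context.gmap_asce:
	 *   0    - no gmap was ever attached; flushing the mm asce
	 *          with IDTE is sufficient
	 *   asce - exactly one gmap asce to flush besides the mm asce
	 *   -1UL - flushing by asce is not reliable; fall back to
	 *          __tlb_flush_full()
	 */
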
@@ -112,36 +99,17 @@ static inline void __tlb_flush_kernel(void)
/*
* Flush TLB entries for a specific ASCE on all CPUs.
*/
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+static inline void __tlb_flush_mm(struct mm_struct *mm)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
static inline void __tlb_flush_kernel(void)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(init_mm.context.asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
#endif
-static inline void __tlb_flush_mm(struct mm_struct * mm)
-{
- /*
- * If the machine has IDTE we prefer to do a per mm flush
- * on all cpus instead of doing a local flush if the mm
- * only ran on the local cpu.
- */
- if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, mm->context.asce);
- else
- __tlb_flush_full(mm);
-}
-
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
if (mm->context.flush_mm) {
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 95aefdba4be2..52d7c8709279 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
- unsigned char __x; \
+ unsigned char __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
- unsigned short __x; \
+ unsigned short __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
- unsigned int __x; \
+ unsigned int __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
- unsigned long long __x; \
+ unsigned long long __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
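
The added = 0 initializers close an information leak: __get_user() assigns __x to (x) even when __get_user_fn() faults, so a caller that mishandles the error used to receive uninitialized kernel stack contents. An illustrative caller (not from the patch):

	static int read_word(int __user *uptr, int *val)
	{
		if (__get_user(*val, uptr))
			return -EFAULT;	/* *val is now 0, not stack garbage */
		return 0;
	}
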
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
index 4a3135620f5e..49c24a2afce0 100644
--- a/arch/s390/include/asm/vx-insn.h
+++ b/arch/s390/include/asm/vx-insn.h
@@ -16,15 +16,13 @@
/* Macros to generate vector instruction byte code */
-#define REG_NUM_INVALID 255
-
/* GR_NUM - Retrieve general-purpose register number
*
* @opd: Operand to store register number
* @r64: String designation register in the format "%rN"
*/
.macro GR_NUM opd gr
- \opd = REG_NUM_INVALID
+ \opd = 255
.ifc \gr,%r0
\opd = 0
.endif
@@ -73,14 +71,11 @@
.ifc \gr,%r15
\opd = 15
.endif
- .if \opd == REG_NUM_INVALID
- .error "Invalid general-purpose register designation: \gr"
+ .if \opd == 255
+ \opd = \gr
.endif
.endm
-/* VX_R() - Macro to encode the VX_NUM into the instruction */
-#define VX_R(v) (v & 0x0F)
-
/* VX_NUM - Retrieve vector register number
*
* @opd: Operand to store register number
@@ -88,11 +83,10 @@
*
* The vector register number is used as input to the
* instruction, as well as to compute the RXB field of the
- * instruction. To encode the particular vector register number,
- * use the VX_R(v) macro to extract the instruction opcode.
+ * instruction.
*/
.macro VX_NUM opd vxr
- \opd = REG_NUM_INVALID
+ \opd = 255
.ifc \vxr,%v0
\opd = 0
.endif
@@ -189,8 +183,8 @@
.ifc \vxr,%v31
\opd = 31
.endif
- .if \opd == REG_NUM_INVALID
- .error "Invalid vector register designation: \vxr"
+ .if \opd == 255
+ \opd = \vxr
.endif
.endm
@@ -251,7 +245,7 @@
/* VECTOR GENERATE BYTE MASK */
.macro VGBM vr imm2
VX_NUM v1, \vr
- .word (0xE700 | (VX_R(v1) << 4))
+ .word (0xE700 | ((v1&15) << 4))
.word \imm2
MRXBOPC 0, 0x44, v1
.endm
@@ -267,7 +261,7 @@
VX_NUM v1, \v
GR_NUM b2, "%r0"
GR_NUM r3, \gr
- .word 0xE700 | (VX_R(v1) << 4) | r3
+ .word 0xE700 | ((v1&15) << 4) | r3
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x22, v1
.endm
@@ -284,12 +278,21 @@
VLVG \v, \gr, \index, 3
.endm
+/* VECTOR LOAD REGISTER */
+.macro VLR v1, v2
+ VX_NUM v1, \v1
+ VX_NUM v2, \v2
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word 0
+ MRXBOPC 0, 0x56, v1, v2
+.endm
+
/* VECTOR LOAD */
.macro VL v, disp, index="%r0", base
VX_NUM v1, \v
GR_NUM x2, \index
GR_NUM b2, \base
- .word 0xE700 | (VX_R(v1) << 4) | x2
+ .word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x06, v1
.endm
@@ -299,7 +302,7 @@
VX_NUM v1, \vr1
GR_NUM x2, \index
GR_NUM b2, \base
- .word 0xE700 | (VX_R(v1) << 4) | x2
+ .word 0xE700 | ((v1&15) << 4) | x2
.word (b2 << 12) | (\disp)
MRXBOPC \m3, \opc, v1
.endm
@@ -319,7 +322,7 @@
/* VECTOR LOAD ELEMENT IMMEDIATE */
.macro VLEIx vr1, imm2, m3, opc
VX_NUM v1, \vr1
- .word 0xE700 | (VX_R(v1) << 4)
+ .word 0xE700 | ((v1&15) << 4)
.word \imm2
MRXBOPC \m3, \opc, v1
.endm
@@ -341,7 +344,7 @@
GR_NUM r1, \gr
GR_NUM b2, \base
VX_NUM v3, \vr
- .word 0xE700 | (r1 << 4) | VX_R(v3)
+ .word 0xE700 | (r1 << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC \m, 0x21, v3
.endm
@@ -363,7 +366,7 @@
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x36, v1, v3
.endm
@@ -373,7 +376,7 @@
VX_NUM v1, \vfrom
VX_NUM v3, \vto
GR_NUM b2, \base /* Base register */
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
.word (b2 << 12) | (\disp)
MRXBOPC 0, 0x3E, v1, v3
.endm
@@ -384,16 +387,16 @@
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
- MRXBOPC VX_R(v4), 0x8C, v1, v2, v3, v4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
.endm
/* VECTOR UNPACK LOGICAL LOW */
.macro VUPLL vr1, vr2, m3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
.word 0x0000
MRXBOPC \m3, 0xD4, v1, v2
.endm
@@ -410,13 +413,23 @@
/* Vector integer instructions */
+/* VECTOR AND */
+.macro VN vr1, vr2, vr3
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC 0, 0x68, v1, v2, v3
+.endm
+
/* VECTOR EXCLUSIVE OR */
.macro VX vr1, vr2, vr3
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC 0, 0x6D, v1, v2, v3
.endm
@@ -425,8 +438,8 @@
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC \m4, 0xB4, v1, v2, v3
.endm
.macro VGFMB vr1, vr2, vr3
@@ -448,9 +461,9 @@
VX_NUM v2, \vr2
VX_NUM v3, \vr3
VX_NUM v4, \vr4
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12) | (\m5 << 8)
- MRXBOPC VX_R(v4), 0xBC, v1, v2, v3, v4
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12) | (\m5 << 8)
+ MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
.endm
.macro VGFMAB vr1, vr2, vr3, vr4
VGFMA \vr1, \vr2, \vr3, \vr4, 0
@@ -470,11 +483,78 @@
VX_NUM v1, \vr1
VX_NUM v2, \vr2
VX_NUM v3, \vr3
- .word 0xE700 | (VX_R(v1) << 4) | VX_R(v2)
- .word (VX_R(v3) << 12)
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
MRXBOPC 0, 0x7D, v1, v2, v3
.endm
+/* VECTOR REPLICATE IMMEDIATE */
+.macro VREPI vr1, imm2, m3
+ VX_NUM v1, \vr1
+ .word 0xE700 | ((v1&15) << 4)
+ .word \imm2
+ MRXBOPC \m3, 0x45, v1
+.endm
+.macro VREPIB vr1, imm2
+ VREPI \vr1, \imm2, 0
+.endm
+.macro VREPIH vr1, imm2
+ VREPI \vr1, \imm2, 1
+.endm
+.macro VREPIF vr1, imm2
+ VREPI \vr1, \imm2, 2
+.endm
+.macro VREPIG vr1, imm2
+ VREPI \vr1, \imm2, 3
+.endm
+
+/* VECTOR ADD */
+.macro VA vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0xF3, v1, v2, v3
+.endm
+.macro VAB vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 0
+.endm
+.macro VAH vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 1
+.endm
+.macro VAF vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 2
+.endm
+.macro VAG vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 3
+.endm
+.macro VAQ vr1, vr2, vr3
+ VA \vr1, \vr2, \vr3, 4
+.endm
+
+/* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
+.macro VESRAV vr1, vr2, vr3, m4
+ VX_NUM v1, \vr1
+ VX_NUM v2, \vr2
+ VX_NUM v3, \vr3
+ .word 0xE700 | ((v1&15) << 4) | (v2&15)
+ .word ((v3&15) << 12)
+ MRXBOPC \m4, 0x7A, v1, v2, v3
+.endm
+
+.macro VESRAVB vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 0
+.endm
+.macro VESRAVH vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 1
+.endm
+.macro VESRAVF vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 2
+.endm
+.macro VESRAVG vr1, vr2, vr3
+ VESRAV \vr1, \vr2, \vr3, 3
+.endm
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_VX_INSN_H */
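
Because VX_NUM now passes plain register numbers through, these macros are usable from inline assembly in C files that pull the header in with asm(".include ..."), as fpu.c does further down. A hedged sketch (illustrative function; assumes the vector facility is present and runs inside a kernel FPU section):

	asm(".include \"asm/vx-insn.h\"\n");	/* make the macros known to gas */

	static void example_splat_and_add(void)
	{
		struct kernel_fpu state;

		kernel_fpu_begin(&state, KERNEL_VXR_LOW);
		asm volatile(
			"	VREPIB	0,0x2a\n"	/* %v0 = 16 x 0x2a */
			"	VAB	1,0,0\n"	/* %v1 = %v0 + %v0, byte-wise */
			: : : "memory");
		kernel_fpu_end(&state, KERNEL_VXR_LOW);
	}
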
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 08fe6dad9026..cc44b09c25fc 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -6,6 +6,7 @@ header-y += bitsperlong.h
header-y += byteorder.h
header-y += chpid.h
header-y += chsc.h
+header-y += clp.h
header-y += cmb.h
header-y += dasd.h
header-y += debug.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 3234817c7d47..72ccc41444dc 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -48,6 +48,9 @@ AFLAGS_head.o += -march=z900
endif
GCOV_PROFILE_sclp.o := n
GCOV_PROFILE_als.o := n
+UBSAN_SANITIZE_als.o := n
+UBSAN_SANITIZE_early.o := n
+UBSAN_SANITIZE_sclp.o := n
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 1f95cc1faeb7..f3df9e0a5dec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -125,6 +125,7 @@ int main(void)
OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list);
OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list);
OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code);
+ OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 29df8484282b..f9293bfefb7f 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -71,9 +71,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
*/
struct save_area * __init save_area_boot_cpu(void)
{
- if (list_empty(&dump_save_areas))
- return NULL;
- return list_first_entry(&dump_save_areas, struct save_area, list);
+ return list_first_entry_or_null(&dump_save_areas, struct save_area, list);
}
/*
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 717b03aa16b5..2374c5b46bbc 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -13,7 +13,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index 81d1d1887507..1235b9438df4 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -10,240 +10,167 @@
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>
-/*
- * Per-CPU variable to maintain FPU register ranges that are in use
- * by the kernel.
- */
-static DEFINE_PER_CPU(u32, kernel_fpu_state);
-
-#define KERNEL_FPU_STATE_MASK (KERNEL_FPU_MASK|KERNEL_FPC)
-
+asm(".include \"asm/vx-insn.h\"\n");
void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
- if (!__this_cpu_read(kernel_fpu_state)) {
- /*
- * Save user space FPU state and register contents. Multiple
- * calls because of interruptions do not matter and return
- * immediately. This also sets CIF_FPU to lazy restore FP/VX
- * register contents when returning to user space.
- */
- save_fpu_regs();
- }
-
- /* Update flags to use the vector facility for KERNEL_FPR */
- if (MACHINE_HAS_VX && (state->mask & KERNEL_FPR)) {
- flags |= KERNEL_VXR_LOW | KERNEL_FPC;
- flags &= ~KERNEL_FPR;
- }
-
- /* Save and update current kernel VX state */
- state->mask = __this_cpu_read(kernel_fpu_state);
- __this_cpu_or(kernel_fpu_state, flags & KERNEL_FPU_STATE_MASK);
-
/*
- * If this is the first call to __kernel_fpu_begin(), no additional
- * work is required.
+ * Limit the save to the FPU/vector registers already
+ * in use by the previous context
*/
- if (!(state->mask & KERNEL_FPU_STATE_MASK))
- return;
+ flags &= state->mask;
- /*
- * If KERNEL_FPR is still set, the vector facility is not available
- * and, thus, save floating-point control and registers only.
- */
- if (state->mask & KERNEL_FPR) {
- asm volatile("stfpc %0" : "=Q" (state->fpc));
- asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
- asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
- asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
- asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
- asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
- asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
- asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
- asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
- asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
- asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
- asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
- asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
- asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
- asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
- asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
- asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
+ if (flags & KERNEL_FPC)
+ /* Save floating point control */
+ asm volatile("stfpc %0" : "=m" (state->fpc));
+
+ if (!MACHINE_HAS_VX) {
+ if (flags & KERNEL_VXR_V0V7) {
+ /* Save floating-point registers */
+ asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
+ asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
+ asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
+ asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
+ asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
+ asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
+ asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
+ asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
+ asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
+ asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
+ asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
+ asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
+ asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
+ asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
+ asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
+ asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
+ }
return;
}
- /*
- * If this is a nested call to __kernel_fpu_begin(), check the saved
- * state mask to save and later restore the vector registers that
- * are already in use. Let's start with checking floating-point
- * controls.
- */
- if (state->mask & KERNEL_FPC)
- asm volatile("stfpc %0" : "=m" (state->fpc));
-
/* Test and save vector registers */
asm volatile (
/*
* Test if any vector register must be saved and, if so,
* test if all register can be saved.
*/
- " tmll %[m],15\n" /* KERNEL_VXR_MASK */
- " jz 20f\n" /* no work -> done */
" la 1,%[vxrs]\n" /* load save area */
- " jo 18f\n" /* -> save V0..V31 */
-
+ " tmll %[m],30\n" /* KERNEL_VXR */
+ " jz 7f\n" /* no work -> done */
+ " jo 5f\n" /* -> save V0..V31 */
/*
- * Test if V8..V23 can be saved at once... this speeds up
- * for KERNEL_VXR_MID only. Otherwise continue to split the
- * range of vector registers into two halves and test them
- * separately.
+ * Test for special case KERNEL_VXR_MID only. In this
+ * case a vstm V8..V23 is the best instruction
*/
- " tmll %[m],6\n" /* KERNEL_VXR_MID */
- " jo 17f\n" /* -> save V8..V23 */
-
+ " chi %[m],12\n" /* KERNEL_VXR_MID */
+ " jne 0f\n" /* -> save V8..V23 */
+ " VSTM 8,23,128,1\n" /* vstm %v8,%v23,128(%r1) */
+ " j 7f\n"
/* Test and save the first half of 16 vector registers */
- "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
- " jz 10f\n" /* -> KERNEL_VXR_HIGH */
+ "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
+ " jz 3f\n" /* -> KERNEL_VXR_HIGH */
" jo 2f\n" /* 11 -> save V0..V15 */
- " brc 4,3f\n" /* 01 -> save V0..V7 */
- " brc 2,4f\n" /* 10 -> save V8..V15 */
-
+ " brc 2,1f\n" /* 10 -> save V8..V15 */
+ " VSTM 0,7,0,1\n" /* vstm %v0,%v7,0(%r1) */
+ " j 3f\n"
+ "1: VSTM 8,15,128,1\n" /* vstm %v8,%v15,128(%r1) */
+ " j 3f\n"
+ "2: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
/* Test and save the second half of 16 vector registers */
- "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
- " jo 19f\n" /* 11 -> save V16..V31 */
- " brc 4,11f\n" /* 01 -> save V16..V23 */
- " brc 2,12f\n" /* 10 -> save V24..V31 */
- " j 20f\n" /* 00 -> done */
-
- /*
- * Below are the vstm combinations to save multiple vector
- * registers at once.
- */
- "2: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "3: .word 0xe707,0x1000,0x003e\n" /* vstm 0,7,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "4: .word 0xe78f,0x1080,0x003e\n" /* vstm 8,15,128(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "\n"
- "11: .word 0xe707,0x1100,0x0c3e\n" /* vstm 16,23,256(1) */
- " j 20f\n" /* -> done */
- "12: .word 0xe78f,0x1180,0x0c3e\n" /* vstm 24,31,384(1) */
- " j 20f\n" /* -> done */
- "\n"
- "17: .word 0xe787,0x1080,0x043e\n" /* vstm 8,23,128(1) */
- " nill %[m],249\n" /* m &= ~VXR_MID */
- " j 1b\n" /* -> VXR_LOW */
- "\n"
- "18: .word 0xe70f,0x1000,0x003e\n" /* vstm 0,15,0(1) */
- "19: .word 0xe70f,0x1100,0x0c3e\n" /* vstm 16,31,256(1) */
- "20:"
+ "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
+ " jz 7f\n"
+ " jo 6f\n" /* 11 -> save V16..V31 */
+ " brc 2,4f\n" /* 10 -> save V24..V31 */
+ " VSTM 16,23,256,1\n" /* vstm %v16,%v23,256(%r1) */
+ " j 7f\n"
+ "4: VSTM 24,31,384,1\n" /* vstm %v24,%v31,384(%r1) */
+ " j 7f\n"
+ "5: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */
+ "6: VSTM 16,31,256,1\n" /* vstm %v16,%v31,256(%r1) */
+ "7:"
: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
- : [m] "d" (state->mask)
+ : [m] "d" (flags)
: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);
-void __kernel_fpu_end(struct kernel_fpu *state)
+void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
- /* Just update the per-CPU state if there is nothing to restore */
- if (!(state->mask & KERNEL_FPU_STATE_MASK))
- goto update_fpu_state;
-
/*
- * If KERNEL_FPR is specified, the vector facility is not available
- * and, thus, restore floating-point control and registers only.
+ * Limit the restore to the FPU/vector registers of the
+ * previous context that have been overwritten by the
+ * current context
*/
- if (state->mask & KERNEL_FPR) {
- asm volatile("lfpc %0" : : "Q" (state->fpc));
- asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
- asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
- asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
- asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
- asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
- asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
- asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
- asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
- asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
- asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
- asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
- asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
- asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
- asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
- asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
- asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
- goto update_fpu_state;
- }
+ flags &= state->mask;
- /* Test and restore floating-point controls */
- if (state->mask & KERNEL_FPC)
+ if (flags & KERNEL_FPC)
+ /* Restore floating-point controls */
asm volatile("lfpc %0" : : "Q" (state->fpc));
+ if (!MACHINE_HAS_VX) {
+ if (flags & KERNEL_VXR_V0V7) {
+ /* Restore floating-point registers */
+ asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
+ asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
+ asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
+ asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
+ asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
+ asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
+ asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
+ asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
+ asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
+ asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
+ asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
+ asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
+ asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
+ asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
+ asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
+ asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
+ }
+ return;
+ }
+
/* Test and restore (load) vector registers */
asm volatile (
/*
- * Test if any vector registers must be loaded and, if so,
+ * Test if any vector register must be loaded and, if so,
* test if all registers can be loaded at once.
*/
- " tmll %[m],15\n" /* KERNEL_VXR_MASK */
- " jz 20f\n" /* no work -> done */
- " la 1,%[vxrs]\n" /* load load area */
- " jo 18f\n" /* -> load V0..V31 */
-
- /*
- * Test if V8..V23 can be restored at once... this speeds up
- * for KERNEL_VXR_MID only. Otherwise continue to split the
- * range of vector registers into two halves and test them
- * separately.
- */
- " tmll %[m],6\n" /* KERNEL_VXR_MID */
- " jo 17f\n" /* -> load V8..V23 */
-
- /* Test and load the first half of 16 vector registers */
- "1: tmll %[m],3\n" /* KERNEL_VXR_LOW */
- " jz 10f\n" /* -> KERNEL_VXR_HIGH */
- " jo 2f\n" /* 11 -> load V0..V15 */
- " brc 4,3f\n" /* 01 -> load V0..V7 */
- " brc 2,4f\n" /* 10 -> load V8..V15 */
-
- /* Test and load the second half of 16 vector registers */
- "10: tmll %[m],12\n" /* KERNEL_VXR_HIGH */
- " jo 19f\n" /* 11 -> load V16..V31 */
- " brc 4,11f\n" /* 01 -> load V16..V23 */
- " brc 2,12f\n" /* 10 -> load V24..V31 */
- " j 20f\n" /* 00 -> done */
-
+ " la 1,%[vxrs]\n" /* load restore area */
+ " tmll %[m],30\n" /* KERNEL_VXR */
+ " jz 7f\n" /* no work -> done */
+ " jo 5f\n" /* -> restore V0..V31 */
/*
- * Below are the vstm combinations to load multiple vector
- * registers at once.
+ * Test for special case KERNEL_VXR_MID only. In this
+ * case a vlm V8..V23 is the best instruction
*/
- "2: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "3: .word 0xe707,0x1000,0x0036\n" /* vlm 0,7,0(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "4: .word 0xe78f,0x1080,0x0036\n" /* vlm 8,15,128(1) */
- " j 10b\n" /* -> VXR_HIGH */
- "\n"
- "11: .word 0xe707,0x1100,0x0c36\n" /* vlm 16,23,256(1) */
- " j 20f\n" /* -> done */
- "12: .word 0xe78f,0x1180,0x0c36\n" /* vlm 24,31,384(1) */
- " j 20f\n" /* -> done */
- "\n"
- "17: .word 0xe787,0x1080,0x0436\n" /* vlm 8,23,128(1) */
- " nill %[m],249\n" /* m &= ~VXR_MID */
- " j 1b\n" /* -> VXR_LOW */
- "\n"
- "18: .word 0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
- "19: .word 0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
- "20:"
- :
- : [vxrs] "Q" (*(struct vx_array *) &state->vxrs),
- [m] "d" (state->mask)
+ " chi %[m],12\n" /* KERNEL_VXR_MID */
+ " jne 0f\n" /* -> restore V8..V23 */
+ " VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */
+ " j 7f\n"
+ /* Test and restore the first half of 16 vector registers */
+ "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */
+ " jz 3f\n" /* -> KERNEL_VXR_HIGH */
+ " jo 2f\n" /* 11 -> restore V0..V15 */
+ " brc 2,1f\n" /* 10 -> restore V8..V15 */
+ " VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */
+ " j 3f\n"
+ "1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */
+ " j 3f\n"
+ "2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
+ /* Test and restore the second half of 16 vector registers */
+ "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */
+ " jz 7f\n"
+ " jo 6f\n" /* 11 -> restore V16..V31 */
+ " brc 2,4f\n" /* 10 -> restore V24..V31 */
+ " VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */
+ " j 7f\n"
+ "4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */
+ " j 7f\n"
+ "5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */
+ "6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */
+ "7:"
+ : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
+ : [m] "d" (flags)
: "1", "cc");
-
-update_fpu_state:
- /* Update current kernel VX state */
- __this_cpu_write(kernel_fpu_state, state->mask);
}
EXPORT_SYMBOL(__kernel_fpu_end);
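
The bit-mask dispatch implemented by the inline assembly above is compact but hard to follow. Below is a plain-C model of the same decision tree; it is illustrative only, not kernel code. The mask values mirror the immediates quoted in the asm comments (KERNEL_FPC = 1, V0V7 = 2, V8V15 = 4, V16V23 = 8, V24V31 = 16), and vlm() merely prints the register range a real vlm would reload.

#include <stdio.h>

#define KERNEL_FPC        0x01
#define KERNEL_VXR_V0V7   0x02
#define KERNEL_VXR_V8V15  0x04
#define KERNEL_VXR_V16V23 0x08
#define KERNEL_VXR_V24V31 0x10
#define KERNEL_VXR_LOW    (KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15)
#define KERNEL_VXR_MID    (KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23)
#define KERNEL_VXR_HIGH   (KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31)
#define KERNEL_VXR        (KERNEL_VXR_LOW | KERNEL_VXR_HIGH)

static void vlm(int first, int last)
{
	printf("vlm %%v%d,%%v%d\n", first, last);	/* stand-in for the real vlm */
}

static void restore_vx(int flags)
{
	if (!(flags & KERNEL_VXR))
		return;					/* no work -> done */
	if ((flags & KERNEL_VXR) == KERNEL_VXR) {
		vlm(0, 15);				/* restore V0..V31 */
		vlm(16, 31);
		return;
	}
	if (flags == KERNEL_VXR_MID) {			/* special case: one vlm */
		vlm(8, 23);
		return;
	}
	switch (flags & KERNEL_VXR_LOW) {		/* first half */
	case KERNEL_VXR_LOW:	vlm(0, 15); break;
	case KERNEL_VXR_V0V7:	vlm(0, 7);  break;
	case KERNEL_VXR_V8V15:	vlm(8, 15); break;
	}
	switch (flags & KERNEL_VXR_HIGH) {		/* second half */
	case KERNEL_VXR_HIGH:	vlm(16, 31); break;
	case KERNEL_VXR_V16V23:	vlm(16, 23); break;
	case KERNEL_VXR_V24V31:	vlm(24, 31); break;
	}
}

int main(void)
{
	restore_vx(KERNEL_VXR_V0V7 | KERNEL_VXR_V24V31);	/* -> vlm 0,7; vlm 24,31 */
	return 0;
}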
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 0f7bfeba6da6..60a8a4e207ed 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -209,7 +209,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
/* Only trace if the calling function expects to. */
if (!ftrace_graph_entry(&trace))
goto out;
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
+ if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
+ NULL) == -EBUSY)
goto out;
parent = (unsigned long) return_to_handler;
out:
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index dd6306c51bd6..fdb40424acfe 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -26,12 +26,14 @@
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
+#include <asm/uaccess.h>
#include <asm/dis.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 29376f0e725c..9a32f7419d78 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
* returns 0 if all registers could be validated
* returns 1 otherwise
*/
-static int notrace s390_validate_registers(union mci mci)
+static int notrace s390_validate_registers(union mci mci, int umode)
{
int kill_task;
u64 zero;
@@ -110,26 +110,41 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.gr) {
/*
* General purpose registers couldn't be restored and have
- * unknown contents. Process needs to be terminated.
+ * unknown contents. Stop the system or terminate the process.
*/
+ if (!umode)
+ s390_handle_damage();
kill_task = 1;
}
if (!mci.fp) {
/*
- * Floating point registers can't be restored and
- * therefore the process needs to be terminated.
+ * Floating point registers can't be restored. If the
+ * kernel currently uses floating point registers the
+ * system is stopped. If the process has its floating
+ * point registers loaded it is terminated.
+ * Otherwise just revalidate the registers.
*/
- kill_task = 1;
+ if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
+ s390_handle_damage();
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
}
fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
if (!mci.fc) {
/*
* Floating point control register can't be restored.
- * Task will be terminated.
+ * If the kernel currently uses the floating point
+ * registers and needs the FPC register the system is
+ * stopped. If the process has its floating point
+ * registers loaded it is terminated. Otherwise the
+ * FPC is just revalidated.
*/
+ if (S390_lowcore.fpu_flags & KERNEL_FPC)
+ s390_handle_damage();
asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
- kill_task = 1;
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
} else
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
@@ -159,10 +174,16 @@ static int notrace s390_validate_registers(union mci mci)
if (!mci.vr) {
/*
- * Vector registers can't be restored and therefore
- * the process needs to be terminated.
+ * Vector registers can't be restored. If the kernel
+ * currently uses vector registers the system is
+ * stopped. If the process has its vector registers
+ * loaded it is terminated. Otherwise just revalidate
+ * the registers.
*/
- kill_task = 1;
+ if (S390_lowcore.fpu_flags & KERNEL_VXR)
+ s390_handle_damage();
+ if (!test_cpu_flag(CIF_FPU))
+ kill_task = 1;
}
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
@@ -250,13 +271,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
struct mcck_struct *mcck;
unsigned long long tmp;
union mci mci;
- int umode;
nmi_enter();
inc_irq_stat(NMI_NMI);
mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
- umode = user_mode(regs);
if (mci.sd) {
/* System damage -> stopping machine */
@@ -297,22 +316,14 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
s390_handle_damage();
}
}
- if (s390_validate_registers(mci)) {
- if (umode) {
- /*
- * Couldn't restore all register contents while in
- * user mode -> mark task for termination.
- */
- mcck->kill_task = 1;
- mcck->mcck_code = mci.val;
- set_cpu_flag(CIF_MCCK_PENDING);
- } else {
- /*
- * Couldn't restore all register contents while in
- * kernel mode -> stopping machine.
- */
- s390_handle_damage();
- }
+ if (s390_validate_registers(mci, user_mode(regs))) {
+ /*
+ * Couldn't restore all register contents for the
+ * user space process -> mark task for termination.
+ */
+ mcck->kill_task = 1;
+ mcck->mcck_code = mci.val;
+ set_cpu_flag(CIF_MCCK_PENDING);
}
if (mci.cd) {
/* Timing facility damage */
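
The hunks above apply one policy to each register class that failed validation. Restated as an illustrative sketch (not kernel code): kernel_uses models the S390_lowcore.fpu_flags test and live_user_regs models test_cpu_flag(CIF_FPU) being clear.

enum mcck_action { MCCK_REVALIDATE, MCCK_KILL_TASK, MCCK_STOP_MACHINE };

static enum mcck_action validate_class(int restored, int kernel_uses,
				       int live_user_regs)
{
	if (restored)
		return MCCK_REVALIDATE;		/* nothing was lost */
	if (kernel_uses)
		return MCCK_STOP_MACHINE;	/* in-kernel FPU state lost */
	if (live_user_regs)
		return MCCK_KILL_TASK;		/* user state lost -> terminate */
	return MCCK_REVALIDATE;			/* state still safe in memory */
}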
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 050b8d067d3b..bfda6aa40280 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -454,7 +454,7 @@ void s390_adjust_jiffies(void)
: "Q" (info->capability), "d" (10000000), "d" (0)
: "cc"
);
- kernel_fpu_end(&fpu);
+ kernel_fpu_end(&fpu, KERNEL_FPR);
} else
/*
* Really old machine without stsi block for basic
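
This caller illustrates the reworked API: the flags passed to kernel_fpu_end() must name the register set that the matching begin saved. A hedged usage sketch, assuming kernel_fpu_begin() takes the same flags argument as kernel_fpu_end() does in this series:

struct kernel_fpu fpu;

kernel_fpu_begin(&fpu, KERNEL_FPR);	/* save fpc and f0..f15 only */
/* ... code that clobbers floating-point registers ... */
kernel_fpu_end(&fpu, KERNEL_FPR);	/* restore exactly what was saved */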
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 4e9949800562..0bfcc492987e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -50,10 +50,6 @@
#include <asm/cio.h>
#include "entry.h"
-/* change this if you have some constant time drift */
-#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
-#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
-
u64 sched_clock_base_cc = -1; /* Force to data section. */
EXPORT_SYMBOL_GPL(sched_clock_base_cc);
@@ -282,13 +278,8 @@ extern struct timezone sys_tz;
void update_vsyscall_tz(void)
{
- /* Make userspace gettimeofday spin until we're done. */
- ++vdso_data->tb_update_count;
- smp_wmb();
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- smp_wmb();
- ++vdso_data->tb_update_count;
}
/*
@@ -318,51 +309,12 @@ void __init time_init(void)
vtime_init();
}
-/*
- * The time is "clock". old is what we think the time is.
- * Adjust the value by a multiple of jiffies and add the delta to ntp.
- * "delay" is an approximation how long the synchronization took. If
- * the time correction is positive, then "delay" is subtracted from
- * the time difference and only the remaining part is passed to ntp.
- */
-static unsigned long long adjust_time(unsigned long long old,
- unsigned long long clock,
- unsigned long long delay)
-{
- unsigned long long delta, ticks;
- struct timex adjust;
-
- if (clock > old) {
- /* It is later than we thought. */
- delta = ticks = clock - old;
- delta = ticks = (delta < delay) ? 0 : delta - delay;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- adjust.offset = ticks * (1000000 / HZ);
- } else {
- /* It is earlier than we thought. */
- delta = ticks = old - clock;
- delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
- delta = -delta;
- adjust.offset = -ticks * (1000000 / HZ);
- }
- sched_clock_base_cc += delta;
- if (adjust.offset != 0) {
- pr_notice("The ETR interface has adjusted the clock "
- "by %li microseconds\n", adjust.offset);
- adjust.modes = ADJ_OFFSET_SINGLESHOT;
- do_adjtimex(&adjust);
- }
- return delta;
-}
-
static DEFINE_PER_CPU(atomic_t, clock_sync_word);
static DEFINE_MUTEX(clock_sync_mutex);
static unsigned long clock_sync_flags;
-#define CLOCK_SYNC_HAS_ETR 0
-#define CLOCK_SYNC_HAS_STP 1
-#define CLOCK_SYNC_ETR 2
-#define CLOCK_SYNC_STP 3
+#define CLOCK_SYNC_HAS_STP 0
+#define CLOCK_SYNC_STP 1
/*
* The get_clock function for the physical clock. It will get the current
@@ -384,34 +336,32 @@ int get_phys_clock(unsigned long long *clock)
if (sw0 == sw1 && (sw0 & 0x80000000U))
/* Success: time is in sync. */
return 0;
- if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
- !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+ if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return -EOPNOTSUPP;
- if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
- !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
+ if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
return -EACCES;
return -EAGAIN;
}
EXPORT_SYMBOL(get_phys_clock);
/*
- * Make get_sync_clock return -EAGAIN.
+ * Make get_phys_clock() return -EAGAIN.
*/
static void disable_sync_clock(void *dummy)
{
atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/*
- * Clear the in-sync bit 2^31. All get_sync_clock calls will
+ * Clear the in-sync bit 2^31. All get_phys_clock calls will
* fail until the sync bit is turned back on. In addition
* increase the "sequence" counter to avoid the race of an
- * etr event and the complete recovery against get_sync_clock.
+ * stp event and the complete recovery against get_phys_clock.
*/
atomic_andnot(0x80000000, sw_ptr);
atomic_inc(sw_ptr);
}
/*
- * Make get_sync_clock return 0 again.
+ * Make get_phys_clock() return 0 again.
* Needs to be called from a context disabled for preemption.
*/
static void enable_sync_clock(void)
@@ -434,7 +384,7 @@ static inline int check_sync_clock(void)
return rc;
}
-/* Single threaded workqueue used for etr and stp sync events */
+/* Single threaded workqueue used for stp sync events */
static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void)
@@ -448,20 +398,12 @@ struct clock_sync_data {
atomic_t cpus;
int in_sync;
unsigned long long fixup_cc;
- int etr_port;
- struct etr_aib *etr_aib;
};
static void clock_sync_cpu(struct clock_sync_data *sync)
{
atomic_dec(&sync->cpus);
enable_sync_clock();
- /*
- * This looks like a busy wait loop but it isn't. etr_sync_cpus
- * is called on all other cpus while the TOD clocks is stopped.
- * __udelay will stop the cpu on an enabled wait psw until the
- * TOD is running again.
- */
while (sync->in_sync == 0) {
__udelay(1);
/*
@@ -582,7 +524,7 @@ void stp_queue_work(void)
static int stp_sync_clock(void *data)
{
static int first;
- unsigned long long old_clock, delta, new_clock, clock_delta;
+ unsigned long long clock_delta;
struct clock_sync_data *stp_sync;
struct ptff_qto qto;
int rc;
@@ -605,18 +547,18 @@ static int stp_sync_clock(void *data)
if (stp_info.todoff[0] || stp_info.todoff[1] ||
stp_info.todoff[2] || stp_info.todoff[3] ||
stp_info.tmd != 2) {
- old_clock = get_tod_clock();
rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0, &clock_delta);
if (rc == 0) {
- new_clock = old_clock + clock_delta;
- delta = adjust_time(old_clock, new_clock, 0);
+ /* fixup the monotonic sched clock */
+ sched_clock_base_cc += clock_delta;
if (ptff_query(PTFF_QTO) &&
ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
/* Update LPAR offset */
lpar_offset = qto.tod_epoch_difference;
atomic_notifier_call_chain(&s390_epoch_delta_notifier,
0, &clock_delta);
- fixup_clock_comparator(delta);
+ stp_sync->fixup_cc = clock_delta;
+ fixup_clock_comparator(clock_delta);
rc = chsc_sstpi(stp_page, &stp_info,
sizeof(struct stp_sstpi));
if (rc == 0 && stp_info.tmd != 2)
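
With adjust_time() gone, the sync path no longer rounds the offset to whole jiffies and feeds it to do_adjtimex(); the raw clock_delta from chsc_sstpc() is applied directly. A minimal model of the remaining arithmetic (illustrative only):

static void stp_apply_delta(unsigned long long *sched_clock_base,
			    unsigned long long *clock_comparator,
			    unsigned long long clock_delta)
{
	*sched_clock_base += clock_delta;	/* keep sched_clock monotonic */
	*clock_comparator += clock_delta;	/* keep pending timer expiry */
}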
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index dd97a3e8a34a..d0539f76fd24 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -14,11 +14,12 @@
*/
#include <linux/kprobes.h>
#include <linux/kdebug.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <asm/uaccess.h>
#include <asm/fpu/api.h>
#include "entry.h"
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 68145456fee2..6cc947896c77 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -24,8 +24,9 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
+# Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 0b0fd22c869a..2d54c18089eb 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,8 +24,9 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-# Disable gcov profiling for VDSO code
+# Disable gcov profiling and ubsan for VDSO code
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 54200208bf24..4aa8a7e2a1da 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -495,6 +495,18 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
switch (code) {
+ case PGM_PROTECTION:
+ switch (prot) {
+ case PROT_TYPE_ALC:
+ tec->b60 = 1;
+ /* FALL THROUGH */
+ case PROT_TYPE_DAT:
+ tec->b61 = 1;
+ break;
+ default: /* LA and KEYC set b61 to 0, other params undefined */
+ return code;
+ }
+ /* FALL THROUGH */
case PGM_ASCE_TYPE:
case PGM_PAGE_TRANSLATION:
case PGM_REGION_FIRST_TRANS:
@@ -504,8 +516,7 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
/*
* op_access_id only applies to MOVE_PAGE -> set bit 61
* exc_access_id has to be set to 0 for some instructions. Both
- * cases have to be handled by the caller. We can always store
- * exc_access_id, as it is undefined for non-ar cases.
+ * cases have to be handled by the caller.
*/
tec->addr = gva >> PAGE_SHIFT;
tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
@@ -516,25 +527,13 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
case PGM_ASTE_VALIDITY:
case PGM_ASTE_SEQUENCE:
case PGM_EXTENDED_AUTHORITY:
+ /*
+ * We can always store exc_access_id, as it is
+ * undefined for non-ar cases. It is undefined for
+ * most DAT protection exceptions.
+ */
pgm->exc_access_id = ar;
break;
- case PGM_PROTECTION:
- switch (prot) {
- case PROT_TYPE_ALC:
- tec->b60 = 1;
- /* FALL THROUGH */
- case PROT_TYPE_DAT:
- tec->b61 = 1;
- tec->addr = gva >> PAGE_SHIFT;
- tec->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
- tec->as = psw_bits(vcpu->arch.sie_block->gpsw).as;
- /* exc_access_id is undefined for most cases */
- pgm->exc_access_id = ar;
- break;
- default: /* LA and KEYC set b61 to 0, other params undefined */
- break;
- }
- break;
}
return code;
}
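
The net effect of the reshuffle is that protection exceptions now share the address/fsi/as setup with the DAT translation codes instead of duplicating it. A compilable structural sketch; the struct and helper below are stand-ins for illustration, not definitions from this file:

#include <stdbool.h>

struct tec { unsigned b60:1, b61:1; };
enum prot_type { PROT_TYPE_LA, PROT_TYPE_KEYC, PROT_TYPE_ALC, PROT_TYPE_DAT };

/* Returns true when the exception carries no valid TEC details
 * (LA/KEYC protection), mirroring the early "return code" above. */
static bool set_prot_bits(struct tec *tec, enum prot_type prot)
{
	switch (prot) {
	case PROT_TYPE_ALC:
		tec->b60 = 1;
		/* FALL THROUGH */
	case PROT_TYPE_DAT:
		tec->b61 = 1;
		return false;	/* fall through to the common setup */
	default:
		return true;	/* LA and KEYC: leave b61 zero, bail out */
	}
}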
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index 31a05330d11c..d7c6a7f53ced 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -206,7 +206,7 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
+ int ret = 0, nr_wp = 0, nr_bp = 0, i;
struct kvm_hw_breakpoint *bp_data = NULL;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -216,17 +216,10 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
return -EINVAL;
- size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
- bp_data = kmalloc(size, GFP_KERNEL);
- if (!bp_data) {
- ret = -ENOMEM;
- goto error;
- }
-
- if (copy_from_user(bp_data, dbg->arch.hw_bp, size)) {
- ret = -EFAULT;
- goto error;
- }
+ bp_data = memdup_user(dbg->arch.hw_bp,
+ sizeof(*bp_data) * dbg->arch.nr_hw_bp);
+ if (IS_ERR(bp_data))
+ return PTR_ERR(bp_data);
for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
switch (bp_data[i].type) {
@@ -241,17 +234,19 @@ int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
}
}
- size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
- if (size > 0) {
- wp_info = kmalloc(size, GFP_KERNEL);
+ if (nr_wp > 0) {
+ wp_info = kmalloc_array(nr_wp,
+ sizeof(*wp_info),
+ GFP_KERNEL);
if (!wp_info) {
ret = -ENOMEM;
goto error;
}
}
- size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
- if (size > 0) {
- bp_info = kmalloc(size, GFP_KERNEL);
+ if (nr_bp > 0) {
+ bp_info = kmalloc_array(nr_bp,
+ sizeof(*bp_info),
+ GFP_KERNEL);
if (!bp_info) {
ret = -ENOMEM;
goto error;
@@ -382,14 +377,20 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}
+#define PER_CODE_MASK (PER_EVENT_MASK >> 24)
+#define PER_CODE_BRANCH (PER_EVENT_BRANCH >> 24)
+#define PER_CODE_IFETCH (PER_EVENT_IFETCH >> 24)
+#define PER_CODE_STORE (PER_EVENT_STORE >> 24)
+#define PER_CODE_STORE_REAL (PER_EVENT_STORE_REAL >> 24)
+
#define per_bp_event(code) \
- (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
+ (code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
- (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
+ (code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
static int debug_exit_required(struct kvm_vcpu *vcpu)
{
- u32 perc = (vcpu->arch.sie_block->perc << 24);
+ u8 perc = vcpu->arch.sie_block->perc;
struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
struct kvm_hw_wp_info_arch *wp_info = NULL;
struct kvm_hw_bp_info_arch *bp_info = NULL;
@@ -444,7 +445,7 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
const u8 ilen = kvm_s390_get_ilen(vcpu);
struct kvm_s390_pgm_info pgm_info = {
.code = PGM_PER,
- .per_code = PER_EVENT_IFETCH >> 24,
+ .per_code = PER_CODE_IFETCH,
.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
};
@@ -458,33 +459,33 @@ int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
static void filter_guest_per_event(struct kvm_vcpu *vcpu)
{
- u32 perc = vcpu->arch.sie_block->perc << 24;
+ const u8 perc = vcpu->arch.sie_block->perc;
u64 peraddr = vcpu->arch.sie_block->peraddr;
u64 addr = vcpu->arch.sie_block->gpsw.addr;
u64 cr9 = vcpu->arch.sie_block->gcr[9];
u64 cr10 = vcpu->arch.sie_block->gcr[10];
u64 cr11 = vcpu->arch.sie_block->gcr[11];
/* filter all events, demanded by the guest */
- u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
+ u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
if (!guest_per_enabled(vcpu))
guest_perc = 0;
/* filter "successful-branching" events */
- if (guest_perc & PER_EVENT_BRANCH &&
+ if (guest_perc & PER_CODE_BRANCH &&
cr9 & PER_CONTROL_BRANCH_ADDRESS &&
!in_addr_range(addr, cr10, cr11))
- guest_perc &= ~PER_EVENT_BRANCH;
+ guest_perc &= ~PER_CODE_BRANCH;
/* filter "instruction-fetching" events */
- if (guest_perc & PER_EVENT_IFETCH &&
+ if (guest_perc & PER_CODE_IFETCH &&
!in_addr_range(peraddr, cr10, cr11))
- guest_perc &= ~PER_EVENT_IFETCH;
+ guest_perc &= ~PER_CODE_IFETCH;
/* All other PER events will be given to the guest */
/* TODO: Check altered address/address space */
- vcpu->arch.sie_block->perc = guest_perc >> 24;
+ vcpu->arch.sie_block->perc = guest_perc;
if (!guest_perc)
vcpu->arch.sie_block->iprcc &= ~PGM_PER;
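
The switch from u32 to u8 works because the SIE block reports the PER code in a single byte while the architected event masks occupy the top byte of a 32-bit control word, hence the ">> 24" definitions. An illustrative restatement (not kernel code):

#include <stdint.h>

#define PER_EVENT_IFETCH 0x40000000U
#define PER_CODE_IFETCH  ((uint8_t)(PER_EVENT_IFETCH >> 24))	/* 0x40 */

/* Keep only the PER events the guest enabled in CR9 bits 0-7. */
static uint8_t filter_guest_perc(uint8_t perc, uint64_t cr9)
{
	return perc & (uint8_t)(cr9 >> 24);
}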
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index dfd0ca2638fa..1cab8a177d0e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -29,6 +29,7 @@ static const intercept_handler_t instruction_handlers[256] = {
[0x01] = kvm_s390_handle_01,
[0x82] = kvm_s390_handle_lpsw,
[0x83] = kvm_s390_handle_diag,
+ [0xaa] = kvm_s390_handle_aa,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
[0xb6] = kvm_s390_handle_stctl,
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 24524c0f3ef8..be4db07f70d3 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -24,6 +24,8 @@
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
+#include <asm/switch_to.h>
+#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
@@ -40,6 +42,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
return 0;
+ BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -68,6 +71,7 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
int expect, rc;
+ BUG_ON(!kvm_s390_use_sca_entries());
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -109,6 +113,8 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
+ if (!kvm_s390_use_sca_entries())
+ return;
atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
@@ -400,12 +406,78 @@ static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
+static int __write_machine_check(struct kvm_vcpu *vcpu,
+ struct kvm_s390_mchk_info *mchk)
+{
+ unsigned long ext_sa_addr;
+ freg_t fprs[NUM_FPRS];
+ union mci mci;
+ int rc;
+
+ mci.val = mchk->mcic;
+ /* take care of lazy register loading via vcpu load/put */
+ save_fpu_regs();
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ /* Extended save area */
+ rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
+ sizeof(unsigned long));
+ /* Only bits 0-53 are used for address formation */
+ ext_sa_addr &= ~0x3ffUL;
+ if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
+ if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
+ 512))
+ mci.vr = 0;
+ } else {
+ mci.vr = 0;
+ }
+
+ /* General interruption information */
+ rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
+ rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+ rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);
+
+ /* Register-save areas */
+ if (MACHINE_HAS_VX) {
+ convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
+ rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
+ } else {
+ rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
+ vcpu->run->s.regs.fprs, 128);
+ }
+ rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
+ vcpu->run->s.regs.gprs, 128);
+ rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
+ (u32 __user *) __LC_FP_CREG_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
+ (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
+ (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
+ rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
+ (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
+ rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
+ &vcpu->run->s.regs.acrs, 64);
+ rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
+ &vcpu->arch.sie_block->gcr, 128);
+
+ /* Extended interruption information */
+ rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
+ (u32 __user *) __LC_EXT_DAMAGE_CODE);
+ rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
+ (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
+ rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
+ sizeof(mchk->fixed_logout));
+ return rc ? -EFAULT : 0;
+}
+
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
struct kvm_s390_mchk_info mchk = {};
- unsigned long adtl_status_addr;
int deliver = 0;
int rc = 0;
@@ -446,29 +518,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
KVM_S390_MCHK,
mchk.cr14, mchk.mcic);
-
- rc = kvm_s390_vcpu_store_status(vcpu,
- KVM_S390_STORE_STATUS_PREFIXED);
- rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
- &adtl_status_addr,
- sizeof(unsigned long));
- rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
- adtl_status_addr);
- rc |= put_guest_lc(vcpu, mchk.mcic,
- (u64 __user *) __LC_MCCK_CODE);
- rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
- (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
- rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
- &mchk.fixed_logout,
- sizeof(mchk.fixed_logout));
- rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __write_machine_check(vcpu, &mchk);
}
- return rc ? -EFAULT : 0;
+ return rc;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f142215ed30d..9c7a1ecfe6bd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -132,10 +132,7 @@ module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* upper facilities limit for kvm */
-unsigned long kvm_s390_fac_list_mask[16] = {
- 0xffe6000000000000UL,
- 0x005e000000000000UL,
-};
+unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
unsigned long kvm_s390_fac_list_mask_size(void)
{
@@ -248,22 +245,33 @@ static void kvm_s390_cpu_feat_init(void)
PTFF_QAF);
if (test_facility(17)) { /* MSA */
- __cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
- __cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
- __cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
- __cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
- __cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
+ __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmac);
+ __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmc);
+ __cpacf_query(CPACF_KM, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.km);
+ __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kimd);
+ __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.klmd);
}
if (test_facility(76)) /* MSA3 */
- __cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
+ __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.pckmo);
if (test_facility(77)) { /* MSA4 */
- __cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
- __cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
- __cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
- __cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
+ __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmctr);
+ __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmf);
+ __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kmo);
+ __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.pcc);
}
if (test_facility(57)) /* MSA5 */
- __cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);
+ __cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.ppno);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
@@ -376,7 +384,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_NR_VCPUS:
case KVM_CAP_MAX_VCPUS:
r = KVM_S390_BSCA_CPU_SLOTS;
- if (sclp.has_esca && sclp.has_64bscao)
+ if (!kvm_s390_use_sca_entries())
+ r = KVM_MAX_VCPUS;
+ else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
break;
case KVM_CAP_NR_MEMSLOTS:
@@ -1490,6 +1500,16 @@ out_err:
return rc;
}
+bool kvm_arch_has_vcpu_debugfs(void)
+{
+ return false;
+}
+
+int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 3, "%s", "free cpu");
@@ -1553,6 +1573,8 @@ static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
+ if (!kvm_s390_use_sca_entries())
+ return;
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1570,6 +1592,13 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
+ if (!kvm_s390_use_sca_entries()) {
+ struct bsca_block *sca = vcpu->kvm->arch.sca;
+
+ /* we still need the basic sca for the ipte control */
+ vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
+ vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ }
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -1650,6 +1679,11 @@ static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
int rc;
+ if (!kvm_s390_use_sca_entries()) {
+ if (id < KVM_MAX_VCPUS)
+ return true;
+ return false;
+ }
if (id < KVM_S390_BSCA_CPU_SLOTS)
return true;
if (!sclp.has_esca || !sclp.has_64bscao)
@@ -1938,8 +1972,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->eca |= 1;
if (sclp.has_sigpif)
vcpu->arch.sie_block->eca |= 0x10000000U;
- if (test_kvm_facility(vcpu->kvm, 64))
- vcpu->arch.sie_block->ecb3 |= 0x01;
if (test_kvm_facility(vcpu->kvm, 129)) {
vcpu->arch.sie_block->eca |= 0x00020000;
vcpu->arch.sie_block->ecd |= 0x20000000;
@@ -2231,9 +2263,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
current->thread.fpu.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
- convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+ convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+ (freg_t *) fpu->fprs);
else
- memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
return 0;
}
@@ -2242,9 +2275,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
/* make sure we have the latest values */
save_fpu_regs();
if (MACHINE_HAS_VX)
- convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+ convert_vx_to_fp((freg_t *) fpu->fprs,
+ (__vector128 *) vcpu->run->s.regs.vrs);
else
- memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+ memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
fpu->fpc = current->thread.fpu.fpc;
return 0;
}
@@ -2694,6 +2728,19 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
+ /*
+ * If userspace sets the riccb (e.g. after migration) to a valid state,
+ * we should enable RI here instead of doing the lazy enablement.
+ */
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
+ test_kvm_facility(vcpu->kvm, 64)) {
+ struct runtime_instr_cb *riccb =
+ (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
+
+ if (riccb->valid)
+ vcpu->arch.sie_block->ecb3 |= 0x01;
+ }
+
kvm_run->kvm_dirty_regs = 0;
}
@@ -2837,38 +2884,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return kvm_s390_store_status_unloaded(vcpu, addr);
}
-/*
- * store additional status at address
- */
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
- unsigned long gpa)
-{
- /* Only bits 0-53 are used for address formation */
- if (!(gpa & ~0x3ff))
- return 0;
-
- return write_guest_abs(vcpu, gpa & ~0x3ff,
- (void *)&vcpu->run->s.regs.vrs, 512);
-}
-
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
-{
- if (!test_kvm_facility(vcpu->kvm, 129))
- return 0;
-
- /*
- * The guest VXRS are in the host VXRs due to the lazy
- * copying in vcpu load/put. We can simply call save_fpu_regs()
- * to save the current register state because we are in the
- * middle of a load/put cycle.
- *
- * Let's update our copies before we save it into the save area.
- */
- save_fpu_regs();
-
- return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
-}
-
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index b8432862a817..3a4e97f1a9e6 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
+#include <asm/sclp.h>
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
@@ -245,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
@@ -273,10 +275,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu);
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
- unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
-int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
@@ -389,4 +388,13 @@ static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
return &sca->ipte_control;
}
+static inline int kvm_s390_use_sca_entries(void)
+{
+ /*
+ * Without SIGP interpretation, only SRS interpretation (if available)
+ * might use the entries. By not setting the entries and keeping them
+ * invalid, hardware will not access them but intercept.
+ */
+ return sclp.has_sigpif;
+}
#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 46160388e996..e18435355c16 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -32,6 +32,24 @@
#include "kvm-s390.h"
#include "trace.h"
+static int handle_ri(struct kvm_vcpu *vcpu)
+{
+ if (test_kvm_facility(vcpu->kvm, 64)) {
+ vcpu->arch.sie_block->ecb3 |= 0x01;
+ kvm_s390_retry_instr(vcpu);
+ return 0;
+ } else
+ return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+}
+
+int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
+{
+ if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
+ return handle_ri(vcpu);
+ else
+ return -EOPNOTSUPP;
+}
+
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -1093,6 +1111,9 @@ static int handle_stctg(struct kvm_vcpu *vcpu)
static const intercept_handler_t eb_handlers[256] = {
[0x2f] = handle_lctlg,
[0x25] = handle_stctg,
+ [0x60] = handle_ri,
+ [0x61] = handle_ri,
+ [0x62] = handle_ri,
};
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
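
handle_ri() is a lazy-enable pattern: the first runtime-instrumentation instruction intercepts, the handler flips the interpretation bit in ecb3 and retries the same instruction, so later uses run without exits. A self-contained model; struct vcpu, retry_instruction() and inject_operation_exception() are stand-ins, not kernel APIs:

struct vcpu { int has_ri_facility; unsigned char ecb3; };

#define ECB3_RI 0x01

static int inject_operation_exception(struct vcpu *v) { (void)v; return 0; }
static void retry_instruction(struct vcpu *v) { (void)v; /* rewind the PSW */ }

static int handle_ri_model(struct vcpu *v)
{
	if (!v->has_ri_facility)
		return inject_operation_exception(v);	/* RI not offered */
	v->ecb3 |= ECB3_RI;		/* hardware interprets RI from now on */
	retry_instruction(v);		/* re-run the intercepted instruction */
	return 0;
}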
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index c106488b4137..d8673e243f13 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* Validity 0x0044 will be checked by SIE */
if (rc)
goto unpin;
- scb_s->gvrd = hpa;
+ scb_s->riccbd = hpa;
}
return 0;
unpin:
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a58bca62a93b..661d9fe63c43 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -24,7 +24,7 @@
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
@@ -740,28 +740,21 @@ out:
put_task_struct(tsk);
}
-static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int pfault_cpu_dead(unsigned int cpu)
{
struct thread_struct *thread, *next;
struct task_struct *tsk;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DEAD:
- spin_lock_irq(&pfault_lock);
- list_for_each_entry_safe(thread, next, &pfault_list, list) {
- thread->pfault_wait = 0;
- list_del(&thread->list);
- tsk = container_of(thread, struct task_struct, thread);
- wake_up_process(tsk);
- put_task_struct(tsk);
- }
- spin_unlock_irq(&pfault_lock);
- break;
- default:
- break;
+ spin_lock_irq(&pfault_lock);
+ list_for_each_entry_safe(thread, next, &pfault_list, list) {
+ thread->pfault_wait = 0;
+ list_del(&thread->list);
+ tsk = container_of(thread, struct task_struct, thread);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
}
- return NOTIFY_OK;
+ spin_unlock_irq(&pfault_lock);
+ return 0;
}
static int __init pfault_irq_init(void)
@@ -775,7 +768,8 @@ static int __init pfault_irq_init(void)
if (rc)
goto out_pfault;
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
- hotcpu_notifier(pfault_cpu_notify, 0);
+ cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
+ NULL, pfault_cpu_dead);
return 0;
out_pfault:
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 2ce6bb3bab32..3ba622702ce4 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -94,6 +94,7 @@ out:
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
struct gmap *gmap;
+ unsigned long gmap_asce;
gmap = gmap_alloc(limit);
if (!gmap)
@@ -101,6 +102,11 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
gmap->mm = mm;
spin_lock(&mm->context.gmap_lock);
list_add_rcu(&gmap->list, &mm->context.gmap_list);
+ if (list_is_singular(&mm->context.gmap_list))
+ gmap_asce = gmap->asce;
+ else
+ gmap_asce = -1UL;
+ WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
spin_unlock(&mm->context.gmap_lock);
return gmap;
}
@@ -230,6 +236,7 @@ EXPORT_SYMBOL_GPL(gmap_put);
void gmap_remove(struct gmap *gmap)
{
struct gmap *sg, *next;
+ unsigned long gmap_asce;
/* Remove all shadow gmaps linked to this gmap */
if (!list_empty(&gmap->children)) {
@@ -243,6 +250,14 @@ void gmap_remove(struct gmap *gmap)
/* Remove gmap from the pre-mm list */
spin_lock(&gmap->mm->context.gmap_lock);
list_del_rcu(&gmap->list);
+ if (list_empty(&gmap->mm->context.gmap_list))
+ gmap_asce = 0;
+ else if (list_is_singular(&gmap->mm->context.gmap_list))
+ gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
+ struct gmap, list)->asce;
+ else
+ gmap_asce = -1UL;
+ WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
spin_unlock(&gmap->mm->context.gmap_lock);
synchronize_rcu();
/* Put reference */
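
gmap_create() and gmap_remove() keep mm->context.gmap_asce as a three-state hint for the TLB flush code: 0 when no gmap exists, the single guest ASCE when exactly one exists, and -1UL when the list is ambiguous. The encoding, restated as an illustrative helper:

static unsigned long gmap_asce_hint(int nr_gmaps, unsigned long only_asce)
{
	if (nr_gmaps == 0)
		return 0;		/* no guest mappings to flush */
	if (nr_gmaps == 1)
		return only_asce;	/* flush can target one ASCE */
	return -1UL;			/* several gmaps -> flush everything */
}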
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index af7cf28cf97e..44f150312a16 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -309,11 +309,11 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
int i;
if (test_facility(13)) {
- __ptep_ipte_range(address, nr - 1, pte);
+ __ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
return;
}
for (i = 0; i < nr; i++) {
- __ptep_ipte(address, pte);
+ __ptep_ipte(address, pte, IPTE_GLOBAL);
address += PAGE_SIZE;
pte++;
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5f092015aaa7..7a1897c51c54 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -35,9 +35,9 @@ static inline pte_t ptep_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __ptep_ipte_local(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_LOCAL);
else
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -56,7 +56,7 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
pte_val(*ptep) |= _PAGE_INVALID;
mm->context.flush_mm = 1;
} else
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -301,9 +301,9 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pmdp_idte_local(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_LOCAL);
else
- __pmdp_idte(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -322,7 +322,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
} else if (MACHINE_HAS_IDTE)
- __pmdp_idte(addr, pmdp);
+ __pmdp_idte(addr, pmdp, IDTE_GLOBAL);
else
__pmdp_csp(pmdp);
atomic_dec(&mm->context.flush_count);
@@ -374,9 +374,9 @@ static inline pud_t pudp_flush_direct(struct mm_struct *mm,
atomic_inc(&mm->context.flush_count);
if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
- __pudp_idte_local(addr, pudp);
+ __pudp_idte(addr, pudp, IDTE_LOCAL);
else
- __pudp_idte(addr, pudp);
+ __pudp_idte(addr, pudp, IDTE_GLOBAL);
atomic_dec(&mm->context.flush_count);
return old;
}
@@ -620,7 +620,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
pte = *ptep;
if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
pgste = pgste_pte_notify(mm, addr, ptep, pgste);
- __ptep_ipte(addr, ptep);
+ __ptep_ipte(addr, ptep, IPTE_GLOBAL);
if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
pte_val(pte) |= _PAGE_PROTECT;
else
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 871af75c69c2..15ffc19c8c0c 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -854,6 +854,15 @@ void zpci_stop_device(struct zpci_dev *zdev)
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
+int zpci_report_error(struct pci_dev *pdev,
+ struct zpci_report_error_header *report)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ return sclp_pci_report(report, zdev->fh, zdev->fid);
+}
+EXPORT_SYMBOL(zpci_report_error);
+
static inline int barsize(u8 size)
{
return (size) ? (1 << size) >> 10 : 0;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7297fce9bf80..7350c8bc13a2 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -129,12 +129,11 @@ void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
entry_clr_protected(entry);
}
-static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
- dma_addr_t dma_addr, size_t size, int flags)
+static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
{
unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
- dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
unsigned long *entry;
int i, rc = 0;
@@ -145,7 +144,7 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
if (!zdev->dma_table) {
rc = -EINVAL;
- goto no_refresh;
+ goto out_unlock;
}
for (i = 0; i < nr_pages; i++) {
@@ -159,20 +158,6 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
dma_addr += PAGE_SIZE;
}
- /*
- * With zdev->tlb_refresh == 0, rpcit is not required to establish new
- * translations when previously invalid translation-table entries are
- * validated. With lazy unmap, it also is skipped for previously valid
- * entries, but a global rpcit is then required before any address can
- * be re-used, i.e. after each iommu bitmap wrap-around.
- */
- if (!zdev->tlb_refresh &&
- (!s390_iommu_strict ||
- ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
- goto no_refresh;
-
- rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
undo_cpu_trans:
if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
flags = ZPCI_PTE_INVALID;
@@ -185,12 +170,46 @@ undo_cpu_trans:
dma_update_cpu_trans(entry, page_addr, flags);
}
}
-
-no_refresh:
+out_unlock:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
return rc;
}
+static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
+ size_t size, int flags)
+{
+ /*
+ * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+ * translations when previously invalid translation-table entries are
+ * validated. With lazy unmap, it also is skipped for previously valid
+ * entries, but a global rpcit is then required before any address can
+ * be re-used, i.e. after each iommu bitmap wrap-around.
+ */
+ if (!zdev->tlb_refresh &&
+ (!s390_iommu_strict ||
+ ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
+ return 0;
+
+ return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ int rc;
+
+ rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (rc)
+ return rc;
+
+ rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
+
+ return rc;
+}
+
void dma_free_seg_table(unsigned long entry)
{
unsigned long *sto = get_rt_sto(entry);
@@ -230,45 +249,54 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
boundary_size, 0);
}
-static unsigned long dma_alloc_iommu(struct device *dev, int size)
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long offset, flags;
- int wrap = 0;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
if (offset == -1) {
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ /* global flush before DMA addresses are reused */
+ if (zpci_refresh_global(zdev))
+ goto out_error;
+
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ }
/* wrap-around */
offset = __dma_alloc_iommu(dev, 0, size);
- wrap = 1;
+ if (offset == -1)
+ goto out_error;
}
+ zdev->next_bit = offset + size;
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- if (offset != -1) {
- zdev->next_bit = offset + size;
- if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
- /* global flush after wrap-around with lazy unmap */
- zpci_refresh_global(zdev);
- }
+ return zdev->start_dma + offset * PAGE_SIZE;
+
+out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return offset;
+ return DMA_ERROR_CODE;
}
-static void dma_free_iommu(struct device *dev, unsigned long offset, int size)
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long flags;
+ unsigned long flags, offset;
+
+ offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
if (!zdev->iommu_bitmap)
goto out;
- bitmap_clear(zdev->iommu_bitmap, offset, size);
- /*
- * Lazy flush for unmap: need to move next_bit to avoid address re-use
- * until wrap-around.
- */
- if (!s390_iommu_strict && offset >= zdev->next_bit)
- zdev->next_bit = offset + size;
+
+ if (zdev->tlb_refresh || s390_iommu_strict)
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ else
+ bitmap_set(zdev->lazy_bitmap, offset, size);
+
out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
@@ -289,16 +317,16 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long nr_pages, iommu_page_index;
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
+ unsigned long nr_pages;
dma_addr_t dma_addr;
int ret;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
- iommu_page_index = dma_alloc_iommu(dev, nr_pages);
- if (iommu_page_index == -1) {
+ dma_addr = dma_alloc_address(dev, nr_pages);
+ if (dma_addr == DMA_ERROR_CODE) {
ret = -ENOSPC;
goto out_err;
}
@@ -306,12 +334,6 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
- dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma) {
- ret = -ERANGE;
- goto out_free;
- }
-
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
@@ -323,7 +345,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
return dma_addr + (offset & ~PAGE_MASK);
out_free:
- dma_free_iommu(dev, iommu_page_index, nr_pages);
+ dma_free_address(dev, dma_addr, nr_pages);
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
@@ -335,7 +357,6 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
- unsigned long iommu_page_index;
int npages, ret;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -349,8 +370,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
}
atomic64_add(npages, &zdev->unmapped_pages);
- iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
- dma_free_iommu(dev, iommu_page_index, npages);
+ dma_free_address(dev, dma_addr, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
@@ -394,37 +414,98 @@ static void s390_dma_free(struct device *dev, size_t size,
free_pages((unsigned long) pa, get_order(size));
}
-static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nr_elements, enum dma_data_direction dir,
- unsigned long attrs)
+/* Map a segment into a contiguous dma address area */
+static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
{
- int mapped_elements = 0;
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ dma_addr_t dma_addr_base, dma_addr;
+ int flags = ZPCI_PTE_VALID;
struct scatterlist *s;
- int i;
+ unsigned long pa;
+ int ret;
- for_each_sg(sg, s, nr_elements, i) {
- struct page *page = sg_page(s);
- s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, 0);
- if (!dma_mapping_error(dev, s->dma_address)) {
- s->dma_length = s->length;
- mapped_elements++;
- } else
+ size = PAGE_ALIGN(size);
+ dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+ if (dma_addr_base == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ dma_addr = dma_addr_base;
+ if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
+ pa = page_to_phys(sg_page(s)) + s->offset;
+ ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+ if (ret)
goto unmap;
+
+ dma_addr += s->length;
}
-out:
- return mapped_elements;
+ ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
+ if (ret)
+ goto unmap;
+
+ *handle = dma_addr_base;
+ atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+
+ return ret;
unmap:
- for_each_sg(sg, s, mapped_elements, i) {
- if (s->dma_address)
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, 0);
- s->dma_address = 0;
+ dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
+ ZPCI_PTE_INVALID);
+ dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+ zpci_err("map error:\n");
+ zpci_err_dma(ret, pa);
+ return ret;
+}
+
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s = sg, *start = sg, *dma = sg;
+ unsigned int max = dma_get_max_seg_size(dev);
+ unsigned int size = s->offset + s->length;
+ unsigned int offset = s->offset;
+ int count = 0, i;
+
+ for (i = 1; i < nr_elements; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) ||
+ size + s->length > max) {
+ if (__s390_dma_map_sg(dev, start, size,
+ &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count++;
+ }
+ size += s->length;
}
- mapped_elements = 0;
- goto out;
+ if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count + 1;
+unmap:
+ for_each_sg(sg, s, count, i)
+ s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
+ dir, attrs);
+
+ return 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
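
The s390_dma_map_sg() rework above coalesces scatterlist entries into one contiguous DMA address range where possible; a new range is started whenever an element does not start on a page boundary, the accumulated size is not page aligned, or the device's maximum segment size would be exceeded. The merge rule as an illustrative predicate (PAGE_SIZE assumed 4K here):

#define PAGE_SIZE 4096U
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Can scatterlist element (s_offset, s_length) be appended to a DMA
 * segment that currently holds cur_size bytes? This is the negation
 * of the split condition in s390_dma_map_sg() above. */
static int can_merge(unsigned int cur_size, unsigned int s_offset,
		     unsigned int s_length, unsigned int max_seg)
{
	return s_offset == 0 &&
	       (cur_size & ~PAGE_MASK) == 0 &&
	       cur_size + s_length <= max_seg;
}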
@@ -435,8 +516,9 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
- 0);
+ if (s->dma_length)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, attrs);
s->dma_address = 0;
s->dma_length = 0;
}
@@ -482,7 +564,14 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
rc = -ENOMEM;
goto free_dma_table;
}
+ if (!zdev->tlb_refresh && !s390_iommu_strict) {
+ zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ if (!zdev->lazy_bitmap) {
+ rc = -ENOMEM;
+ goto free_bitmap;
+ }
+ }
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
@@ -492,6 +581,8 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
free_bitmap:
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
free_dma_table:
dma_free_cpu_table(zdev->dma_table);
zdev->dma_table = NULL;
@@ -513,6 +604,9 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
zdev->dma_table = NULL;
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
+
zdev->next_bit = 0;
}