Diffstat (limited to 'arch/s390')
-rw-r--r-- arch/s390/Kconfig | 28
-rw-r--r-- arch/s390/boot/startup.c | 2
-rw-r--r-- arch/s390/crypto/aes_s390.c | 609
-rw-r--r-- arch/s390/crypto/des_s390.c | 419
-rw-r--r-- arch/s390/crypto/paes_s390.c | 414
-rw-r--r-- arch/s390/crypto/sha_common.c | 7
-rw-r--r-- arch/s390/include/asm/alternative.h | 4
-rw-r--r-- arch/s390/include/asm/bug.h | 4
-rw-r--r-- arch/s390/include/asm/ctl_reg.h | 1
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 1
-rw-r--r-- arch/s390/include/asm/page.h | 2
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 16
-rw-r--r-- arch/s390/include/asm/pgtable.h | 97
-rw-r--r-- arch/s390/include/asm/processor.h | 2
-rw-r--r-- arch/s390/include/asm/qdio.h | 1
-rw-r--r-- arch/s390/include/asm/spinlock.h | 2
-rw-r--r-- arch/s390/include/asm/stacktrace.h | 2
-rw-r--r-- arch/s390/include/asm/timex.h | 17
-rw-r--r-- arch/s390/include/asm/unwind.h | 1
-rw-r--r-- arch/s390/kernel/dis.c | 13
-rw-r--r-- arch/s390/kernel/early.c | 38
-rw-r--r-- arch/s390/kernel/head64.S | 18
-rw-r--r-- arch/s390/kernel/idle.c | 29
-rw-r--r-- arch/s390/kernel/perf_cpum_cf.c | 21
-rw-r--r-- arch/s390/kernel/perf_cpum_cf_diag.c | 10
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 104
-rw-r--r-- arch/s390/kernel/perf_event.c | 8
-rw-r--r-- arch/s390/kernel/process.c | 36
-rw-r--r-- arch/s390/kernel/smp.c | 80
-rw-r--r-- arch/s390/kernel/time.c | 9
-rw-r--r-- arch/s390/kernel/unwind_bc.c | 32
-rw-r--r-- arch/s390/kernel/vmlinux.lds.S | 12
-rw-r--r-- arch/s390/kernel/vtime.c | 4
-rw-r--r-- arch/s390/kvm/diag.c | 22
-rw-r--r-- arch/s390/kvm/interrupt.c | 5
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 19
-rw-r--r-- arch/s390/lib/spinlock.c | 4
-rw-r--r-- arch/s390/mm/cmm.c | 12
-rw-r--r-- arch/s390/mm/init.c | 1
-rw-r--r-- arch/s390/mm/maccess.c | 12
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 502
41 files changed, 1304 insertions, 1316 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43a81d0ad507..f0df9e48e651 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -246,8 +246,8 @@ choice
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z900_FEATURES
+ depends on $(cc-option,-march=z900)
help
Select this to enable optimizations for model z800/z900 (2064 and
2066 series). This will enable some optimizations that are not
@@ -255,8 +255,8 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z990_FEATURES
+ depends on $(cc-option,-march=z990)
help
Select this to enable optimizations for model z890/z990 (2084 and
2086 series). The kernel will be slightly faster but will not work
@@ -264,8 +264,8 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
- depends on !CC_IS_CLANG
select HAVE_MARCH_Z9_109_FEATURES
+ depends on $(cc-option,-march=z9-109)
help
Select this to enable optimizations for IBM System z9 (2094 and
2096 series). The kernel will be slightly faster but will not work
@@ -274,6 +274,7 @@ config MARCH_Z9_109
config MARCH_Z10
bool "IBM System z10"
select HAVE_MARCH_Z10_FEATURES
+ depends on $(cc-option,-march=z10)
help
Select this to enable optimizations for IBM System z10 (2097 and
2098 series). The kernel will be slightly faster but will not work
@@ -282,6 +283,7 @@ config MARCH_Z10
config MARCH_Z196
bool "IBM zEnterprise 114 and 196"
select HAVE_MARCH_Z196_FEATURES
+ depends on $(cc-option,-march=z196)
help
Select this to enable optimizations for IBM zEnterprise 114 and 196
(2818 and 2817 series). The kernel will be slightly faster but will
@@ -290,6 +292,7 @@ config MARCH_Z196
config MARCH_ZEC12
bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES
+ depends on $(cc-option,-march=zEC12)
help
Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
2827 series). The kernel will be slightly faster but will not work on
@@ -298,6 +301,7 @@ config MARCH_ZEC12
config MARCH_Z13
bool "IBM z13s and z13"
select HAVE_MARCH_Z13_FEATURES
+ depends on $(cc-option,-march=z13)
help
Select this to enable optimizations for IBM z13s and z13 (2965 and
2964 series). The kernel will be slightly faster but will not work on
@@ -306,6 +310,7 @@ config MARCH_Z13
config MARCH_Z14
bool "IBM z14 ZR1 and z14"
select HAVE_MARCH_Z14_FEATURES
+ depends on $(cc-option,-march=z14)
help
Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
and 3906 series). The kernel will be slightly faster but will not
@@ -314,6 +319,7 @@ config MARCH_Z14
config MARCH_Z15
bool "IBM z15"
select HAVE_MARCH_Z15_FEATURES
+ depends on $(cc-option,-march=z15)
help
Select this to enable optimizations for IBM z15 (8562
and 8561 series). The kernel will be slightly faster but will not
@@ -367,33 +373,39 @@ config TUNE_DEFAULT
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z900)
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z990)
config TUNE_Z9_109
bool "IBM System z9"
- depends on !CC_IS_CLANG
+ depends on $(cc-option,-mtune=z9-109)
config TUNE_Z10
bool "IBM System z10"
+ depends on $(cc-option,-mtune=z10)
config TUNE_Z196
bool "IBM zEnterprise 114 and 196"
+ depends on $(cc-option,-mtune=z196)
config TUNE_ZEC12
bool "IBM zBC12 and zEC12"
+ depends on $(cc-option,-mtune=zEC12)
config TUNE_Z13
- bool "IBM z13"
+ bool "IBM z13s and z13"
+ depends on $(cc-option,-mtune=z13)
config TUNE_Z14
- bool "IBM z14"
+ bool "IBM z14 ZR1 and z14"
+ depends on $(cc-option,-mtune=z14)
config TUNE_Z15
bool "IBM z15"
+ depends on $(cc-option,-mtune=z15)
endchoice
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 5367950510f6..fbd341ea03b8 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -46,7 +46,7 @@ struct diag_ops __bootdata_preserved(diag_dma_ops) = {
.diag0c = _diag0c_dma,
.diag308_reset = _diag308_reset_dma
};
-static struct diag210 _diag210_tmp_dma __section(".dma.data");
+static struct diag210 _diag210_tmp_dma __section(.dma.data);
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
void _swsusp_reset_dma(void);
unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
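Note on the __section() change above: in kernels of this vintage the helper macro stringifies its argument itself, so passing an already-quoted name would embed literal quote characters in the ELF section name. A minimal sketch of the mechanism (macro body as in the compiler attribute headers of that era; the example variables are hypothetical):

	/* pre-5.10 definition: the argument is stringified by the macro */
	#define __section(S) __attribute__((__section__(#S)))

	static int good __section(.dma.data);	/* section ".dma.data" */
	/* static int bad __section(".dma.data");
	 * would name the section "\".dma.data\"", quotes included */
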
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 9803e96d2924..ead0b2c9881d 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
int key_len;
unsigned long fc;
union {
- struct crypto_sync_skcipher *blk;
+ struct crypto_skcipher *skcipher;
struct crypto_cipher *cip;
} fallback;
};
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
u8 pcc_key[32];
int key_len;
unsigned long fc;
- struct crypto_sync_skcipher *fallback;
+ struct crypto_skcipher *fallback;
};
struct gcm_sg_walk {
@@ -178,66 +178,41 @@ static struct crypto_alg aes_alg = {
}
};
-static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
+static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
-
- crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
- CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
-}
-
-static int fallback_blk_dec(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- unsigned int ret;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
-
- skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_decrypt(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ int ret;
- skcipher_request_zero(req);
+ crypto_skcipher_clear_flags(sctx->fallback.skcipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(sctx->fallback.skcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(sctx->fallback.skcipher) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
-static int fallback_blk_enc(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
+ struct skcipher_request *req,
+ unsigned long modifier)
{
- unsigned int ret;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
-
- skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
- ret = crypto_skcipher_encrypt(req);
- return ret;
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
}
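The pattern above replaces the old on-stack SYNC_SKCIPHER_REQUEST_ON_STACK fallback: a nested subrequest now lives in the driver's per-request context (sized for it via crypto_skcipher_set_reqsize() in the init hook further down), so the fallback no longer has to be synchronous. A condensed sketch with illustrative names (fwd_to_fallback is not from the patch):

	static int fwd_to_fallback(struct crypto_skcipher *fallback,
				   struct skcipher_request *req, bool decrypt)
	{
		/* lands in the space reserved by crypto_skcipher_set_reqsize() */
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;			/* inherit src/dst/len/iv/flags */
		skcipher_request_set_tfm(subreq, fallback);
		return decrypt ? crypto_skcipher_decrypt(subreq)
			       : crypto_skcipher_encrypt(subreq);
	}
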
-static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -248,111 +223,92 @@ static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
-static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, modifier);
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(sctx->fc | modifier, sctx->key,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
-
return ret;
}
-static int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, 0, &walk);
+ return ecb_aes_crypt(req, 0);
}
-static int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return ecb_aes_crypt(req, CPACF_DECRYPT);
}
-static int fallback_init_blk(struct crypto_tfm *tfm)
+static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
- const char *name = tfm->__crt_alg->cra_name;
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
- sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
- if (IS_ERR(sctx->fallback.blk)) {
+ if (IS_ERR(sctx->fallback.skcipher)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
name);
- return PTR_ERR(sctx->fallback.blk);
+ return PTR_ERR(sctx->fallback.skcipher);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(sctx->fallback.skcipher));
return 0;
}
-static void fallback_exit_blk(struct crypto_tfm *tfm)
+static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(sctx->fallback.blk);
+ crypto_free_skcipher(sctx->fallback.skcipher);
}
-static struct crypto_alg ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s390",
- .cra_priority = 401, /* combo: aes + ecb + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = ecb_aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+static struct skcipher_alg ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ecb-aes-s390",
+ .base.cra_priority = 401, /* combo: aes + ecb + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = ecb_aes_set_key,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
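With the blkcipher entry points gone, consumers reach this driver through the request-based skcipher API. A minimal usage sketch, error handling trimmed; key, sg and len are assumed to exist:

	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, NULL);	/* ECB: no IV */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
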
-static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -363,17 +319,18 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
return 0;
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
@@ -381,134 +338,74 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 key[AES_MAX_KEY_SIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, modifier);
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
memcpy(param.key, sctx->key, sctx->key_len);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_kmc(sctx->fc | modifier, &param,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
- memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
-static int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, 0, &walk);
+ return cbc_aes_crypt(req, 0);
}
-static int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return cbc_aes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
- }
- }
+static struct skcipher_alg cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "cbc-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_aes_set_key,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
};
-static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int len)
-{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- unsigned int ret;
-
- crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
- CRYPTO_TFM_REQ_MASK);
- crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
-
- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
- CRYPTO_TFM_RES_MASK;
-
- return ret;
-}
-
-static int xts_fallback_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
- unsigned int ret;
-
- skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_decrypt(req);
-
- skcipher_request_zero(req);
- return ret;
-}
-
-static int xts_fallback_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int len)
{
- struct crypto_blkcipher *tfm = desc->tfm;
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
- unsigned int ret;
-
- skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
- skcipher_request_set_callback(req, desc->flags, NULL, NULL);
- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
-
- ret = crypto_skcipher_encrypt(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- skcipher_request_zero(req);
+ crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(xts_ctx->fallback,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(xts_ctx->fallback) &
+ CRYPTO_TFM_RES_MASK);
return ret;
}
-static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
int err;
@@ -518,7 +415,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* In fips mode only 128 bit or 256 bit keys are valid */
if (fips_enabled && key_len != 32 && key_len != 64) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -539,10 +436,11 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
-static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int offset, nbytes, n;
int ret;
struct {
@@ -557,113 +455,100 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 init[16];
} xts_param;
- ret = blkcipher_walk_virt(desc, walk);
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
+ struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+ *subreq = *req;
+ skcipher_request_set_tfm(subreq, xts_ctx->fallback);
+ return (modifier & CPACF_DECRYPT) ?
+ crypto_skcipher_decrypt(subreq) :
+ crypto_skcipher_encrypt(subreq);
+ }
+
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
offset = xts_ctx->key_len & 0x10;
memset(pcc_param.block, 0, sizeof(pcc_param.block));
memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
- memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
memcpy(xts_param.init, pcc_param.xts, 16);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
-static int xts_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_aes_encrypt(struct skcipher_request *req)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (!nbytes)
- return -EINVAL;
-
- if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
- return xts_fallback_encrypt(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, 0, &walk);
+ return xts_aes_crypt(req, 0);
}
-static int xts_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_aes_decrypt(struct skcipher_request *req)
{
- struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (!nbytes)
- return -EINVAL;
-
- if (unlikely(!xts_ctx->fc || (nbytes % XTS_BLOCK_SIZE) != 0))
- return xts_fallback_decrypt(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
+ return xts_aes_crypt(req, CPACF_DECRYPT);
}
-static int xts_fallback_init(struct crypto_tfm *tfm)
+static int xts_fallback_init(struct crypto_skcipher *tfm)
{
- const char *name = tfm->__crt_alg->cra_name;
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
- xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
+ xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
+ CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
if (IS_ERR(xts_ctx->fallback)) {
pr_err("Allocating XTS fallback algorithm %s failed\n",
name);
return PTR_ERR(xts_ctx->fallback);
}
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(xts_ctx->fallback));
return 0;
}
-static void xts_fallback_exit(struct crypto_tfm *tfm)
+static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
- struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
+ struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
- crypto_free_sync_skcipher(xts_ctx->fallback);
+ crypto_free_skcipher(xts_ctx->fallback);
}
-static struct crypto_alg xts_aes_alg = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "xts-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_xts_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = xts_fallback_init,
- .cra_exit = xts_fallback_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_aes_set_key,
- .encrypt = xts_aes_encrypt,
- .decrypt = xts_aes_decrypt,
- }
- }
+static struct skcipher_alg xts_aes_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "xts-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_xts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = xts_fallback_init,
+ .exit = xts_fallback_exit,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_aes_set_key,
+ .encrypt = xts_aes_encrypt,
+ .decrypt = xts_aes_decrypt,
};
-static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
unsigned long fc;
/* Pick the correct function code based on the key length */
@@ -674,7 +559,7 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* Check if the function code is available */
sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
if (!sctx->fc)
- return setkey_fallback_blk(tfm, in_key, key_len);
+ return setkey_fallback_skcipher(tfm, in_key, key_len);
sctx->key_len = key_len;
memcpy(sctx->key, in_key, key_len);
@@ -696,30 +581,34 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
-static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ctr_aes_crypt(struct skcipher_request *req)
{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
+ if (unlikely(!sctx->fc))
+ return fallback_skcipher_crypt(sctx, req, 0);
+
locked = mutex_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
+
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
- cpacf_kmctr(sctx->fc | modifier, sctx->key,
- walk->dst.virt.addr, walk->src.virt.addr,
- n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
@@ -727,67 +616,33 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
- cpacf_kmctr(sctx->fc | modifier, sctx->key,
- buf, walk->src.virt.addr,
- AES_BLOCK_SIZE, walk->iv);
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
+ AES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_enc(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, 0, &walk);
-}
-
-static int ctr_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
-
- if (unlikely(!sctx->fc))
- return fallback_blk_dec(desc, dst, src, nbytes);
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
-}
-
-static struct crypto_alg ctr_aes_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s390",
- .cra_priority = 402, /* ecb-aes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_blk,
- .cra_exit = fallback_exit_blk,
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_aes_set_key,
- .encrypt = ctr_aes_encrypt,
- .decrypt = ctr_aes_decrypt,
- }
- }
+static struct skcipher_alg ctr_aes_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ctr-aes-s390",
+ .base.cra_priority = 402, /* ecb-aes-s390 + 1 */
+ .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .init = fallback_init_skcipher,
+ .exit = fallback_exit_skcipher,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_aes_set_key,
+ .encrypt = ctr_aes_crypt,
+ .decrypt = ctr_aes_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
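Since CTR encryption and decryption apply the same keystream XOR, the converted algorithm wires both .encrypt and .decrypt to ctr_aes_crypt and drops the CPACF_DECRYPT modifier; .chunksize advertises the keystream granularity, and the final sub-block is bounced through a stack buffer as shown above. The ctrblk page lets a single cpacf_kmctr() call consume many blocks; an illustrative prefill loop, modeled on (but not copied from) __ctrblk_init():

	static unsigned int ctrblk_fill(u8 *ctrptr, const u8 *iv,
					unsigned int nbytes)
	{
		unsigned int i, n;

		/* first counter value is the current IV */
		memcpy(ctrptr, iv, AES_BLOCK_SIZE);
		/* whole blocks only, at most one page */
		n = min_t(unsigned int, nbytes, PAGE_SIZE) &
			~(AES_BLOCK_SIZE - 1);
		for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
			memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
			crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
		}
		return n;
	}
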
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -1116,24 +971,27 @@ static struct aead_alg gcm_aes_aead = {
},
};
-static struct crypto_alg *aes_s390_algs_ptr[5];
-static int aes_s390_algs_num;
+static struct crypto_alg *aes_s390_alg;
+static struct skcipher_alg *aes_s390_skcipher_algs[4];
+static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
-static int aes_s390_register_alg(struct crypto_alg *alg)
+static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
int ret;
- ret = crypto_register_alg(alg);
+ ret = crypto_register_skcipher(alg);
if (!ret)
- aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
+ aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
return ret;
}
static void aes_s390_fini(void)
{
- while (aes_s390_algs_num--)
- crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
+ if (aes_s390_alg)
+ crypto_unregister_alg(aes_s390_alg);
+ while (aes_s390_skciphers_num--)
+ crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
@@ -1154,10 +1012,11 @@ static int __init aes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
- ret = aes_s390_register_alg(&aes_alg);
+ ret = crypto_register_alg(&aes_alg);
if (ret)
goto out_err;
- ret = aes_s390_register_alg(&ecb_aes_alg);
+ aes_s390_alg = &aes_alg;
+ ret = aes_s390_register_skcipher(&ecb_aes_alg);
if (ret)
goto out_err;
}
@@ -1165,14 +1024,14 @@ static int __init aes_s390_init(void)
if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
- ret = aes_s390_register_alg(&cbc_aes_alg);
+ ret = aes_s390_register_skcipher(&cbc_aes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
- ret = aes_s390_register_alg(&xts_aes_alg);
+ ret = aes_s390_register_skcipher(&xts_aes_alg);
if (ret)
goto out_err;
}
@@ -1185,7 +1044,7 @@ static int __init aes_s390_init(void)
ret = -ENOMEM;
goto out_err;
}
- ret = aes_s390_register_alg(&ctr_aes_alg);
+ ret = aes_s390_register_skcipher(&ctr_aes_alg);
if (ret)
goto out_err;
}
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 439b100c6f2e..bfbafd35bcbd 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
#include <asm/cpacf.h>
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
@@ -45,6 +46,12 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
+}
+
static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -79,28 +86,30 @@ static struct crypto_alg des_alg = {
}
};
-static int ecb_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_km(fc, ctx->key, walk->dst.virt.addr,
- walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ cpacf_km(fc, ctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
return ret;
}
-static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n;
int ret;
struct {
@@ -108,99 +117,69 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
u8 key[DES3_KEY_SIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
memcpy(param.key, ctx->key, DES3_KEY_SIZE);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_kmc(fc, &param, walk->dst.virt.addr,
- walk->src.virt.addr, n);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ cpacf_kmc(fc, &param, walk.dst.virt.addr,
+ walk.src.virt.addr, n);
+ memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
- memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
return ret;
}
-static int ecb_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_DEA);
}
-static int ecb_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_DEA | CPACF_DECRYPT, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
}
-static struct crypto_alg ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ecb-des-s390",
- .cra_priority = 400, /* combo: des + ecb */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_setkey,
- .encrypt = ecb_des_encrypt,
- .decrypt = ecb_des_decrypt,
- }
- }
+static struct skcipher_alg ecb_des_alg = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ecb-des-s390",
+ .base.cra_priority = 400, /* combo: des + ecb */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = ecb_des_encrypt,
+ .decrypt = ecb_des_decrypt,
};
-static int cbc_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_DEA);
}
-static int cbc_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_DEA | CPACF_DECRYPT, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
}
-static struct crypto_alg cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "cbc-des-s390",
- .cra_priority = 400, /* combo: des + cbc */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey,
- .encrypt = cbc_des_encrypt,
- .decrypt = cbc_des_decrypt,
- }
- }
+static struct skcipher_alg cbc_des_alg = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "cbc-des-s390",
+ .base.cra_priority = 400, /* combo: des + cbc */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = cbc_des_encrypt,
+ .decrypt = cbc_des_decrypt,
};
/*
@@ -232,6 +211,12 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
return 0;
}
+static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
+}
+
static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -266,87 +251,53 @@ static struct crypto_alg des3_alg = {
}
};
-static int ecb_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des3_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192, &walk);
+ return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
}
-static int ecb_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_des3_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_desall_crypt(desc, CPACF_KM_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
}
-static struct crypto_alg ecb_des3_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + ecb */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .setkey = des3_setkey,
- .encrypt = ecb_des3_encrypt,
- .decrypt = ecb_des3_decrypt,
- }
- }
+static struct skcipher_alg ecb_des3_alg = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ecb-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + ecb */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = ecb_des3_encrypt,
+ .decrypt = ecb_des3_decrypt,
};
-static int cbc_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des3_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192, &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
}
-static int cbc_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_des3_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, CPACF_KMC_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
}
-static struct crypto_alg cbc_des3_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + cbc */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey,
- .encrypt = cbc_des3_encrypt,
- .decrypt = cbc_des3_decrypt,
- }
- }
+static struct skcipher_alg cbc_des3_alg = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "cbc-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + cbc */
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = cbc_des3_encrypt,
+ .decrypt = cbc_des3_decrypt,
};
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
@@ -364,128 +315,90 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
-static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
- struct blkcipher_walk *walk)
+static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[DES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int n, nbytes;
int ret, locked;
locked = mutex_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
n = DES_BLOCK_SIZE;
if (nbytes >= 2*DES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk->iv;
- cpacf_kmctr(fc, ctx->key, walk->dst.virt.addr,
- walk->src.virt.addr, n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + n - DES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(walk->iv, DES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (locked)
mutex_unlock(&ctrblk_lock);
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
- cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
- DES_BLOCK_SIZE, walk->iv);
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, DES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
+ DES_BLOCK_SIZE, walk.iv);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, DES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA, &walk);
-}
-
-static int ctr_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_des_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_DEA | CPACF_DECRYPT, &walk);
+ return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
}
-static struct crypto_alg ctr_des_alg = {
- .cra_name = "ctr(des)",
- .cra_driver_name = "ctr-des-s390",
- .cra_priority = 400, /* combo: des + ctr */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey,
- .encrypt = ctr_des_encrypt,
- .decrypt = ctr_des_decrypt,
- }
- }
+static struct skcipher_alg ctr_des_alg = {
+ .base.cra_name = "ctr(des)",
+ .base.cra_driver_name = "ctr-des-s390",
+ .base.cra_priority = 400, /* combo: des + ctr */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = ctr_des_crypt,
+ .decrypt = ctr_des_crypt,
+ .chunksize = DES_BLOCK_SIZE,
};
-static int ctr_des3_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192, &walk);
-}
-
-static int ctr_des3_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ctr_des3_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_desall_crypt(desc, CPACF_KMCTR_TDEA_192 | CPACF_DECRYPT,
- &walk);
+ return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
}
-static struct crypto_alg ctr_des3_alg = {
- .cra_name = "ctr(des3_ede)",
- .cra_driver_name = "ctr-des3_ede-s390",
- .cra_priority = 400, /* combo: des3 + ede */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey,
- .encrypt = ctr_des3_encrypt,
- .decrypt = ctr_des3_decrypt,
- }
- }
+static struct skcipher_alg ctr_des3_alg = {
+ .base.cra_name = "ctr(des3_ede)",
+ .base.cra_driver_name = "ctr-des3_ede-s390",
+ .base.cra_priority = 400, /* combo: des3 + ede */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .min_keysize = DES3_KEY_SIZE,
+ .max_keysize = DES3_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_setkey_skcipher,
+ .encrypt = ctr_des3_crypt,
+ .decrypt = ctr_des3_crypt,
+ .chunksize = DES_BLOCK_SIZE,
};
-static struct crypto_alg *des_s390_algs_ptr[8];
+static struct crypto_alg *des_s390_algs_ptr[2];
static int des_s390_algs_num;
+static struct skcipher_alg *des_s390_skciphers_ptr[6];
+static int des_s390_skciphers_num;
static int des_s390_register_alg(struct crypto_alg *alg)
{
@@ -497,10 +410,22 @@ static int des_s390_register_alg(struct crypto_alg *alg)
return ret;
}
+static int des_s390_register_skcipher(struct skcipher_alg *alg)
+{
+ int ret;
+
+ ret = crypto_register_skcipher(alg);
+ if (!ret)
+ des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
+ return ret;
+}
+
static void des_s390_exit(void)
{
while (des_s390_algs_num--)
crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
+ while (des_s390_skciphers_num--)
+ crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
if (ctrblk)
free_page((unsigned long) ctrblk);
}
@@ -518,12 +443,12 @@ static int __init des_s390_init(void)
ret = des_s390_register_alg(&des_alg);
if (ret)
goto out_err;
- ret = des_s390_register_alg(&ecb_des_alg);
+ ret = des_s390_register_skcipher(&ecb_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
- ret = des_s390_register_alg(&cbc_des_alg);
+ ret = des_s390_register_skcipher(&cbc_des_alg);
if (ret)
goto out_err;
}
@@ -531,12 +456,12 @@ static int __init des_s390_init(void)
ret = des_s390_register_alg(&des3_alg);
if (ret)
goto out_err;
- ret = des_s390_register_alg(&ecb_des3_alg);
+ ret = des_s390_register_skcipher(&ecb_des3_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
- ret = des_s390_register_alg(&cbc_des3_alg);
+ ret = des_s390_register_skcipher(&cbc_des3_alg);
if (ret)
goto out_err;
}
@@ -551,12 +476,12 @@ static int __init des_s390_init(void)
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
- ret = des_s390_register_alg(&ctr_des_alg);
+ ret = des_s390_register_skcipher(&ctr_des_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
- ret = des_s390_register_alg(&ctr_des3_alg);
+ ret = des_s390_register_skcipher(&ctr_des3_alg);
if (ret)
goto out_err;
}
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 6184dceed340..c7119c617b6e 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -21,6 +21,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>
@@ -123,27 +124,27 @@ static int __paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int ecb_paes_init(struct crypto_tfm *tfm)
+static int ecb_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void ecb_paes_exit(struct crypto_tfm *tfm)
+static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
-static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -151,91 +152,75 @@ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
}
-static int ecb_paes_crypt(struct blkcipher_desc *desc,
- unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
- ret = blkcipher_walk_virt(desc, walk);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
- walk->dst.virt.addr, walk->src.virt.addr, n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
}
return ret;
}
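The k < n retry visible above is the paes idiom: cpacf_km() can stop short when the protected key has been invalidated (for example after a suspend/resume cycle), in which case __paes_set_key() derives a fresh protected key from the stored secure key and the walk resumes. A condensed sketch, with process() and refresh_key() standing in for the CPACF call and the key reconversion:

	while ((nbytes = walk.nbytes) != 0) {
		n = nbytes & ~(AES_BLOCK_SIZE - 1);	/* whole blocks */
		k = process(ctx, walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)					/* consume progress */
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n && refresh_key(ctx) != 0)	/* key revoked */
			return skcipher_walk_done(&walk, -EIO);
	}
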
-static int ecb_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_paes_crypt(desc, CPACF_ENCRYPT, &walk);
+ return ecb_paes_crypt(req, 0);
}
-static int ecb_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int ecb_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ecb_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return ecb_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg ecb_paes_alg = {
- .cra_name = "ecb(paes)",
- .cra_driver_name = "ecb-paes-s390",
- .cra_priority = 401, /* combo: aes + ecb + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
- .cra_init = ecb_paes_init,
- .cra_exit = ecb_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .setkey = ecb_paes_set_key,
- .encrypt = ecb_paes_encrypt,
- .decrypt = ecb_paes_decrypt,
- }
- }
+static struct skcipher_alg ecb_paes_alg = {
+ .base.cra_name = "ecb(paes)",
+ .base.cra_driver_name = "ecb-paes-s390",
+ .base.cra_priority = 401, /* combo: aes + ecb + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
+ .init = ecb_paes_init,
+ .exit = ecb_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .setkey = ecb_paes_set_key,
+ .encrypt = ecb_paes_encrypt,
+ .decrypt = ecb_paes_decrypt,
};
-static int cbc_paes_init(struct crypto_tfm *tfm)
+static int cbc_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void cbc_paes_exit(struct crypto_tfm *tfm)
+static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
@@ -258,11 +243,11 @@ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -270,16 +255,17 @@ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__cbc_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
}
-static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
struct {
@@ -287,73 +273,60 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 key[MAXPROTKEYSIZE];
} param;
- ret = blkcipher_walk_virt(desc, walk);
- memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+ memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | modifier, &param,
- walk->dst.virt.addr, walk->src.virt.addr, n);
- if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
+ if (k) {
+ memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - k);
+ }
if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
}
}
- memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
return ret;
}
-static int cbc_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_paes_crypt(desc, 0, &walk);
+ return cbc_paes_crypt(req, 0);
}
-static int cbc_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int cbc_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return cbc_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg cbc_paes_alg = {
- .cra_name = "cbc(paes)",
- .cra_driver_name = "cbc-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
- .cra_init = cbc_paes_init,
- .cra_exit = cbc_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = cbc_paes_set_key,
- .encrypt = cbc_paes_encrypt,
- .decrypt = cbc_paes_decrypt,
- }
- }
+static struct skcipher_alg cbc_paes_alg = {
+ .base.cra_name = "cbc(paes)",
+ .base.cra_driver_name = "cbc-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
+ .init = cbc_paes_init,
+ .exit = cbc_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_paes_set_key,
+ .encrypt = cbc_paes_encrypt,
+ .decrypt = cbc_paes_decrypt,
};
-static int xts_paes_init(struct crypto_tfm *tfm)
+static int xts_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
@@ -361,9 +334,9 @@ static int xts_paes_init(struct crypto_tfm *tfm)
return 0;
}
-static void xts_paes_exit(struct crypto_tfm *tfm)
+static void xts_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
@@ -391,11 +364,11 @@ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int xts_key_len)
{
int rc;
- struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 ckey[2 * AES_MAX_KEY_SIZE];
unsigned int ckey_len, key_len;
@@ -414,7 +387,7 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__xts_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -427,13 +400,14 @@ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
- return xts_check_key(tfm, ckey, 2*ckey_len);
+ return xts_verify_key(tfm, ckey, 2*ckey_len);
}
-static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
- struct s390_pxts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
unsigned int keylen, offset, nbytes, n, k;
int ret;
struct {
@@ -448,90 +422,76 @@ static int xts_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
u8 init[16];
} xts_param;
- ret = blkcipher_walk_virt(desc, walk);
+ ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
retry:
memset(&pcc_param, 0, sizeof(pcc_param));
- memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
memcpy(xts_param.init, pcc_param.xts, 16);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
- walk->dst.virt.addr, walk->src.virt.addr, n);
+ walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
- ret = blkcipher_walk_done(desc, walk, nbytes - k);
+ ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
if (__xts_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
goto retry;
}
}
return ret;
}
-static int xts_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_paes_encrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_paes_crypt(desc, 0, &walk);
+ return xts_paes_crypt(req, 0);
}
-static int xts_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int xts_paes_decrypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return xts_paes_crypt(desc, CPACF_DECRYPT, &walk);
+ return xts_paes_crypt(req, CPACF_DECRYPT);
}
-static struct crypto_alg xts_paes_alg = {
- .cra_name = "xts(paes)",
- .cra_driver_name = "xts-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_pxts_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
- .cra_init = xts_paes_init,
- .cra_exit = xts_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * PAES_MIN_KEYSIZE,
- .max_keysize = 2 * PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = xts_paes_set_key,
- .encrypt = xts_paes_encrypt,
- .decrypt = xts_paes_decrypt,
- }
- }
+static struct skcipher_alg xts_paes_alg = {
+ .base.cra_name = "xts(paes)",
+ .base.cra_driver_name = "xts-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
+ .init = xts_paes_init,
+ .exit = xts_paes_exit,
+ .min_keysize = 2 * PAES_MIN_KEYSIZE,
+ .max_keysize = 2 * PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_paes_set_key,
+ .encrypt = xts_paes_encrypt,
+ .decrypt = xts_paes_decrypt,
};
-static int ctr_paes_init(struct crypto_tfm *tfm)
+static int ctr_paes_init(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
return 0;
}
-static void ctr_paes_exit(struct crypto_tfm *tfm)
+static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
}
@@ -555,11 +515,11 @@ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
return ctx->fc ? 0 : -EINVAL;
}
-static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
int rc;
- struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
@@ -567,7 +527,7 @@ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return rc;
if (__ctr_paes_set_key(ctx)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
return 0;
@@ -588,37 +548,37 @@ static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
return n;
}
-static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
- struct blkcipher_walk *walk)
+static int ctr_paes_crypt(struct skcipher_request *req)
{
- struct s390_paes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
u8 buf[AES_BLOCK_SIZE], *ctrptr;
+ struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret, locked;
locked = spin_trylock(&ctrblk_lock);
- ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
+ ret = skcipher_walk_virt(&walk, req, false);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk->iv, nbytes);
- ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
- k = cpacf_kmctr(ctx->fc | modifier, ctx->pk.protkey,
- walk->dst.virt.addr, walk->src.virt.addr,
- n, ctrptr);
+ n = __ctrblk_init(ctrblk, walk.iv, nbytes);
+ ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
+ k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+ walk.src.virt.addr, n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
- memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
+ memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, nbytes - n);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, nbytes - n);
}
if (k < n) {
if (__ctr_paes_set_key(ctx) != 0) {
if (locked)
spin_unlock(&ctrblk_lock);
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
}
}
@@ -629,80 +589,54 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
*/
if (nbytes) {
while (1) {
- if (cpacf_kmctr(ctx->fc | modifier,
- ctx->pk.protkey, buf,
- walk->src.virt.addr, AES_BLOCK_SIZE,
- walk->iv) == AES_BLOCK_SIZE)
+ if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+ walk.src.virt.addr, AES_BLOCK_SIZE,
+ walk.iv) == AES_BLOCK_SIZE)
break;
if (__ctr_paes_set_key(ctx) != 0)
- return blkcipher_walk_done(desc, walk, -EIO);
+ return skcipher_walk_done(&walk, -EIO);
}
- memcpy(walk->dst.virt.addr, buf, nbytes);
- crypto_inc(walk->iv, AES_BLOCK_SIZE);
- ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk.dst.virt.addr, buf, nbytes);
+ crypto_inc(walk.iv, AES_BLOCK_SIZE);
+ ret = skcipher_walk_done(&walk, 0);
}
return ret;
}
-static int ctr_paes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_paes_crypt(desc, 0, &walk);
-}
-
-static int ctr_paes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- return ctr_paes_crypt(desc, CPACF_DECRYPT, &walk);
-}
-
-static struct crypto_alg ctr_paes_alg = {
- .cra_name = "ctr(paes)",
- .cra_driver_name = "ctr-paes-s390",
- .cra_priority = 402, /* ecb-paes-s390 + 1 */
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct s390_paes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
- .cra_init = ctr_paes_init,
- .cra_exit = ctr_paes_exit,
- .cra_u = {
- .blkcipher = {
- .min_keysize = PAES_MIN_KEYSIZE,
- .max_keysize = PAES_MAX_KEYSIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = ctr_paes_set_key,
- .encrypt = ctr_paes_encrypt,
- .decrypt = ctr_paes_decrypt,
- }
- }
+static struct skcipher_alg ctr_paes_alg = {
+ .base.cra_name = "ctr(paes)",
+ .base.cra_driver_name = "ctr-paes-s390",
+ .base.cra_priority = 402, /* ecb-paes-s390 + 1 */
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct s390_paes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
+ .init = ctr_paes_init,
+ .exit = ctr_paes_exit,
+ .min_keysize = PAES_MIN_KEYSIZE,
+ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_paes_set_key,
+ .encrypt = ctr_paes_crypt,
+ .decrypt = ctr_paes_crypt,
+ .chunksize = AES_BLOCK_SIZE,
};
-static inline void __crypto_unregister_alg(struct crypto_alg *alg)
+static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
- if (!list_empty(&alg->cra_list))
- crypto_unregister_alg(alg);
+ if (!list_empty(&alg->base.cra_list))
+ crypto_unregister_skcipher(alg);
}
static void paes_s390_fini(void)
{
if (ctrblk)
free_page((unsigned long) ctrblk);
- __crypto_unregister_alg(&ctr_paes_alg);
- __crypto_unregister_alg(&xts_paes_alg);
- __crypto_unregister_alg(&cbc_paes_alg);
- __crypto_unregister_alg(&ecb_paes_alg);
+ __crypto_unregister_skcipher(&ctr_paes_alg);
+ __crypto_unregister_skcipher(&xts_paes_alg);
+ __crypto_unregister_skcipher(&cbc_paes_alg);
+ __crypto_unregister_skcipher(&ecb_paes_alg);
}
static int __init paes_s390_init(void)
@@ -717,7 +651,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
- ret = crypto_register_alg(&ecb_paes_alg);
+ ret = crypto_register_skcipher(&ecb_paes_alg);
if (ret)
goto out_err;
}
@@ -725,14 +659,14 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
- ret = crypto_register_alg(&cbc_paes_alg);
+ ret = crypto_register_skcipher(&cbc_paes_alg);
if (ret)
goto out_err;
}
if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
- ret = crypto_register_alg(&xts_paes_alg);
+ ret = crypto_register_skcipher(&xts_paes_alg);
if (ret)
goto out_err;
}
@@ -740,7 +674,7 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
- ret = crypto_register_alg(&ctr_paes_alg);
+ ret = crypto_register_skcipher(&ctr_paes_alg);
if (ret)
goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
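
The paes conversion above follows the same recipe as the des/aes reworks earlier in the series: the legacy blkcipher entry points become skcipher ones, the walk state moves onto the caller's stack, and CTR loses its separate encrypt/decrypt paths since the operation is symmetric. For reference, a minimal sketch of how an in-kernel user would drive the converted "ecb(paes)" transform; the function name and buffer handling are invented for the example, and synchronous completion is assumed (which holds for these CPACF-backed ciphers):

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int demo_ecb_paes(const u8 *key, unsigned int keylen,
			 u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_skcipher("ecb(paes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}
	sg_init_one(&sg, buf, len);	/* in-place, len in full AES blocks */
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
	ret = crypto_skcipher_encrypt(req);	/* synchronous here */
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}
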
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index d39e0f079217..686fe7aa192f 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
unsigned int bsize = crypto_shash_blocksize(desc->tfm);
u64 bits;
- unsigned int n, mbl_offset;
+ unsigned int n;
+ int mbl_offset;
n = ctx->count % bsize;
bits = ctx->count * 8;
- mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
+ mbl_offset = s390_crypto_shash_parmsize(ctx->func);
if (mbl_offset < 0)
return -EINVAL;
+ mbl_offset = mbl_offset / sizeof(u32);
+
/* set total msg bit length (mbl) in CPACF parmblock */
switch (ctx->func) {
case CPACF_KLMD_SHA_1:
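
The sha_common.c hunk fixes a signedness bug: s390_crypto_shash_parmsize() returns a negative errno on failure, but the old code divided the result by sizeof(u32) before checking it. The division converts the value to an unsigned type (and the result was stored in an unsigned variable), so the `< 0` test could never fire; the fix checks the signed value first and divides afterwards. A standalone illustration of the pitfall, with parmsize() standing in for the real helper:

#include <stdio.h>

/* Stand-in for s390_crypto_shash_parmsize(): negative errno on failure. */
static int parmsize(int func)
{
	return func ? 64 : -22;		/* -EINVAL */
}

int main(void)
{
	/* Old pattern: sizeof() is unsigned, so -22 wraps before the check. */
	unsigned int bad = parmsize(0) / sizeof(unsigned int);
	/* New pattern: check the signed return value, divide afterwards. */
	int good = parmsize(0);

	printf("bad  = %u\n", bad);	/* huge positive value, error lost */
	if (good < 0)
		printf("good = %d, error caught\n", good);
	return 0;
}
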
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
index c2cf7bcdef9b..1c8a38f762a3 100644
--- a/arch/s390/include/asm/alternative.h
+++ b/arch/s390/include/asm/alternative.h
@@ -139,10 +139,10 @@ void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
* without volatile and memory clobber.
*/
#define alternative(oldinstr, altinstr, facility) \
- asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+ asm_inline volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
- asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ asm_inline volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
altinstr2, facility2) ::: "memory")
#endif /* __ASSEMBLY__ */
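
The asm_inline conversions here, and the matching ones in bug.h and spinlock.h below, address an inlining heuristic: GCC estimates the size of an asm statement from the length of its template, and ALTERNATIVE expands into many lines of section directives, so tiny wrappers around it looked huge and stopped being inlined. asm_inline (from <linux/compiler_types.h>, which falls back to plain asm on compilers without support) tells GCC 9 and later to cost the statement at minimum size. A hedged sketch of the effect; the wrapper name is invented, facility 49 and the NIAI opcode are taken from the spinlock hunk below:

/* With plain asm the expanded ALTERNATIVE looks large to the inliner;
 * with asm_inline it is costed as a single instruction. */
static inline void demo_relax(void)
{
	asm_inline volatile(ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
			    : : : "memory");
}
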
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 713fc9735ffb..a2b11ac00f60 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -9,7 +9,7 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define __EMIT_BUG(x) do { \
- asm volatile( \
+ asm_inline volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section .rodata.str,\"aMS\",@progbits,1\n" \
@@ -28,7 +28,7 @@
#else /* CONFIG_DEBUG_BUGVERBOSE */
#define __EMIT_BUG(x) do { \
- asm volatile( \
+ asm_inline volatile( \
"0: j 0b+2\n" \
"1:\n" \
".section __bug_table,\"awM\",@progbits,%1\n" \
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 60f907516335..ed5efbb531c4 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -11,6 +11,7 @@
#include <linux/bits.h>
#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
+#define CR0_LOW_ADDRESS_PROTECTION BIT(63 - 35)
#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index abe60268335d..02f4c21c57f6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -392,6 +392,7 @@ struct kvm_vcpu_stat {
u64 diagnose_10;
u64 diagnose_44;
u64 diagnose_9c;
+ u64 diagnose_9c_ignored;
u64 diagnose_258;
u64 diagnose_308;
u64 diagnose_500;
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 823578c6b9e2..a4d38092530a 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define ARCH_ZONE_DMA_BITS 31
-
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index bccb8f4a63e2..77606c4acd58 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -56,7 +56,12 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION2_ENTRY_EMPTY);
return (p4d_t *) table;
}
-#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
+
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ crst_table_free(mm, (unsigned long *) p4d);
+}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
@@ -65,7 +70,12 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
crst_table_init(table, _REGION3_ENTRY_EMPTY);
return (pud_t *) table;
}
-#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (!mm_pud_folded(mm))
+ crst_table_free(mm, (unsigned long *) pud);
+}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
@@ -83,6 +93,8 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ if (mm_pmd_folded(mm))
+ return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
crst_table_free(mm, (unsigned long *) pmd);
}
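
The three pgalloc.h hunks enforce one invariant: when a page-table level is folded (the mm does not use that many levels), the "lower" table is the very same allocation as its parent, so only the topmost unfolded level owns the page. The new checks keep generic mm code, which allocates and frees every level symmetrically, from double-freeing a shared table. A condensed sketch of the ownership rule; level_free() is made up:

static inline void level_free(struct mm_struct *mm, unsigned long *table,
			      bool folded)
{
	if (folded)
		return;		/* alias of the parent table, parent frees it */
	crst_table_free(mm, table);
}
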
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 5ff98d76a66c..7b03037a8475 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -266,11 +266,9 @@ static inline int is_module_addr(void *addr)
#endif
#define _REGION_ENTRY_BITS 0xfffffffffffff22fUL
-#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe2fUL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS 0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE 0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
@@ -699,10 +697,8 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
- if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
return 1;
- if (pmd_large(pmd))
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
@@ -710,12 +706,10 @@ static inline int pud_bad(pud_t pud)
{
unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
- if (type > _REGION_ENTRY_TYPE_R3)
+ if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
return 1;
if (type < _REGION_ENTRY_TYPE_R3)
return 0;
- if (pud_large(pud))
- return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}
@@ -758,18 +752,12 @@ static inline int pmd_write(pmd_t pmd)
static inline int pmd_dirty(pmd_t pmd)
{
- int dirty = 1;
- if (pmd_large(pmd))
- dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
- return dirty;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}
static inline int pmd_young(pmd_t pmd)
{
- int young = 1;
- if (pmd_large(pmd))
- young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
- return young;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}
static inline int pte_present(pte_t pte)
@@ -1173,8 +1161,6 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
- if (!MACHINE_HAS_NX)
- pte_val(entry) &= ~_PAGE_NOEXEC;
if (pte_present(entry))
pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm))
@@ -1191,6 +1177,8 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
+ if (!MACHINE_HAS_NX)
+ pte_val(__pte) &= ~_PAGE_NOEXEC;
return pte_mkyoung(__pte);
}
@@ -1297,29 +1285,23 @@ static inline pmd_t pmd_wrprotect(pmd_t pmd)
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
- if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
- return pmd;
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
- pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
- }
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY |
- _SEGMENT_ENTRY_SOFT_DIRTY;
- if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
- }
+ pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
@@ -1333,29 +1315,23 @@ static inline pud_t pud_wrprotect(pud_t pud)
static inline pud_t pud_mkwrite(pud_t pud)
{
pud_val(pud) |= _REGION3_ENTRY_WRITE;
- if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
- return pud;
- pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
return pud;
}
static inline pud_t pud_mkclean(pud_t pud)
{
- if (pud_large(pud)) {
- pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
- pud_val(pud) |= _REGION_ENTRY_PROTECT;
- }
+ pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
return pud;
}
static inline pud_t pud_mkdirty(pud_t pud)
{
- if (pud_large(pud)) {
- pud_val(pud) |= _REGION3_ENTRY_DIRTY |
- _REGION3_ENTRY_SOFT_DIRTY;
- if (pud_val(pud) & _REGION3_ENTRY_WRITE)
- pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
- }
+ pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
+ if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
return pud;
}
@@ -1379,38 +1355,29 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
- if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
- }
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
- pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
- }
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- if (pmd_large(pmd)) {
- pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
- _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
- _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
- pmd_val(pmd) |= massage_pgprot_pmd(newprot);
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
- pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
- pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
- return pmd;
- }
- pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+ _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+ _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
return pmd;
}
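
The pgtable.h rework drops the pmd_large()/pud_large() conditionals from the attribute helpers: pmd_dirty(), pmd_mkclean(), pmd_modify() and friends now assume they are applied to large (THP-style) entries, which matches how common code uses them, and in exchange pmd_bad()/pud_bad() report a large entry as bad so a table walker cannot mistake a leaf for a pointer to a lower-level table. A sketch of the walker ordering this implies; handle_huge() and walk_pte_level() are hypothetical:

static int walk_one_pmd(pmd_t *pmdp, unsigned long addr)
{
	pmd_t pmd = *pmdp;

	if (pmd_large(pmd))		/* leaf entry: handle before bad check */
		return handle_huge(pmd, addr);
	if (pmd_none(pmd) || pmd_bad(pmd))
		return -EFAULT;
	return walk_pte_level(pmd, addr);	/* descend one level */
}
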
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 51a0e4a2dc96..881fc37c11c6 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -206,7 +206,7 @@ unsigned long get_wchan(struct task_struct *p);
/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
-static inline unsigned long current_stack_pointer(void)
+static __always_inline unsigned long current_stack_pointer(void)
{
unsigned long sp;
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index e3f238e8c611..71e3f0146cda 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -276,6 +276,7 @@ struct qdio_outbuf_state {
#define CHSC_AC2_MULTI_BUFFER_AVAILABLE 0x0080
#define CHSC_AC2_MULTI_BUFFER_ENABLED 0x0040
#define CHSC_AC2_DATA_DIV_AVAILABLE 0x0010
+#define CHSC_AC2_SNIFFER_AVAILABLE 0x0008
#define CHSC_AC2_DATA_DIV_ENABLED 0x0002
#define CHSC_AC3_FORMAT2_CQ_AVAILABLE 0x8000
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index c02bff33f6c7..3a37172d5398 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -85,7 +85,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
" sth %1,%0\n"
: "=Q" (((unsigned short *) &lp->lock)[1])
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 0ae4bbf7779c..fee40212af11 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -38,7 +38,7 @@ static inline unsigned long get_stack_pointer(struct task_struct *task,
{
if (regs)
return (unsigned long) kernel_stack_pointer(regs);
- if (task == current)
+ if (!task || task == current)
return current_stack_pointer();
return (unsigned long) task->thread.ksp;
}
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 64539c221672..6da8885251d6 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -10,8 +10,9 @@
#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H
-#include <asm/lowcore.h>
+#include <linux/preempt.h>
#include <linux/time64.h>
+#include <asm/lowcore.h>
/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -179,22 +180,24 @@ static inline cycles_t get_cycles(void)
int get_phys_clock(unsigned long *clock);
void init_cpu_timer(void);
-unsigned long long monotonic_clock(void);
extern unsigned char tod_clock_base[16] __aligned(8);
/**
* get_tod_clock_monotonic - returns current time in clock rate units
*
- * The caller must ensure that preemption is disabled.
* The clock and tod_clock_base get changed via stop_machine.
- * Therefore preemption must be disabled when calling this
- * function, otherwise the returned value is not guaranteed to
- * be monotonic.
+ * Therefore preemption must be disabled, otherwise the returned
+ * value is not guaranteed to be monotonic.
*/
static inline unsigned long long get_tod_clock_monotonic(void)
{
- return get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ unsigned long long tod;
+
+ preempt_disable();
+ tod = get_tod_clock() - *(unsigned long long *) &tod_clock_base[1];
+ preempt_enable();
+ return tod;
}
/**
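
With the preempt_disable()/preempt_enable() pair moved into get_tod_clock_monotonic() itself, callers no longer need their own preemption bracket, and the unused monotonic_clock() declaration can go (its definition is removed in the time.c hunk below). A hedged usage sketch; the workload call is invented:

static unsigned long long demo_elapsed_ns(void)
{
	unsigned long long start = get_tod_clock_monotonic();

	do_demo_work();		/* hypothetical workload */
	return tod_to_ns(get_tod_clock_monotonic() - start);
}
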
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
index d827b5b9a32c..eaaefeceef6f 100644
--- a/arch/s390/include/asm/unwind.h
+++ b/arch/s390/include/asm/unwind.h
@@ -35,6 +35,7 @@ struct unwind_state {
struct task_struct *task;
struct pt_regs *regs;
unsigned long sp, ip;
+ bool reuse_sp;
int graph_idx;
bool reliable;
bool error;
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 7abe6ae261b4..f304802ecf7b 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -461,10 +461,11 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
ptr += sprintf(ptr, "%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
- else if (operand->flags & OPERAND_PCREL)
- ptr += sprintf(ptr, "%lx", (signed int) value
- + addr);
- else if (operand->flags & OPERAND_SIGNED)
+ else if (operand->flags & OPERAND_PCREL) {
+ void *pcrel = (void *)((int)value + addr);
+
+ ptr += sprintf(ptr, "%px", pcrel);
+ } else if (operand->flags & OPERAND_SIGNED)
ptr += sprintf(ptr, "%i", value);
else
ptr += sprintf(ptr, "%u", value);
@@ -536,7 +537,7 @@ void show_code(struct pt_regs *regs)
else
*ptr++ = ' ';
addr = regs->psw.addr + start - 32;
- ptr += sprintf(ptr, "%016lx: ", addr);
+ ptr += sprintf(ptr, "%px: ", (void *)addr);
if (start + opsize >= end)
break;
for (i = 0; i < opsize; i++)
@@ -564,7 +565,7 @@ void print_fn_code(unsigned char *code, unsigned long len)
opsize = insn_length(*code);
if (opsize > len)
break;
- ptr += sprintf(ptr, "%p: ", code);
+ ptr += sprintf(ptr, "%px: ", code);
for (i = 0; i < opsize; i++)
ptr += sprintf(ptr, "%02x", code[i]);
*ptr++ = '\t';
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index b432d63d0b37..db32a55daaec 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -30,6 +30,7 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
+#include <asm/switch_to.h>
#include "entry.h"
static void __init reset_tod_clock(void)
@@ -238,7 +239,7 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
- if (test_facility(130)) {
+ if (test_facility(130) && !noexec_disabled) {
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
__ctl_set_bit(0, 20);
}
@@ -260,6 +261,24 @@ static inline void save_vector_registers(void)
#endif
}
+static inline void setup_control_registers(void)
+{
+ unsigned long reg;
+
+ __ctl_store(reg, 0, 0);
+ reg |= CR0_LOW_ADDRESS_PROTECTION;
+ reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
+ reg |= CR0_EXTERNAL_CALL_SUBMASK;
+ __ctl_load(reg, 0, 0);
+}
+
+static inline void setup_access_registers(void)
+{
+ unsigned int acrs[NUM_ACRS] = { 0 };
+
+ restore_access_regs(acrs);
+}
+
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@@ -268,21 +287,6 @@ static int __init disable_vector_extension(char *str)
}
early_param("novx", disable_vector_extension);
-static int __init noexec_setup(char *str)
-{
- bool enabled;
- int rc;
-
- rc = kstrtobool(str, &enabled);
- if (!rc && !enabled) {
- /* Disable no-execute support */
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
- __ctl_clear_bit(0, 20);
- }
- return rc;
-}
-early_param("noexec", noexec_setup);
-
static int __init cad_setup(char *str)
{
bool enabled;
@@ -332,5 +336,7 @@ void __init startup_init(void)
save_vector_registers();
setup_topology();
sclp_early_detect();
+ setup_control_registers();
+ setup_access_registers();
lockdep_on();
}
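
The early.c hunk pulls the control and access register setup out of head64.S (see the next hunk) into C, using the named CR0 bits from ctl_reg.h instead of `oi` instructions on magic byte offsets. The pattern is a plain read-modify-write of a control register; a reduced sketch for a single bit:

static void enable_low_address_protection(void)
{
	unsigned long cr0;

	__ctl_store(cr0, 0, 0);			/* read CR0 */
	cr0 |= CR0_LOW_ADDRESS_PROTECTION;
	__ctl_load(cr0, 0, 0);			/* write it back */
}
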
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 0d9ee198f4eb..b9e585f528a6 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,8 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
- larl %r0,boot_vdso_data
- stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
#
@@ -37,19 +35,8 @@ ENTRY(startup_continue)
#ifdef CONFIG_KASAN
brasl %r14,kasan_early_init
#endif
-#
-# Early machine initialization and detection functions.
-#
- brasl %r14,startup_init
-
-# check control registers
- stctg %c0,%c15,0(%r15)
- oi 6(%r15),0x60 # enable sigp emergency & external call
- oi 4(%r15),0x10 # switch on low address protection
- lctlg %c0,%c15,0(%r15)
-
- lam 0,15,.Laregs-.LPG1(%r13) # load acrs needed by uaccess
- brasl %r14,start_kernel # go to C code
+ brasl %r14,startup_init # s390 specific early init
+ brasl %r14,start_kernel # common init code
#
# We returned from start_kernel ?!? PANIK
#
@@ -59,4 +46,3 @@ ENTRY(startup_continue)
.align 16
.LPG1:
.Ldw: .quad 0x0002000180000000,0x0000000000000000
-.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index b9d8fe45737a..8f8456816d83 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
- unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int seq;
do {
- now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_time = READ_ONCE(idle->idle_time);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
- idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ in_idle = 0;
+ now = get_tod_clock();
+ if (idle_enter) {
+ if (idle_exit) {
+ in_idle = idle_exit - idle_enter;
+ } else if (now > idle_enter) {
+ in_idle = now - idle_enter;
+ }
+ }
+ idle_time += in_idle;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
- unsigned long long now, idle_enter, idle_exit;
+ unsigned long long now, idle_enter, idle_exit, in_idle;
unsigned int seq;
do {
- now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
-
- return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
+ in_idle = 0;
+ now = get_tod_clock();
+ if (idle_enter) {
+ if (idle_exit) {
+ in_idle = idle_exit - idle_enter;
+ } else if (now > idle_enter) {
+ in_idle = now - idle_enter;
+ }
+ }
+ return cputime_to_nsecs(in_idle);
}
void arch_cpu_idle_enter(void)
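
Both idle.c hunks restructure the idle-time arithmetic the same way: the TOD clock is sampled once, after a consistent seqcount snapshot, and the new `now > idle_enter` test guards the unsigned subtraction for the case where the snapshot races with a CPU that is just entering idle and idle_enter ends up newer than the clock sample. Without the guard the subtraction wraps; a standalone demonstration:

#include <stdio.h>

int main(void)
{
	/* Racy snapshot: idle_enter is newer than the clock sample. */
	unsigned long long now = 1000, idle_enter = 1010, in_idle;

	in_idle = now - idle_enter;	/* wraps to 2^64 - 10 */
	printf("unguarded: %llu\n", in_idle);
	in_idle = now > idle_enter ? now - idle_enter : 0;
	printf("guarded:   %llu\n", in_idle);
	return 0;
}
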
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 48d48b6187c0..0eb1d1cc53a8 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -199,7 +199,7 @@ static const int cpumf_generic_events_user[] = {
[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
-static int __hw_perf_event_init(struct perf_event *event)
+static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
@@ -207,7 +207,7 @@ static int __hw_perf_event_init(struct perf_event *event)
int err = 0;
u64 ev;
- switch (attr->type) {
+ switch (type) {
case PERF_TYPE_RAW:
/* Raw events are used to access counters directly,
* hence do not permit excludes */
@@ -294,17 +294,16 @@ static int __hw_perf_event_init(struct perf_event *event)
static int cpumf_pmu_event_init(struct perf_event *event)
{
+ unsigned int type = event->attr.type;
int err;
- switch (event->attr.type) {
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- case PERF_TYPE_RAW:
- err = __hw_perf_event_init(event);
- break;
- default:
+ if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_RAW)
+ err = __hw_perf_event_init(event, type);
+ else if (event->pmu->type == type)
+ /* Registered as unknown PMU */
+ err = __hw_perf_event_init(event, PERF_TYPE_RAW);
+ else
return -ENOENT;
- }
if (unlikely(err) && event->destroy)
event->destroy(event);
@@ -553,7 +552,7 @@ static int __init cpumf_pmu_init(void)
return -ENODEV;
cpumf_pmu.attr_groups = cpumf_cf_event_group();
- rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
+ rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", -1);
if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
return rc;
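
Passing -1 instead of PERF_TYPE_RAW to perf_pmu_register() asks the perf core to allocate a dynamic type id (cpum_cf_diag below gets the same treatment), which is why event_init now also accepts events whose attr.type equals the id the core stored in event->pmu->type. A sketch of the registration side, with an invented PMU:

static struct pmu demo_pmu;	/* .event_init and friends omitted */

static int __init demo_pmu_init(void)
{
	int rc = perf_pmu_register(&demo_pmu, "demo_pmu", -1);

	if (rc)
		return rc;
	/* The allocated id is also what user space reads from
	 * /sys/bus/event_source/devices/demo_pmu/type. */
	pr_info("demo_pmu: dynamic type %d\n", demo_pmu.type);
	return 0;
}
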
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index 2654e348801a..e949ab832ed7 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -243,13 +243,13 @@ static int cf_diag_event_init(struct perf_event *event)
int err = -ENOENT;
debug_sprintf_event(cf_diag_dbg, 5,
- "%s event %p cpu %d config %#llx "
+ "%s event %p cpu %d config %#llx type:%u "
"sample_type %#llx cf_diag_events %d\n", __func__,
- event, event->cpu, attr->config, attr->sample_type,
- atomic_read(&cf_diag_events));
+ event, event->cpu, attr->config, event->pmu->type,
+ attr->sample_type, atomic_read(&cf_diag_events));
if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
- event->attr.type != PERF_TYPE_RAW)
+ event->attr.type != event->pmu->type)
goto out;
/* Raw events are used to access counters directly,
@@ -693,7 +693,7 @@ static int __init cf_diag_init(void)
}
debug_register_view(cf_diag_dbg, &debug_sprintf_view);
- rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
+ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", -1);
if (rc) {
debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
debug_unregister(cf_diag_dbg);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 3d8b12a9a6ff..69506fdbd9a1 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -156,8 +156,8 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
}
}
- debug_sprintf_event(sfdbg, 5,
- "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
+ debug_sprintf_event(sfdbg, 5, "%s freed sdbt %p\n", __func__,
+ sfb->sdbt);
memset(sfb, 0, sizeof(*sfb));
}
@@ -212,10 +212,10 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* the sampling buffer origin.
*/
if (sfb->sdbt != get_next_sdbt(tail)) {
- debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
- "sampling buffer is not linked: origin=%p"
- "tail=%p\n",
- (void *) sfb->sdbt, (void *) tail);
+ debug_sprintf_event(sfdbg, 3, "%s: "
+ "sampling buffer is not linked: origin %p"
+ " tail %p\n", __func__,
+ (void *)sfb->sdbt, (void *)tail);
return -EINVAL;
}
@@ -252,7 +252,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->tail = tail;
debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
- " settings: sdbt=%lu sdb=%lu\n",
+ " settings: sdbt %lu sdb %lu\n",
sfb->num_sdbt, sfb->num_sdb);
return rc;
}
@@ -293,11 +293,11 @@ static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
if (rc) {
free_sampling_buffer(sfb);
debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
- "realloc_sampling_buffer failed with rc=%i\n", rc);
+ "realloc_sampling_buffer failed with rc %i\n", rc);
} else
debug_sprintf_event(sfdbg, 4,
- "alloc_sampling_buffer: tear=%p dear=%p\n",
- sfb->sdbt, (void *) *sfb->sdbt);
+ "alloc_sampling_buffer: tear %p dear %p\n",
+ sfb->sdbt, (void *)*sfb->sdbt);
return rc;
}
@@ -404,8 +404,8 @@ static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
return 0;
debug_sprintf_event(sfdbg, 3,
- "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
- " sample_size=%lu cpuhw=%p\n",
+ "%s: rate %lu f %lu sdb %lu/%lu"
+ " sample_size %lu cpuhw %p\n", __func__,
SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
sample_size, cpuhw);
@@ -465,8 +465,8 @@ static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
if (num)
sfb_account_allocs(num, hwc);
- debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
- " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
+ debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow %llu ratio %lu"
+ " num %lu\n", OVERFLOW_REG(hwc), ratio, num);
OVERFLOW_REG(hwc) = 0;
}
@@ -505,11 +505,11 @@ static void extend_sampling_buffer(struct sf_buffer *sfb,
rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
if (rc)
debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
- "failed with rc=%i\n", rc);
+ "failed with rc %i\n", rc);
if (sfb_has_pending_allocs(sfb, hwc))
debug_sprintf_event(sfdbg, 5, "sfb: extend: "
- "req=%lu alloc=%lu remaining=%lu\n",
+ "req %lu alloc %lu remaining %lu\n",
num, sfb->num_sdb - num_old,
sfb_pending_allocs(sfb, hwc));
}
@@ -538,20 +538,22 @@ static void setup_pmc_cpu(void *flags)
err = sf_disable();
if (err)
pr_err("Switching off the sampling facility failed "
- "with rc=%i\n", err);
+ "with rc %i\n", err);
debug_sprintf_event(sfdbg, 5,
- "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
+ "%s: initialized: cpuhw %p\n", __func__,
+ cpusf);
break;
case PMC_RELEASE:
cpusf->flags &= ~PMU_F_RESERVED;
err = sf_disable();
if (err) {
pr_err("Switching off the sampling facility failed "
- "with rc=%i\n", err);
+ "with rc %i\n", err);
} else
deallocate_buffers(cpusf);
debug_sprintf_event(sfdbg, 5,
- "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
+ "%s: released: cpuhw %p\n", __func__,
+ cpusf);
break;
}
if (err)
@@ -744,7 +746,7 @@ static int __hw_perf_event_init_rate(struct perf_event *event,
SAMPL_RATE(hwc) = rate;
hw_init_period(hwc, SAMPL_RATE(hwc));
debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:"
- "cpu:%d period:%llx freq:%d,%#lx\n", event->cpu,
+ "cpu:%d period:%#llx freq:%d,%#lx\n", event->cpu,
event->attr.sample_period, event->attr.freq,
SAMPLE_FREQ_MODE(hwc));
return 0;
@@ -963,7 +965,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
err = lsctl(&cpuhw->lsctl);
if (err) {
cpuhw->flags &= ~PMU_F_ENABLED;
- pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ pr_err("Loading sampling controls failed: op %i err %i\n",
1, err);
return;
}
@@ -971,8 +973,8 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
/* Load current program parameter */
lpp(&S390_lowcore.lpp);
- debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
- "interval:%lx tear=%p dear=%p\n",
+ debug_sprintf_event(sfdbg, 6, "pmu_enable: es %i cs %i ed %i cd %i "
+ "interval %#lx tear %p dear %p\n",
cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed,
cpuhw->lsctl.cd, cpuhw->lsctl.interval,
(void *) cpuhw->lsctl.tear,
@@ -999,13 +1001,14 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
err = lsctl(&inactive);
if (err) {
- pr_err("Loading sampling controls failed: op=%i err=%i\n",
+ pr_err("Loading sampling controls failed: op %i err %i\n",
2, err);
return;
}
/* Save state of TEAR and DEAR register contents */
- if (!qsi(&si)) {
+ err = qsi(&si);
+ if (!err) {
/* TEAR/DEAR values are valid only if the sampling facility is
* enabled. Note that cpumsf_pmu_disable() might be called even
* for a disabled sampling facility because cpumsf_pmu_enable()
@@ -1017,7 +1020,7 @@ static void cpumsf_pmu_disable(struct pmu *pmu)
}
} else
debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
- "qsi() failed with err=%i\n", err);
+ "qsi() failed with err %i\n", err);
cpuhw->flags &= ~PMU_F_ENABLED;
}
@@ -1130,15 +1133,6 @@ static void perf_event_count_update(struct perf_event *event, u64 count)
local64_add(count, &event->count);
}
-static void debug_sample_entry(struct hws_basic_entry *sample,
- struct hws_trailer_entry *te)
-{
- debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
- "sampling data entry: te->f=%i basic.def=%04x "
- "(%p)\n",
- te->f, sample->def, sample);
-}
-
/* hw_collect_samples() - Walk through a sample-data-block and collect samples
* @event: The perf event
* @sdbt: Sample-data-block table
@@ -1192,7 +1186,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
/* Count discarded samples */
*overflow += 1;
} else {
- debug_sample_entry(sample, te);
+ debug_sprintf_event(sfdbg, 4,
+ "%s: Found unknown"
+ " sampling data entry: te->f %i"
+ " basic.def %#4x (%p)\n", __func__,
+ te->f, sample->def, sample);
/* Sample slot is not yet written or other record.
*
* This condition can occur if the buffer was reused
@@ -1267,9 +1265,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
sampl_overflow += te->overflow;
/* Timestamps are valid for full sample-data-blocks only */
- debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
- "overflow=%llu timestamp=%#llx\n",
- sdbt, te->overflow,
+ debug_sprintf_event(sfdbg, 6, "%s: sdbt %p "
+ "overflow %llu timestamp %#llx\n",
+ __func__, sdbt, te->overflow,
(te->f) ? trailer_timestamp(te) : 0ULL);
/* Collect all samples from a single sample-data-block and
@@ -1313,9 +1311,9 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
sampl_overflow, 1 + num_sdb);
if (sampl_overflow || event_overflow)
- debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
- "overflow stats: sample=%llu event=%llu\n",
- sampl_overflow, event_overflow);
+ debug_sprintf_event(sfdbg, 4, "%s: "
+ "overflow stats: sample %llu event %llu\n",
+ __func__, sampl_overflow, event_overflow);
}
#define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb)
@@ -1368,7 +1366,7 @@ static void aux_output_end(struct perf_output_handle *handle)
te = aux_sdb_trailer(aux, aux->alert_mark);
te->flags &= ~SDB_TE_ALERT_REQ_MASK;
- debug_sprintf_event(sfdbg, 6, "aux_output_end: collect %lx SDBs\n", i);
+ debug_sprintf_event(sfdbg, 6, "%s: collect %#lx SDBs\n", __func__, i);
}
/*
@@ -1428,8 +1426,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
debug_sprintf_event(sfdbg, 6, "aux_output_begin: "
"head->alert_mark->empty_mark (num_alert, range)"
- "[%lx -> %lx -> %lx] (%lx, %lx) "
- "tear index %lx, tear %lx dear %lx\n",
+ "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) "
+ "tear index %#lx, tear %#lx dear %#lx\n",
aux->head, aux->alert_mark, aux->empty_mark,
AUX_SDB_NUM_ALERT(aux), range,
head / CPUM_SF_SDB_PER_TABLE,
@@ -1596,13 +1594,13 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
perf_aux_output_end(&cpuhw->handle, size);
pr_err("Sample data caused the AUX buffer with %lu "
"pages to overflow\n", num_sdb);
- debug_sprintf_event(sfdbg, 1, "head %lx range %lx "
- "overflow %llx\n",
+ debug_sprintf_event(sfdbg, 1, "head %#lx range %#lx "
+ "overflow %#llx\n",
aux->head, range, overflow);
} else {
size = AUX_SDB_NUM_ALERT(aux) << PAGE_SHIFT;
perf_aux_output_end(&cpuhw->handle, size);
- debug_sprintf_event(sfdbg, 6, "head %lx alert %lx "
+ debug_sprintf_event(sfdbg, 6, "head %#lx alert %#lx "
"already full, try another\n",
aux->head, aux->alert_mark);
}
@@ -1610,7 +1608,7 @@ static void hw_collect_aux(struct cpu_hw_sf *cpuhw)
if (done)
debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: "
- "[%lx -> %lx -> %lx] (%lx, %lx)\n",
+ "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n",
aux->head, aux->alert_mark, aux->empty_mark,
AUX_SDB_NUM_ALERT(aux), range);
}
@@ -1800,7 +1798,7 @@ static int cpumsf_pmu_check_period(struct perf_event *event, u64 value)
SAMPL_RATE(&event->hw) = rate;
hw_init_period(&event->hw, SAMPL_RATE(&event->hw));
debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:"
- "cpu:%d value:%llx period:%llx freq:%d\n",
+ "cpu:%d value:%#llx period:%#llx freq:%d\n",
event->cpu, value,
event->attr.sample_period, do_freq);
return 0;
@@ -2111,7 +2109,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
sfb_set_limits(min, max);
pr_info("The sampling buffer limits have changed to: "
- "min=%lu max=%lu (diag=x%lu)\n",
+ "min %lu max %lu (diag %lu)\n",
CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
return 0;
}
@@ -2129,7 +2127,7 @@ static const struct kernel_param_ops param_ops_sfb_size = {
static void __init pr_cpumsf_err(unsigned int reason)
{
pr_err("Sampling facility support for perf is not available: "
- "reason=%04x\n", reason);
+ "reason %#x\n", reason);
}
static int __init init_cpum_sampling_pmu(void)
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index fcb6c2e92b07..1e75cc983546 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,9 +224,13 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct unwind_state state;
+ unsigned long addr;
- unwind_for_each_frame(&state, current, regs, 0)
- perf_callchain_store(entry, state.ip);
+ unwind_for_each_frame(&state, current, regs, 0) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || perf_callchain_store(entry, addr))
+ return;
+ }
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index b0afec673f77..6ccef5f29761 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -40,6 +40,7 @@
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
+#include <asm/unwind.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -178,9 +179,8 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
- struct stack_frame *sf, *low, *high;
- unsigned long return_address;
- int count;
+ struct unwind_state state;
+ unsigned long ip = 0;
if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
return 0;
@@ -188,26 +188,22 @@ unsigned long get_wchan(struct task_struct *p)
if (!try_get_task_stack(p))
return 0;
- low = task_stack_page(p);
- high = (struct stack_frame *) task_pt_regs(p);
- sf = (struct stack_frame *) p->thread.ksp;
- if (sf <= low || sf > high) {
- return_address = 0;
- goto out;
- }
- for (count = 0; count < 16; count++) {
- sf = (struct stack_frame *)READ_ONCE_NOCHECK(sf->back_chain);
- if (sf <= low || sf > high) {
- return_address = 0;
- goto out;
+ unwind_for_each_frame(&state, p, NULL, 0) {
+ if (state.stack_info.type != STACK_TYPE_TASK) {
+ ip = 0;
+ break;
}
- return_address = READ_ONCE_NOCHECK(sf->gprs[8]);
- if (!in_sched_functions(return_address))
- goto out;
+
+ ip = unwind_get_return_address(&state);
+ if (!ip)
+ break;
+
+ if (!in_sched_functions(ip))
+ break;
}
-out:
+
put_task_stack(p);
- return return_address;
+ return ip;
}
unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 44974654cbd0..6acdcf1d4074 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -724,39 +724,67 @@ static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
static int smp_add_present_cpu(int cpu);
-static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
+static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
+ bool configured, bool early)
{
struct pcpu *pcpu;
- cpumask_t avail;
- int cpu, nr, i, j;
+ int cpu, nr, i;
u16 address;
nr = 0;
- cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
- cpu = cpumask_first(&avail);
- for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
- if (sclp.has_core_type && info->core[i].type != boot_core_type)
+ if (sclp.has_core_type && core->type != boot_core_type)
+ return nr;
+ cpu = cpumask_first(avail);
+ address = core->core_id << smp_cpu_mt_shift;
+ for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
+ if (pcpu_find_address(cpu_present_mask, address + i))
continue;
- address = info->core[i].core_id << smp_cpu_mt_shift;
- for (j = 0; j <= smp_cpu_mtid; j++) {
- if (pcpu_find_address(cpu_present_mask, address + j))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = address + j;
- pcpu->state =
- (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
- if (cpu >= nr_cpu_ids)
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + i;
+ if (configured)
+ pcpu->state = CPU_STATE_CONFIGURED;
+ else
+ pcpu->state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (!early && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpumask_clear_cpu(cpu, avail);
+ cpu = cpumask_next(cpu, avail);
+ }
+ return nr;
+}
+
+static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
+{
+ struct sclp_core_entry *core;
+ cpumask_t avail;
+ bool configured;
+ u16 core_id;
+ int nr, i;
+
+ nr = 0;
+ cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
+ /*
+ * Add IPL core first (which got logical CPU number 0) to make sure
+ * that all SMT threads get subsequent logical CPU numbers.
+ */
+ if (early) {
+ core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
+ for (i = 0; i < info->configured; i++) {
+ core = &info->core[i];
+ if (core->core_id == core_id) {
+ nr += smp_add_core(core, &avail, true, early);
break;
+ }
}
}
+ for (i = 0; i < info->combined; i++) {
+ configured = i < info->configured;
+ nr += smp_add_core(&info->core[i], &avail, configured, early);
+ }
return nr;
}
@@ -805,7 +833,7 @@ void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
get_online_cpus();
- __smp_rescan_cpus(info, 0);
+ __smp_rescan_cpus(info, true);
put_online_cpus();
memblock_free_early((unsigned long)info, sizeof(*info));
}
@@ -1148,7 +1176,7 @@ int __ref smp_rescan_cpus(void)
smp_get_core_info(info, 0);
get_online_cpus();
mutex_lock(&smp_cpu_state_mutex);
- nr = __smp_rescan_cpus(info, 1);
+ nr = __smp_rescan_cpus(info, false);
mutex_unlock(&smp_cpu_state_mutex);
put_online_cpus();
kfree(info);
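[Annotation] The split into smp_add_core() makes the core-to-address mapping explicit: each core entry contributes smp_cpu_mtid + 1 consecutive CPU addresses starting at core_id << smp_cpu_mt_shift, and on the early path the IPL core is added first so its SMT threads own logical CPUs 0..mtid. A stand-alone illustration of that arithmetic (values are made up; the shift/mtid names mirror the kernel's):

        #include <stdio.h>

        int main(void)
        {
                unsigned int smp_cpu_mt_shift = 1;      /* example: SMT-2 */
                unsigned int smp_cpu_mtid = 1;          /* threads per core - 1 */
                unsigned int core_ids[] = { 3, 0, 7 };  /* IPL core listed first */
                unsigned int cpu = 0;

                for (unsigned int c = 0; c < 3; c++) {
                        unsigned int address = core_ids[c] << smp_cpu_mt_shift;

                        for (unsigned int i = 0; i <= smp_cpu_mtid; i++)
                                printf("cpu %u -> address %u\n", cpu++, address + i);
                }
                return 0;       /* IPL core 3 gets cpus 0-1, then cores 0 and 7 */
        }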
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index e8766beee5ad..f9d070d016e3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -110,15 +110,6 @@ unsigned long long notrace sched_clock(void)
}
NOKPROBE_SYMBOL(sched_clock);
-/*
- * Monotonic_clock - returns # of nanoseconds passed since time_init()
- */
-unsigned long long monotonic_clock(void)
-{
- return sched_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
-
static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
{
unsigned long long high, low, rem, sec, nsec;
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
index 8fc9daae47a2..fa111d3d378f 100644
--- a/arch/s390/kernel/unwind_bc.c
+++ b/arch/s390/kernel/unwind_bc.c
@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state)
regs = state->regs;
if (unlikely(regs)) {
- sp = READ_ONCE_NOCHECK(regs->gprs[15]);
- if (unlikely(outside_of_stack(state, sp))) {
- if (!update_stack_info(state, sp))
- goto out_err;
+ if (state->reuse_sp) {
+ sp = state->sp;
+ state->reuse_sp = false;
+ } else {
+ sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
}
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
@@ -80,12 +85,7 @@ bool unwind_next_frame(struct unwind_state *state)
}
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Decode any ftrace redirection */
- if (ip == (unsigned long) return_to_handler)
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
- ip, (void *) sp);
-#endif
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp);
/* Update unwind state */
state->sp = sp;
@@ -107,9 +107,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
{
struct stack_info *info = &state->stack_info;
unsigned long *mask = &state->stack_mask;
+ bool reliable, reuse_sp;
struct stack_frame *sf;
unsigned long ip;
- bool reliable;
memset(state, 0, sizeof(*state));
state->task = task;
@@ -134,22 +134,20 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
if (regs) {
ip = READ_ONCE_NOCHECK(regs->psw.addr);
reliable = true;
+ reuse_sp = true;
} else {
sf = (struct stack_frame *) sp;
ip = READ_ONCE_NOCHECK(sf->gprs[8]);
reliable = false;
+ reuse_sp = false;
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* Decode any ftrace redirection */
- if (ip == (unsigned long) return_to_handler)
- ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
- ip, NULL);
-#endif
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL);
/* Update unwind state */
state->sp = sp;
state->ip = ip;
state->reliable = reliable;
+ state->reuse_sp = reuse_sp;
}
EXPORT_SYMBOL_GPL(__unwind_start);
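[Annotation] Two independent changes here. First, reuse_sp is a one-shot flag: when the walk starts from a pt_regs frame, the first call to unwind_next_frame() must reuse the stack pointer already recorded for that frame instead of re-reading gprs[15], which previously dropped a step. Second, ftrace_graph_ret_addr() can be called unconditionally because it has a stub that returns ip unchanged when CONFIG_FUNCTION_GRAPH_TRACER is off. A toy model of the one-shot flag:

        #include <stdbool.h>
        #include <stdio.h>

        struct state { unsigned long sp; bool reuse_sp; };

        static unsigned long next_sp(struct state *s, unsigned long derived)
        {
                if (s->reuse_sp) {
                        s->reuse_sp = false;    /* one-shot: first step only */
                        return s->sp;
                }
                return derived;                 /* normal case: follow gprs[15] */
        }

        int main(void)
        {
                struct state s = { .sp = 0x8000, .reuse_sp = true };

                printf("%#lx\n", next_sp(&s, 0x9000)); /* 0x8000: reused */
                printf("%#lx\n", next_sp(&s, 0x9000)); /* 0x9000: derived */
                return 0;
        }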
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7e0eb4020917..37695499717d 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -15,6 +15,8 @@
/* Handle ro_after_init data on our own. */
#define RO_AFTER_INIT_DATA
+#define EMITS_PT_NOTE
+
#include <asm-generic/vmlinux.lds.h>
#include <asm/vmlinux.lds.h>
@@ -50,11 +52,7 @@ SECTIONS
_etext = .; /* End of text section */
} :text = 0x0700
- NOTES :text :note
-
- .dummy : { *(.dummy) } :data
-
- RO_DATA_SECTION(PAGE_SIZE)
+ RO_DATA(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
_sdata = .; /* Start of data section */
@@ -64,12 +62,12 @@ SECTIONS
.data..ro_after_init : {
*(.data..ro_after_init)
JUMP_TABLE_DATA
- }
+ } :data
EXCEPTION_TABLE(16)
. = ALIGN(PAGE_SIZE);
__end_ro_after_init = .;
- RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
BOOT_DATA_PRESERVED
_edata = .; /* End of data section */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c475ca49cfc6..8df10d3c8f6c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -247,9 +247,9 @@ void vtime_account_irq_enter(struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
__attribute__((alias("vtime_account_irq_enter")));
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
/*
* Sorted add to a list. List is linear searched until first bigger
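[Annotation] The rename keeps the old behavior alive purely through a symbol alias: both names resolve to the same function body, with no wrapper call. A stand-alone sketch of the GCC alias mechanism used above (function names are illustrative):

        #include <stdio.h>

        static void account(void)               /* the one real implementation */
        {
                puts("accounted");
        }

        /* Both declarations bind to the symbol "account"; only one body is emitted. */
        void vtime_new_name(void) __attribute__((alias("account")));
        void vtime_old_name(void) __attribute__((alias("account")));

        int main(void)
        {
                vtime_new_name();
                vtime_old_name();
                return 0;
        }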
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 45634b3d2e0a..3fb54ec2cf3e 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -158,14 +158,28 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
vcpu->stat.diagnose_9c++;
- VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+ /* yield to self */
if (tid == vcpu->vcpu_id)
- return 0;
+ goto no_yield;
+ /* yield to invalid */
tcpu = kvm_get_vcpu_by_id(vcpu->kvm, tid);
- if (tcpu)
- kvm_vcpu_yield_to(tcpu);
+ if (!tcpu)
+ goto no_yield;
+
+ /* target already running */
+ if (READ_ONCE(tcpu->cpu) >= 0)
+ goto no_yield;
+
+ if (kvm_vcpu_yield_to(tcpu) <= 0)
+ goto no_yield;
+
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: done", tid);
+ return 0;
+no_yield:
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
+ vcpu->stat.diagnose_9c_ignored++;
return 0;
}
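[Annotation] A diag 0x9c directed-yield hint is now acted on only when it can plausibly help — not to self, not to a missing vCPU, and not to a target that is already running — and every ignored hint is counted in the new diagnose_9c_ignored statistic. A stand-alone model of the decision order (struct vcpu and the running test are stand-ins for the KVM structures):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct vcpu { int id; int cpu; };       /* cpu >= 0: currently running */

        static bool should_yield_to(const struct vcpu *self, const struct vcpu *tgt)
        {
                if (tgt == NULL || tgt == self) /* invalid target / yield to self */
                        return false;
                if (tgt->cpu >= 0)              /* target already running: no point */
                        return false;
                return true;                    /* worth trying kvm_vcpu_yield_to() */
        }

        int main(void)
        {
                struct vcpu a = { .id = 0, .cpu = 2 }, b = { .id = 1, .cpu = -1 };

                printf("%d %d %d\n", should_yield_to(&a, &a),
                       should_yield_to(&a, &b), should_yield_to(&b, &a));
                return 0;                       /* prints: 0 1 0 */
        }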
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index d1ccc168c071..165dea4c7f19 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1477,8 +1477,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
return 0;
}
-static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
- struct kvm_s390_irq *irq)
+static int __inject_sigp_restart(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -2007,7 +2006,7 @@ static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
rc = __inject_sigp_stop(vcpu, irq);
break;
case KVM_S390_RESTART:
- rc = __inject_sigp_restart(vcpu, irq);
+ rc = __inject_sigp_restart(vcpu);
break;
case KVM_S390_INT_CLOCK_COMP:
rc = __inject_ckc(vcpu);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d047e846e1b9..d9e6bf3d54f0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -155,6 +155,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
+ { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
@@ -453,16 +454,14 @@ static void kvm_s390_cpu_feat_init(void)
int kvm_arch_init(void *opaque)
{
- int rc;
+ int rc = -ENOMEM;
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf)
return -ENOMEM;
- if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
- rc = -ENOMEM;
- goto out_debug_unreg;
- }
+ if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
+ goto out;
kvm_s390_cpu_feat_init();
@@ -470,19 +469,17 @@ int kvm_arch_init(void *opaque)
rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
if (rc) {
pr_err("A FLIC registration call failed with rc=%d\n", rc);
- goto out_debug_unreg;
+ goto out;
}
rc = kvm_s390_gib_init(GAL_ISC);
if (rc)
- goto out_gib_destroy;
+ goto out;
return 0;
-out_gib_destroy:
- kvm_s390_gib_destroy();
-out_debug_unreg:
- debug_unregister(kvm_s390_dbf);
+out:
+ kvm_arch_exit();
return rc;
}
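[Annotation] kvm_arch_init() collapses its per-resource error labels into a single out: label that calls kvm_arch_exit(); this only works if the teardown tolerates whatever subset of resources was actually set up, and if rc is pre-initialized for failure paths that do not set it (here the debug_register_view() failure reuses the initial -ENOMEM). A toy model of the pattern; all names are illustrative, not the KVM functions:

        #include <errno.h>
        #include <stdlib.h>

        static void *trace_buf;

        static int register_view(void) { return 0; }   /* failable step, no rc */
        static int register_flic(void) { return 0; }   /* failable step, sets rc */

        static void teardown_all(void)          /* must tolerate partial setup */
        {
                free(trace_buf);                /* free(NULL) is harmless */
                trace_buf = NULL;
        }

        static int init_all(void)
        {
                int rc = -ENOMEM;

                trace_buf = malloc(32);
                if (!trace_buf)
                        return -ENOMEM;
                if (register_view())
                        goto out;               /* rc still holds -ENOMEM */
                rc = register_flic();
                if (rc)
                        goto out;
                return 0;
        out:
                teardown_all();                 /* one label, one teardown call */
                return rc;
        }

        int main(void) { return init_all() ? 1 : 0; }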
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 30a7c8c29964..ce1e4bbe53aa 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -74,7 +74,7 @@ static inline int arch_load_niai4(int *lock)
{
int owner;
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */
" l %0,%1\n"
: "=d" (owner) : "Q" (*lock) : "memory");
@@ -85,7 +85,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
int expected = old;
- asm volatile(
+ asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */
" cs %0,%3,%1\n"
: "=d" (old), "=Q" (*lock)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 510a18299196..a51c892f14f3 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
- len = *lenp;
- if (copy_from_user(buf, buffer,
- len > sizeof(buf) ? sizeof(buf) : len))
+ len = min(*lenp, sizeof(buf));
+ if (copy_from_user(buf, buffer, len))
return -EFAULT;
- buf[sizeof(buf) - 1] = '\0';
+ buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
+ *ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
+ *lenp = len;
+ *ppos += len;
}
- *lenp = len;
- *ppos += len;
return 0;
}
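[Annotation] The write path of the sysctl handler now clamps the copy to the buffer size with min(), NUL-terminates at the length actually copied, and advances *ppos so the consumed input is not replayed; the read path updates *lenp and *ppos only on success. A stand-alone illustration of the clamp-and-terminate step (copy_from_user() modelled by memcpy()):

        #include <stdio.h>
        #include <string.h>

        #define min(a, b) ((a) < (b) ? (a) : (b))      /* kernel's min() modelled */

        int main(void)
        {
                char buf[8];
                const char user[] = "12345 678901";    /* longer than buf */
                size_t lenp = sizeof(user);
                size_t len = min(lenp, sizeof(buf));

                memcpy(buf, user, len);                /* copy_from_user() stand-in */
                buf[len - 1] = '\0';                   /* terminate what was copied */
                printf("parsed: \"%s\"\n", buf);
                return 0;
        }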
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index a124f19f7b3c..f0ce22220565 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -118,6 +118,7 @@ void __init paging_init(void)
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
+ zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
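[Annotation] Setting zone_dma_bits = 31 declares that ZONE_DMA covers 31-bit addresses — the low 2 GiB — which the generic DMA-direct code uses (since zone_dma_bits became a per-arch variable) to decide when allocations must come from GFP_DMA. A trivial check of that bound:

        #include <stdio.h>

        int main(void)
        {
                unsigned int zone_dma_bits = 31;

                /* 31 address bits => ZONE_DMA ends at 2 GiB on s390 */
                printf("%llu MiB\n", (1ULL << zone_dma_bits) >> 20);
                return 0;       /* prints: 2048 MiB */
        }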
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 1864a8bb9622..59ad7997fed1 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -70,7 +70,7 @@ void notrace s390_kernel_write(void *dst, const void *src, size_t size)
spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
}
-static int __memcpy_real(void *dest, void *src, size_t count)
+static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
register unsigned long _dest asm("2") = (unsigned long) dest;
register unsigned long _len1 asm("3") = (unsigned long) count;
@@ -91,19 +91,23 @@ static int __memcpy_real(void *dest, void *src, size_t count)
return rc;
}
-static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
- unsigned long count)
+static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
+ unsigned long src,
+ unsigned long count)
{
int irqs_disabled, rc;
unsigned long flags;
if (!count)
return 0;
- flags = __arch_local_irq_stnsm(0xf8UL);
+ flags = arch_local_irq_save();
irqs_disabled = arch_irqs_disabled_flags(flags);
if (!irqs_disabled)
trace_hardirqs_off();
+ __arch_local_irq_stnsm(0xf8); // disable DAT
rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
+ if (flags & PSW_MASK_DAT)
+ __arch_local_irq_stosm(0x04); // enable DAT
if (!irqs_disabled)
trace_hardirqs_on();
__arch_local_irq_ssm(flags);
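[Annotation] _memcpy_real() now saves the full PSW mask, disables DAT explicitly around the real-mode copy, and re-enables DAT (only if it was on) before the trace call so that code runs translated again; the final ssm restores the complete original mask. The __no_sanitize_address annotations keep KASAN out of code that runs with DAT off, where the shadow memory is not accessible. A toy model of the save/disable/conditionally-restore ordering — PSW_MASK_DAT and the helpers are stand-ins, not the kernel's definitions:

        #include <stdio.h>

        #define PSW_MASK_DAT 0x0400UL

        static unsigned long psw;               /* pretend current PSW mask */

        static unsigned long save_irq(void) { unsigned long f = psw; psw &= ~1UL; return f; }
        static void restore(unsigned long f) { psw = f; }
        static void dat_off(void)            { psw &= ~PSW_MASK_DAT; }
        static void dat_on(void)             { psw |= PSW_MASK_DAT; }

        int main(void)
        {
                unsigned long flags;

                psw = PSW_MASK_DAT | 1UL;       /* DAT on, irqs on */
                flags = save_irq();             /* irqs off, old mask captured */
                dat_off();                      /* ... real-mode copy here ... */
                if (flags & PSW_MASK_DAT)       /* re-enable only what was on */
                        dat_on();
                restore(flags);                 /* ssm: full mask back */
                printf("psw=%#lx\n", psw);      /* prints: psw=0x401 */
                return 0;
        }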
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index ce88211b9c6c..8d2134136290 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -23,6 +23,8 @@
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/bpf.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include <asm/facility.h>
@@ -38,10 +40,11 @@ struct bpf_jit {
int size; /* Size of program and literal pool */
int size_prg; /* Size of program */
int prg; /* Current position in program */
- int lit_start; /* Start of literal pool */
- int lit; /* Current position in literal pool */
+ int lit32_start; /* Start of 32-bit literal pool */
+ int lit32; /* Current position in 32-bit literal pool */
+ int lit64_start; /* Start of 64-bit literal pool */
+ int lit64; /* Current position in 64-bit literal pool */
int base_ip; /* Base address for literal pool */
- int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
@@ -49,14 +52,10 @@ struct bpf_jit {
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
-
-#define SEEN_MEM (1 << 0) /* use mem[] for temporary storage */
-#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */
-#define SEEN_LITERAL (1 << 2) /* code uses literals */
-#define SEEN_FUNC (1 << 3) /* calls C functions */
-#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */
-#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */
+#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
+#define SEEN_LITERAL BIT(1) /* code uses literals */
+#define SEEN_FUNC BIT(2) /* calls C functions */
+#define SEEN_TAIL_CALL BIT(3) /* code uses tail calls */
#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
@@ -131,13 +130,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT2(op) \
({ \
if (jit->prg_buf) \
- *(u16 *) (jit->prg_buf + jit->prg) = op; \
+ *(u16 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 2; \
})
#define EMIT2(op, b1, b2) \
({ \
- _EMIT2(op | reg(b1, b2)); \
+ _EMIT2((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -145,20 +144,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT4(op) \
({ \
if (jit->prg_buf) \
- *(u32 *) (jit->prg_buf + jit->prg) = op; \
+ *(u32 *) (jit->prg_buf + jit->prg) = (op); \
jit->prg += 4; \
})
#define EMIT4(op, b1, b2) \
({ \
- _EMIT4(op | reg(b1, b2)); \
+ _EMIT4((op) | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT4_RRF(op, b1, b2, b3) \
({ \
- _EMIT4(op | reg_high(b3) << 8 | reg(b1, b2)); \
+ _EMIT4((op) | reg_high(b3) << 8 | reg(b1, b2)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
REG_SET_SEEN(b3); \
@@ -167,13 +166,13 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT4_DISP(op, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
- _EMIT4(op | __disp); \
+ _EMIT4((op) | __disp); \
})
#define EMIT4_DISP(op, b1, b2, disp) \
({ \
- _EMIT4_DISP(op | reg_high(b1) << 16 | \
- reg_high(b2) << 8, disp); \
+ _EMIT4_DISP((op) | reg_high(b1) << 16 | \
+ reg_high(b2) << 8, (disp)); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -181,21 +180,27 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT4_IMM(op, b1, imm) \
({ \
unsigned int __imm = (imm) & 0xffff; \
- _EMIT4(op | reg_high(b1) << 16 | __imm); \
+ _EMIT4((op) | reg_high(b1) << 16 | __imm); \
REG_SET_SEEN(b1); \
})
#define EMIT4_PCREL(op, pcrel) \
({ \
long __pcrel = ((pcrel) >> 1) & 0xffff; \
- _EMIT4(op | __pcrel); \
+ _EMIT4((op) | __pcrel); \
+})
+
+#define EMIT4_PCREL_RIC(op, mask, target) \
+({ \
+ int __rel = ((target) - jit->prg) / 2; \
+ _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \
})
#define _EMIT6(op1, op2) \
({ \
if (jit->prg_buf) { \
- *(u32 *) (jit->prg_buf + jit->prg) = op1; \
- *(u16 *) (jit->prg_buf + jit->prg + 4) = op2; \
+ *(u32 *) (jit->prg_buf + jit->prg) = (op1); \
+ *(u16 *) (jit->prg_buf + jit->prg + 4) = (op2); \
} \
jit->prg += 6; \
})
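[Annotation] The pervasive change in these EMIT macros is argument hygiene: every use of a macro parameter is parenthesized so that compound arguments cannot be re-associated by operator precedence. A stand-alone demonstration of the failure mode being closed off (macro names are illustrative):

        #include <stdio.h>

        #define EMIT_BAD(op)    (op | 0x0f)     /* unparenthesized argument */
        #define EMIT_GOOD(op)   ((op) | 0x0f)

        int main(void)
        {
                /* "|" binds tighter than "?:", so the bad form ORs only into
                 * the else branch: 0x10 instead of the intended 0x1f. */
                printf("%#x\n", EMIT_BAD(1 ? 0x10 : 0x20));   /* 0x10 */
                printf("%#x\n", EMIT_GOOD(1 ? 0x10 : 0x20));  /* 0x1f */
                return 0;
        }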
@@ -203,20 +208,20 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define _EMIT6_DISP(op1, op2, disp) \
({ \
unsigned int __disp = (disp) & 0xfff; \
- _EMIT6(op1 | __disp, op2); \
+ _EMIT6((op1) | __disp, op2); \
})
#define _EMIT6_DISP_LH(op1, op2, disp) \
({ \
- u32 _disp = (u32) disp; \
+ u32 _disp = (u32) (disp); \
unsigned int __disp_h = _disp & 0xff000; \
unsigned int __disp_l = _disp & 0x00fff; \
- _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4); \
+ _EMIT6((op1) | __disp_l, (op2) | __disp_h >> 4); \
})
#define EMIT6_DISP_LH(op1, op2, b1, b2, b3, disp) \
({ \
- _EMIT6_DISP_LH(op1 | reg(b1, b2) << 16 | \
+ _EMIT6_DISP_LH((op1) | reg(b1, b2) << 16 | \
reg_high(b3) << 8, op2, disp); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
@@ -226,8 +231,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
({ \
int rel = (jit->labels[label] - jit->prg) >> 1; \
- _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), \
- op2 | mask << 12); \
+ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
+ (op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
@@ -235,68 +240,83 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
({ \
int rel = (jit->labels[label] - jit->prg) >> 1; \
- _EMIT6(op1 | (reg_high(b1) | mask) << 16 | \
- (rel & 0xffff), op2 | (imm & 0xff) << 8); \
+ _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
+ (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
- BUILD_BUG_ON(((unsigned long) imm) > 0xff); \
+ BUILD_BUG_ON(((unsigned long) (imm)) > 0xff); \
})
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
/* Branch instruction needs 6 bytes */ \
- int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
- _EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask); \
+ int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
#define EMIT6_PCREL_RILB(op, b, target) \
({ \
- int rel = (target - jit->prg) / 2; \
- _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
+ _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\
REG_SET_SEEN(b); \
})
#define EMIT6_PCREL_RIL(op, target) \
({ \
- int rel = (target - jit->prg) / 2; \
- _EMIT6(op | rel >> 16, rel & 0xffff); \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
+ _EMIT6((op) | rel >> 16, rel & 0xffff); \
+})
+
+#define EMIT6_PCREL_RILC(op, mask, target) \
+({ \
+ EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \
})
#define _EMIT6_IMM(op, imm) \
({ \
unsigned int __imm = (imm); \
- _EMIT6(op | (__imm >> 16), __imm & 0xffff); \
+ _EMIT6((op) | (__imm >> 16), __imm & 0xffff); \
})
#define EMIT6_IMM(op, b1, imm) \
({ \
- _EMIT6_IMM(op | reg_high(b1) << 16, imm); \
+ _EMIT6_IMM((op) | reg_high(b1) << 16, imm); \
REG_SET_SEEN(b1); \
})
-#define EMIT_CONST_U32(val) \
+#define _EMIT_CONST_U32(val) \
({ \
unsigned int ret; \
- ret = jit->lit - jit->base_ip; \
- jit->seen |= SEEN_LITERAL; \
+ ret = jit->lit32; \
if (jit->prg_buf) \
- *(u32 *) (jit->prg_buf + jit->lit) = (u32) val; \
- jit->lit += 4; \
+ *(u32 *)(jit->prg_buf + jit->lit32) = (u32)(val);\
+ jit->lit32 += 4; \
ret; \
})
-#define EMIT_CONST_U64(val) \
+#define EMIT_CONST_U32(val) \
({ \
- unsigned int ret; \
- ret = jit->lit - jit->base_ip; \
jit->seen |= SEEN_LITERAL; \
+ _EMIT_CONST_U32(val) - jit->base_ip; \
+})
+
+#define _EMIT_CONST_U64(val) \
+({ \
+ unsigned int ret; \
+ ret = jit->lit64; \
if (jit->prg_buf) \
- *(u64 *) (jit->prg_buf + jit->lit) = (u64) val; \
- jit->lit += 8; \
+ *(u64 *)(jit->prg_buf + jit->lit64) = (u64)(val);\
+ jit->lit64 += 8; \
ret; \
})
+#define EMIT_CONST_U64(val) \
+({ \
+ jit->seen |= SEEN_LITERAL; \
+ _EMIT_CONST_U64(val) - jit->base_ip; \
+})
+
#define EMIT_ZERO(b1) \
({ \
if (!fp->aux->verifier_zext) { \
@@ -307,6 +327,67 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
})
/*
+ * Return whether this is the first pass. The first pass is special, since we
+ * don't know any sizes yet, and thus must be conservative.
+ */
+static bool is_first_pass(struct bpf_jit *jit)
+{
+ return jit->size == 0;
+}
+
+/*
+ * Return whether this is the code generation pass. The code generation pass is
+ * special, since we should change as little as possible.
+ */
+static bool is_codegen_pass(struct bpf_jit *jit)
+{
+ return jit->prg_buf;
+}
+
+/*
+ * Return whether "rel" can be encoded as a short PC-relative offset
+ */
+static bool is_valid_rel(int rel)
+{
+ return rel >= -65536 && rel <= 65534;
+}
+
+/*
+ * Return whether "off" can be reached using a short PC-relative offset
+ */
+static bool can_use_rel(struct bpf_jit *jit, int off)
+{
+ return is_valid_rel(off - jit->prg);
+}
+
+/*
+ * Return whether given displacement can be encoded using
+ * Long-Displacement Facility
+ */
+static bool is_valid_ldisp(int disp)
+{
+ return disp >= -524288 && disp <= 524287;
+}
+
+/*
+ * Return whether the next 32-bit literal pool entry can be referenced using
+ * Long-Displacement Facility
+ */
+static bool can_use_ldisp_for_lit32(struct bpf_jit *jit)
+{
+ return is_valid_ldisp(jit->lit32 - jit->base_ip);
+}
+
+/*
+ * Return whether the next 64-bit literal pool entry can be referenced using
+ * Long-Displacement Facility
+ */
+static bool can_use_ldisp_for_lit64(struct bpf_jit *jit)
+{
+ return is_valid_ldisp(jit->lit64 - jit->base_ip);
+}
+
+/*
* Fill whole space with illegal instructions
*/
static void jit_fill_hole(void *area, unsigned int size)
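[Annotation] These helpers encode the JIT's multi-pass contract: the first pass knows no sizes and must assume the worst (long branch forms, full save area), intermediate passes may only shrink code, and the codegen pass must not move anything. is_valid_rel()'s bounds are the reach of a 16-bit signed PC-relative field counted in halfwords: -32768..32767 halfwords is -65536..65534 bytes. A stand-alone check of that arithmetic:

        #include <stdio.h>

        static int is_valid_rel(int rel)        /* rel in bytes, always even */
        {
                return rel >= -65536 && rel <= 65534;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       is_valid_rel(-65536),    /* 1: -32768 halfwords */
                       is_valid_rel(65534),     /* 1:  32767 halfwords */
                       is_valid_rel(65536));    /* 0: out of reach */
                return 0;
        }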
@@ -383,9 +464,18 @@ static int get_end(struct bpf_jit *jit, int start)
*/
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{
-
+ const int last = 15, save_restore_size = 6;
int re = 6, rs;
+ if (is_first_pass(jit)) {
+ /*
+ * We don't know yet which registers are used. Reserve space
+ * conservatively.
+ */
+ jit->prg += (last - re + 1) * save_restore_size;
+ return;
+ }
+
do {
rs = get_start(jit, re);
if (!rs)
@@ -396,7 +486,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
else
restore_regs(jit, rs, re, stack_depth);
re++;
- } while (re <= 15);
+ } while (re <= last);
}
/*
@@ -420,21 +510,28 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
/* Save registers */
save_restore_regs(jit, REGS_SAVE, stack_depth);
/* Setup literal pool */
- if (jit->seen & SEEN_LITERAL) {
- /* basr %r13,0 */
- EMIT2(0x0d00, REG_L, REG_0);
- jit->base_ip = jit->prg;
+ if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) {
+ if (!is_first_pass(jit) &&
+ is_valid_ldisp(jit->size - (jit->prg + 2))) {
+ /* basr %l,0 */
+ EMIT2(0x0d00, REG_L, REG_0);
+ jit->base_ip = jit->prg;
+ } else {
+ /* larl %l,lit32_start */
+ EMIT6_PCREL_RILB(0xc0000000, REG_L, jit->lit32_start);
+ jit->base_ip = jit->lit32_start;
+ }
}
/* Setup stack and backchain */
- if (jit->seen & SEEN_STACK) {
- if (jit->seen & SEEN_FUNC)
+ if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) {
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* lgr %w1,%r15 (backchain) */
EMIT4(0xb9040000, REG_W1, REG_15);
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
- if (jit->seen & SEEN_FUNC)
+ if (is_first_pass(jit) || (jit->seen & SEEN_FUNC))
/* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152);
@@ -446,12 +543,6 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
*/
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
- /* Return 0 */
- if (jit->seen & SEEN_RET0) {
- jit->ret0_ip = jit->prg;
- /* lghi %b0,0 */
- EMIT4_IMM(0xa7090000, BPF_REG_0, 0);
- }
jit->exit_ip = jit->prg;
/* Load exit code: lgr %r2,%b0 */
EMIT4(0xb9040000, REG_2, BPF_REG_0);
@@ -476,7 +567,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
_EMIT2(0x07fe);
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
- (jit->seen & SEEN_FUNC)) {
+ (is_first_pass(jit) || (jit->seen & SEEN_FUNC))) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
if (test_facility(35)) {
@@ -506,16 +597,14 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
int i, bool extra_pass)
{
struct bpf_insn *insn = &fp->insnsi[i];
- int jmp_off, last, insn_count = 1;
u32 dst_reg = insn->dst_reg;
u32 src_reg = insn->src_reg;
+ int last, insn_count = 1;
u32 *addrs = jit->addrs;
s32 imm = insn->imm;
s16 off = insn->off;
unsigned int mask;
- if (dst_reg == BPF_REG_AX || src_reg == BPF_REG_AX)
- jit->seen |= SEEN_REG_AX;
switch (insn->code) {
/*
* BPF_MOV
@@ -549,9 +638,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
u64 imm64;
imm64 = (u64)(u32) insn[0].imm | ((u64)(u32) insn[1].imm) << 32;
- /* lg %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm64));
+ /* lgrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc4080000, dst_reg, _EMIT_CONST_U64(imm64));
insn_count = 2;
break;
}
@@ -680,9 +768,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 0);
/* lr %w1,%dst */
EMIT2(0x1800, REG_W1, dst_reg);
- /* dl %w0,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
- EMIT_CONST_U32(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit32(jit)) {
+ /* dl %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0097, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U32(imm));
+ } else {
+ /* lgfrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc40c0000, dst_reg,
+ _EMIT_CONST_U32(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* dlr %w0,%dst */
+ EMIT4(0xb9970000, REG_W0, dst_reg);
+ }
/* llgfr %dst,%rc */
EMIT4(0xb9160000, dst_reg, rc_reg);
if (insn_is_zext(&insn[1]))
@@ -704,9 +801,18 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7090000, REG_W0, 0);
/* lgr %w1,%dst */
EMIT4(0xb9040000, REG_W1, dst_reg);
- /* dlg %w0,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* dlg %w0,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %dst,imm */
+ EMIT6_PCREL_RILB(0xc4080000, dst_reg,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* dlgr %w0,%dst */
+ EMIT4(0xb9870000, REG_W0, dst_reg);
+ }
/* lgr %dst,%rc */
EMIT4(0xb9040000, dst_reg, rc_reg);
break;
@@ -729,9 +835,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
- /* ng %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0080, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* ng %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0080,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* ngr %dst,%w0 */
+ EMIT4(0xb9800000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_OR
@@ -751,9 +867,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
- /* og %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0081, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* og %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0081,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* ogr %dst,%w0 */
+ EMIT4(0xb9810000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_XOR
@@ -775,9 +901,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
- /* xg %dst,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0082, dst_reg, REG_0, REG_L,
- EMIT_CONST_U64(imm));
+ if (!is_first_pass(jit) && can_use_ldisp_for_lit64(jit)) {
+ /* xg %dst,<d(imm)>(%l) */
+ EMIT6_DISP_LH(0xe3000000, 0x0082,
+ dst_reg, REG_0, REG_L,
+ EMIT_CONST_U64(imm));
+ } else {
+ /* lgrl %w0,imm */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W0,
+ _EMIT_CONST_U64(imm));
+ jit->seen |= SEEN_LITERAL;
+ /* xgr %dst,%w0 */
+ EMIT4(0xb9820000, dst_reg, REG_W0);
+ }
break;
/*
* BPF_LSH
@@ -1023,9 +1159,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
REG_SET_SEEN(BPF_REG_5);
jit->seen |= SEEN_FUNC;
- /* lg %w1,<d(imm)>(%l) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
- EMIT_CONST_U64(func));
+ /* lgrl %w1,func */
+ EMIT6_PCREL_RILB(0xc4080000, REG_W1, _EMIT_CONST_U64(func));
if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
@@ -1054,9 +1189,17 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* llgf %w1,map.max_entries(%b2) */
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
- /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
- REG_W1, 0, 0xa);
+ /* if ((u32)%b3 >= (u32)%w1) goto out; */
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* clrj %b3,%w1,0xa,label0 */
+ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
+ REG_W1, 0, 0xa);
+ } else {
+ /* clr %b3,%w1 */
+ EMIT2(0x1500, BPF_REG_3, REG_W1);
+ /* brcl 0xa,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
+ }
/*
* if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
@@ -1071,9 +1214,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
- MAX_TAIL_CALL_CNT, 0, 0x2);
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
+ EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
+ MAX_TAIL_CALL_CNT, 0, 0x2);
+ } else {
+ /* clfi %w1,MAX_TAIL_CALL_CNT */
+ EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
+ /* brcl 0x2,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
+ }
/*
* prog = array->ptrs[index];
@@ -1085,11 +1235,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9160000, REG_1, BPF_REG_3);
/* sllg %r1,%r1,3: %r1 *= 8 */
EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
- /* lg %r1,prog(%b2,%r1) */
- EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
+ /* ltg %r1,prog(%b2,%r1) */
+ EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
- /* clgij %r1,0,0x8,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
+ if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
+ /* brc 0x8,label0 */
+ EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
+ } else {
+ /* brcl 0x8,label0 */
+ EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
+ }
/*
* Restore registers before calling function
@@ -1110,7 +1265,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
- if (last && !(jit->seen & SEEN_RET0))
+ if (last)
break;
/* j <exit> */
EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
@@ -1246,36 +1401,83 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
goto branch_oc;
branch_ks:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* crj or cgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
- dst_reg, REG_W1, i, off, mask);
+ /* cfi or cgfi %dst,imm */
+ EMIT6_IMM(is_jmp32 ? 0xc20d0000 : 0xc20c0000,
+ dst_reg, imm);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_ku:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* lgfi %w1,imm (load sign extend imm) */
- EMIT6_IMM(0xc0010000, REG_W1, imm);
- /* clrj or clgrj %dst,%w1,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
- dst_reg, REG_W1, i, off, mask);
+ /* clfi or clgfi %dst,imm */
+ EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000,
+ dst_reg, imm);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_xs:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* crj or cgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
- dst_reg, src_reg, i, off, mask);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* crj or cgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064),
+ dst_reg, src_reg, i, off, mask);
+ } else {
+ /* cr or cgr %dst,%src */
+ if (is_jmp32)
+ EMIT2(0x1900, dst_reg, src_reg);
+ else
+ EMIT4(0xb9200000, dst_reg, src_reg);
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_xu:
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
- /* clrj or clgrj %dst,%src,mask,off */
- EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
- dst_reg, src_reg, i, off, mask);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* clrj or clgrj %dst,%src,mask,off */
+ EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065),
+ dst_reg, src_reg, i, off, mask);
+ } else {
+ /* clr or clgr %dst,%src */
+ if (is_jmp32)
+ EMIT2(0x1500, dst_reg, src_reg);
+ else
+ EMIT4(0xb9210000, dst_reg, src_reg);
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
branch_oc:
- /* brc mask,jmp_off (branch instruction needs 4 bytes) */
- jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4);
- EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off);
+ if (!is_first_pass(jit) &&
+ can_use_rel(jit, addrs[i + off + 1])) {
+ /* brc mask,off */
+ EMIT4_PCREL_RIC(0xa7040000,
+ mask >> 12, addrs[i + off + 1]);
+ } else {
+ /* brcl mask,off */
+ EMIT6_PCREL_RILC(0xc0040000,
+ mask >> 12, addrs[i + off + 1]);
+ }
break;
}
default: /* too complex, give up */
@@ -1286,28 +1488,67 @@ branch_oc:
}
/*
+ * Return whether new i-th instruction address does not violate any invariant
+ */
+static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
+{
+ /* On the first pass anything goes */
+ if (is_first_pass(jit))
+ return true;
+
+ /* The codegen pass must not change anything */
+ if (is_codegen_pass(jit))
+ return jit->addrs[i] == jit->prg;
+
+ /* Passes in between must not increase code size */
+ return jit->addrs[i] >= jit->prg;
+}
+
+/*
+ * Update the address of i-th instruction
+ */
+static int bpf_set_addr(struct bpf_jit *jit, int i)
+{
+ if (!bpf_is_new_addr_sane(jit, i))
+ return -1;
+ jit->addrs[i] = jit->prg;
+ return 0;
+}
+
+/*
* Compile eBPF program into s390x code
*/
static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp,
bool extra_pass)
{
- int i, insn_count;
+ int i, insn_count, lit32_size, lit64_size;
- jit->lit = jit->lit_start;
+ jit->lit32 = jit->lit32_start;
+ jit->lit64 = jit->lit64_start;
jit->prg = 0;
bpf_jit_prologue(jit, fp->aux->stack_depth);
+ if (bpf_set_addr(jit, 0) < 0)
+ return -1;
for (i = 0; i < fp->len; i += insn_count) {
insn_count = bpf_jit_insn(jit, fp, i, extra_pass);
if (insn_count < 0)
return -1;
/* Next instruction address */
- jit->addrs[i + insn_count] = jit->prg;
+ if (bpf_set_addr(jit, i + insn_count) < 0)
+ return -1;
}
bpf_jit_epilogue(jit, fp->aux->stack_depth);
- jit->lit_start = jit->prg;
- jit->size = jit->lit;
+ lit32_size = jit->lit32 - jit->lit32_start;
+ lit64_size = jit->lit64 - jit->lit64_start;
+ jit->lit32_start = jit->prg;
+ if (lit32_size)
+ jit->lit32_start = ALIGN(jit->lit32_start, 4);
+ jit->lit64_start = jit->lit32_start + lit32_size;
+ if (lit64_size)
+ jit->lit64_start = ALIGN(jit->lit64_start, 8);
+ jit->size = jit->lit64_start + lit64_size;
jit->size_prg = jit->prg;
return 0;
}
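[Annotation] After each pass the two literal pools are laid out back to back behind the code: the 32-bit pool is 4-byte aligned, the 64-bit pool 8-byte aligned, and alignment is applied only when a pool is non-empty, so an unused pool costs nothing. A stand-alone rerun of that layout arithmetic (ALIGN as in the kernel; sizes are made up):

        #include <stdio.h>

        #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                int prg = 1234;                 /* code size after the epilogue */
                int lit32_size = 12, lit64_size = 16;
                int lit32_start = prg, lit64_start, size;

                if (lit32_size)
                        lit32_start = ALIGN(lit32_start, 4);
                lit64_start = lit32_start + lit32_size;
                if (lit64_size)
                        lit64_start = ALIGN(lit64_start, 8);
                size = lit64_start + lit64_size;
                printf("lit32@%d lit64@%d size=%d\n",
                       lit32_start, lit64_start, size);
                return 0;       /* lit32@1236 lit64@1248 size=1264 */
        }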
@@ -1369,7 +1610,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
memset(&jit, 0, sizeof(jit));
- jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
+ jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
if (jit.addrs == NULL) {
fp = orig_fp;
goto out;
@@ -1388,12 +1629,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
/*
* Final pass: Allocate and generate program
*/
- if (jit.size >= BPF_SIZE_MAX) {
- fp = orig_fp;
- goto free_addrs;
- }
-
- header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole);
+ header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 8, jit_fill_hole);
if (!header) {
fp = orig_fp;
goto free_addrs;
@@ -1422,7 +1658,7 @@ skip_init_ctx:
if (!fp->is_func || extra_pass) {
bpf_prog_fill_jited_linfo(fp, jit.addrs + 1);
free_addrs:
- kfree(jit.addrs);
+ kvfree(jit.addrs);
kfree(jit_data);
fp->aux->jit_data = NULL;
}