Diffstat (limited to 'crypto')
-rw-r--r--  crypto/af_alg.c                            |  15
-rw-r--r--  crypto/algapi.c                            |  16
-rw-r--r--  crypto/algif_hash.c                        |   4
-rw-r--r--  crypto/asymmetric_keys/public_key.c        |  28
-rw-r--r--  crypto/asymmetric_keys/restrict.c          |  44
-rw-r--r--  crypto/asymmetric_keys/verify_pefile.c     |   2
-rw-r--r--  crypto/asymmetric_keys/x509_public_key.c   |   5
-rw-r--r--  crypto/crypto_engine.c                     | 244
-rw-r--r--  crypto/jitterentropy.c                     |  10
-rw-r--r--  crypto/lrw.c                               |   6
-rw-r--r--  crypto/sig.c                               |   5
-rw-r--r--  crypto/xts.c                               |   6
12 files changed, 306 insertions, 79 deletions
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 6218c773d71c..ea6fb8e89d06 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
if (IS_ERR(ret)) {
up_read(&key->sem);
+ key_put(key);
return PTR_ERR(ret);
}
key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
+ key_put(key);
return -ENOMEM;
}
memcpy(key_data, ret, key_datalen);
up_read(&key->sem);
+ key_put(key);
err = type->setkey(ask->private, key_data, key_datalen);
@@ -992,7 +995,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ssize_t plen;
/* use the existing memory in an allocated page */
- if (ctx->merge) {
+ if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) {
sgl = list_entry(ctx->tsgl_list.prev,
struct af_alg_tsgl, list);
sg = sgl->sg + sgl->cur - 1;
@@ -1054,6 +1057,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
ctx->used += plen;
copied += plen;
size -= plen;
+ ctx->merge = 0;
} else {
do {
struct page *pg;
@@ -1085,12 +1089,12 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
size -= plen;
sgl->cur++;
} while (len && sgl->cur < MAX_SGL_ENTS);
+
+ ctx->merge = plen & (PAGE_SIZE - 1);
}
if (!size)
sg_mark_end(sg + sgl->cur - 1);
-
- ctx->merge = plen & (PAGE_SIZE - 1);
}
err = 0;
@@ -1191,6 +1195,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
areq->areqlen = areqlen;
areq->sk = sk;
+ areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
areq->tsgl = NULL;
@@ -1240,6 +1245,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
return -ENOMEM;
}
+ rsgl->sgl.need_unpin =
+ iov_iter_extract_will_pin(&msg->msg_iter);
rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
rsgl->sgl.sgt.nents = 0;
rsgl->sgl.sgt.orig_nents = 0;
@@ -1254,8 +1261,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
}
sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1);
- rsgl->sgl.need_unpin =
- iov_iter_extract_will_pin(&msg->msg_iter);
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
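
A minimal sketch (hypothetical helper, kernel-style) of the rule the af_alg.c hunks enforce: once the key reference has been taken in alg_setkey_by_key_serial(), every exit path, including the early error returns, must drop it with key_put().

	/* Hypothetical helper, shown only to illustrate the balanced use/put pattern. */
	static int copy_key_material(struct key *key, u8 **buf, unsigned int *len)
	{
		const void *data;
		int err = 0;

		down_read(&key->sem);			/* payload stable while held */
		data = key->payload.data[0];		/* layout is key-type specific */
		if (!data) {
			err = -ENOKEY;
			goto out;
		}
		*len = key->datalen;
		*buf = kmemdup(data, *len, GFP_KERNEL);
		if (!*buf)
			err = -ENOMEM;
	out:
		up_read(&key->sem);
		key_put(key);				/* dropped on every path */
		return err;
	}
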
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 5e7cd603d489..4fe95c448047 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -17,6 +17,7 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "internal.h"
@@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
inst->alg.cra_type->free(inst);
}
-static void crypto_destroy_instance(struct crypto_alg *alg)
+static void crypto_destroy_instance_workfn(struct work_struct *w)
{
- struct crypto_instance *inst = (void *)alg;
+ struct crypto_instance *inst = container_of(w, struct crypto_instance,
+ free_work);
struct crypto_template *tmpl = inst->tmpl;
crypto_free_instance(inst);
crypto_tmpl_put(tmpl);
}
+static void crypto_destroy_instance(struct crypto_alg *alg)
+{
+ struct crypto_instance *inst = container_of(alg,
+ struct crypto_instance,
+ alg);
+
+ INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
+ schedule_work(&inst->free_work);
+}
+
/*
* This function adds a spawn to the list secondary_spawns which
* will be used at the end of crypto_remove_spawns to unregister
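
The algapi.c change moves the actual freeing of a template instance out of the context in which the last reference is dropped and into process context via a workqueue. A minimal sketch of that general pattern, using a hypothetical object type rather than crypto_instance:

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct my_obj {
		struct work_struct free_work;
		/* ... payload ... */
	};

	static void my_obj_free_workfn(struct work_struct *w)
	{
		struct my_obj *obj = container_of(w, struct my_obj, free_work);

		kfree(obj);	/* runs in process context, so teardown may sleep */
	}

	/* Called when the last reference goes away, possibly from atomic context. */
	static void my_obj_release(struct my_obj *obj)
	{
		INIT_WORK(&obj->free_work, my_obj_free_workfn);
		schedule_work(&obj->free_work);
	}
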
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 0ab43e149f0e..82c44d4899b9 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -68,13 +68,15 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
struct hash_ctx *ctx = ask->private;
ssize_t copied = 0;
size_t len, max_pages, npages;
- bool continuing = ctx->more, need_init = false;
+ bool continuing, need_init = false;
int err;
max_pages = min_t(size_t, ALG_MAX_PAGES,
DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
lock_sock(sk);
+ continuing = ctx->more;
+
if (!continuing) {
/* Discard a previous request that wasn't marked MSG_MORE. */
hash_free_result(sk, ctx);
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e787598cb3f7..abeecb8329b3 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -42,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key,
void public_key_free(struct public_key *key)
{
if (key) {
- kfree(key->key);
+ kfree_sensitive(key->key);
kfree(key->params);
kfree(key);
}
@@ -185,8 +185,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
if (issig) {
sig = crypto_alloc_sig(alg_name, 0, 0);
- if (IS_ERR(sig))
+ if (IS_ERR(sig)) {
+ ret = PTR_ERR(sig);
goto error_free_key;
+ }
if (pkey->key_is_private)
ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -208,8 +210,10 @@ static int software_key_query(const struct kernel_pkey_params *params,
}
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
goto error_free_key;
+ }
if (pkey->key_is_private)
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -259,7 +263,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -300,8 +304,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (issig) {
sig = crypto_alloc_sig(alg_name, 0, 0);
- if (IS_ERR(sig))
+ if (IS_ERR(sig)) {
+ ret = PTR_ERR(sig);
goto error_free_key;
+ }
if (pkey->key_is_private)
ret = crypto_sig_set_privkey(sig, key, pkey->keylen);
@@ -313,8 +319,10 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
ksz = crypto_sig_maxsize(sig);
} else {
tfm = crypto_alloc_akcipher(alg_name, 0, 0);
- if (IS_ERR(tfm))
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
goto error_free_key;
+ }
if (pkey->key_is_private)
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
@@ -361,7 +369,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@@ -411,8 +419,10 @@ int public_key_verify_signature(const struct public_key *pkey,
key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
GFP_KERNEL);
- if (!key)
+ if (!key) {
+ ret = -ENOMEM;
goto error_free_tfm;
+ }
memcpy(key, pkey->key, pkey->keylen);
ptr = key + pkey->keylen;
@@ -431,7 +441,7 @@ int public_key_verify_signature(const struct public_key *pkey,
sig->digest, sig->digest_size);
error_free_key:
- kfree(key);
+ kfree_sensitive(key);
error_free_tfm:
crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
diff --git a/crypto/asymmetric_keys/restrict.c b/crypto/asymmetric_keys/restrict.c
index 276bdb627498..6b69ea40da23 100644
--- a/crypto/asymmetric_keys/restrict.c
+++ b/crypto/asymmetric_keys/restrict.c
@@ -148,6 +148,50 @@ int restrict_link_by_ca(struct key *dest_keyring,
return 0;
}
+/**
+ * restrict_link_by_digsig - Restrict additions to a ring of digsig keys
+ * @dest_keyring: Keyring being linked to.
+ * @type: The type of key being added.
+ * @payload: The payload of the new key.
+ * @trust_keyring: A ring of keys that can be used to vouch for the new cert.
+ *
+ * Check if the new certificate has digitalSignature usage set. If it does,
+ * then mark the new certificate as OK to link. Afterwards, verify
+ * the new certificate against the ones in the trust_keyring.
+ *
+ * Returns 0 if the new certificate was accepted, -ENOKEY if the
+ * certificate is not a digsig, -ENOPKG if the signature uses unsupported
+ * crypto, or some other error if there is a matching certificate but
+ * the signature check cannot be performed.
+ */
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ const struct public_key *pkey;
+
+ if (type != &key_type_asymmetric)
+ return -EOPNOTSUPP;
+
+ pkey = payload->data[asym_crypto];
+
+ if (!pkey)
+ return -ENOPKG;
+
+ if (!test_bit(KEY_EFLAG_DIGITALSIG, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_CA, &pkey->key_eflags))
+ return -ENOKEY;
+
+ if (test_bit(KEY_EFLAG_KEYCERTSIGN, &pkey->key_eflags))
+ return -ENOKEY;
+
+ return restrict_link_by_signature(dest_keyring, type, payload,
+ trust_keyring);
+}
+
static bool match_either_id(const struct asymmetric_key_id **pair,
const struct asymmetric_key_id *single)
{
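
How a caller would attach the new restriction to a keyring is not part of this diff; the sketch below follows the usual key_restriction/keyring_alloc() pattern, with hypothetical names for the keyring and its trust-anchor ring:

	static struct key *example_digsig_keyring;

	static int example_digsig_keyring_init(struct key *trust_keyring)
	{
		struct key_restriction *restriction;

		restriction = kzalloc(sizeof(*restriction), GFP_KERNEL);
		if (!restriction)
			return -ENOMEM;

		restriction->check = restrict_link_by_digsig;
		restriction->key = trust_keyring;	/* passed as @trust_keyring above */

		example_digsig_keyring = keyring_alloc(".example_digsig",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, restriction, NULL);
		if (IS_ERR(example_digsig_keyring)) {
			kfree(restriction);
			return PTR_ERR(example_digsig_keyring);
		}
		return 0;
	}
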
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index 22beaf2213a2..f440767bd727 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -391,7 +391,7 @@ error_no_desc:
* verify_pefile_signature - Verify the signature on a PE binary image
* @pebuf: Buffer containing the PE binary image
* @pelen: Length of the binary image
- * @trust_keys: Signing certificate(s) to use as starting points
+ * @trusted_keys: Signing certificate(s) to use as starting points
* @usage: The use to which the key is being put.
*
* Validate that the certificate chain inside the PKCS#7 message inside the PE
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 6fdfc82e23a8..7c71db3ac23d 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
goto out;
}
+ if (cert->unsupported_sig) {
+ ret = 0;
+ goto out;
+ }
+
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 74fcc0897041..108d9d55c509 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -7,15 +7,30 @@
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <crypto/engine.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
+/* Temporary algorithm flag used to indicate an updated driver. */
+#define CRYPTO_ALG_ENGINE 0x200
+
+struct crypto_engine_alg {
+ struct crypto_alg base;
+ struct crypto_engine_op op;
+};
+
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
@@ -26,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
- bool finalize_req = false;
- int ret;
- struct crypto_engine_ctx *enginectx;
/*
* If hardware cannot enqueue more requests
@@ -38,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
- finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
- if (finalize_req || engine->retry_support) {
- enginectx = crypto_tfm_ctx(req->tfm);
- if (enginectx->op.prepare_request &&
- enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
- }
lockdep_assert_in_softirq();
crypto_request_complete(req, err);
@@ -72,10 +74,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
+ struct crypto_engine_alg *alg;
+ struct crypto_engine_op *op;
unsigned long flags;
bool was_busy = false;
int ret;
- struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@@ -141,27 +144,21 @@ start_request:
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
- goto req_err_2;
+ goto req_err_1;
}
}
- enginectx = crypto_tfm_ctx(async_req->tfm);
-
- if (enginectx->op.prepare_request) {
- ret = enginectx->op.prepare_request(engine, async_req);
- if (ret) {
- dev_err(engine->dev, "failed to prepare request: %d\n",
- ret);
- goto req_err_2;
- }
- }
- if (!enginectx->op.do_one_request) {
+ if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
+ alg = container_of(async_req->tfm->__crt_alg,
+ struct crypto_engine_alg, base);
+ op = &alg->op;
+ } else {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
- ret = enginectx->op.do_one_request(engine, async_req);
+ ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
@@ -177,18 +174,6 @@ start_request:
ret);
goto req_err_1;
}
- /*
- * If retry mechanism is supported,
- * unprepare current request and
- * enqueue it back into crypto-engine queue.
- */
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine,
- async_req);
- if (ret)
- dev_err(engine->dev,
- "failed to unprepare request\n");
- }
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
@@ -204,13 +189,6 @@ start_request:
goto retry;
req_err_1:
- if (enginectx->op.unprepare_request) {
- ret = enginectx->op.unprepare_request(engine, async_req);
- if (ret)
- dev_err(engine->dev, "failed to unprepare request\n");
- }
-
-req_err_2:
crypto_request_complete(async_req, ret);
retry:
@@ -591,5 +569,177 @@ int crypto_engine_exit(struct crypto_engine *engine)
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
+int crypto_engine_register_aead(struct aead_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
+
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
+{
+ crypto_unregister_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
+
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_aead(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_aeads(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
+
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
+
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
+{
+ crypto_unregister_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
+
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_ahash(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_ahashes(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
+
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
+
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
+{
+ crypto_unregister_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
+
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
+{
+ crypto_unregister_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
+{
+ if (!alg->op.do_one_request)
+ return -EINVAL;
+
+ alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+ return crypto_register_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
+
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
+{
+ crypto_unregister_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
+
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ ret = crypto_engine_register_skcipher(&algs[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ crypto_engine_unregister_skciphers(algs, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
+
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count)
+{
+ int i;
+
+ for (i = count - 1; i >= 0; --i)
+ crypto_engine_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
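
With the prepare_request/unprepare_request hooks gone, a driver using the engine now embeds its crypto_engine_op in an *_engine_alg wrapper and registers through the new helpers, which OR in CRYPTO_ALG_ENGINE. A minimal skcipher-flavoured sketch with hypothetical driver names (setkey/encrypt/decrypt and the actual hardware programming are elided; needs <crypto/engine.h> and <crypto/aes.h>):

	static int example_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);

		/* ... submit req to the hardware; on completion, call: */
		crypto_finalize_skcipher_request(engine, req, 0);
		return 0;
	}

	static struct skcipher_engine_alg example_alg = {
		.base = {
			.base.cra_name		= "cbc(aes)",
			.base.cra_driver_name	= "cbc-aes-example",
			.base.cra_priority	= 300,
			.base.cra_flags		= CRYPTO_ALG_ASYNC,
			.base.cra_blocksize	= AES_BLOCK_SIZE,
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.ivsize			= AES_BLOCK_SIZE,
			/* .setkey, .encrypt, .decrypt, cra_ctxsize, ... */
		},
		.op.do_one_request	= example_do_one_request,
	};

Registration then becomes crypto_engine_register_skcipher(&example_alg) in probe, paired with crypto_engine_unregister_skcipher(&example_alg) on removal.
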
diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c
index c7d7f2caa779..fe9c233ec769 100644
--- a/crypto/jitterentropy.c
+++ b/crypto/jitterentropy.c
@@ -89,10 +89,14 @@ struct rand_data {
unsigned int rct_count; /* Number of stuck values */
/* Intermittent health test failure threshold of 2^-30 */
-#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
-#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
+ /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
+ /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */
+#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* Permanent health test failure threshold of 2^-60 */
-#define JENT_RCT_CUTOFF_PERMANENT 60
+ /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
+ /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF_PERMANENT (61 - 1)
#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 1b0f76ba3eb5..59260aefed28 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ int len;
- len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
- if (len < 2 || len >= sizeof(ecb_name))
+ len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+ if (len < 2)
goto err_free_inst;
if (ecb_name[len - 1] != ')')
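
strlcpy() returns the length of the source string, which is why truncation needed the separate "len >= sizeof(ecb_name)" test; strscpy() instead returns the number of characters copied, or -E2BIG on truncation, so with a signed len the single "len < 2" check covers both cases. The same conversion is applied to crypto/xts.c further below. A small illustrative fragment with a hypothetical buffer:

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "ecb(aes)", sizeof(buf));
	/* source needs 9 bytes: n == -E2BIG, buf holds the truncated "ecb(aes" */

	n = strscpy(buf, "aes", sizeof(buf));
	/* n == 3: characters copied, not counting the terminating NUL */
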
diff --git a/crypto/sig.c b/crypto/sig.c
index b48c18ec65cd..224c47019297 100644
--- a/crypto/sig.c
+++ b/crypto/sig.c
@@ -21,11 +21,6 @@
static const struct crypto_type crypto_sig_type;
-static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
-{
- return container_of(tfm, struct crypto_sig, base);
-}
-
static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_type != &crypto_sig_type)
diff --git a/crypto/xts.c b/crypto/xts.c
index 09be909a6a1a..548b302c6c6a 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
- unsigned len;
+ int len;
- len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
- if (len < 2 || len >= sizeof(ctx->name))
+ len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+ if (len < 2)
goto err_free_inst;
if (ctx->name[len - 1] != ')')