author      Antoine Tenart <antoine.tenart@bootlin.com>   2018-05-29 15:13:46 +0300
committer   Herbert Xu <herbert@gondor.apana.org.au>      2018-06-22 18:03:02 +0300
commit      b460edb6230ac2877b0d176b9122736fed6f3c6e (patch)
tree        f37158480ae6c85a36b73a6c7755132214597ef5
parent      30c217ef6411d24b8c8b0478b89335dc2dabe6e8 (diff)
download    linux-b460edb6230ac2877b0d176b9122736fed6f3c6e.tar.xz
crypto: inside-secure - sha512 support
This patch adds support for the sha512 algorithm to the Inside Secure
SafeXcel driver.
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
-rw-r--r--   drivers/crypto/inside-secure/safexcel.c      |   1
-rw-r--r--   drivers/crypto/inside-secure/safexcel.h      |  11
-rw-r--r--   drivers/crypto/inside-secure/safexcel_hash.c | 178
3 files changed, 153 insertions(+), 37 deletions(-)
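Most of the churn in safexcel_hash.c below comes from widening the request byte counters: the total and already-processed lengths are now kept as two u64 words each (req->len[] and req->processed[]), with an explicit carry into the high word on overflow, and a new safexcel_queued_len() helper computes the amount still queued. A minimal sketch of that add-with-carry pattern, illustrative only and not part of the patch (wide_count_add is a hypothetical name):

#include <linux/types.h>

/* Illustrative only -- not driver code. Add nbytes to a byte count kept as
 * two u64 words (low word first), carrying into the high word when the low
 * word wraps. The hunks below apply this pattern to req->len[] and
 * req->processed[].
 */
static inline void wide_count_add(u64 cnt[2], u64 nbytes)
{
        cnt[0] += nbytes;
        if (cnt[0] < nbytes)    /* low word wrapped around */
                cnt[1]++;
}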
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 8691f6369a13..126def0b4450 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -790,6 +790,7 @@ static struct safexcel_alg_template *safexcel_algs[] = {
         &safexcel_alg_sha1,
         &safexcel_alg_sha224,
         &safexcel_alg_sha256,
+        &safexcel_alg_sha512,
         &safexcel_alg_hmac_sha1,
         &safexcel_alg_hmac_sha224,
         &safexcel_alg_hmac_sha256,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index d2adedc23e9a..0438e058844c 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -313,6 +313,8 @@ struct safexcel_context_record {
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1         (0x2 << 23)
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224       (0x4 << 23)
 #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256       (0x3 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA384       (0x6 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA512       (0x5 << 23)
 #define CONTEXT_CONTROL_INV_FR                  (0x5 << 24)
 #define CONTEXT_CONTROL_INV_TR                  (0x6 << 24)
 
@@ -605,13 +607,13 @@ struct safexcel_context {
 };
 
 struct safexcel_ahash_export_state {
-        u64 len;
-        u64 processed;
+        u64 len[2];
+        u64 processed[2];
 
         u32 digest;
 
-        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
-        u8 cache[SHA256_BLOCK_SIZE];
+        u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
+        u8 cache[SHA512_BLOCK_SIZE];
 };
 
 /*
@@ -670,6 +672,7 @@ extern struct safexcel_alg_template safexcel_alg_cbc_aes;
 extern struct safexcel_alg_template safexcel_alg_sha1;
 extern struct safexcel_alg_template safexcel_alg_sha224;
 extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_sha512;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
 extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index c284c84cdddb..5d8e15e70cff 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -38,18 +38,26 @@ struct safexcel_ahash_req {
         u32 digest;
 
         u8 state_sz;    /* expected sate size, only set once */
-        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
+        u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
-        u64 len;
-        u64 processed;
+        u64 len[2];
+        u64 processed[2];
 
-        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+        u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
         dma_addr_t cache_dma;
         unsigned int cache_sz;
 
-        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+        u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
+static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+{
+        if (req->len[1] > req->processed[1])
+                return 0xffffffff - (req->len[0] - req->processed[0]);
+
+        return req->len[0] - req->processed[0];
+}
+
 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                 u32 input_length, u32 result_length)
 {
@@ -74,6 +82,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                      struct safexcel_command_desc *cdesc,
                                      unsigned int digestsize)
 {
+        struct safexcel_crypto_priv *priv = ctx->priv;
         int i;
 
         cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
@@ -81,12 +90,14 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
         cdesc->control_data.control0 |= req->digest;
 
         if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
-                if (req->processed) {
+                if (req->processed[0] || req->processed[1]) {
                         if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                         else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                  ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
 
                         cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                 } else {
@@ -101,13 +112,28 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                  * fields. Do this now as we need it to setup the first command
                  * descriptor.
                  */
-                if (req->processed) {
+                if (req->processed[0] || req->processed[1]) {
                         for (i = 0; i < digestsize / sizeof(u32); i++)
                                 ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
 
-                        if (req->finish)
-                                ctx->base.ctxr->data[i] =
-                                        cpu_to_le32(req->processed / EIP197_COUNTER_BLOCK_SIZE);
+                        if (req->finish) {
+                                u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+                                count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
+                                          req->processed[1]);
+
+                                /* This is a haredware limitation, as the
+                                 * counter must fit into an u32. This represents
+                                 * a farily big amount of input data, so we
+                                 * shouldn't see this.
+                                 */
+                                if (unlikely(count & 0xffff0000)) {
+                                        dev_warn(priv->dev,
+                                                 "Input data is too big\n");
+                                        return;
+                                }
+
+                                ctx->base.ctxr->data[i] = cpu_to_le32(count);
+                        }
                 }
         } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
@@ -126,7 +152,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
         struct ahash_request *areq = ahash_request_cast(async);
         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
         struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
-        int cache_len;
+        u64 cache_len;
 
         *ret = 0;
 
@@ -164,7 +190,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
                 memcpy(areq->result, sreq->state,
                        crypto_ahash_digestsize(ahash));
 
-        cache_len = sreq->len - sreq->processed;
+        cache_len = safexcel_queued_len(sreq);
         if (cache_len)
                 memcpy(sreq->cache, sreq->cache_next, cache_len);
 
@@ -185,9 +211,10 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
         struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
         struct safexcel_result_desc *rdesc;
         struct scatterlist *sg;
-        int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+        int i, extra, n_cdesc = 0, ret = 0;
+        u64 queued, len, cache_len;
 
-        queued = len = req->len - req->processed;
+        queued = len = safexcel_queued_len(req);
         if (queued <= crypto_ahash_blocksize(ahash))
                 cache_len = queued;
         else
@@ -260,7 +287,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                 int sglen = sg_dma_len(sg);
 
                 /* Do not overflow the request */
-                if (queued - sglen < 0)
+                if (queued < sglen)
                         sglen = queued;
 
                 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
@@ -304,7 +331,10 @@ send_command:
 
         spin_unlock_bh(&priv->ring[ring].egress_lock);
 
-        req->processed += len;
+        req->processed[0] += len;
+        if (req->processed[0] < len)
+                req->processed[1]++;
+
         request->req = &areq->base;
 
         *commands = n_cdesc;
@@ -335,14 +365,17 @@ static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
         struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         unsigned int state_w_sz = req->state_sz / sizeof(u32);
+        u64 processed;
         int i;
 
+        processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+        processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
+
         for (i = 0; i < state_w_sz; i++)
                 if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                         return true;
 
-        if (ctx->base.ctxr->data[state_w_sz] !=
-            cpu_to_le32(req->processed / EIP197_COUNTER_BLOCK_SIZE))
+        if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
                 return true;
 
         return false;
@@ -504,17 +537,17 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
 {
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
-        int queued, cache_len;
+        u64 queued, cache_len;
 
-        /* cache_len: everyting accepted by the driver but not sent yet,
-         * tot sz handled by update() - last req sz - tot sz handled by send()
-         */
-        cache_len = req->len - areq->nbytes - req->processed;
         /* queued: everything accepted by the driver which will be handled by
          * the next send() calls.
          * tot sz handled by update() - tot sz handled by send()
          */
-        queued = req->len - req->processed;
+        queued = safexcel_queued_len(req);
+        /* cache_len: everything accepted by the driver but not sent yet,
+         * tot sz handled by update() - last req sz - tot sz handled by send()
+         */
+        cache_len = queued - areq->nbytes;
 
         /*
          * In case there isn't enough bytes to proceed (less than a
@@ -541,8 +574,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
         req->needs_inv = false;
 
         if (ctx->base.ctxr) {
-                if (priv->version == EIP197 &&
-                    !ctx->base.needs_inv && req->processed &&
+                if (priv->version == EIP197 && !ctx->base.needs_inv &&
+                    (req->processed[0] || req->processed[1]) &&
                     req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                         /* We're still setting needs_inv here, even though it is
                          * cleared right away, because the needs_inv flag can be
@@ -585,7 +618,9 @@ static int safexcel_ahash_update(struct ahash_request *areq)
         if (!areq->nbytes)
                 return 0;
 
-        req->len += areq->nbytes;
+        req->len[0] += areq->nbytes;
+        if (req->len[0] < areq->nbytes)
+                req->len[1]++;
 
         safexcel_ahash_cache(areq);
 
@@ -600,7 +635,7 @@ static int safexcel_ahash_update(struct ahash_request *areq)
                 return safexcel_ahash_enqueue(areq);
 
         if (!req->last_req &&
-            req->len - req->processed > crypto_ahash_blocksize(ahash))
+            safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
                 return safexcel_ahash_enqueue(areq);
 
         return 0;
@@ -615,7 +650,7 @@ static int safexcel_ahash_final(struct ahash_request *areq)
         req->finish = true;
 
         /* If we have an overall 0 length request */
-        if (!(req->len + areq->nbytes)) {
+        if (!req->len[0] && !req->len[1] && !areq->nbytes) {
                 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                         memcpy(areq->result, sha1_zero_message_hash,
                                SHA1_DIGEST_SIZE);
@@ -625,6 +660,9 @@ static int safexcel_ahash_final(struct ahash_request *areq)
                 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                         memcpy(areq->result, sha256_zero_message_hash,
                                SHA256_DIGEST_SIZE);
+                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+                        memcpy(areq->result, sha512_zero_message_hash,
+                               SHA512_DIGEST_SIZE);
 
                 return 0;
         }
@@ -649,8 +687,10 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
         struct safexcel_ahash_req *req = ahash_request_ctx(areq);
         struct safexcel_ahash_export_state *export = out;
 
-        export->len = req->len;
-        export->processed = req->processed;
+        export->len[0] = req->len[0];
+        export->len[1] = req->len[1];
+        export->processed[0] = req->processed[0];
+        export->processed[1] = req->processed[1];
 
         export->digest = req->digest;
 
@@ -671,8 +711,10 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
         if (ret)
                 return ret;
 
-        req->len = export->len;
-        req->processed = export->processed;
+        req->len[0] = export->len[0];
+        req->len[1] = export->len[1];
+        req->processed[0] = export->processed[0];
+        req->processed[1] = export->processed[1];
 
         req->digest = export->digest;
 
@@ -1246,3 +1288,73 @@ struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
                 },
         },
 };
+
+static int safexcel_sha512_init(struct ahash_request *areq)
+{
+        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+        memset(req, 0, sizeof(*req));
+
+        req->state[0] = lower_32_bits(SHA512_H0);
+        req->state[1] = upper_32_bits(SHA512_H0);
+        req->state[2] = lower_32_bits(SHA512_H1);
+        req->state[3] = upper_32_bits(SHA512_H1);
+        req->state[4] = lower_32_bits(SHA512_H2);
+        req->state[5] = upper_32_bits(SHA512_H2);
+        req->state[6] = lower_32_bits(SHA512_H3);
+        req->state[7] = upper_32_bits(SHA512_H3);
+        req->state[8] = lower_32_bits(SHA512_H4);
+        req->state[9] = upper_32_bits(SHA512_H4);
+        req->state[10] = lower_32_bits(SHA512_H5);
+        req->state[11] = upper_32_bits(SHA512_H5);
+        req->state[12] = lower_32_bits(SHA512_H6);
+        req->state[13] = upper_32_bits(SHA512_H6);
+        req->state[14] = lower_32_bits(SHA512_H7);
+        req->state[15] = upper_32_bits(SHA512_H7);
+
+        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+        req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+        req->state_sz = SHA512_DIGEST_SIZE;
+
+        return 0;
+}
+
+static int safexcel_sha512_digest(struct ahash_request *areq)
+{
+        int ret = safexcel_sha512_init(areq);
+
+        if (ret)
+                return ret;
+
+        return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha512 = {
+        .type = SAFEXCEL_ALG_TYPE_AHASH,
+        .alg.ahash = {
+                .init = safexcel_sha512_init,
+                .update = safexcel_ahash_update,
+                .final = safexcel_ahash_final,
+                .finup = safexcel_ahash_finup,
+                .digest = safexcel_sha512_digest,
+                .export = safexcel_ahash_export,
+                .import = safexcel_ahash_import,
+                .halg = {
+                        .digestsize = SHA512_DIGEST_SIZE,
+                        .statesize = sizeof(struct safexcel_ahash_export_state),
+                        .base = {
+                                .cra_name = "sha512",
+                                .cra_driver_name = "safexcel-sha512",
+                                .cra_priority = 300,
+                                .cra_flags = CRYPTO_ALG_ASYNC |
+                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
+                                .cra_blocksize = SHA512_BLOCK_SIZE,
+                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+                                .cra_init = safexcel_ahash_cra_init,
+                                .cra_exit = safexcel_ahash_cra_exit,
+                                .cra_module = THIS_MODULE,
+                        },
+                },
+        },
+};
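For reference, the new transform is reached through the generic ahash API like any other sha512 provider. A minimal sketch, assuming a kernel-module context; sha512_digest_example is a hypothetical helper, and whether "safexcel-sha512" actually backs the tfm depends on cra_priority and on the hardware present:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hash a buffer with the "sha512" ahash; out must hold
 * crypto_ahash_digestsize() bytes (64 for sha512).
 */
static int sha512_digest_example(const void *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        void *buf;
        int ret;

        tfm = crypto_alloc_ahash("sha512", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto free_tfm;
        }

        /* DMA-capable copy of the input; stack memory must not be mapped. */
        buf = kmemdup(data, len, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto free_req;
        }
        sg_init_one(&sg, buf, len);

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                   CRYPTO_TFM_REQ_MAY_SLEEP,
                                   crypto_req_done, &wait);
        ahash_request_set_crypt(req, &sg, out, len);

        /* Submit asynchronously, then wait for completion. */
        ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

        kfree(buf);
free_req:
        ahash_request_free(req);
free_tfm:
        crypto_free_ahash(tfm);
        return ret;
}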