Diffstat (limited to 'drivers/crypto/sunxi-ss')
 drivers/crypto/sunxi-ss/sun4i-ss-cipher.c | 213
 drivers/crypto/sunxi-ss/sun4i-ss-core.c   | 237
 drivers/crypto/sunxi-ss/sun4i-ss-hash.c   | 138
 drivers/crypto/sunxi-ss/sun4i-ss.h        |  34
4 files changed, 313 insertions(+), 309 deletions(-)
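The patch below converts the driver's block-cipher code from the legacy ablkcipher API to the skcipher API: request handlers take a struct skcipher_request (cryptlen/iv instead of nbytes/info), the algorithm descriptors become struct skcipher_alg registered with crypto_register_skcipher(), and the per-request context size is set with crypto_skcipher_set_reqsize(). For reference, here is a minimal sketch of the target pattern; it is not part of the patch, and all example_* identifiers are placeholders rather than driver symbols.

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
#include <linux/string.h>

struct example_tfm_ctx {
	u32 key[AES_MAX_KEY_SIZE / 4];
	unsigned int keylen;
};

struct example_req_ctx {
	u32 mode;
};

/* check and store the key, in the same shape as sun4i_ss_aes_setkey() */
static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);
	return 0;
}

/* skcipher handlers receive cryptlen/iv instead of nbytes/info */
static int example_encrypt(struct skcipher_request *areq)
{
	struct example_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = 0;
	if (!areq->cryptlen)
		return 0;
	if (!areq->iv)
		return -EINVAL;
	return -EOPNOTSUPP;	/* a real driver feeds the hardware here */
}

static int example_init(struct crypto_tfm *tfm)
{
	/* replaces the old tfm->crt_ablkcipher.reqsize assignment */
	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct example_req_ctx));
	return 0;
}

static struct skcipher_alg example_alg = {
	.setkey		= example_setkey,
	.encrypt	= example_encrypt,
	.decrypt	= example_encrypt,
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.base = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-example",
		.cra_priority		= 100,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_ctxsize		= sizeof(struct example_tfm_ctx),
		.cra_alignmask		= 3,
		.cra_module		= THIS_MODULE,
		.cra_init		= example_init,
	},
};

/*
 * Registration pairs crypto_register_skcipher(&example_alg) with
 * crypto_unregister_skcipher(&example_alg), mirroring the probe/remove
 * changes in sun4i-ss-core.c below.
 */

The same shape appears in each ss_algs[] entry in the diff: the cipher callbacks and key/IV sizes sit at the top level of struct skcipher_alg, while the fields shared with other algorithm types move under .base.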
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 90efd10d57a1..5cf64746731a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -16,13 +16,13 @@ */ #include "sun4i-ss.h" -static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) +static int sun4i_ss_opti_poll(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; - unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); - struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); u32 mode = ctx->mode; /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ u32 rx_cnt = SS_RX_DEFAULT; @@ -31,17 +31,17 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) u32 v; int err = 0; unsigned int i; - unsigned int ileft = areq->nbytes; - unsigned int oleft = areq->nbytes; + unsigned int ileft = areq->cryptlen; + unsigned int oleft = areq->cryptlen; unsigned int todo; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and out */ unsigned long flags; - if (areq->nbytes == 0) + if (!areq->cryptlen) return 0; - if (!areq->info) { + if (!areq->iv) { dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); return -EINVAL; } @@ -56,9 +56,9 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) for (i = 0; i < op->keylen; i += 4) writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); - if (areq->info) { + if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { - v = *(u32 *)(areq->info + i * 4); + v = *(u32 *)(areq->iv + i * 4); writel(v, ss->base + SS_IV0 + i * 4); } } @@ -76,13 +76,13 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) goto release_ss; } - ileft = areq->nbytes / 4; - oleft = areq->nbytes / 4; + ileft = areq->cryptlen / 4; + oleft = areq->cryptlen / 4; oi = 0; oo = 0; do { todo = min3(rx_cnt, ileft, (mi.length - oi) / 4); - if (todo > 0) { + if (todo) { ileft -= todo; writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); oi += todo * 4; @@ -97,7 +97,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) tx_cnt = SS_TXFIFO_SPACES(spaces); todo = min3(tx_cnt, oleft, (mo.length - oo) / 4); - if (todo > 0) { + if (todo) { oleft -= todo; readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oo += todo * 4; @@ -106,12 +106,12 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq) sg_miter_next(&mo); oo = 0; } - } while (oleft > 0); + } while (oleft); - if (areq->info) { + if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = readl(ss->base + SS_IV0 + i * 4); - *(u32 *)(areq->info + i * 4) = v; + *(u32 *)(areq->iv + i * 4) = v; } } @@ -124,16 +124,16 @@ release_ss: } /* Generic function that support SG with size not multiple of 4 */ -static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) +static int sun4i_ss_cipher_poll(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; int no_chunk = 1; struct scatterlist *in_sg = 
areq->src; struct scatterlist *out_sg = areq->dst; - unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); - struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq); + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); u32 mode = ctx->mode; /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ u32 rx_cnt = SS_RX_DEFAULT; @@ -142,8 +142,8 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) u32 spaces; int err = 0; unsigned int i; - unsigned int ileft = areq->nbytes; - unsigned int oleft = areq->nbytes; + unsigned int ileft = areq->cryptlen; + unsigned int oleft = areq->cryptlen; unsigned int todo; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and out */ @@ -154,10 +154,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) unsigned int obl = 0; /* length of data in bufo */ unsigned long flags; - if (areq->nbytes == 0) + if (!areq->cryptlen) return 0; - if (!areq->info) { + if (!areq->iv) { dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n"); return -EINVAL; } @@ -172,12 +172,12 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) * we can use the SS optimized function */ while (in_sg && no_chunk == 1) { - if ((in_sg->length % 4) != 0) + if (in_sg->length % 4) no_chunk = 0; in_sg = sg_next(in_sg); } while (out_sg && no_chunk == 1) { - if ((out_sg->length % 4) != 0) + if (out_sg->length % 4) no_chunk = 0; out_sg = sg_next(out_sg); } @@ -190,9 +190,9 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) for (i = 0; i < op->keylen; i += 4) writel(*(op->key + i / 4), ss->base + SS_KEY0 + i); - if (areq->info) { + if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { - v = *(u32 *)(areq->info + i * 4); + v = *(u32 *)(areq->iv + i * 4); writel(v, ss->base + SS_IV0 + i * 4); } } @@ -209,19 +209,19 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) err = -EINVAL; goto release_ss; } - ileft = areq->nbytes; - oleft = areq->nbytes; + ileft = areq->cryptlen; + oleft = areq->cryptlen; oi = 0; oo = 0; - while (oleft > 0) { - if (ileft > 0) { + while (oleft) { + if (ileft) { /* * todo is the number of consecutive 4byte word that we * can read from current SG */ todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4); - if (todo > 0 && ob == 0) { + if (todo && !ob) { writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); ileft -= todo * 4; @@ -240,7 +240,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) ileft -= todo; oi += todo; ob += todo; - if (ob % 4 == 0) { + if (!(ob % 4)) { writesl(ss->base + SS_RXFIFO, buf, ob / 4); ob = 0; @@ -257,14 +257,14 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) tx_cnt = SS_TXFIFO_SPACES(spaces); dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n", mode, - oi, mi.length, ileft, areq->nbytes, rx_cnt, - oo, mo.length, oleft, areq->nbytes, tx_cnt, ob); + oi, mi.length, ileft, areq->cryptlen, rx_cnt, + oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob); - if (tx_cnt == 0) + if (!tx_cnt) continue; /* todo in 4bytes word */ todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4); - if (todo > 0) { + if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; oo += todo * 4; @@ -300,10 +300,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq) /* bufo must be fully used here */ } } - if (areq->info) { + if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = readl(ss->base + SS_IV0 + i * 4); - *(u32 
*)(areq->info + i * 4) = v; + *(u32 *)(areq->iv + i * 4) = v; } } @@ -317,22 +317,22 @@ release_ss: } /* CBC AES */ -int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -340,22 +340,22 @@ int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq) } /* ECB AES */ -int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -363,22 +363,22 @@ int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq) } /* CBC DES */ -int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = 
crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -386,22 +386,22 @@ int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq) } /* ECB DES */ -int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -409,22 +409,22 @@ int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq) } /* CBC 3DES */ -int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -432,22 +432,22 @@ int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq) } /* ECB 3DES */ -int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode 
= SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } -int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) +int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq) { - struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq); - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); - struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; @@ -457,24 +457,25 @@ int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq) int sun4i_ss_cipher_init(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); - struct crypto_alg *alg = tfm->__crt_alg; struct sun4i_ss_alg_template *algt; memset(op, 0, sizeof(struct sun4i_tfm_ctx)); - algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); + algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template, + alg.crypto.base); op->ss = algt->ss; - tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx); + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct sun4i_cipher_req_ctx)); return 0; } /* check and set the AES key, prepare the mode to be used */ -int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; switch (keylen) { @@ -489,7 +490,7 @@ int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, break; default: dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen); - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } op->keylen = keylen; @@ -498,10 +499,10 @@ int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } /* check and set the DES key, prepare the mode to be used */ -int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; u32 flags; u32 tmp[DES_EXPKEY_WORDS]; @@ -509,15 +510,15 @@ int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, if (unlikely(keylen != DES_KEY_SIZE)) { dev_err(ss->dev, "Invalid keylen %u\n", keylen); - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } - flags = crypto_ablkcipher_get_flags(tfm); + flags = crypto_skcipher_get_flags(tfm); ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); + if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); dev_dbg(ss->dev, "Weak key %u\n", keylen); return -EINVAL; } @@ -528,15 +529,15 @@ int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } /* check and set the 3DES key, prepare the mode to be used */ -int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int 
sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { - struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm); + struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; if (unlikely(keylen != 3 * DES_KEY_SIZE)) { dev_err(ss->dev, "Invalid keylen %u\n", keylen); - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } op->keylen = keylen; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c index 3ac6c6c4ad18..02ad8256e900 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c @@ -83,134 +83,133 @@ static struct sun4i_ss_alg_template ss_algs[] = { } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "cbc(aes)", - .cra_driver_name = "cbc-aes-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = sun4i_ss_aes_setkey, - .encrypt = sun4i_ss_cbc_aes_encrypt, - .decrypt = sun4i_ss_cbc_aes_decrypt, + .setkey = sun4i_ss_aes_setkey, + .encrypt = sun4i_ss_cbc_aes_encrypt, + .decrypt = sun4i_ss_cbc_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "ecb(aes)", - .cra_driver_name = "ecb-aes-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = AES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_ablkcipher = { - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = sun4i_ss_aes_setkey, - .encrypt = sun4i_ss_ecb_aes_encrypt, - .decrypt = sun4i_ss_ecb_aes_decrypt, + .setkey = sun4i_ss_aes_setkey, + .encrypt = sun4i_ss_ecb_aes_encrypt, + .decrypt = sun4i_ss_ecb_aes_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "cbc(des)", - .cra_driver_name = "cbc-des-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - 
.cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .ivsize = DES_BLOCK_SIZE, - .setkey = sun4i_ss_des_setkey, - .encrypt = sun4i_ss_cbc_des_encrypt, - .decrypt = sun4i_ss_cbc_des_decrypt, + .setkey = sun4i_ss_des_setkey, + .encrypt = sun4i_ss_cbc_des_encrypt, + .decrypt = sun4i_ss_cbc_des_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "ecb(des)", - .cra_driver_name = "ecb-des-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES_KEY_SIZE, - .max_keysize = DES_KEY_SIZE, - .setkey = sun4i_ss_des_setkey, - .encrypt = sun4i_ss_ecb_des_encrypt, - .decrypt = sun4i_ss_ecb_des_decrypt, + .setkey = sun4i_ss_des_setkey, + .encrypt = sun4i_ss_ecb_des_encrypt, + .decrypt = sun4i_ss_ecb_des_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "cbc(des3_ede)", - .cra_driver_name = "cbc-des3-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = sun4i_ss_des3_setkey, - .encrypt = sun4i_ss_cbc_des3_encrypt, - .decrypt = sun4i_ss_cbc_des3_decrypt, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_cbc_des3_encrypt, + .decrypt = sun4i_ss_cbc_des3_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, -{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, +{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { - .cra_name = "ecb(des3_ede)", - .cra_driver_name = "ecb-des3-sun4i-ss", - .cra_priority = 300, - .cra_blocksize = DES3_EDE_BLOCK_SIZE, - 
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER, - .cra_ctxsize = sizeof(struct sun4i_req_ctx), - .cra_module = THIS_MODULE, - .cra_alignmask = 3, - .cra_type = &crypto_ablkcipher_type, - .cra_init = sun4i_ss_cipher_init, - .cra_u.ablkcipher = { - .min_keysize = DES3_EDE_KEY_SIZE, - .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, - .setkey = sun4i_ss_des3_setkey, - .encrypt = sun4i_ss_ecb_des3_encrypt, - .decrypt = sun4i_ss_ecb_des3_decrypt, + .setkey = sun4i_ss_des3_setkey, + .encrypt = sun4i_ss_ecb_des3_encrypt, + .decrypt = sun4i_ss_ecb_des3_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-sun4i-ss", + .cra_priority = 300, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER, + .cra_ctxsize = sizeof(struct sun4i_req_ctx), + .cra_module = THIS_MODULE, + .cra_alignmask = 3, + .cra_init = sun4i_ss_cipher_init, } } }, @@ -266,12 +265,12 @@ static int sun4i_ss_probe(struct platform_device *pdev) /* Enable both clocks */ err = clk_prepare_enable(ss->busclk); - if (err != 0) { + if (err) { dev_err(&pdev->dev, "Cannot prepare_enable busclk\n"); return err; } err = clk_prepare_enable(ss->ssclk); - if (err != 0) { + if (err) { dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n"); goto error_ssclk; } @@ -281,7 +280,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) * Try to set the clock to the maximum allowed */ err = clk_set_rate(ss->ssclk, cr_mod); - if (err != 0) { + if (err) { dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n"); goto error_clk; } @@ -340,17 +339,17 @@ static int sun4i_ss_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { ss_algs[i].ss = ss; switch (ss_algs[i].type) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - err = crypto_register_alg(&ss_algs[i].alg.crypto); - if (err != 0) { + case CRYPTO_ALG_TYPE_SKCIPHER: + err = crypto_register_skcipher(&ss_algs[i].alg.crypto); + if (err) { dev_err(ss->dev, "Fail to register %s\n", - ss_algs[i].alg.crypto.cra_name); + ss_algs[i].alg.crypto.base.cra_name); goto error_alg; } break; case CRYPTO_ALG_TYPE_AHASH: err = crypto_register_ahash(&ss_algs[i].alg.hash); - if (err != 0) { + if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.hash.halg.base.cra_name); goto error_alg; @@ -364,8 +363,8 @@ error_alg: i--; for (; i >= 0; i--) { switch (ss_algs[i].type) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - crypto_unregister_alg(&ss_algs[i].alg.crypto); + case CRYPTO_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&ss_algs[i].alg.crypto); break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); @@ -388,8 +387,8 @@ static int sun4i_ss_remove(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { switch (ss_algs[i].type) { - case CRYPTO_ALG_TYPE_ABLKCIPHER: - crypto_unregister_alg(&ss_algs[i].alg.crypto); + case CRYPTO_ALG_TYPE_SKCIPHER: + crypto_unregister_skcipher(&ss_algs[i].alg.crypto); break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c index 0de2f62d51ff..a4b5ff2b72f8 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c @@ -60,7 +60,7 @@ int sun4i_hash_export_md5(struct ahash_request *areq, void *out) memcpy(octx->block, op->buf, op->len); - if (op->byte_count > 0) { + if (op->byte_count) { for (i = 0; i < 4; i++) 
octx->hash[i] = op->hash[i]; } else { @@ -102,7 +102,7 @@ int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) memcpy(octx->buffer, op->buf, op->len); - if (op->byte_count > 0) { + if (op->byte_count) { for (i = 0; i < 5; i++) octx->state[i] = op->hash[i]; } else { @@ -167,44 +167,34 @@ int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) */ static int sun4i_hash(struct ahash_request *areq) { - u32 v, ivmode = 0; - unsigned int i = 0; /* * i is the total bytes read from SGs, to be compared to areq->nbytes * i is important because we cannot rely on SG length since the sum of * SG->length could be greater than areq->nbytes + * + * end is the position when we need to stop writing to the device, + * to be compared to i + * + * in_i: advancement in the current SG */ - + unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; + unsigned int in_i = 0; + u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0; struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); struct sun4i_ss_ctx *ss = tfmctx->ss; - unsigned int in_i = 0; /* advancement in the current SG */ - unsigned int end; - /* - * end is the position when we need to stop writing to the device, - * to be compared to i - */ + struct scatterlist *in_sg = areq->src; + struct sg_mapping_iter mi; int in_r, err = 0; - unsigned int todo; - u32 spaces, rx_cnt = SS_RX_DEFAULT; size_t copied = 0; - struct sg_mapping_iter mi; - unsigned int j = 0; - int zeros; - unsigned int index, padlen; - __be64 bits; - u32 bf[32]; - u32 wb = 0; - unsigned int nwait, nbw = 0; - struct scatterlist *in_sg = areq->src; dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", __func__, crypto_tfm_alg_name(areq->base.tfm), op->byte_count, areq->nbytes, op->mode, op->len, op->hash[0]); - if (unlikely(areq->nbytes == 0) && (op->flags & SS_HASH_FINAL) == 0) + if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL)) return 0; /* protect against overflow */ @@ -213,7 +203,7 @@ static int sun4i_hash(struct ahash_request *areq) return -EINVAL; } - if (op->len + areq->nbytes < 64 && (op->flags & SS_HASH_FINAL) == 0) { + if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) { /* linearize data to op->buf */ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), op->buf + op->len, areq->nbytes, 0); @@ -227,7 +217,7 @@ static int sun4i_hash(struct ahash_request *areq) * if some data have been processed before, * we need to restore the partial hash state */ - if (op->byte_count > 0) { + if (op->byte_count) { ivmode = SS_IV_ARBITRARY; for (i = 0; i < 5; i++) writel(op->hash[i], ss->base + SS_IV0 + i * 4); @@ -235,11 +225,11 @@ static int sun4i_hash(struct ahash_request *areq) /* Enable the device */ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); - if ((op->flags & SS_HASH_UPDATE) == 0) + if (!(op->flags & SS_HASH_UPDATE)) goto hash_final; /* start of handling data */ - if ((op->flags & SS_HASH_FINAL) == 0) { + if (!(op->flags & SS_HASH_FINAL)) { end = ((areq->nbytes + op->len) / 64) * 64 - op->len; if (end > areq->nbytes || areq->nbytes - end > 63) { @@ -253,14 +243,14 @@ static int sun4i_hash(struct ahash_request *areq) end = ((areq->nbytes + op->len) / 4) * 4 - op->len; } - /* TODO if SGlen % 4 and op->len == 0 then DMA */ + /* TODO if SGlen % 4 and !op->len then DMA */ i = 1; while (in_sg && i == 1) { - if ((in_sg->length % 4) != 0) + if (in_sg->length % 4) i = 0; in_sg = 
sg_next(in_sg); } - if (i == 1 && op->len == 0) + if (i == 1 && !op->len && areq->nbytes) dev_dbg(ss->dev, "We can DMA\n"); i = 0; @@ -275,7 +265,7 @@ static int sun4i_hash(struct ahash_request *areq) * - the buffer is already used * - the SG does not have enough byte remaining ( < 4) */ - if (op->len > 0 || (mi.length - in_i) < 4) { + if (op->len || (mi.length - in_i) < 4) { /* * if we have entered here we have two reason to stop * - the buffer is full @@ -294,7 +284,7 @@ static int sun4i_hash(struct ahash_request *areq) in_i = 0; } } - if (op->len > 3 && (op->len % 4) == 0) { + if (op->len > 3 && !(op->len % 4)) { /* write buf to the device */ writesl(ss->base + SS_RXFIFO, op->buf, op->len / 4); @@ -313,7 +303,7 @@ static int sun4i_hash(struct ahash_request *areq) i += todo * 4; in_i += todo * 4; rx_cnt -= todo; - if (rx_cnt == 0) { + if (!rx_cnt) { spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); } @@ -351,7 +341,7 @@ static int sun4i_hash(struct ahash_request *areq) * Now if we have the flag final go to finalize part * If not, store the partial hash */ - if ((op->flags & SS_HASH_FINAL) > 0) + if (op->flags & SS_HASH_FINAL) goto hash_final; writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); @@ -359,7 +349,7 @@ static int sun4i_hash(struct ahash_request *areq) do { v = readl(ss->base + SS_CTL); i++; - } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); + } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", @@ -368,6 +358,15 @@ static int sun4i_hash(struct ahash_request *areq) goto release_ss; } + /* + * The datasheet isn't very clear about when to retrieve the digest. The + * bit SS_DATA_END is cleared when the engine has processed the data and + * when the digest is computed *but* it doesn't mean the digest is + * available in the digest registers. Hence the delay to be sure we can + * read it. + */ + ndelay(1); + for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) op->hash[i] = readl(ss->base + SS_MD0 + i * 4); @@ -388,56 +387,50 @@ static int sun4i_hash(struct ahash_request *areq) hash_final: /* write the remaining words of the wait buffer */ - if (op->len > 0) { + if (op->len) { nwait = op->len / 4; - if (nwait > 0) { + if (nwait) { writesl(ss->base + SS_RXFIFO, op->buf, nwait); op->byte_count += 4 * nwait; } + nbw = op->len - 4 * nwait; - wb = *(u32 *)(op->buf + nwait * 4); - wb &= (0xFFFFFFFF >> (4 - nbw) * 8); + if (nbw) { + wb = *(u32 *)(op->buf + nwait * 4); + wb &= GENMASK((nbw * 8) - 1, 0); + + op->byte_count += nbw; + } } /* write the remaining bytes of the nbw buffer */ - if (nbw > 0) { - wb |= ((1 << 7) << (nbw * 8)); - bf[j++] = wb; - } else { - bf[j++] = 1 << 7; - } + wb |= ((1 << 7) << (nbw * 8)); + bf[j++] = wb; /* * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) * I take the operations from other MD5/SHA1 implementations */ - /* we have already send 4 more byte of which nbw data */ - if (op->mode == SS_OP_MD5) { - index = (op->byte_count + 4) & 0x3f; - op->byte_count += nbw; - if (index > 56) - zeros = (120 - index) / 4; - else - zeros = (56 - index) / 4; - } else { - op->byte_count += nbw; - index = op->byte_count & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); - zeros = (padlen - 1) / 4; - } + /* last block size */ + fill = 64 - (op->byte_count % 64); + min_fill = 2 * sizeof(u32) + (nbw ? 
0 : sizeof(u32)); + + /* if we can't fill all data, jump to the next 64 block */ + if (fill < min_fill) + fill += 64; - memset(bf + j, 0, 4 * zeros); - j += zeros; + j += (fill - min_fill) / sizeof(u32); /* write the length of data */ if (op->mode == SS_OP_SHA1) { - bits = cpu_to_be64(op->byte_count << 3); - bf[j++] = bits & 0xffffffff; - bf[j++] = (bits >> 32) & 0xffffffff; + __be64 bits = cpu_to_be64(op->byte_count << 3); + bf[j++] = lower_32_bits(bits); + bf[j++] = upper_32_bits(bits); } else { - bf[j++] = (op->byte_count << 3) & 0xffffffff; - bf[j++] = (op->byte_count >> 29) & 0xffffffff; + __le64 bits = op->byte_count << 3; + bf[j++] = lower_32_bits(bits); + bf[j++] = upper_32_bits(bits); } writesl(ss->base + SS_RXFIFO, bf, j); @@ -453,7 +446,7 @@ hash_final: do { v = readl(ss->base + SS_CTL); i++; - } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0); + } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", @@ -462,6 +455,15 @@ hash_final: goto release_ss; } + /* + * The datasheet isn't very clear about when to retrieve the digest. The + * bit SS_DATA_END is cleared when the engine has processed the data and + * when the digest is computed *but* it doesn't mean the digest is + * available in the digest registers. Hence the delay to be sure we can + * read it. + */ + ndelay(1); + /* Get the hash from the device */ if (op->mode == SS_OP_SHA1) { for (i = 0; i < 5; i++) { @@ -513,7 +515,7 @@ int sun4i_hash_digest(struct ahash_request *areq) struct sun4i_req_ctx *op = ahash_request_ctx(areq); err = sun4i_hash_init(areq); - if (err != 0) + if (err) return err; op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h index f04c0f8cf026..a0e1efc1cb2a 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss.h +++ b/drivers/crypto/sunxi-ss/sun4i-ss.h @@ -24,9 +24,11 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <crypto/md5.h> +#include <crypto/skcipher.h> #include <crypto/sha.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> #include <crypto/aes.h> #include <crypto/des.h> #include <crypto/internal/rng.h> @@ -140,7 +142,7 @@ struct sun4i_ss_alg_template { u32 type; u32 mode; union { - struct crypto_alg crypto; + struct skcipher_alg crypto; struct ahash_alg hash; } alg; struct sun4i_ss_ctx *ss; @@ -177,25 +179,25 @@ int sun4i_hash_import_md5(struct ahash_request *areq, const void *in); int sun4i_hash_export_sha1(struct ahash_request *areq, void *out); int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in); -int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq); -int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_des_decrypt(struct 
skcipher_request *areq); +int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq); -int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq); -int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq); +int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq); +int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq); +int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq); int sun4i_ss_cipher_init(struct crypto_tfm *tfm); -int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); -int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); -int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, +int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen); |
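In the sun4i-ss-hash.c hunk above, the finalization path drops the old index/zeros bookkeeping in favour of a fill/min_fill computation, and masks the trailing partial word with GENMASK((nbw * 8) - 1, 0) instead of 0xFFFFFFFF >> (4 - nbw) * 8. The following userspace sketch of that arithmetic is for illustration only (plain C, not driver code; pad_words() is a made-up helper); it checks the MD5/SHA-1 invariant that data plus padding plus the 64-bit length lands on a 64-byte boundary.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * byte_count: total message length in bytes (op->byte_count once the
 *             trailing partial word has been counted)
 * nbw:        bytes held in that trailing partial word (0..3)
 *
 * Returns the number of 32-bit words appended at finalization: the word
 * carrying the 0x80 end-of-message marker (merged with the nbw tail
 * bytes), the zero padding, and the two 32-bit length words.
 */
static unsigned int pad_words(uint64_t byte_count, unsigned int nbw)
{
	unsigned int j = 1;	/* marker word, bf[0] in the driver */
	unsigned int fill = 64 - (byte_count % 64);
	/* space still needed: 8 bytes of length, plus 4 for the marker
	 * word when there is no partial data word to merge it into */
	unsigned int min_fill = 2 * sizeof(uint32_t) +
				(nbw ? 0 : sizeof(uint32_t));

	if (fill < min_fill)	/* no room left: pad into the next block */
		fill += 64;

	j += (fill - min_fill) / sizeof(uint32_t);	/* zero words */
	return j + 2;		/* plus the two length words */
}

int main(void)
{
	/* word-aligned data already sent is byte_count - nbw bytes; the
	 * total including the appended words must be a multiple of 64 */
	assert(( 3 - 3 + 4 * pad_words( 3, 3)) % 64 == 0); /*  3 ->  64 */
	assert((60 - 0 + 4 * pad_words(60, 0)) % 64 == 0); /* 60 -> 128 */
	assert((64 - 0 + 4 * pad_words(64, 0)) % 64 == 0); /* 64 -> 128 */
	printf("padding arithmetic holds\n");
	return 0;
}

When nbw is zero the 0x80 marker needs a word of its own, which is why min_fill grows by sizeof(u32); the old code expressed the same requirement through separate MD5 and SHA-1 index/zeros branches.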