author     Linus Torvalds <torvalds@linux-foundation.org>   2021-06-29 02:04:56 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-06-29 02:04:56 +0300
commit     6159c49e12284b4880fd60e0575a71a40556a67e (patch)
tree       b969ffe7cd182d77052f91ec3d221e6726f0457c /drivers/crypto/hisilicon/sec2
parent     31e798fd6f0ff0acdc49c1a358b581730936a09a (diff)
parent     9f38b678ffc4e2ccf167a1131c0403dc4f5e1bb7 (diff)
download   linux-6159c49e12284b4880fd60e0575a71a40556a67e.tar.xz
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Algorithms:
- Fix rmmod crash with x86/curve25519
- Add ECDH NIST P384
- Generate assembly files at build-time with perl scripts on arm
- Switch to HMAC SHA512 DRBG as default DRBG
Drivers:
- Add sl3516 crypto engine
- Add ECDH NIST P384 support in hisilicon/hpre
- Add {ofb,cfb,ctr} over {aes,sm4} in hisilicon/sec
- Add {ccm,gcm} over {aes,sm4} in hisilicon/sec
- Enable omap hwrng driver for TI K3 family
- Add support for AEAD algorithms in qce"
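[Usage sketch, not part of the pull: once these driver updates land, the new modes such as "gcm(sm4)" from hisilicon/sec are reachable through the ordinary in-kernel AEAD API, with the hardware implementation picked by algorithm priority. The helper below is an illustrative assumption — its name, buffer layout and synchronous wait are mine, not from the merge. It encrypts a buffer laid out as AAD followed by plaintext, with 16 spare bytes at the end for the tag, and a 12-byte IV.]

#include <crypto/aead.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: one-shot SM4-GCM encryption of a single buffer that
 * holds 'assoclen' bytes of AAD followed by 'ptlen' bytes of plaintext,
 * with 16 bytes of room at the end for the authentication tag. */
static int sm4_gcm_encrypt_oneshot(const u8 *key, unsigned int keylen,
				   u8 *buf, unsigned int assoclen,
				   unsigned int ptlen, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int ret;

	/* The crypto core selects the highest-priority "gcm(sm4)" provider. */
	tfm = crypto_alloc_aead("gcm(sm4)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	/* Hardware drivers complete asynchronously; wait for the result. */
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}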
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
crypto: sl3516 - depends on HAS_IOMEM
crypto: hisilicon/qm - implement for querying hardware tasks status.
crypto: sl3516 - Fix build warning without CONFIG_PM
MAINTAINERS: update caam crypto driver maintainers list
crypto: nx - Fix numerous sparse byte-order warnings
crypto: nx - Fix RCU warning in nx842_OF_upd_status
crypto: api - Move crypto attr definitions out of crypto.h
crypto: nx - Fix memcpy() over-reading in nonce
crypto: hisilicon/sec - Fix spelling mistake "fallbcak" -> "fallback"
crypto: sa2ul - Remove unused auth_len variable
crypto: sl3516 - fix duplicated inclusion
crypto: hisilicon/zip - adds the max shaper type rate
crypto: hisilicon/hpre - adds the max shaper type rate
crypto: hisilicon/sec - adds the max shaper type rate
crypto: hisilicon/qm - supports to inquiry each function's QoS
crypto: hisilicon/qm - add pf ping single vf function
crypto: hisilicon/qm - merges the work initialization process into a single function
crypto: hisilicon/qm - add the "alg_qos" file node
crypto: hisilicon/qm - supports writing QoS int the host
crypto: api - remove CRYPTOA_U32 and related functions
...
Diffstat (limited to 'drivers/crypto/hisilicon/sec2')
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h            23
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c   1036
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.h    193
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c      100
4 files changed, 1192 insertions, 160 deletions
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index dfdce2f21e65..018415b9840a 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -13,14 +13,14 @@ struct sec_alg_res { dma_addr_t pbuf_dma; u8 *c_ivin; dma_addr_t c_ivin_dma; + u8 *a_ivin; + dma_addr_t a_ivin_dma; u8 *out_mac; dma_addr_t out_mac_dma; }; /* Cipher request of SEC private */ struct sec_cipher_req { - struct hisi_acc_hw_sgl *c_in; - dma_addr_t c_in_dma; struct hisi_acc_hw_sgl *c_out; dma_addr_t c_out_dma; u8 *c_ivin; @@ -33,15 +33,25 @@ struct sec_cipher_req { struct sec_aead_req { u8 *out_mac; dma_addr_t out_mac_dma; + u8 *a_ivin; + dma_addr_t a_ivin_dma; struct aead_request *aead_req; }; /* SEC request of Crypto */ struct sec_req { - struct sec_sqe sec_sqe; + union { + struct sec_sqe sec_sqe; + struct sec_sqe3 sec_sqe3; + }; struct sec_ctx *ctx; struct sec_qp_ctx *qp_ctx; + /** + * Common parameter of the SEC request. + */ + struct hisi_acc_hw_sgl *in; + dma_addr_t in_dma; struct sec_cipher_req c_req; struct sec_aead_req aead_req; struct list_head backlog_head; @@ -81,7 +91,9 @@ struct sec_auth_ctx { u8 a_key_len; u8 mac_len; u8 a_alg; + bool fallback; struct crypto_shash *hash_tfm; + struct crypto_aead *fallback_aead_tfm; }; /* SEC cipher context which cipher's relatives */ @@ -94,6 +106,10 @@ struct sec_cipher_ctx { u8 c_mode; u8 c_alg; u8 c_key_len; + + /* add software support */ + bool fallback; + struct crypto_sync_skcipher *fbtfm; }; /* SEC queue context which defines queue's relatives */ @@ -137,6 +153,7 @@ struct sec_ctx { bool pbuf_supported; struct sec_cipher_ctx c_ctx; struct sec_auth_ctx a_ctx; + u8 type_supported; struct device *dev; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 133aede8bf07..6a45bd23b363 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019 HiSilicon Limited. 
*/ #include <crypto/aes.h> +#include <crypto/aead.h> #include <crypto/algapi.h> #include <crypto/authenc.h> #include <crypto/des.h> @@ -21,6 +22,7 @@ #define SEC_PRIORITY 4001 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) +#define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE) #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) #define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) @@ -37,10 +39,23 @@ #define SEC_AEAD_ALG_OFFSET 11 #define SEC_AUTH_OFFSET 6 +#define SEC_DE_OFFSET_V3 9 +#define SEC_SCENE_OFFSET_V3 5 +#define SEC_CKEY_OFFSET_V3 13 +#define SEC_SRC_SGL_OFFSET_V3 11 +#define SEC_DST_SGL_OFFSET_V3 14 +#define SEC_CALG_OFFSET_V3 4 +#define SEC_AKEY_OFFSET_V3 9 +#define SEC_MAC_OFFSET_V3 4 +#define SEC_AUTH_ALG_OFFSET_V3 15 +#define SEC_CIPHER_AUTH_V3 0xbf +#define SEC_AUTH_CIPHER_V3 0x40 #define SEC_FLAG_OFFSET 7 #define SEC_FLAG_MASK 0x0780 #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 +#define SEC_ICV_MASK 0x000E +#define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 @@ -66,6 +81,25 @@ #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 +#define SEC_ICV_ERR 0x2 +#define MIN_MAC_LEN 4 +#define MAC_LEN_MASK 0x1U +#define MAX_INPUT_DATA_LEN 0xFFFE00 +#define BITS_MASK 0xFF +#define BYTE_BITS 0x8 +#define SEC_XTS_NAME_SZ 0x3 +#define IV_CM_CAL_NUM 2 +#define IV_CL_MASK 0x7 +#define IV_CL_MIN 2 +#define IV_CL_MID 4 +#define IV_CL_MAX 8 +#define IV_FLAGS_OFFSET 0x6 +#define IV_CM_OFFSET 0x3 +#define IV_LAST_BYTE1 1 +#define IV_LAST_BYTE2 2 +#define IV_LAST_BYTE_MASK 0xFF +#define IV_CTR_INIT 0x1 +#define IV_BYTE_OFFSET 0x8 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) @@ -124,22 +158,59 @@ static void sec_free_req_id(struct sec_req *req) mutex_unlock(&qp_ctx->req_lock); } -static int sec_aead_verify(struct sec_req *req) +static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) { - struct aead_request *aead_req = req->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); - size_t authsize = crypto_aead_authsize(tfm); - u8 *mac_out = req->aead_req.out_mac; - u8 *mac = mac_out + SEC_MAX_MAC_LEN; - struct scatterlist *sgl = aead_req->src; - size_t sz; + struct sec_sqe *bd = resp; + + status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; + status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1; + status->flag = (le16_to_cpu(bd->type2.done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + status->tag = le16_to_cpu(bd->type2.tag); + status->err_type = bd->type2.error_type; + + return bd->type_cipher_auth & SEC_TYPE_MASK; +} + +static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp) +{ + struct sec_sqe3 *bd3 = resp; + + status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK; + status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1; + status->flag = (le16_to_cpu(bd3->done_flag) & + SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; + status->tag = le64_to_cpu(bd3->tag); + status->err_type = bd3->error_type; + + return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; +} + +static int sec_cb_status_check(struct sec_req *req, + struct bd_status *status) +{ + struct sec_ctx *ctx = req->ctx; - sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize, - aead_req->cryptlen + aead_req->assoclen - - authsize); - if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(req->ctx->dev, 
"aead verify failure!\n"); - return -EBADMSG; + if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) { + dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n", + req->err_type, status->done); + return -EIO; + } + + if (unlikely(ctx->alg_type == SEC_SKCIPHER)) { + if (unlikely(status->flag != SEC_SQE_CFLAG)) { + dev_err_ratelimited(ctx->dev, "flag[%u]\n", + status->flag); + return -EIO; + } + } else if (unlikely(ctx->alg_type == SEC_AEAD)) { + if (unlikely(status->flag != SEC_SQE_AEAD_FLAG || + status->icv == SEC_ICV_ERR)) { + dev_err_ratelimited(ctx->dev, + "flag[%u], icv[%u]\n", + status->flag, status->icv); + return -EBADMSG; + } } return 0; @@ -149,43 +220,38 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; - struct sec_sqe *bd = resp; + u8 type_supported = qp_ctx->ctx->type_supported; + struct bd_status status; struct sec_ctx *ctx; struct sec_req *req; - u16 done, flag; - int err = 0; + int err; u8 type; - type = bd->type_cipher_auth & SEC_TYPE_MASK; - if (unlikely(type != SEC_BD_TYPE2)) { + if (type_supported == SEC_BD_TYPE2) { + type = pre_parse_finished_bd(&status, resp); + req = qp_ctx->req_list[status.tag]; + } else { + type = pre_parse_finished_bd3(&status, resp); + req = (void *)(uintptr_t)status.tag; + } + + if (unlikely(type != type_supported)) { atomic64_inc(&dfx->err_bd_cnt); pr_err("err bd type [%d]\n", type); return; } - req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)]; if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); atomic_inc(&qp->qp_status.used); return; } - req->err_type = bd->type2.error_type; + + req->err_type = status.err_type; ctx = req->ctx; - done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; - flag = (le16_to_cpu(bd->type2.done_flag) & - SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; - if (unlikely(req->err_type || done != SEC_SQE_DONE || - (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || - (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err_ratelimited(ctx->dev, - "err_type[%d],done[%d],flag[%d]\n", - req->err_type, done, flag); - err = -EIO; + err = sec_cb_status_check(req, &status); + if (err) atomic64_inc(&dfx->done_flag_cnt); - } - - if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt) - err = sec_aead_verify(req); atomic64_inc(&dfx->recv_cnt); @@ -253,6 +319,30 @@ static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) res->c_ivin, res->c_ivin_dma); } +static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) +{ + int i; + + res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ, + &res->a_ivin_dma, GFP_KERNEL); + if (!res->a_ivin) + return -ENOMEM; + + for (i = 1; i < QM_Q_DEPTH; i++) { + res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; + res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; + } + + return 0; +} + +static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res) +{ + if (res->a_ivin) + dma_free_coherent(dev, SEC_TOTAL_IV_SZ, + res->a_ivin, res->a_ivin_dma); +} + static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) { int i; @@ -335,9 +425,13 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx, return ret; if (ctx->alg_type == SEC_AEAD) { + ret = sec_alloc_aiv_resource(dev, res); + if (ret) + goto alloc_aiv_fail; + ret = sec_alloc_mac_resource(dev, res); if (ret) - goto alloc_fail; + goto alloc_mac_fail; } if (ctx->pbuf_supported) { ret = sec_alloc_pbuf_resource(dev, res); @@ -352,7 +446,10 @@ static int 
sec_alg_resource_alloc(struct sec_ctx *ctx, alloc_pbuf_fail: if (ctx->alg_type == SEC_AEAD) sec_free_mac_resource(dev, qp_ctx->res); -alloc_fail: +alloc_mac_fail: + if (ctx->alg_type == SEC_AEAD) + sec_free_aiv_resource(dev, res); +alloc_aiv_fail: sec_free_civ_resource(dev, res); return ret; } @@ -382,10 +479,11 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, qp = ctx->qps[qp_ctx_id]; qp->req_type = 0; qp->qp_ctx = qp_ctx; - qp->req_cb = sec_req_cb; qp_ctx->qp = qp; qp_ctx->ctx = ctx; + qp->req_cb = sec_req_cb; + mutex_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); @@ -536,6 +634,26 @@ static void sec_auth_uninit(struct sec_ctx *ctx) a_ctx->a_key, a_ctx->a_key_dma); } +static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) +{ + const char *alg = crypto_tfm_alg_name(&tfm->base); + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + + c_ctx->fallback = false; + if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) + return 0; + + c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(c_ctx->fbtfm)) { + pr_err("failed to alloc fallback tfm!\n"); + return PTR_ERR(c_ctx->fbtfm); + } + + return 0; +} + static int sec_skcipher_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -557,8 +675,14 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) if (ret) goto err_cipher_init; + ret = sec_skcipher_fbtfm_init(tfm); + if (ret) + goto err_fbtfm_init; + return 0; +err_fbtfm_init: + sec_cipher_uninit(ctx); err_cipher_init: sec_ctx_base_uninit(ctx); return ret; @@ -568,6 +692,9 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + if (ctx->c_ctx.fbtfm) + crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); + sec_cipher_uninit(ctx); sec_ctx_base_uninit(ctx); } @@ -607,6 +734,9 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, case SEC_XTS_MIN_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_128BIT; break; + case SEC_XTS_MID_KEY_SIZE: + c_ctx->fallback = true; + break; case SEC_XTS_MAX_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_256BIT; break; @@ -615,19 +745,25 @@ static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, return -EINVAL; } } else { - switch (keylen) { - case AES_KEYSIZE_128: - c_ctx->c_key_len = SEC_CKEY_128BIT; - break; - case AES_KEYSIZE_192: - c_ctx->c_key_len = SEC_CKEY_192BIT; - break; - case AES_KEYSIZE_256: - c_ctx->c_key_len = SEC_CKEY_256BIT; - break; - default: - pr_err("hisi_sec2: aes key error!\n"); + if (c_ctx->c_alg == SEC_CALG_SM4 && + keylen != AES_KEYSIZE_128) { + pr_err("hisi_sec2: sm4 key error!\n"); return -EINVAL; + } else { + switch (keylen) { + case AES_KEYSIZE_128: + c_ctx->c_key_len = SEC_CKEY_128BIT; + break; + case AES_KEYSIZE_192: + c_ctx->c_key_len = SEC_CKEY_192BIT; + break; + case AES_KEYSIZE_256: + c_ctx->c_key_len = SEC_CKEY_256BIT; + break; + default: + pr_err("hisi_sec2: aes key error!\n"); + return -EINVAL; + } } } @@ -672,7 +808,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - + if (c_ctx->fallback) { + ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); + if (ret) { + dev_err(dev, "failed to set fallback skcipher key!\n"); + return ret; + } + } return 0; } @@ -686,22 +828,30 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) 
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) - +GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB) +GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB) +GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR) GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) - GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) +GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB) +GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB) +GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR) static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src) { - struct aead_request *aead_req = req->aead_req.aead_req; + struct sec_aead_req *a_req = &req->aead_req; + struct aead_request *aead_req = a_req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; + struct crypto_aead *tfm; + size_t authsize; + u8 *mac_offset; if (ctx->alg_type == SEC_AEAD) copy_size = aead_req->cryptlen + aead_req->assoclen; @@ -709,15 +859,20 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); + qp_ctx->res[req_id].pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; } + if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { + tfm = crypto_aead_reqtfm(aead_req); + authsize = crypto_aead_authsize(tfm); + mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; + memcpy(a_req->out_mac, mac_offset, authsize); + } - c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; - c_req->c_out_dma = c_req->c_in_dma; + req->in_dma = qp_ctx->res[req_id].pbuf_dma; + c_req->c_out_dma = req->in_dma; return 0; } @@ -728,7 +883,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -738,10 +892,29 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), - qp_ctx->res[req_id].pbuf, - copy_size); + qp_ctx->res[req_id].pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) - dev_err(dev, "copy pbuf data to dst error!\n"); + dev_err(ctx->dev, "copy pbuf data to dst error!\n"); +} + +static int sec_aead_mac_init(struct sec_aead_req *req) +{ + struct aead_request *aead_req = req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); + u8 *mac_out = req->out_mac; + struct scatterlist *sgl = aead_req->src; + size_t copy_size; + off_t skip_size; + + /* Copy input mac */ + skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; + copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, + authsize, skip_size); + if (unlikely(copy_size != authsize)) + return -EINVAL; + + return 0; } static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, @@ -755,37 +928,48 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, int ret; if 
(req->use_pbuf) { - ret = sec_cipher_pbuf_map(ctx, req, src); c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; if (ctx->alg_type == SEC_AEAD) { + a_req->a_ivin = res->a_ivin; + a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; a_req->out_mac_dma = res->pbuf_dma + SEC_PBUF_MAC_OFFSET; } + ret = sec_cipher_pbuf_map(ctx, req, src); return ret; } c_req->c_ivin = res->c_ivin; c_req->c_ivin_dma = res->c_ivin_dma; if (ctx->alg_type == SEC_AEAD) { + a_req->a_ivin = res->a_ivin; + a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->out_mac; a_req->out_mac_dma = res->out_mac_dma; } - c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, - qp_ctx->c_in_pool, - req->req_id, - &c_req->c_in_dma); - - if (IS_ERR(c_req->c_in)) { + req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, + qp_ctx->c_in_pool, + req->req_id, + &req->in_dma); + if (IS_ERR(req->in)) { dev_err(dev, "fail to dma map input sgl buffers!\n"); - return PTR_ERR(c_req->c_in); + return PTR_ERR(req->in); + } + + if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { + ret = sec_aead_mac_init(a_req); + if (unlikely(ret)) { + dev_err(dev, "fail to init mac data for ICV!\n"); + return ret; + } } if (dst == src) { - c_req->c_out = c_req->c_in; - c_req->c_out_dma = c_req->c_in_dma; + c_req->c_out = req->in; + c_req->c_out_dma = req->in_dma; } else { c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, qp_ctx->c_out_pool, @@ -794,7 +978,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, if (IS_ERR(c_req->c_out)) { dev_err(dev, "fail to dma map output sgl buffers!\n"); - hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + hisi_acc_sg_buf_unmap(dev, src, req->in); return PTR_ERR(c_req->c_out); } } @@ -812,7 +996,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, sec_cipher_pbuf_unmap(ctx, req, dst); } else { if (dst != src) - hisi_acc_sg_buf_unmap(dev, src, c_req->c_in); + hisi_acc_sg_buf_unmap(dev, src, req->in); hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); } @@ -883,6 +1067,28 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, return 0; } +static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct sec_ctx *ctx = crypto_tfm_ctx(tfm); + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + + if (unlikely(a_ctx->fallback_aead_tfm)) + return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); + + return 0; +} + +static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, + struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); + crypto_aead_set_flags(a_ctx->fallback_aead_tfm, + crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); + return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); +} + static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, const u32 keylen, const enum sec_hash_alg a_alg, const enum sec_calg c_alg, @@ -891,6 +1097,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; @@ -900,6 +1107,23 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ctx->a_ctx.mac_len = mac_len; c_ctx->c_mode = c_mode; + if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { + ret 
= sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); + if (ret) { + dev_err(dev, "set sec aes ccm cipher key err!\n"); + return ret; + } + memcpy(c_ctx->c_key, key, keylen); + + if (unlikely(a_ctx->fallback_aead_tfm)) { + ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + if (ret) + return ret; + } + + return 0; + } + if (crypto_authenc_extractkeys(&keys, key, keylen)) goto bad_key; @@ -915,6 +1139,12 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } + if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || + (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { + dev_err(dev, "MAC or AUTH key length error!\n"); + goto bad_key; + } + return 0; bad_key: @@ -936,6 +1166,14 @@ GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, + SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, + SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, + SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, + SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { @@ -998,7 +1236,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); - sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma); + sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << @@ -1014,29 +1252,86 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; sec_sqe->type_cipher_auth = bd_type | cipher; - if (req->use_pbuf) + /* Set destination and source address type */ + if (req->use_pbuf) { sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; - else + da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; + } else { sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; + da_type = SEC_SGL << SEC_DST_SGL_OFFSET; + } + + sec_sqe->sdm_addr_type |= da_type; scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; - if (c_req->c_in_dma != c_req->c_out_dma) + if (req->in_dma != c_req->c_out_dma) de = 0x1 << SEC_DE_OFFSET; sec_sqe->sds_sa_type = (de | scene | sa_type); - /* Just set DST address type */ - if (req->use_pbuf) - da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; - else - da_type = SEC_SGL << SEC_DST_SGL_OFFSET; - sec_sqe->sdm_addr_type |= da_type; - sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); return 0; } +static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct sec_cipher_req *c_req = &req->c_req; + u32 bd_param = 0; + u16 cipher; + + memset(sec_sqe3, 0, sizeof(struct sec_sqe3)); + + sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); + sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); + sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); + sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); + + sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | + c_ctx->c_mode; + sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << + 
SEC_CKEY_OFFSET_V3); + + if (c_req->encrypt) + cipher = SEC_CIPHER_ENC; + else + cipher = SEC_CIPHER_DEC; + sec_sqe3->c_icv_key |= cpu_to_le16(cipher); + + if (req->use_pbuf) { + bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3; + bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3; + } else { + bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3; + bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3; + } + + bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3; + if (req->in_dma != c_req->c_out_dma) + bd_param |= 0x1 << SEC_DE_OFFSET_V3; + + bd_param |= SEC_BD_TYPE3; + sec_sqe3->bd_param = cpu_to_le32(bd_param); + + sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); + sec_sqe3->tag = cpu_to_le64(req); + + return 0; +} + +/* increment counter (128-bit int) */ +static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) +{ + do { + --bits; + nums += counter[bits]; + counter[bits] = nums & BITS_MASK; + nums >>= BYTE_BITS; + } while (bits && nums); +} + static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) { struct aead_request *aead_req = req->aead_req.aead_req; @@ -1060,10 +1355,17 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) cryptlen = aead_req->cryptlen; } - sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, - cryptlen - iv_size); - if (unlikely(sz != iv_size)) - dev_err(req->ctx->dev, "copy output iv error!\n"); + if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { + sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, + cryptlen - iv_size); + if (unlikely(sz != iv_size)) + dev_err(req->ctx->dev, "copy output iv error!\n"); + } else { + sz = cryptlen / iv_size; + if (cryptlen % iv_size) + sz += 1; + ctr_iv_inc(iv, iv_size, sz); + } } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, @@ -1094,8 +1396,9 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, sec_free_req_id(req); - /* IV output at encrypto of CBC mode */ - if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt) + /* IV output at encrypto of CBC/CTR mode */ + if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || + ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); while (1) { @@ -1112,12 +1415,125 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, sk_req->base.complete(&sk_req->base, err); } -static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req) +static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) +{ + struct aead_request *aead_req = req->aead_req.aead_req; + struct sec_cipher_req *c_req = &req->c_req; + struct sec_aead_req *a_req = &req->aead_req; + size_t authsize = ctx->a_ctx.mac_len; + u32 data_size = aead_req->cryptlen; + u8 flage = 0; + u8 cm, cl; + + /* the specification has been checked in aead_iv_demension_check() */ + cl = c_req->c_ivin[0] + 1; + c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; + memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); + c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; + + /* the last 3bit is L' */ + flage |= c_req->c_ivin[0] & IV_CL_MASK; + + /* the M' is bit3~bit5, the Flags is bit6 */ + cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; + flage |= cm << IV_CM_OFFSET; + if (aead_req->assoclen) + flage |= 0x01 << IV_FLAGS_OFFSET; + + memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); + a_req->a_ivin[0] = flage; + + /* + * the last 32bit is counter's initial number, + * but the nonce uses the first 16bit + * the tail 16bit fill with the cipher length + */ + if (!c_req->encrypt) + 
data_size = aead_req->cryptlen - authsize; + + a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = + data_size & IV_LAST_BYTE_MASK; + data_size >>= IV_BYTE_OFFSET; + a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = + data_size & IV_LAST_BYTE_MASK; +} + +static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); struct sec_cipher_req *c_req = &req->c_req; + struct sec_aead_req *a_req = &req->aead_req; memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); + + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { + /* + * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, + * the counter must set to 0x01 + */ + ctx->a_ctx.mac_len = authsize; + /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ + set_aead_auth_iv(ctx, req); + } + + /* GCM 12Byte Cipher_IV == Auth_IV */ + if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { + ctx->a_ctx.mac_len = authsize; + memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); + } +} + +static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe *sec_sqe) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct aead_request *aq = a_req->aead_req; + + /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ + sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); + + /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ + sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; + sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); + sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; + + if (dir) + sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; + else + sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; + + sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); + sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); + sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + + sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); +} + +static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe3 *sqe3) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct aead_request *aq = a_req->aead_req; + + /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ + sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); + + /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ + sqe3->a_key_addr = sqe3->c_key_addr; + sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); + sqe3->auth_mac_key |= SEC_NO_AUTH; + + if (dir) + sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; + else + sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; + + sqe3->a_len_key = cpu_to_le32(aq->assoclen); + sqe3->auth_src_offset = cpu_to_le16(0x0); + sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); } static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, @@ -1139,13 +1555,13 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); - sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; - - if (dir) + if (dir) { + sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; - else + } else { + sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; - + } sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); sec_sqe->type2.cipher_src_offset = 
cpu_to_le16((u16)aq->assoclen); @@ -1165,7 +1581,68 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) return ret; } - sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || + ctx->c_ctx.c_mode == SEC_CMODE_GCM) + sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); + else + sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); + + return 0; +} + +static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, + struct sec_req *req, struct sec_sqe3 *sqe3) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + struct aead_request *aq = a_req->aead_req; + + sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); + + sqe3->auth_mac_key |= + cpu_to_le32((u32)(ctx->mac_len / + SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + + sqe3->auth_mac_key |= + cpu_to_le32((u32)(ctx->a_key_len / + SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + + sqe3->auth_mac_key |= + cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); + + if (dir) { + sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); + sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; + } else { + sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); + sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; + } + sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); + + sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); + + sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); +} + +static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) +{ + struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; + int ret; + + ret = sec_skcipher_bd_fill_v3(ctx, req); + if (unlikely(ret)) { + dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); + return ret; + } + + if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || + ctx->c_ctx.c_mode == SEC_CMODE_GCM) + sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, + req, sec_sqe3); + else + sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, + req, sec_sqe3); return 0; } @@ -1254,7 +1731,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) goto err_uninit_req; /* Output IV as decrypto */ - if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) + if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || + ctx->c_ctx.c_mode == SEC_CMODE_CTR)) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); @@ -1296,20 +1774,51 @@ static const struct sec_req_op sec_skcipher_req_ops = { static const struct sec_req_op sec_aead_req_ops = { .buf_map = sec_aead_sgl_map, .buf_unmap = sec_aead_sgl_unmap, - .do_transfer = sec_aead_copy_iv, + .do_transfer = sec_aead_set_iv, .bd_fill = sec_aead_bd_fill, .bd_send = sec_bd_send, .callback = sec_aead_callback, .process = sec_process, }; +static const struct sec_req_op sec_skcipher_req_ops_v3 = { + .buf_map = sec_skcipher_sgl_map, + .buf_unmap = sec_skcipher_sgl_unmap, + .do_transfer = sec_skcipher_copy_iv, + .bd_fill = sec_skcipher_bd_fill_v3, + .bd_send = sec_bd_send, + .callback = sec_skcipher_callback, + .process = sec_process, +}; + +static const struct sec_req_op sec_aead_req_ops_v3 = { + .buf_map = sec_aead_sgl_map, + .buf_unmap = sec_aead_sgl_unmap, + .do_transfer = sec_aead_set_iv, + .bd_fill = sec_aead_bd_fill_v3, + .bd_send = sec_bd_send, + .callback = sec_aead_callback, + .process = sec_process, +}; + static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; - ctx->req_op = &sec_skcipher_req_ops; + ret = 
sec_skcipher_init(tfm); + if (ret) + return ret; - return sec_skcipher_init(tfm); + if (ctx->sec->qm.ver < QM_HW_V3) { + ctx->type_supported = SEC_BD_TYPE2; + ctx->req_op = &sec_skcipher_req_ops; + } else { + ctx->type_supported = SEC_BD_TYPE3; + ctx->req_op = &sec_skcipher_req_ops_v3; + } + + return ret; } static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) @@ -1325,15 +1834,22 @@ static int sec_aead_init(struct crypto_aead *tfm) crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); - if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(ctx->dev, "get error aead iv size!\n"); + if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || + ctx->c_ctx.ivsize > SEC_IV_SIZE) { + pr_err("get error aead iv size!\n"); return -EINVAL; } - ctx->req_op = &sec_aead_req_ops; ret = sec_ctx_base_init(ctx); if (ret) return ret; + if (ctx->sec->qm.ver < QM_HW_V3) { + ctx->type_supported = SEC_BD_TYPE2; + ctx->req_op = &sec_aead_req_ops; + } else { + ctx->type_supported = SEC_BD_TYPE3; + ctx->req_op = &sec_aead_req_ops_v3; + } ret = sec_auth_init(ctx); if (ret) @@ -1391,6 +1907,41 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm) sec_aead_exit(tfm); } +static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) +{ + struct aead_alg *alg = crypto_aead_alg(tfm); + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + const char *aead_name = alg->base.cra_name; + int ret; + + ret = sec_aead_init(tfm); + if (ret) { + dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); + return ret; + } + + a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_ASYNC); + if (IS_ERR(a_ctx->fallback_aead_tfm)) { + dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); + sec_aead_exit(tfm); + return PTR_ERR(a_ctx->fallback_aead_tfm); + } + a_ctx->fallback = false; + + return 0; +} + +static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm) +{ + struct sec_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); + sec_aead_exit(tfm); +} + static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm) { return sec_aead_ctx_init(tfm, "sha1"); @@ -1429,6 +1980,14 @@ static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, ret = -EINVAL; } break; + case SEC_CMODE_CFB: + case SEC_CMODE_OFB: + case SEC_CMODE_CTR: + if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { + dev_err(dev, "skcipher HW version error!\n"); + ret = -EINVAL; + } + break; default: ret = -EINVAL; } @@ -1442,7 +2001,8 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; - if (unlikely(!sk_req->src || !sk_req->dst)) { + if (unlikely(!sk_req->src || !sk_req->dst || + sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } @@ -1468,6 +2028,37 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } +static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, + struct skcipher_request *sreq, bool encrypt) +{ + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; + int ret; + + SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); + + if (!c_ctx->fbtfm) { + dev_err(dev, "failed to check fallback tfm\n"); + return -EINVAL; + } + + skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm); + + /* software need sync mode to do crypto */ + skcipher_request_set_callback(subreq, sreq->base.flags, + NULL, 
NULL); + skcipher_request_set_crypt(subreq, sreq->src, sreq->dst, + sreq->cryptlen, sreq->iv); + if (encrypt) + ret = crypto_skcipher_encrypt(subreq); + else + ret = crypto_skcipher_decrypt(subreq); + + skcipher_request_zero(subreq); + + return ret; +} + static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); @@ -1475,8 +2066,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; - if (!sk_req->cryptlen) + if (!sk_req->cryptlen) { + if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) + return -EINVAL; return 0; + } req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; @@ -1487,6 +2081,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) if (unlikely(ret)) return -EINVAL; + if (unlikely(ctx->c_ctx.fallback)) + return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); + return ctx->req_op->process(ctx, req); } @@ -1507,7 +2104,9 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ + .cra_flags = CRYPTO_ALG_ASYNC |\ + CRYPTO_ALG_ALLOCATES_MEMORY |\ + CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1541,11 +2140,11 @@ static struct skcipher_alg sec_skciphers[] = { AES_BLOCK_SIZE, AES_BLOCK_SIZE) SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, - SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0) SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, - SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, + SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE) SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, @@ -1557,6 +2156,90 @@ static struct skcipher_alg sec_skciphers[] = { AES_BLOCK_SIZE, AES_BLOCK_SIZE) }; +static struct skcipher_alg sec_skciphers_v3[] = { + SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, + AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, + AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, + AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) + + SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, + AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, + SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE) +}; + +static int aead_iv_demension_check(struct aead_request *aead_req) +{ + u8 cl; + + cl = aead_req->iv[0] + 1; + if (cl < IV_CL_MIN || cl > IV_CL_MAX) + return -EINVAL; + + if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl)) + return -EOVERFLOW; + + return 0; +} + +static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) +{ + struct aead_request *req = sreq->aead_req.aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + size_t authsize = crypto_aead_authsize(tfm); + u8 c_mode = ctx->c_ctx.c_mode; + struct device *dev = ctx->dev; + int ret; + + if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || + req->assoclen > 
SEC_MAX_AAD_LEN)) { + dev_err(dev, "aead input spec error!\n"); + return -EINVAL; + } + + if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) || + (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN || + authsize & MAC_LEN_MASK)))) { + dev_err(dev, "aead input mac length error!\n"); + return -EINVAL; + } + + if (c_mode == SEC_CMODE_CCM) { + ret = aead_iv_demension_check(req); + if (ret) { + dev_err(dev, "aead input iv param error!\n"); + return ret; + } + } + + if (sreq->c_req.encrypt) + sreq->c_req.c_len = req->cryptlen; + else + sreq->c_req.c_len = req->cryptlen - authsize; + if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead crypto length error!\n"); + return -EINVAL; + } + } + + return 0; +} + static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct aead_request *req = sreq->aead_req.aead_req; @@ -1565,34 +2248,61 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; - if (unlikely(!req->src || !req->dst || !req->cryptlen || - req->assoclen > SEC_MAX_AAD_LEN)) { + if (unlikely(!req->src || !req->dst)) { dev_err(dev, "aead input param error!\n"); return -EINVAL; } + if (ctx->sec->qm.ver == QM_HW_V2) { + if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && + req->cryptlen <= authsize))) { + dev_err(dev, "Kunpeng920 not support 0 length!\n"); + ctx->a_ctx.fallback = true; + return -EINVAL; + } + } + + /* Support AES or SM4 */ + if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) { + dev_err(dev, "aead crypto alg error!\n"); + return -EINVAL; + } + + if (unlikely(sec_aead_spec_check(ctx, sreq))) + return -EINVAL; + if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) sreq->use_pbuf = true; else sreq->use_pbuf = false; - /* Support AES only */ - if (unlikely(c_alg != SEC_CALG_AES)) { - dev_err(dev, "aead crypto alg error!\n"); - return -EINVAL; - } - if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - authsize; + return 0; +} - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); +static int sec_aead_soft_crypto(struct sec_ctx *ctx, + struct aead_request *aead_req, + bool encrypt) +{ + struct aead_request *subreq = aead_request_ctx(aead_req); + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + struct device *dev = ctx->dev; + + /* Kunpeng920 aead mode not support input 0 size */ + if (!a_ctx->fallback_aead_tfm) { + dev_err(dev, "aead fallback tfm is NULL!\n"); return -EINVAL; } - return 0; + aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); + aead_request_set_callback(subreq, aead_req->base.flags, + aead_req->base.complete, aead_req->base.data); + aead_request_set_crypt(subreq, aead_req->src, aead_req->dst, + aead_req->cryptlen, aead_req->iv); + aead_request_set_ad(subreq, aead_req->assoclen); + + return encrypt ? 
crypto_aead_encrypt(subreq) : + crypto_aead_decrypt(subreq); } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) @@ -1608,8 +2318,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) req->ctx = ctx; ret = sec_aead_param_check(ctx, req); - if (unlikely(ret)) + if (unlikely(ret)) { + if (ctx->a_ctx.fallback) + return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; + } return ctx->req_op->process(ctx, req); } @@ -1624,14 +2337,16 @@ static int sec_aead_decrypt(struct aead_request *a_req) return sec_aead_crypto(a_req, false); } -#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\ +#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\ ctx_exit, blk_size, iv_size, max_authsize)\ {\ .base = {\ .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ - .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\ + .cra_flags = CRYPTO_ALG_ASYNC |\ + CRYPTO_ALG_ALLOCATES_MEMORY |\ + CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ @@ -1639,28 +2354,46 @@ static int sec_aead_decrypt(struct aead_request *a_req) .init = ctx_init,\ .exit = ctx_exit,\ .setkey = sec_set_key,\ + .setauthsize = sec_aead_setauthsize,\ .decrypt = sec_aead_decrypt,\ .encrypt = sec_aead_encrypt,\ .ivsize = iv_size,\ .maxauthsize = max_authsize,\ } -#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\ - SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\ - sec_aead_ctx_exit, blksize, ivsize, authsize) - static struct aead_alg sec_aeads[] = { SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), + sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), + sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, - AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), + sec_aead_ctx_exit, AES_BLOCK_SIZE, + AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), + + SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, + AES_BLOCK_SIZE, AES_BLOCK_SIZE), + + SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, + SEC_AIV_SIZE, AES_BLOCK_SIZE) +}; + +static struct aead_alg sec_aeads_v3[] = { + SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, + AES_BLOCK_SIZE, AES_BLOCK_SIZE), + + SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, + sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, + SEC_AIV_SIZE, AES_BLOCK_SIZE) }; int sec_register_to_crypto(struct hisi_qm *qm) @@ -1673,16 +2406,45 @@ int sec_register_to_crypto(struct hisi_qm *qm) if (ret) return ret; + if (qm->ver > QM_HW_V2) { + ret = crypto_register_skciphers(sec_skciphers_v3, + ARRAY_SIZE(sec_skciphers_v3)); + if (ret) + goto reg_skcipher_fail; + } + ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); if (ret) - crypto_unregister_skciphers(sec_skciphers, - ARRAY_SIZE(sec_skciphers)); + goto reg_aead_fail; + if (qm->ver > QM_HW_V2) { + ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3)); + if (ret) + goto 
reg_aead_v3_fail; + } + return ret; + +reg_aead_v3_fail: + crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); +reg_aead_fail: + if (qm->ver > QM_HW_V2) + crypto_unregister_skciphers(sec_skciphers_v3, + ARRAY_SIZE(sec_skciphers_v3)); +reg_skcipher_fail: + crypto_unregister_skciphers(sec_skciphers, + ARRAY_SIZE(sec_skciphers)); return ret; } void sec_unregister_from_crypto(struct hisi_qm *qm) { + if (qm->ver > QM_HW_V2) + crypto_unregister_aeads(sec_aeads_v3, + ARRAY_SIZE(sec_aeads_v3)); + crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); + + if (qm->ver > QM_HW_V2) + crypto_unregister_skciphers(sec_skciphers_v3, + ARRAY_SIZE(sec_skciphers_v3)); crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); - crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads)); } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index 9c78edac56a4..9f71c358a6d3 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -4,9 +4,11 @@ #ifndef __HISI_SEC_V2_CRYPTO_H #define __HISI_SEC_V2_CRYPTO_H +#define SEC_AIV_SIZE 12 #define SEC_IV_SIZE 24 #define SEC_MAX_KEY_SIZE 64 #define SEC_COMM_SCENE 0 +#define SEC_MIN_BLOCK_SZ 1 enum sec_calg { SEC_CALG_3DES = 0x1, @@ -21,6 +23,11 @@ enum sec_hash_alg { }; enum sec_mac_len { + SEC_HMAC_CCM_MAC = 16, + SEC_HMAC_GCM_MAC = 16, + SEC_SM3_MAC = 32, + SEC_HMAC_SM3_MAC = 32, + SEC_HMAC_MD5_MAC = 16, SEC_HMAC_SHA1_MAC = 20, SEC_HMAC_SHA256_MAC = 32, SEC_HMAC_SHA512_MAC = 64, @@ -29,7 +36,11 @@ enum sec_mac_len { enum sec_cmode { SEC_CMODE_ECB = 0x0, SEC_CMODE_CBC = 0x1, + SEC_CMODE_CFB = 0x2, + SEC_CMODE_OFB = 0x3, SEC_CMODE_CTR = 0x4, + SEC_CMODE_CCM = 0x5, + SEC_CMODE_GCM = 0x6, SEC_CMODE_XTS = 0x7, }; @@ -44,6 +55,7 @@ enum sec_ckey_type { enum sec_bd_type { SEC_BD_TYPE1 = 0x1, SEC_BD_TYPE2 = 0x2, + SEC_BD_TYPE3 = 0x3, }; enum sec_auth { @@ -63,6 +75,24 @@ enum sec_addr_type { SEC_PRP = 0x2, }; +struct bd_status { + u64 tag; + u8 done; + u8 err_type; + u16 flag; + u16 icv; +}; + +enum { + AUTHPAD_PAD, + AUTHPAD_NOPAD, +}; + +enum { + AIGEN_GEN, + AIGEN_NOGEN, +}; + struct sec_sqe_type2 { /* * mac_len: 0~4 bits @@ -209,6 +239,169 @@ struct sec_sqe { struct sec_sqe_type2 type2; }; +struct bd3_auth_ivin { + __le64 a_ivin_addr; + __le32 rsvd0; + __le32 rsvd1; +} __packed __aligned(4); + +struct bd3_skip_data { + __le32 rsvd0; + + /* + * gran_num: 0~15 bits + * reserved: 16~31 bits + */ + __le32 gran_num; + + /* + * src_skip_data_len: 0~24 bits + * reserved: 25~31 bits + */ + __le32 src_skip_data_len; + + /* + * dst_skip_data_len: 0~24 bits + * reserved: 25~31 bits + */ + __le32 dst_skip_data_len; +}; + +struct bd3_stream_scene { + __le64 c_ivin_addr; + __le64 long_a_data_len; + + /* + * auth_pad: 0~1 bits + * stream_protocol: 2~4 bits + * reserved: 5~7 bits + */ + __u8 stream_auth_pad; + __u8 plaintext_type; + __le16 pad_len_1p3; +} __packed __aligned(4); + +struct bd3_no_scene { + __le64 c_ivin_addr; + __le32 rsvd0; + __le32 rsvd1; + __le32 rsvd2; +} __packed __aligned(4); + +struct bd3_check_sum { + __u8 rsvd0; + __u8 hac_sva_status; + __le16 check_sum_i; +}; + +struct bd3_tls_type_back { + __u8 tls_1p3_type_back; + __u8 hac_sva_status; + __le16 pad_len_1p3_back; +}; + +struct sec_sqe3 { + /* + * type: 0~3 bit + * bd_invalid: 4 bit + * scene: 5~8 bit + * de: 9~10 bit + * src_addr_type: 11~13 bit + * dst_addr_type: 14~16 bit + * mac_addr_type: 17~19 bit + * reserved: 20~31 bits + */ + __le32 bd_param; + + /* + * cipher: 0~1 bits + * ci_gen: 2~3 
bit + * c_icv_len: 4~9 bit + * c_width: 10~12 bits + * c_key_len: 13~15 bits + */ + __le16 c_icv_key; + + /* + * c_mode : 0~3 bits + * c_alg : 4~7 bits + */ + __u8 c_mode_alg; + + /* + * nonce_len : 0~3 bits + * huk : 4 bits + * cal_iv_addr_en : 5 bits + * seq : 6 bits + * reserved : 7 bits + */ + __u8 huk_iv_seq; + + __le64 tag; + __le64 data_src_addr; + __le64 a_key_addr; + union { + struct bd3_auth_ivin auth_ivin; + struct bd3_skip_data skip_data; + }; + + __le64 c_key_addr; + + /* + * auth: 0~1 bits + * ai_gen: 2~3 bits + * mac_len: 4~8 bits + * akey_len: 9~14 bits + * a_alg: 15~20 bits + * key_sel: 21~24 bits + * updata_key: 25 bits + * reserved: 26~31 bits + */ + __le32 auth_mac_key; + __le32 salt; + __le16 auth_src_offset; + __le16 cipher_src_offset; + + /* + * auth_len: 0~23 bit + * auth_key_offset: 24~31 bits + */ + __le32 a_len_key; + + /* + * cipher_len: 0~23 bit + * auth_ivin_offset: 24~31 bits + */ + __le32 c_len_ivin; + __le64 data_dst_addr; + __le64 mac_addr; + union { + struct bd3_stream_scene stream_scene; + struct bd3_no_scene no_scene; + }; + + /* + * done: 0 bit + * icv: 1~3 bit + * csc: 4~6 bit + * flag: 7~10 bit + * reserved: 11~15 bit + */ + __le16 done_flag; + __u8 error_type; + __u8 warning_type; + union { + __le32 mac_i; + __le32 kek_key_addr_l; + }; + union { + __le32 kek_key_addr_h; + struct bd3_check_sum check_sum; + struct bd3_tls_type_back tls_type_back; + }; + __le32 counter; +} __packed __aligned(4); + int sec_register_to_crypto(struct hisi_qm *qm); void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 6f0062d4408c..d120ce3e34ed 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -52,6 +52,7 @@ #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_RAS_NFE_ENB_MSK 0x7c177 +#define SEC_OOO_SHUTDOWN_SEL 0x301014 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x301100 #define SEC_MEM_INIT_DONE_REG 0x301104 @@ -84,6 +85,12 @@ #define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET) #define SEC_CORE_INT_STATUS_M_ECC BIT(2) +#define SEC_PREFETCH_CFG 0x301130 +#define SEC_SVA_TRANS 0x301EC4 +#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11))) +#define SEC_PREFETCH_DISABLE BIT(1) +#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11)) + #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 #define SEC_DBGFS_VAL_MAX_LEN 20 @@ -91,6 +98,7 @@ #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 +#define SEC_SHAPER_TYPE_RATE 128 struct sec_hw_error { u32 int_msk; @@ -331,6 +339,42 @@ static u8 sec_get_endian(struct hisi_qm *qm) return SEC_64BE; } +static void sec_open_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + if (qm->ver < QM_HW_V3) + return; + + /* Enable prefetch */ + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val &= SEC_PREFETCH_ENABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, + val, !(val & SEC_PREFETCH_DISABLE), + SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to open sva prefetch\n"); +} + +static void sec_close_sva_prefetch(struct hisi_qm *qm) +{ + u32 val; + int ret; + + val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); + val |= SEC_PREFETCH_DISABLE; + writel(val, qm->io_base + SEC_PREFETCH_CFG); + + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, + val, !(val & SEC_SVA_DISABLE_READY), + SEC_DELAY_10_US, 
SEC_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to close sva prefetch\n"); +} + static int sec_engine_init(struct hisi_qm *qm) { int ret; @@ -430,53 +474,60 @@ static void sec_debug_regs_clear(struct hisi_qm *qm) hisi_qm_debug_regs_clear(qm); } -static void sec_hw_error_enable(struct hisi_qm *qm) +static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) { - u32 val; + u32 val1, val2; + + val1 = readl(qm->io_base + SEC_CONTROL_REG); + if (enable) { + val1 |= SEC_AXI_SHUTDOWN_ENABLE; + val2 = SEC_RAS_NFE_ENB_MSK; + } else { + val1 &= SEC_AXI_SHUTDOWN_DISABLE; + val2 = 0x0; + } + + if (qm->ver > QM_HW_V2) + writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL); + + writel(val1, qm->io_base + SEC_CONTROL_REG); +} +static void sec_hw_error_enable(struct hisi_qm *qm) +{ if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } - val = readl(qm->io_base + SEC_CONTROL_REG); - /* clear SEC hw error source if having */ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); - /* enable SEC hw error interrupts */ - writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); - /* enable RAS int */ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); - /* enable SEC block master OOO when m-bit error occur */ - val = val | SEC_AXI_SHUTDOWN_ENABLE; + /* enable SEC block master OOO when nfe occurs on Kunpeng930 */ + sec_master_ooo_ctrl(qm, true); - writel(val, qm->io_base + SEC_CONTROL_REG); + /* enable SEC hw error interrupts */ + writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); } static void sec_hw_error_disable(struct hisi_qm *qm) { - u32 val; + /* disable SEC hw error interrupts */ + writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); - val = readl(qm->io_base + SEC_CONTROL_REG); + /* disable SEC block master OOO when nfe occurs on Kunpeng930 */ + sec_master_ooo_ctrl(qm, false); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); - - /* disable SEC hw error interrupts */ - writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); - - /* disable SEC block master OOO when m-bit error occur */ - val = val & SEC_AXI_SHUTDOWN_DISABLE; - - writel(val, qm->io_base + SEC_CONTROL_REG); } static u32 sec_clear_enable_read(struct sec_debug_file *file) @@ -743,6 +794,8 @@ static const struct hisi_qm_err_ini sec_err_ini = { .clear_dev_hw_err_status = sec_clear_hw_err_status, .log_dev_hw_err = sec_log_hw_error, .open_axi_master_ooo = sec_open_axi_master_ooo, + .open_sva_prefetch = sec_open_sva_prefetch, + .close_sva_prefetch = sec_close_sva_prefetch, .err_info_init = sec_err_info_init, }; @@ -758,6 +811,7 @@ static int sec_pf_probe_init(struct sec_dev *sec) if (ret) return ret; + sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); @@ -821,6 +875,7 @@ static void sec_qm_uninit(struct hisi_qm *qm) static int sec_probe_init(struct sec_dev *sec) { + u32 type_rate = SEC_SHAPER_TYPE_RATE; struct hisi_qm *qm = &sec->qm; int ret; @@ -828,6 +883,11 @@ static int sec_probe_init(struct sec_dev *sec) ret = sec_pf_probe_init(sec); if (ret) return ret; + /* enable shaper type 0 */ + if (qm->ver >= QM_HW_V3) { + type_rate |= QM_SHAPER_ENABLE; + qm->type_rate = type_rate; + } } return 0; |
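[Aside: the ctr_iv_inc() helper added in sec_crypto.c above, which sec_update_iv() now uses to advance the IV for CTR mode, is easy to sanity-check outside the driver. The snippet below is a userspace reproduction for illustration only; the BITS_MASK/BYTE_BITS values mirror the new defines, everything else (main, test values) is assumed.]

#include <stdint.h>
#include <stdio.h>

#define BITS_MASK 0xFF
#define BYTE_BITS 0x8

/* Mirror of the driver's ctr_iv_inc(): add 'nums' blocks to a big-endian
 * counter of 'len' bytes, propagating the carry from the last byte upward. */
static void ctr_iv_inc(uint8_t *counter, uint8_t len, uint32_t nums)
{
	do {
		--len;
		nums += counter[len];
		counter[len] = nums & BITS_MASK;
		nums >>= BYTE_BITS;
	} while (len && nums);
}

int main(void)
{
	uint8_t iv[16] = { 0 };

	iv[15] = 0xFE;                 /* counter close to a byte boundary */
	ctr_iv_inc(iv, sizeof(iv), 4); /* advance by four blocks */

	for (size_t i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");                  /* ends in ...0102: carry into byte 14 */
	return 0;
}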