Diffstat (limited to 'drivers/crypto/chelsio')
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.c        | 303
-rw-r--r--   drivers/crypto/chelsio/chcr_algo.h        |   3
-rw-r--r--   drivers/crypto/chelsio/chcr_core.h        |   2
-rw-r--r--   drivers/crypto/chelsio/chcr_crypto.h      |  15
-rw-r--r--   drivers/crypto/chelsio/chcr_ipsec.c       |  35
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls.h      |  11
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_cm.c   |  28
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_hw.c   |   6
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_io.c   | 158
-rw-r--r--   drivers/crypto/chelsio/chtls/chtls_main.c |  16
10 files changed, 302 insertions, 275 deletions
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index 59fe6631e73e..b916c4eb608c 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -203,13 +203,8 @@ static inline void chcr_handle_aead_resp(struct aead_request *req, int err) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); - chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); - if (reqctx->b0_dma) - dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, - reqctx->b0_len, DMA_BIDIRECTIONAL); + chcr_aead_common_exit(req); if (reqctx->verify == VERIFY_SW) { chcr_verify_tag(req, input, &err); reqctx->verify = VERIFY_HW; @@ -638,7 +633,6 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src, src = sg_next(src); srcskip = 0; } - if (sg_dma_len(dst) == dstskip) { dst = sg_next(dst); dstskip = 0; @@ -688,6 +682,7 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher, int err; SKCIPHER_REQUEST_ON_STACK(subreq, cipher); + skcipher_request_set_tfm(subreq, cipher); skcipher_request_set_callback(subreq, flags, NULL, NULL); skcipher_request_set_crypt(subreq, src, dst, @@ -760,13 +755,13 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); - dst_size = get_space_for_phys_dsgl(nents + 1); + dst_size = get_space_for_phys_dsgl(nents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, CHCR_SRC_SG_SIZE, reqctx->src_ofst); - temp = reqctx->imm ? roundup(IV + wrparam->req->nbytes, 16) : - (sgl_len(nents + MIN_CIPHER_SG) * 8); + temp = reqctx->imm ? roundup(wrparam->bytes, 16) : + (sgl_len(nents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); skb = alloc_skb(SGE_MAX_WR_LEN, flags); @@ -788,7 +783,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) ablkctx->ciph_mode, 0, 0, IV >> 1); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, - 0, 0, dst_size); + 0, 1, dst_size); chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; if ((reqctx->op == CHCR_DECRYPT_OP) && @@ -818,8 +813,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); atomic_inc(&adap->chcr_stats.cipher_rqst); - temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len - +(reqctx->imm ? (IV + wrparam->bytes) : 0); + temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV + + (reqctx->imm ? 
(wrparam->bytes) : 0); create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, transhdr_len, temp, ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); @@ -1022,7 +1017,7 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, ret = crypto_cipher_setkey(cipher, key, keylen); if (ret) goto out; - /*H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0*/ + crypto_cipher_encrypt_one(cipher, iv, iv); for (i = 0; i < round8; i++) gf128mul_x8_ble((le128 *)iv, (le128 *)iv); @@ -1113,16 +1108,8 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, goto complete; } - if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], - c_ctx(tfm)->tx_qidx))) { - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { - err = -EBUSY; - goto unmap; - } - - } if (!reqctx->imm) { - bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1, + bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), reqctx->src_ofst, reqctx->dst_ofst); if ((bytes + reqctx->processed) >= req->nbytes) @@ -1133,11 +1120,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req, /*CTR mode counter overfloa*/ bytes = req->nbytes - reqctx->processed; } - dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, - reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); - dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, - reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); if (err) goto unmap; @@ -1212,7 +1195,6 @@ static int process_cipher(struct ablkcipher_request *req, dnents = sg_nents_xlen(req->dst, req->nbytes, CHCR_DST_SG_SIZE, 0); - dnents += 1; // IV phys_dsgl = get_space_for_phys_dsgl(dnents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); @@ -1225,8 +1207,7 @@ static int process_cipher(struct ablkcipher_request *req, } if (!reqctx->imm) { - bytes = chcr_sg_ent_in_wr(req->src, req->dst, - MIN_CIPHER_SG, + bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), 0, 0); if ((bytes + reqctx->processed) >= req->nbytes) @@ -1293,13 +1274,14 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) { struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct sk_buff *skb = NULL; - int err; + int err, isfull = 0; struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], @@ -1309,7 +1291,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? 
-EBUSY : -EINPROGRESS; } static int chcr_aes_decrypt(struct ablkcipher_request *req) @@ -1317,12 +1299,13 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); struct sk_buff *skb = NULL; - int err; + int err, isfull = 0; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], c_ctx(tfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], @@ -1332,7 +1315,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; } static int chcr_device_init(struct chcr_context *ctx) @@ -1574,14 +1557,15 @@ static int chcr_ahash_update(struct ahash_request *req) u8 remainder = 0, bs; unsigned int nbytes = req->nbytes; struct hash_wr_param params; - int error; + int error, isfull = 0; bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } if (nbytes + req_ctx->reqlen >= bs) { @@ -1633,7 +1617,7 @@ static int chcr_ahash_update(struct ahash_request *req) set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1710,15 +1694,16 @@ static int chcr_ahash_finup(struct ahash_request *req) struct sk_buff *skb; struct hash_wr_param params; u8 bs; - int error; + int error, isfull = 0; bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); @@ -1777,7 +1762,7 @@ static int chcr_ahash_finup(struct ahash_request *req) set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1791,7 +1776,7 @@ static int chcr_ahash_digest(struct ahash_request *req) struct sk_buff *skb; struct hash_wr_param params; u8 bs; - int error; + int error, isfull = 0; rtfm->init(req); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); @@ -1799,8 +1784,9 @@ static int chcr_ahash_digest(struct ahash_request *req) u_ctx = ULD_CTX(h_ctx(rtfm)); if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], h_ctx(rtfm)->tx_qidx))) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } chcr_init_hctx_per_wr(req_ctx); @@ -1856,7 +1842,7 @@ static int chcr_ahash_digest(struct ahash_request *req) skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? 
-EBUSY : -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); return error; @@ -1875,11 +1861,6 @@ static int chcr_ahash_continue(struct ahash_request *req) bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); u_ctx = ULD_CTX(h_ctx(rtfm)); - if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], - h_ctx(rtfm)->tx_qidx))) { - if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; - } get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { @@ -2192,22 +2173,35 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) } } -static int chcr_aead_common_init(struct aead_request *req, - unsigned short op_type) +inline void chcr_aead_common_exit(struct aead_request *req) +{ + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); + + chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); +} + +static int chcr_aead_common_init(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); - int error = -EINVAL; unsigned int authsize = crypto_aead_authsize(tfm); + int error = -EINVAL; /* validate key size */ if (aeadctx->enckey_len == 0) goto err; - if (op_type && req->cryptlen < authsize) + if (reqctx->op && req->cryptlen < authsize) goto err; + if (reqctx->b0_len) + reqctx->scratch_pad = reqctx->iv + IV; + else + reqctx->scratch_pad = NULL; + error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); + reqctx->op); if (error) { error = -ENOMEM; goto err; @@ -2244,7 +2238,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) aead_request_set_tfm(subreq, aeadctx->sw_cipher); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); - aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, + aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(subreq, req->assoclen); return op_type ? crypto_aead_decrypt(subreq) : @@ -2253,8 +2247,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) static struct sk_buff *create_authenc_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2278,18 +2271,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, if (req->cryptlen == 0) return NULL; - reqctx->b0_dma = 0; + reqctx->b0_len = 0; + error = chcr_aead_common_init(req); + if (error) + return ERR_PTR(error); + if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || - subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { + subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { null = 1; assoclen = 0; + reqctx->aad_nents = 0; } - error = chcr_aead_common_init(req, op_type); - if (error) - return ERR_PTR(error); dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen + - (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, + (reqctx->op ? 
-authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_AUTH_SG; // For IV @@ -2306,11 +2301,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, - transhdr_len, op_type)) { + transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { @@ -2320,7 +2314,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, chcr_req = __skb_put_zero(skb, transhdr_len); - temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; + temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; /* * Input order is AAD,IV and Payload. where IV should be included as @@ -2344,8 +2338,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; else temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; - chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, - (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, + chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, + (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, temp, actx->auth_mode, aeadctx->hmac_ctrl, IV >> 1); @@ -2353,7 +2347,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, 0, 0, dst_size); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; - if (op_type == CHCR_ENCRYPT_OP || + if (reqctx->op == CHCR_ENCRYPT_OP || subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) memcpy(chcr_req->key_ctx.key, aeadctx->key, @@ -2376,20 +2370,18 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, } phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.cipher_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, 0); reqctx->skb = skb; - reqctx->op = op_type; return skb; err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } @@ -2408,11 +2400,14 @@ int chcr_aead_dma_map(struct device *dev, -authsize : authsize); if (!req->cryptlen || !dst_size) return 0; - reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, + reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, reqctx->iv_dma)) return -ENOMEM; - + if (reqctx->b0_len) + reqctx->b0_dma = reqctx->iv_dma + IV; + else + reqctx->b0_dma = 0; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); @@ -2452,7 +2447,7 @@ void chcr_aead_dma_unmap(struct device *dev, if (!req->cryptlen || !dst_size) return; - dma_unmap_single(dev, reqctx->iv_dma, IV, + dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents(req->src), @@ -2467,8 +2462,7 @@ void chcr_aead_dma_unmap(struct device *dev, void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, - unsigned int assoclen, - unsigned short op_type) + unsigned int assoclen) { struct ulptx_walk ulp_walk; struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2476,7 +2470,7 @@ void chcr_add_aead_src_ent(struct aead_request *req, if (reqctx->imm) { u8 *buf = (u8 *)ulptx; - if (reqctx->b0_dma) { + if (reqctx->b0_len) { memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); buf += reqctx->b0_len; } @@ -2489,7 +2483,7 @@ void chcr_add_aead_src_ent(struct aead_request *req, buf, req->cryptlen, req->assoclen); } else { ulptx_walk_init(&ulp_walk, ulptx); - if (reqctx->b0_dma) + if (reqctx->b0_len) ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, &reqctx->b0_dma); ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); @@ -2503,7 +2497,6 @@ void chcr_add_aead_src_ent(struct aead_request *req, void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, unsigned int assoclen, - unsigned short op_type, unsigned short qid) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2513,32 +2506,30 @@ void chcr_add_aead_dst_ent(struct aead_request *req, u32 temp; dsgl_walk_init(&dsgl_walk, phys_cpl); - if (reqctx->b0_dma) + if (reqctx->b0_len) dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); - temp = req->cryptlen + (op_type ? -authsize : authsize); + temp = req->cryptlen + (reqctx->op ? 
-authsize : authsize); dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); dsgl_walk_end(&dsgl_walk, qid); } void chcr_add_cipher_src_ent(struct ablkcipher_request *req, - struct ulptx_sgl *ulptx, + void *ulptx, struct cipher_wr_param *wrparam) { struct ulptx_walk ulp_walk; struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); + u8 *buf = ulptx; + memcpy(buf, reqctx->iv, IV); + buf += IV; if (reqctx->imm) { - u8 *buf = (u8 *)ulptx; - - memcpy(buf, reqctx->iv, IV); - buf += IV; sg_pcopy_to_buffer(req->src, sg_nents(req->src), buf, wrparam->bytes, reqctx->processed); } else { - ulptx_walk_init(&ulp_walk, ulptx); - ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); + ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf); ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, reqctx->src_ofst); reqctx->srcsg = ulp_walk.last_sg; @@ -2556,7 +2547,6 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, struct dsgl_walk dsgl_walk; dsgl_walk_init(&dsgl_walk, phys_cpl); - dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, reqctx->dst_ofst); reqctx->dstsg = dsgl_walk.last_sg; @@ -2630,12 +2620,6 @@ int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req) { int error; - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); - - reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, reqctx->iv_dma)) - return -ENOMEM; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents(req->src), @@ -2658,17 +2642,12 @@ int chcr_cipher_dma_map(struct device *dev, return 0; err: - dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); return -ENOMEM; } void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req) { - struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); - - dma_unmap_single(dev, reqctx->iv_dma, IV, - DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); @@ -2738,7 +2717,8 @@ static inline int crypto_ccm_check_iv(const u8 *iv) static int ccm_format_packet(struct aead_request *req, struct chcr_aead_ctx *aeadctx, unsigned int sub_type, - unsigned short op_type) + unsigned short op_type, + unsigned int assoclen) { struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); int rc = 0; @@ -2748,13 +2728,13 @@ static int ccm_format_packet(struct aead_request *req, memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); memcpy(reqctx->iv + 4, req->iv, 8); memset(reqctx->iv + 12, 0, 4); - *((unsigned short *)(reqctx->scratch_pad + 16)) = - htons(req->assoclen - 8); } else { memcpy(reqctx->iv, req->iv, 16); - *((unsigned short *)(reqctx->scratch_pad + 16)) = - htons(req->assoclen); } + if (assoclen) + *((unsigned short *)(reqctx->scratch_pad + 16)) = + htons(assoclen); + generate_b0(req, aeadctx, op_type); /* zero the ctr value */ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); @@ -2836,8 +2816,7 @@ static int aead_ccm_validate_input(unsigned short op_type, static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2855,22 +2834,20 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, GFP_ATOMIC; struct adapter *adap = padap(a_ctx(tfm)->dev); - reqctx->b0_dma = 0; sub_type = get_aead_subtype(tfm); if (sub_type 
== CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen -= 8; - error = chcr_aead_common_init(req, op_type); + reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); + error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); - - reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); - error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type); + error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); if (error) goto err; dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen - + (op_type ? -authsize : authsize), + + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_CCM_SG; // For IV and B0 dst_size = get_space_for_phys_dsgl(dnents); @@ -2886,11 +2863,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - - reqctx->b0_len, transhdr_len, op_type)) { + reqctx->b0_len, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); @@ -2901,7 +2877,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len); - fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type); + fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); @@ -2910,21 +2886,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - error = ccm_format_packet(req, aeadctx, sub_type, op_type); + error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen); if (error) goto dstmap_fail; - - reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, - &reqctx->scratch_pad, reqctx->b0_len, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, - reqctx->b0_dma)) { - error = -ENOMEM; - goto dstmap_fail; - } - - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + @@ -2933,20 +2899,18 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, transhdr_len, temp, 0); reqctx->skb = skb; - reqctx->op = op_type; return skb; dstmap_fail: kfree_skb(skb); err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } static struct sk_buff *create_gcm_wr(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type) + int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); @@ -2966,13 +2930,13 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) assoclen = 
req->assoclen - 8; - reqctx->b0_dma = 0; - error = chcr_aead_common_init(req, op_type); + reqctx->b0_len = 0; + error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); dnents += sg_nents_xlen(req->dst, req->cryptlen + - (op_type ? -authsize : authsize), + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, req->assoclen); dnents += MIN_GCM_SG; // For IV dst_size = get_space_for_phys_dsgl(dnents); @@ -2986,11 +2950,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, - transhdr_len, op_type)) { + transhdr_len, reqctx->op)) { + atomic_inc(&adap->chcr_stats.fallback); - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, - op_type); - return ERR_PTR(chcr_aead_fallback(req, op_type)); + chcr_aead_common_exit(req); + return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { @@ -3001,7 +2965,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, chcr_req = __skb_put_zero(skb, transhdr_len); //Offset of tag from end - temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; + temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( a_ctx(tfm)->dev->rx_channel_id, 2, (assoclen + 1)); @@ -3014,7 +2978,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1, temp, temp); chcr_req->sec_cpl.seqno_numivs = - FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == + FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_GCM, CHCR_SCMD_AUTH_MODE_GHASH, @@ -3040,19 +3004,18 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); - chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); - chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); + chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid); + chcr_add_aead_src_ent(req, ulptx, assoclen); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + (reqctx->imm ? 
(assoclen + IV + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, reqctx->verify); reqctx->skb = skb; - reqctx->op = op_type; return skb; err: - chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type); + chcr_aead_common_exit(req); return ERR_PTR(error); } @@ -3461,6 +3424,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, } { SHASH_DESC_ON_STACK(shash, base_hash); + shash->tfm = base_hash; shash->flags = crypto_shash_get_flags(base_hash); bs = crypto_shash_blocksize(base_hash); @@ -3585,13 +3549,13 @@ out: } static int chcr_aead_op(struct aead_request *req, - unsigned short op_type, int size, create_wr_t create_wr_fn) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct uld_ctx *u_ctx; struct sk_buff *skb; + int isfull = 0; if (!a_ctx(tfm)->dev) { pr_err("chcr : %s : No crypto device.\n", __func__); @@ -3600,13 +3564,13 @@ static int chcr_aead_op(struct aead_request *req, u_ctx = ULD_CTX(a_ctx(tfm)); if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], a_ctx(tfm)->tx_qidx)) { + isfull = 1; if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + return -ENOSPC; } /* Form a WR from req */ - skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size, - op_type); + skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size); if (IS_ERR(skb) || !skb) return PTR_ERR(skb); @@ -3614,7 +3578,7 @@ static int chcr_aead_op(struct aead_request *req, skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx); chcr_send_wr(skb); - return -EINPROGRESS; + return isfull ? -EBUSY : -EINPROGRESS; } static int chcr_aead_encrypt(struct aead_request *req) @@ -3623,21 +3587,19 @@ static int chcr_aead_encrypt(struct aead_request *req) struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); reqctx->verify = VERIFY_HW; + reqctx->op = CHCR_ENCRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_authenc_wr); + return chcr_aead_op(req, 0, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_aead_ccm_wr); + return chcr_aead_op(req, 0, create_aead_ccm_wr); default: - return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0, - create_gcm_wr); + return chcr_aead_op(req, 0, create_gcm_wr); } } @@ -3655,21 +3617,18 @@ static int chcr_aead_decrypt(struct aead_request *req) size = 0; reqctx->verify = VERIFY_HW; } - + reqctx->op = CHCR_DECRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_authenc_wr); + return chcr_aead_op(req, size, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_aead_ccm_wr); + return chcr_aead_op(req, size, create_aead_ccm_wr); default: - return chcr_aead_op(req, CHCR_DECRYPT_OP, size, - create_gcm_wr); + return chcr_aead_op(req, size, create_gcm_wr); } } diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h index dba3dff1e209..1871500309e2 100644 --- a/drivers/crypto/chelsio/chcr_algo.h +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -146,7 +146,7 @@ 
kctx_len) #define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \ (TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\ - sizeof(struct cpl_rx_phys_dsgl)) + sizeof(struct cpl_rx_phys_dsgl) + AES_BLOCK_SIZE) #define HASH_TRANSHDR_SIZE(kctx_len)\ (TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES) @@ -259,7 +259,6 @@ ULP_TX_SC_MORE_V((immdatalen))) #define MAX_NK 8 #define MAX_DSGL_ENT 32 -#define MIN_CIPHER_SG 1 /* IV */ #define MIN_AUTH_SG 1 /* IV */ #define MIN_GCM_SG 1 /* IV */ #define MIN_DIGEST_SG 1 /*Partial Buffer*/ diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 1a20424e18c6..de3a9c085daf 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -56,7 +56,7 @@ #define MAX_SALT 4 #define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \ sizeof(struct cpl_rx_phys_dsgl) + \ - sizeof(struct ulptx_sgl)) + sizeof(struct ulptx_sgl) + 16) //IV #define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \ DUMMY_BYTES + \ diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index c8e8972af283..54835cb109e5 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -190,8 +190,8 @@ struct chcr_aead_reqctx { short int dst_nents; u16 imm; u16 verify; - u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; - unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE]; + u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE]; + u8 *scratch_pad; }; struct ulptx_walk { @@ -295,7 +295,6 @@ struct chcr_blkcipher_req_ctx { unsigned int src_ofst; unsigned int dst_ofst; unsigned int op; - dma_addr_t iv_dma; u16 imm; u8 iv[CHCR_MAX_CRYPTO_IV_LEN]; }; @@ -312,8 +311,7 @@ struct chcr_alg_template { typedef struct sk_buff *(*create_wr_t)(struct aead_request *req, unsigned short qid, - int size, - unsigned short op_type); + int size); void chcr_verify_tag(struct aead_request *req, u8 *input, int *err); int chcr_aead_dma_map(struct device *dev, struct aead_request *req, @@ -322,12 +320,12 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, unsigned short op_type); void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, - unsigned int assoclen, unsigned short op_type, + unsigned int assoclen, unsigned short qid); void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx, - unsigned int assoclen, unsigned short op_type); + unsigned int assoclen); void chcr_add_cipher_src_ent(struct ablkcipher_request *req, - struct ulptx_sgl *ulptx, + void *ulptx, struct cipher_wr_param *wrparam); int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req); void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req); @@ -340,4 +338,5 @@ void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, struct hash_wr_param *param); int chcr_hash_dma_map(struct device *dev, struct ahash_request *req); void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req); +void chcr_aead_common_exit(struct aead_request *req); #endif /* __CHCR_CRYPTO_H__ */ diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 8e0aa3f175c9..461b97e2f1fd 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb, struct net_device *dev, void *pos) { + struct cpl_tx_pkt_core *cpl; + struct sge_eth_txq *q; struct adapter *adap; struct port_info *pi; - struct sge_eth_txq *q; - struct cpl_tx_pkt_core *cpl; - u64 cntrl = 0; 
u32 ctrl0, qidx; + u64 cntrl = 0; + int left; pi = netdev_priv(dev); adap = pi->adapter; qidx = skb->queue_mapping; q = &adap->sge.ethtxq[qidx + pi->first_qset]; + left = (void *)q->q.stat - pos; + if (!left) + pos = q->q.desc; + cpl = (struct cpl_tx_pkt_core *)pos; cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; @@ -382,18 +387,17 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, void *pos, struct ipsec_sa_entry *sa_entry) { - struct adapter *adap; - struct port_info *pi; - struct sge_eth_txq *q; - unsigned int len, qidx; struct _key_ctx *key_ctx; int left, eoq, key_len; + struct sge_eth_txq *q; + struct adapter *adap; + struct port_info *pi; + unsigned int qidx; pi = netdev_priv(dev); adap = pi->adapter; qidx = skb->queue_mapping; q = &adap->sge.ethtxq[qidx + pi->first_qset]; - len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core); key_len = sa_entry->kctx_len; /* end of queue, reset pos to start of queue */ @@ -411,19 +415,14 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb, pos += sizeof(struct _key_ctx); left -= sizeof(struct _key_ctx); - if (likely(len <= left)) { + if (likely(key_len <= left)) { memcpy(key_ctx->key, sa_entry->key, key_len); pos += key_len; } else { - if (key_len <= left) { - memcpy(pos, sa_entry->key, key_len); - pos += key_len; - } else { - memcpy(pos, sa_entry->key, left); - memcpy(q->q.desc, sa_entry->key + left, - key_len - left); - pos = (u8 *)q->q.desc + (key_len - left); - } + memcpy(pos, sa_entry->key, left); + memcpy(q->q.desc, sa_entry->key + left, + key_len - left); + pos = (u8 *)q->q.desc + (key_len - left); } /* Copy CPL TX PKT XT */ pos = copy_cpltx_pktxt(skb, dev, pos); diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h index f4b8f1ec0061..a53a0e6ba024 100644 --- a/drivers/crypto/chelsio/chtls/chtls.h +++ b/drivers/crypto/chelsio/chtls/chtls.h @@ -67,11 +67,6 @@ enum { CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */ }; -#define TLS_RCV_ST_READ_HEADER 0xF0 -#define TLS_RCV_ST_READ_BODY 0xF1 -#define TLS_RCV_ST_READ_DONE 0xF2 -#define TLS_RCV_ST_READ_NB 0xF3 - #define LISTEN_INFO_HASH_SIZE 32 #define RSPQ_HASH_BITS 5 struct listen_info { @@ -149,6 +144,7 @@ struct chtls_dev { struct list_head rcu_node; struct list_head na_node; unsigned int send_page_order; + int max_host_sndbuf; struct key_map kmap; }; @@ -278,6 +274,7 @@ struct tlsrx_cmp_hdr { #define TLSRX_HDR_PKT_MAC_ERROR_F TLSRX_HDR_PKT_MAC_ERROR_V(1U) #define TLSRX_HDR_PKT_ERROR_M 0x1F +#define CONTENT_TYPE_ERROR 0x7F struct ulp_mem_rw { __be32 cmd; @@ -347,8 +344,8 @@ enum { ULPCB_FLAG_HOLD = 1 << 3, /* skb not ready for Tx yet */ ULPCB_FLAG_COMPL = 1 << 4, /* request WR completion */ ULPCB_FLAG_URG = 1 << 5, /* urgent data */ - ULPCB_FLAG_TLS_ND = 1 << 6, /* payload of zero length */ - ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */ + ULPCB_FLAG_TLS_HDR = 1 << 6, /* payload with tls hdr */ + ULPCB_FLAG_NO_HDR = 1 << 7, /* not a ofld wr */ }; /* The ULP mode/submode of an skbuff */ diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 82a473a0cefa..2bb6f0380758 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1537,6 +1537,10 @@ static int chtls_rx_data(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. 
for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_recv_data, sk, skb); return 0; @@ -1585,6 +1589,10 @@ static int chtls_rx_pdu(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_recv_pdu, sk, skb); return 0; @@ -1600,12 +1608,14 @@ static void chtls_set_hdrlen(struct sk_buff *skb, unsigned int nlen) static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) { - struct cpl_rx_tls_cmp *cmp_cpl = cplhdr(skb); + struct tlsrx_cmp_hdr *tls_hdr_pkt; + struct cpl_rx_tls_cmp *cmp_cpl; struct sk_buff *skb_rec; struct chtls_sock *csk; struct chtls_hws *tlsk; struct tcp_sock *tp; + cmp_cpl = cplhdr(skb); csk = rcu_dereference_sk_user_data(sk); tlsk = &csk->tlshws; tp = tcp_sk(sk); @@ -1615,16 +1625,18 @@ static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb) skb_reset_transport_header(skb); __skb_pull(skb, sizeof(*cmp_cpl)); + tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data; + if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M) + tls_hdr_pkt->type = CONTENT_TYPE_ERROR; if (!skb->data_len) - __skb_trim(skb, CPL_RX_TLS_CMP_LENGTH_G - (ntohl(cmp_cpl->pdulength_length))); + __skb_trim(skb, TLS_HEADER_LENGTH); tp->rcv_nxt += CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length)); + ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR; skb_rec = __skb_dequeue(&tlsk->sk_recv_queue); if (!skb_rec) { - ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_ND; __skb_queue_tail(&sk->sk_receive_queue, skb); } else { chtls_set_hdrlen(skb, tlsk->pldlen); @@ -1646,6 +1658,10 @@ static int chtls_rx_cmp(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. for hwtid %u.\n", hwtid); + return -EINVAL; + } skb_dst_set(skb, NULL); process_cpl_msg(chtls_rx_hdr, sk, skb); @@ -2105,6 +2121,10 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb) struct sock *sk; sk = lookup_tid(cdev->tids, hwtid); + if (unlikely(!sk)) { + pr_err("can't find conn. 
for hwtid %u.\n", hwtid); + return -EINVAL; + } process_cpl_msg(chtls_rx_ack, sk, skb); return 0; diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/crypto/chelsio/chtls/chtls_hw.c index 54a13aa99121..55d50140f9e5 100644 --- a/drivers/crypto/chelsio/chtls/chtls_hw.c +++ b/drivers/crypto/chelsio/chtls/chtls_hw.c @@ -213,7 +213,7 @@ static int chtls_key_info(struct chtls_sock *csk, struct _key_ctx *kctx, u32 keylen, u32 optname) { - unsigned char key[CHCR_KEYCTX_CIPHER_KEY_SIZE_256]; + unsigned char key[AES_KEYSIZE_128]; struct tls12_crypto_info_aes_gcm_128 *gcm_ctx; unsigned char ghash_h[AEAD_H_SIZE]; struct crypto_cipher *cipher; @@ -228,10 +228,6 @@ static int chtls_key_info(struct chtls_sock *csk, if (keylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; - } else if (keylen == AES_KEYSIZE_192) { - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; - } else if (keylen == AES_KEYSIZE_256) { - ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { pr_err("GCM: Invalid key length %d\n", keylen); return -EINVAL; diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 5a75be43950f..51fc6821cbbf 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -907,11 +907,83 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk, } /* Read TLS header to find content type and data length */ -static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) +static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) { if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr)) return -EFAULT; - return (__force u16)cpu_to_be16(thdr->length); + return (__force int)cpu_to_be16(thdr->length); +} + +static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) +{ + return (cdev->max_host_sndbuf - sk->sk_wmem_queued); +} + +static int csk_wait_memory(struct chtls_dev *cdev, + struct sock *sk, long *timeo_p) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + int sndbuf, err = 0; + long current_timeo; + long vm_wait = 0; + bool noblock; + + current_timeo = *timeo_p; + noblock = (*timeo_p ? 
false : true); + sndbuf = cdev->max_host_sndbuf; + if (csk_mem_free(cdev, sk)) { + current_timeo = (prandom_u32() % (HZ / 5)) + 2; + vm_wait = (prandom_u32() % (HZ / 5)) + 2; + } + + add_wait_queue(sk_sleep(sk), &wait); + while (1) { + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) + goto do_error; + if (!*timeo_p) { + if (noblock) + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + goto do_nonblock; + } + if (signal_pending(current)) + goto do_interrupted; + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + if (csk_mem_free(cdev, sk) && !vm_wait) + break; + + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + sk->sk_write_pending++; + sk_wait_event(sk, ¤t_timeo, sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + (csk_mem_free(cdev, sk) && !vm_wait), &wait); + sk->sk_write_pending--; + + if (vm_wait) { + vm_wait -= current_timeo; + current_timeo = *timeo_p; + if (current_timeo != MAX_SCHEDULE_TIMEOUT) { + current_timeo -= vm_wait; + if (current_timeo < 0) + current_timeo = 0; + } + vm_wait = 0; + } + *timeo_p = current_timeo; + } +do_rm_wq: + remove_wait_queue(sk_sleep(sk), &wait); + return err; +do_error: + err = -EPIPE; + goto do_rm_wq; +do_nonblock: + err = -EAGAIN; + goto do_rm_wq; +do_interrupted: + err = sock_intr_errno(*timeo_p); + goto do_rm_wq; } int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) @@ -952,6 +1024,8 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) copy = mss - skb->len; skb->ip_summed = CHECKSUM_UNNECESSARY; } + if (!csk_mem_free(cdev, sk)) + goto wait_for_sndbuf; if (is_tls_tx(csk) && !csk->tlshws.txleft) { struct tls_hdr hdr; @@ -1009,9 +1083,10 @@ new_buf: int off = TCP_OFF(sk); bool merge; - if (page) - pg_size <<= compound_order(page); + if (!page) + goto wait_for_memory; + pg_size <<= compound_order(page); if (off < pg_size && skb_can_coalesce(skb, i, page, off)) { merge = 1; @@ -1099,8 +1174,10 @@ copy: if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) push_frames_if_head(sk); continue; +wait_for_sndbuf: + set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: - err = sk_stream_wait_memory(sk, &timeo); + err = csk_wait_memory(cdev, sk, &timeo); if (err) goto do_error; } @@ -1131,6 +1208,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct chtls_sock *csk; + struct chtls_dev *cdev; int mss, err, copied; struct tcp_sock *tp; long timeo; @@ -1138,6 +1216,7 @@ int chtls_sendpage(struct sock *sk, struct page *page, tp = tcp_sk(sk); copied = 0; csk = rcu_dereference_sk_user_data(sk); + cdev = csk->cdev; timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); err = sk_stream_wait_connect(sk, &timeo); @@ -1152,10 +1231,11 @@ int chtls_sendpage(struct sock *sk, struct page *page, struct sk_buff *skb = skb_peek_tail(&csk->txq); int copy, i; - copy = mss - skb->len; if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || - copy <= 0) { + (copy = mss - skb->len) <= 0) { new_buf: + if (!csk_mem_free(cdev, sk)) + goto wait_for_sndbuf; if (is_tls_tx(csk)) { skb = get_record_skb(sk, @@ -1167,7 +1247,7 @@ new_buf: skb = get_tx_skb(sk, 0); } if (!skb) - goto do_error; + goto wait_for_memory; copy = mss; } if (copy > size) @@ -1206,8 +1286,12 @@ new_buf: if (unlikely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)) push_frames_if_head(sk); continue; - +wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); +wait_for_memory: + err = csk_wait_memory(cdev, sk, &timeo); + if (err) + goto do_error; } out: csk_reset_flag(csk, 
CSK_TX_MORE_DATA); @@ -1409,7 +1493,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, break; chtls_cleanup_rbuf(sk, copied); sk_wait_data(sk, &timeo, NULL); - continue; + continue; found_ok_skb: if (!skb->len) { skb_dst_set(skb, NULL); @@ -1449,31 +1533,13 @@ found_ok_skb: } } } - if (hws->rstate == TLS_RCV_ST_READ_BODY) { - if (skb_copy_datagram_msg(skb, offset, - msg, avail)) { - if (!copied) { - copied = -EFAULT; - break; - } - } - } else { - struct tlsrx_cmp_hdr *tls_hdr_pkt = - (struct tlsrx_cmp_hdr *)skb->data; - - if ((tls_hdr_pkt->res_to_mac_error & - TLSRX_HDR_PKT_ERROR_M)) - tls_hdr_pkt->type = 0x7F; - - /* CMP pld len is for recv seq */ - hws->rcvpld = skb->hdr_len; - if (skb_copy_datagram_msg(skb, offset, msg, avail)) { - if (!copied) { - copied = -EFAULT; - break; - } + if (skb_copy_datagram_msg(skb, offset, msg, avail)) { + if (!copied) { + copied = -EFAULT; + break; } } + copied += avail; len -= avail; hws->copied_seq += avail; @@ -1481,32 +1547,20 @@ skip_copy: if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) tp->urg_data = 0; - if (hws->rstate == TLS_RCV_ST_READ_BODY && - (avail + offset) >= skb->len) { + if ((avail + offset) >= skb->len) { if (likely(skb)) chtls_free_skb(sk, skb); buffers_freed++; - hws->rstate = TLS_RCV_ST_READ_HEADER; - atomic_inc(&adap->chcr_stats.tls_pdu_rx); - tp->copied_seq += hws->rcvpld; + if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) { + tp->copied_seq += skb->len; + hws->rcvpld = skb->hdr_len; + } else { + tp->copied_seq += hws->rcvpld; + } hws->copied_seq = 0; if (copied >= target && !skb_peek(&sk->sk_receive_queue)) break; - } else { - if (likely(skb)) { - if (ULP_SKB_CB(skb)->flags & - ULPCB_FLAG_TLS_ND) - hws->rstate = - TLS_RCV_ST_READ_HEADER; - else - hws->rstate = - TLS_RCV_ST_READ_BODY; - chtls_free_skb(sk, skb); - } - buffers_freed++; - tp->copied_seq += avail; - hws->copied_seq = 0; } } while (len > 0); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 007c45c38fc7..9b07f9165658 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -216,7 +216,6 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) cdev->lldi = lldi; cdev->pdev = lldi->pdev; cdev->tids = lldi->tids; - cdev->ports = (struct net_device **)(cdev + 1); cdev->ports = lldi->ports; cdev->mtus = lldi->mtus; cdev->tids = lldi->tids; @@ -239,6 +238,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) spin_lock_init(&cdev->idr_lock); cdev->send_page_order = min_t(uint, get_order(32768), send_page_order); + cdev->max_host_sndbuf = 48 * 1024; if (lldi->vr->key.size) if (chtls_init_kmap(cdev, lldi)) @@ -250,7 +250,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info *info) return cdev; out_rspq_skb: - for (j = 0; j <= i; j++) + for (j = 0; j < i; j++) kfree_skb(cdev->rspq_skb_cache[j]); kfree_skb(cdev->askb); out_skb: @@ -441,7 +441,7 @@ nomem: static int do_chtls_getsockopt(struct sock *sk, char __user *optval, int __user *optlen) { - struct tls_crypto_info crypto_info; + struct tls_crypto_info crypto_info = { 0 }; crypto_info.version = TLS_1_2_VERSION; if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info))) @@ -491,9 +491,13 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, switch (tmp_crypto_info.cipher_type) { case TLS_CIPHER_AES_GCM_128: { - rc = copy_from_user(crypto_info, optval, - sizeof(struct - tls12_crypto_info_aes_gcm_128)); + /* Obtain version and type from 
previous copy */ crypto_info[0] = tmp_crypto_info; /* Now copy the following data */ rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), optval + sizeof(*crypto_info), sizeof(struct tls12_crypto_info_aes_gcm_128) - sizeof(*crypto_info)); if (rc) { rc = -EFAULT;