Diffstat (limited to 'drivers/crypto/chelsio/chcr_algo.c')
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 2017
1 file changed, 1742 insertions(+), 275 deletions(-)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e4ddb921d7b3..2ed1e24b44a8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "t4fw_api.h"
@@ -62,6 +68,11 @@
#include "chcr_algo.h"
#include "chcr_crypto.h"
+static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+ return ctx->crypto_ctx->aeadctx;
+}
+
static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
return ctx->crypto_ctx->hmacctx;
}
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+ return gctx->ctx->authenc;
+}
+
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+ u8 temp[SHA512_DIGEST_SIZE];
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ int authsize = crypto_aead_authsize(tfm);
+ struct cpl_fw6_pld *fw6_pld;
+ int cmp = 0;
+
+ fw6_pld = (struct cpl_fw6_pld *)input;
+ if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+ (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+ cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+ } else {
+
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+ authsize, req->assoclen +
+ req->cryptlen - authsize);
+ cmp = memcmp(temp, (fw6_pld + 1), authsize);
+ }
+ if (cmp)
+ *err = -EBADMSG;
+ else
+ *err = 0;
+}
+
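A note on the software path in chcr_verify_tag(): memcmp() is not a
constant-time comparison, and the kernel provides crypto_memneq() for
exactly this purpose. A hardened variant of the check could look like the
following sketch (an illustration, not part of this patch):

	if (crypto_memneq(temp, fw6_pld + 1, authsize))
		*err = -EBADMSG;
	else
		*err = 0;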
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
*/
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
- int error_status)
+ int err)
{
struct crypto_tfm *tfm = req->tfm;
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,17 +155,33 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
unsigned int digestsize, updated_digestsize;
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ ctx_req.req.aead_req = (struct aead_request *)req;
+ ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+ dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+ ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+ if (ctx_req.ctx.reqctx->skb) {
+ kfree_skb(ctx_req.ctx.reqctx->skb);
+ ctx_req.ctx.reqctx->skb = NULL;
+ }
+ if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(ctx_req.req.aead_req, input,
+ &err);
+ ctx_req.ctx.reqctx->verify = VERIFY_HW;
+ }
+ break;
+
case CRYPTO_ALG_TYPE_BLKCIPHER:
ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
ctx_req.ctx.ablk_ctx =
ablkcipher_request_ctx(ctx_req.req.ablk_req);
- if (!error_status) {
+ if (!err) {
fw6_pld = (struct cpl_fw6_pld *)input;
memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
AES_BLOCK_SIZE);
}
dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
- ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+ ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
if (ctx_req.ctx.ablk_ctx->skb) {
kfree_skb(ctx_req.ctx.ablk_ctx->skb);
ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +200,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- if (ctx_req.ctx.ahash_ctx->skb)
+ if (ctx_req.ctx.ahash_ctx->skb) {
+ kfree_skb(ctx_req.ctx.ahash_ctx->skb);
ctx_req.ctx.ahash_ctx->skb = NULL;
+ }
if (ctx_req.ctx.ahash_ctx->result == 1) {
ctx_req.ctx.ahash_ctx->result = 0;
memcpy(ctx_req.req.ahash_req->result, input +
@@ -150,11 +214,9 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
sizeof(struct cpl_fw6_pld),
updated_digestsize);
}
- kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
- ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
break;
}
- return 0;
+ return err;
}
/*
@@ -178,40 +240,81 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
return flits + sgl_len(cnt);
}
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static inline void get_aes_decrypt_key(unsigned char *dec_key,
+ const unsigned char *key,
+ unsigned int keylength)
+{
+ u32 temp;
+ u32 w_ring[MAX_NK];
+ int i, j, k;
+ u8 nr, nk;
+
+ switch (keylength) {
+ case AES_KEYLENGTH_128BIT:
+ nk = KEYLENGTH_4BYTES;
+ nr = NUMBER_OF_ROUNDS_10;
+ break;
+ case AES_KEYLENGTH_192BIT:
+ nk = KEYLENGTH_6BYTES;
+ nr = NUMBER_OF_ROUNDS_12;
+ break;
+ case AES_KEYLENGTH_256BIT:
+ nk = KEYLENGTH_8BYTES;
+ nr = NUMBER_OF_ROUNDS_14;
+ break;
+ default:
+ return;
+ }
+ for (i = 0; i < nk; i++)
+ w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+ i = 0;
+ temp = w_ring[nk - 1];
+ while (i + nk < (nr + 1) * 4) {
+ if (!(i % nk)) {
+ /* RotWord(temp) */
+ temp = (temp << 8) | (temp >> 24);
+ temp = aes_ks_subword(temp);
+ temp ^= round_constant[i / nk];
+ } else if (nk == 8 && (i % 4 == 0)) {
+ temp = aes_ks_subword(temp);
+ }
+ w_ring[i % nk] ^= temp;
+ temp = w_ring[i % nk];
+ i++;
+ }
+ i--;
+ for (k = 0, j = i % nk; k < nk; k++) {
+ *((u32 *)dec_key + k) = htonl(w_ring[j]);
+ j--;
+ if (j < 0)
+ j += nk;
+ }
+}
+
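get_aes_decrypt_key() runs the forward AES key expansion in an nk-word ring
buffer, keeping only the last nk words and copying them out in reverse: that
is the final round key, which the hardware consumes directly for decryption.
A standalone check of the index arithmetic for AES-128 (nk = 4, nr = 10),
independent of the kernel code:

	#include <stdio.h>

	int main(void)
	{
		int nk = 4, nr = 10, i = 0, j, k;

		while (i + nk < (nr + 1) * 4)	/* expansion covers w[4]..w[43] */
			i++;
		i--;				/* last offset generated: 39 */
		for (k = 0, j = i % nk; k < nk; k++) {
			/* for these parameters, ring slot j holds w[40 + j] */
			printf("dec_key[%d] = w[%d]\n", k, 40 + j);
			if (--j < 0)
				j += nk;
		}
		return 0;	/* prints w[43], w[42], w[41], w[40] */
	}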
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
struct crypto_shash *base_hash = NULL;
- struct shash_desc *desc;
switch (ds) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha1", 0, 0);
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha224", 0, 0);
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha256", 0, 0);
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha384", 0, 0);
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+ base_hash = crypto_alloc_shash("sha512", 0, 0);
break;
}
- if (IS_ERR(base_hash)) {
- pr_err("Can not allocate sha-generic algo.\n");
- return (void *)base_hash;
- }
- desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
- GFP_KERNEL);
- if (!desc)
- return ERR_PTR(-ENOMEM);
- desc->tfm = base_hash;
- desc->flags = crypto_shash_get_flags(base_hash);
- return desc;
+ return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -279,31 +382,18 @@ static inline int is_hmac(struct crypto_tfm *tfm)
struct chcr_alg_template *chcr_crypto_alg =
container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
alg.hash);
- if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
- CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+ if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
return 1;
return 0;
}
-static inline unsigned int ch_nents(struct scatterlist *sg,
- unsigned int *total_size)
-{
- unsigned int nents;
-
- for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
- nents++;
- *total_size += sg->length;
- }
- return nents;
-}
-
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
struct scatterlist *sg,
struct phys_sge_parm *sg_param)
{
struct phys_sge_pairs *to;
- unsigned int out_buf_size = sg_param->obsize;
- unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+ int out_buf_size = sg_param->obsize;
+ unsigned int nents = sg_param->nents, i, j = 0;
phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
| CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -321,25 +411,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
sizeof(struct cpl_rx_phys_dsgl));
for (i = 0; nents; to++) {
- for (j = i; (nents && (j < (8 + i))); j++, nents--) {
- to->len[j] = htons(sg->length);
+ for (j = 0; j < 8 && nents; j++, nents--) {
+ out_buf_size -= sg_dma_len(sg);
+ to->len[j] = htons(sg_dma_len(sg));
to->addr[j] = cpu_to_be64(sg_dma_address(sg));
- if (out_buf_size) {
- if (tot_len + sg_dma_len(sg) >= out_buf_size) {
- to->len[j] = htons(out_buf_size -
- tot_len);
- return;
- }
- tot_len += sg_dma_len(sg);
- }
sg = sg_next(sg);
}
}
+ if (out_buf_size) {
+ j--;
+ to--;
+ to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+ }
}
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
- struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct scatterlist *sg,
+ struct phys_sge_parm *sg_param)
{
if (!sg || !sg_param->nents)
return 0;
@@ -353,6 +442,14 @@ int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
return 0;
}
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -362,8 +459,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+ unsigned int *frags,
+ char *bfr,
+ u8 bfr_len)
+{
+ skb->len += bfr_len;
+ skb->data_len += bfr_len;
+ skb->truesize += bfr_len;
+ get_page(virt_to_page(bfr));
+ skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+ offset_in_page(bfr), bfr_len);
+ (*frags)++;
+}
+
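write_buffer_to_skb() attaches a driver-owned buffer to the skb as a page
fragment instead of copying it; the get_page() bumps the page refcount so
that the eventual put_page() issued when the skb frags are released does not
free memory that is still owned by the request context.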
+
static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
struct scatterlist *sg, unsigned int count)
{
struct page *spage;
@@ -372,8 +484,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
skb->len += count;
skb->data_len += count;
skb->truesize += count;
+
while (count > 0) {
- if (sg && (!(sg->length)))
+ if (!sg || (!(sg->length)))
break;
spage = sg_page(sg);
get_page(spage);
@@ -389,29 +502,25 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
struct _key_ctx *key_ctx)
{
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- get_aes_decrypt_key(key_ctx->key, ablkctx->key,
- ablkctx->enckey_len << 3);
- memset(key_ctx->key + ablkctx->enckey_len, 0,
- CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+ memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
} else {
memcpy(key_ctx->key,
ablkctx->key + (ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
- ablkctx->key, ablkctx->enckey_len << 2);
+ memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+ ablkctx->rrkey, ablkctx->enckey_len >> 1);
}
return 0;
}
static inline void create_wreq(struct chcr_context *ctx,
- struct fw_crypto_lookaside_wr *wreq,
+ struct chcr_wr *chcr_req,
void *req, struct sk_buff *skb,
int kctx_len, int hash_sz,
- unsigned int phys_dsgl)
+ int is_iv,
+ unsigned int sc_len)
{
struct uld_ctx *u_ctx = ULD_CTX(ctx);
- struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
- struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
int iv_loc = IV_DSGL;
int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
unsigned int immdatalen = 0, nr_frags = 0;
@@ -423,27 +532,27 @@ static inline void create_wreq(struct chcr_context *ctx,
nr_frags = skb_shinfo(skb)->nr_frags;
}
- wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
- (kctx_len >> 4));
- wreq->pld_size_hash_size =
+ chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+ ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+ chcr_req->wreq.pld_size_hash_size =
htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
- wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+ chcr_req->wreq.len16_pkd =
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
(calc_tx_flits_ofld(skb) * 8), 16)));
- wreq->cookie = cpu_to_be64((uintptr_t)req);
- wreq->rx_chid_to_rx_q_id =
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
- (hash_sz) ? IV_NOP : iv_loc);
+ is_iv ? iv_loc : IV_NOP);
- ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
- ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
- 16) - ((sizeof(*wreq)) >> 4)));
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+ chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+ 16) - ((sizeof(chcr_req->wreq)) >> 4)));
- sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
- sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
- ((hash_sz) ? DUMMY_BYTES :
- (sizeof(struct cpl_rx_phys_dsgl) +
- phys_dsgl)) + immdatalen);
+ chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(chcr_req->key_ctx) +
+ kctx_len + sc_len + immdatalen);
}
/**
@@ -454,86 +563,83 @@ static inline void create_wreq(struct chcr_context *ctx,
* @op_type: encryption or decryption
*/
static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
- struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+ unsigned short qid,
unsigned short op_type)
{
- struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
- struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct phys_sge_parm sg_param;
- unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+ unsigned int frags = 0, transhdr_len, phys_dsgl;
unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
if (!req->info)
return ERR_PTR(-EINVAL);
- ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
- ablkctx->enc = op_type;
-
+ reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AES:Invalid Destination sg lists\n");
+ return ERR_PTR(-EINVAL);
+ }
if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
- (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+ (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+ pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+ ablkctx->enckey_len, req->nbytes, ivsize);
return ERR_PTR(-EINVAL);
+ }
- phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+ phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
- kctx_len = sizeof(*key_ctx) +
- (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+ kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return ERR_PTR(-ENOMEM);
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
- sec_cpl->pldlen = htonl(ivsize + req->nbytes);
- sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
- ivsize + 1, 0);
-
- sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0,
- 0, 0);
- sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+ FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
ablkctx->ciph_mode,
- 0, 0, ivsize >> 1, 1);
- sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+ 0, 0, ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
0, 1, phys_dsgl);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+ chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
if (op_type == CHCR_DECRYPT_OP) {
- if (generate_copy_rrkey(ablkctx, key_ctx))
- goto map_fail1;
+ generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
} else {
if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
- memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key, ablkctx->key,
+ ablkctx->enckey_len);
} else {
- memcpy(key_ctx->key, ablkctx->key +
+ memcpy(chcr_req->key_ctx.key, ablkctx->key +
(ablkctx->enckey_len >> 1),
ablkctx->enckey_len >> 1);
- memcpy(key_ctx->key +
+ memcpy(chcr_req->key_ctx.key +
(ablkctx->enckey_len >> 1),
ablkctx->key,
ablkctx->enckey_len >> 1);
}
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
- memcpy(ablkctx->iv, req->info, ivsize);
- sg_init_table(&ablkctx->iv_sg, 1);
- sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
- sg_param.nents = ablkctx->dst_nents;
- sg_param.obsize = dst_bufsize;
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->nbytes;
sg_param.qid = qid;
sg_param.align = 1;
if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -541,10 +647,12 @@ static struct sk_buff
goto map_fail1;
skb_set_transport_header(skb, transhdr_len);
- write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
- write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
- create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
- req_ctx->skb = skb;
+ memcpy(reqctx->iv, req->info, ivsize);
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+ reqctx->skb = skb;
skb_get(skb);
return skb;
map_fail1:
@@ -557,15 +665,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
unsigned int ck_size, context_size;
u16 alignment = 0;
- if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
- goto badkey_err;
-
- memcpy(ablkctx->key, key, keylen);
- ablkctx->enckey_len = keylen;
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
@@ -576,7 +678,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
} else {
goto badkey_err;
}
-
+ memcpy(ablkctx->key, key, keylen);
+ ablkctx->enckey_len = keylen;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
keylen + alignment) >> 4;
@@ -592,16 +696,18 @@ badkey_err:
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
- int ret = 0;
- struct sge_ofld_txq *q;
struct adapter *adap = netdev2adap(dev);
+ struct sge_uld_txq_info *txq_info =
+ adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+ struct sge_uld_txq *txq;
+ int ret = 0;
local_bh_disable();
- q = &adap->sge.ofldtxq[idx];
- spin_lock(&q->sendq.lock);
- if (q->full)
+ txq = &txq_info->uldtxq[idx];
+ spin_lock(&txq->sendq.lock);
+ if (txq->full)
ret = -1;
- spin_unlock(&q->sendq.lock);
+ spin_unlock(&txq->sendq.lock);
local_bh_enable();
return ret;
}
@@ -610,7 +716,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -620,8 +725,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx,
- u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
CHCR_ENCRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -637,7 +741,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
- struct crypto_async_request *req_base = &req->base;
struct uld_ctx *u_ctx = ULD_CTX(ctx);
struct sk_buff *skb;
@@ -647,7 +750,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -EBUSY;
}
- skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+ skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
CHCR_DECRYPT_OP);
if (IS_ERR(skb)) {
pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -674,11 +777,11 @@ static int chcr_device_init(struct chcr_context *ctx)
}
u_ctx = ULD_CTX(ctx);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
- ctx->dev->tx_channel_id = 0;
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_channel_id = rxq_idx;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
spin_unlock(&ctx->dev->lock_chcr_dev);
}
out:
@@ -727,50 +830,33 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
- struct sk_buff *skb, unsigned int *frags, char *bfr,
- u8 bfr_len)
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
- void *page_ptr = NULL;
-
- skb->len += bfr_len;
- skb->data_len += bfr_len;
- skb->truesize += bfr_len;
- page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
- if (!page_ptr)
- return -ENOMEM;
- get_page(virt_to_page(page_ptr));
- req_ctx->dummy_payload_ptr = page_ptr;
- memcpy(page_ptr, bfr, bfr_len);
- skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
- offset_in_page(page_ptr), bfr_len);
- (*frags)++;
- return 0;
+ crypto_free_shash(base_hash);
}
/**
- * create_final_hash_wr - Create hash work request
+ * create_hash_wr - Create hash work request
* @req - Cipher req base
*/
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
- struct hash_wr_param *param)
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
+ struct hash_wr_param *param)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
struct sk_buff *skb = NULL;
- struct _key_ctx *key_ctx;
- struct fw_crypto_lookaside_wr *wreq;
- struct cpl_tx_sec_pdu *sec_cpl;
+ struct chcr_wr *chcr_req;
unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int kctx_len = sizeof(*key_ctx);
+ unsigned int kctx_len = 0;
u8 hash_size_in_response = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
- kctx_len += param->alg_prm.result_size + iopad_alignment;
+ kctx_len = param->alg_prm.result_size + iopad_alignment;
if (param->opad_needed)
kctx_len += param->alg_prm.result_size + iopad_alignment;
@@ -779,54 +865,54 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
else
hash_size_in_response = param->alg_prm.result_size;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
- GFP_ATOMIC);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
if (!skb)
return skb;
skb_reserve(skb, sizeof(struct sge_opaque_hdr));
- wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
- memset(wreq, 0, transhdr_len);
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
- sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
- sec_cpl->op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
- sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
- sec_cpl->aadstart_cipherstop_hi =
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
- sec_cpl->cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
- sec_cpl->seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
- param->opad_needed, 0, 0);
+ param->opad_needed, 0);
- sec_cpl->ivgen_hdrlen =
+ chcr_req->sec_cpl.ivgen_hdrlen =
FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
- key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
- memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+ memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+ param->alg_prm.result_size);
if (param->opad_needed)
- memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
- CHCR_HASH_MAX_DIGEST_SIZE),
+ memcpy(chcr_req->key_ctx.key +
+ ((param->alg_prm.result_size <= 32) ? 32 :
+ CHCR_HASH_MAX_DIGEST_SIZE),
hmacctx->opad, param->alg_prm.result_size);
- key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+ chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
param->alg_prm.mk_size, 0,
param->opad_needed,
- (kctx_len >> 4));
- sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+ ((kctx_len +
+ sizeof(chcr_req->key_ctx)) >> 4));
+ chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
skb_set_transport_header(skb, transhdr_len);
if (param->bfr_len != 0)
- write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
- param->bfr_len);
+ write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+ param->bfr_len);
if (param->sg_len != 0)
- write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+ write_sg_to_skb(skb, &frags, req->src, param->sg_len);
- create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
- 0);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+ DUMMY_BYTES);
req_ctx->skb = skb;
skb_get(skb);
return skb;
@@ -852,34 +938,40 @@ static int chcr_ahash_update(struct ahash_request *req)
return -EBUSY;
}
- if (nbytes + req_ctx->bfr_len >= bs) {
- remainder = (nbytes + req_ctx->bfr_len) % bs;
- nbytes = nbytes + req_ctx->bfr_len - remainder;
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+ nbytes = nbytes + req_ctx->reqlen - remainder;
} else {
- sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
- req_ctx->bfr_len, nbytes, 0);
- req_ctx->bfr_len += nbytes;
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+ + req_ctx->reqlen, nbytes, 0);
+ req_ctx->reqlen += nbytes;
return 0;
}
params.opad_needed = 0;
params.more = 1;
params.last = 0;
- params.sg_len = nbytes - req_ctx->bfr_len;
- params.bfr_len = req_ctx->bfr_len;
+ params.sg_len = nbytes - req_ctx->reqlen;
+ params.bfr_len = req_ctx->reqlen;
params.scmd1 = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 0;
req_ctx->data_len += params.sg_len + params.bfr_len;
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
- req_ctx->bfr_len = remainder;
- if (remainder)
+ if (remainder) {
+ u8 *temp;
+ /* Swap buffers */
+ temp = req_ctx->reqbfr;
+ req_ctx->reqbfr = req_ctx->skbfr;
+ req_ctx->skbfr = temp;
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- req_ctx->bfr, remainder, req->nbytes -
+ req_ctx->reqbfr, remainder, req->nbytes -
remainder);
+ }
+ req_ctx->reqlen = remainder;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -915,10 +1007,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.sg_len = 0;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->result = 1;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if (req_ctx->reqlen == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -929,7 +1021,10 @@ static int chcr_ahash_final(struct ahash_request *req)
params.last = 1;
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
+ if (!skb)
+ return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -961,12 +1056,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.opad_needed = 0;
params.sg_len = req->nbytes;
- params.bfr_len = req_ctx->bfr_len;
+ params.bfr_len = req_ctx->reqlen;
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
req_ctx->data_len += params.bfr_len + params.sg_len;
req_ctx->result = 1;
- if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
- create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+ if ((req_ctx->reqlen + req->nbytes) == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
params.last = 0;
params.more = 1;
params.scmd1 = 0;
@@ -977,9 +1072,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
params.more = 0;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
+
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
chcr_send_wr(skb);
@@ -1021,13 +1117,13 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->result = 1;
req_ctx->data_len += params.bfr_len + params.sg_len;
- if (req_ctx->bfr && req->nbytes == 0) {
- create_last_hash_block(req_ctx->bfr, bs, 0);
+ if (req->nbytes == 0) {
+ create_last_hash_block(req_ctx->reqbfr, bs, 0);
params.more = 1;
params.bfr_len = bs;
}
- skb = create_final_hash_wr(req, &params);
+ skb = create_hash_wr(req, &params);
if (!skb)
return -ENOMEM;
@@ -1042,12 +1138,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = out;
- state->bfr_len = req_ctx->bfr_len;
+ state->reqlen = req_ctx->reqlen;
state->data_len = req_ctx->data_len;
- memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1055,10 +1151,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
- req_ctx->bfr_len = state->bfr_len;
+ req_ctx->reqlen = state->reqlen;
req_ctx->data_len = state->data_len;
- req_ctx->dummy_payload_ptr = NULL;
- memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
+ memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
memcpy(req_ctx->partial_hash, state->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
return 0;
@@ -1073,15 +1170,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
unsigned int i, err = 0, updated_digestsize;
- /*
- * use the key to calculate the ipad and opad. ipad will sent with the
+ SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+ /* use the key to calculate the ipad and opad. ipad will be sent with the
 * first request's data. opad will be sent with the final hash result.
 * ipad is stored in hmacctx->ipad and opad in hmacctx->opad.
*/
- if (!hmacctx->desc)
- return -EINVAL;
+ shash->tfm = hmacctx->base_hash;
+ shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
- err = crypto_shash_digest(hmacctx->desc, key, keylen,
+ err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
if (err)
goto out;
@@ -1102,13 +1200,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
updated_digestsize = SHA256_DIGEST_SIZE;
else if (digestsize == SHA384_DIGEST_SIZE)
updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+ err = chcr_compute_partial_hash(shash, hmacctx->ipad,
hmacctx->ipad, digestsize);
if (err)
goto out;
chcr_change_order(hmacctx->ipad, updated_digestsize);
- err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+ err = chcr_compute_partial_hash(shash, hmacctx->opad,
hmacctx->opad, digestsize);
if (err)
goto out;
@@ -1122,28 +1220,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
{
struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- int status = 0;
unsigned short context_size = 0;
- if ((key_len == (AES_KEYSIZE_128 << 1)) ||
- (key_len == (AES_KEYSIZE_256 << 1))) {
- memcpy(ablkctx->key, key, key_len);
- ablkctx->enckey_len = key_len;
- context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
- ablkctx->key_ctx_hdr =
- FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
- CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
- CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
- CHCR_KEYCTX_NO_KEY, 1,
- 0, context_size);
- ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
- } else {
+ if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+ (key_len != (AES_KEYSIZE_256 << 1))) {
crypto_tfm_set_flags((struct crypto_tfm *)tfm,
CRYPTO_TFM_RES_BAD_KEY_LEN);
ablkctx->enckey_len = 0;
- status = -EINVAL;
+ return -EINVAL;
+
}
- return status;
+
+ memcpy(ablkctx->key, key, key_len);
+ ablkctx->enckey_len = key_len;
+ get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+ context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+ ablkctx->key_ctx_hdr =
+ FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+ CHCR_KEYCTX_NO_KEY, 1,
+ 0, context_size);
+ ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+ return 0;
}
static int chcr_sha_init(struct ahash_request *areq)
@@ -1153,8 +1252,9 @@ static int chcr_sha_init(struct ahash_request *areq)
int digestsize = crypto_ahash_digestsize(tfm);
req_ctx->data_len = 0;
- req_ctx->dummy_payload_ptr = NULL;
- req_ctx->bfr_len = 0;
+ req_ctx->reqlen = 0;
+ req_ctx->reqbfr = req_ctx->bfr1;
+ req_ctx->skbfr = req_ctx->bfr2;
req_ctx->skb = NULL;
req_ctx->result = 0;
copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@ -1202,29 +1302,1184 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->desc = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->desc))
- return PTR_ERR(hmacctx->desc);
+ hmacctx->base_hash = chcr_alloc_shash(digestsize);
+ if (IS_ERR(hmacctx->base_hash))
+ return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_free_shash(struct shash_desc *desc)
-{
- crypto_free_shash(desc->tfm);
- kfree(desc);
-}
-
static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- if (hmacctx->desc) {
- chcr_free_shash(hmacctx->desc);
- hmacctx->desc = NULL;
+ if (hmacctx->base_hash) {
+ chcr_free_shash(hmacctx->base_hash);
+ hmacctx->base_hash = NULL;
+ }
+}
+
+static int chcr_copy_assoc(struct aead_request *req,
+ struct chcr_aead_ctx *ctx)
+{
+ SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+ skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_callback(skreq, aead_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+ NULL);
+
+ return crypto_skcipher_encrypt(skreq);
+}
+
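chcr_copy_assoc() uses the kernel's default null skcipher (obtained in
chcr_aead_cra_init() further down) as a scatterlist-aware memcpy:
"encrypting" req->assoclen bytes with ecb(cipher_null) simply copies the
associated data from req->src to req->dst, which is needed when the caller
supplies distinct source and destination buffers.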
+static unsigned char get_hmac(unsigned int authsize)
+{
+ switch (authsize) {
+ case ICV_8:
+ return CHCR_SCMD_HMAC_CTRL_PL1;
+ case ICV_10:
+ return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ case ICV_12:
+ return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ }
+ return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+ unsigned int kctx_len = 0;
+ unsigned short stop_offset = 0;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ null = 1;
+ assoclen = 0;
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("AUTHENC:Invalid Destination sg entries\n");
+ goto err;
+ }
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* LLD is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ /* Write WR */
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+ /*
+ * Input order is AAD, IV and payload, where the IV is included as
+ * part of the auth data. All other fields are filled according
+ * to the hardware spec.
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+ FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+ (ivsize ? (assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ assoclen ? 1 : 0, assoclen,
+ assoclen + ivsize + 1,
+ (stop_offset & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ stop_offset & 0xF,
+ null ? 0 : assoclen + ivsize + 1,
+ stop_offset, stop_offset);
+ chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ actx->auth_mode, aeadctx->hmac_ctrl,
+ ivsize >> 1);
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ if (op_type == CHCR_ENCRYPT_OP)
+ memcpy(chcr_req->key_ctx.key, aeadctx->key,
+ aeadctx->enckey_len);
+ else
+ memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+ aeadctx->enckey_len);
+
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+ 4), actx->h_iopad, kctx_len -
+ (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ if (assoclen) {
+ /* AAD buffer in */
+ write_sg_to_skb(skb, &frags, req->src, assoclen);
+
+ }
+ write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+
+ return skb;
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+ unsigned short offset)
+{
+ struct page *spage;
+ unsigned char *addr;
+
+ spage = sg_page(sg);
+ get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+ addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+ addr = kmap_atomic(spage);
+#endif
+ memset(addr + sg->offset, 0, offset + 1);
+
+ kunmap_atomic(addr);
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+ __be32 data;
+
+ memset(block, 0, csize);
+ block += csize;
+
+ if (csize >= 4)
+ csize = 4;
+ else if (msglen > (unsigned int)(1 << (8 * csize)))
+ return -EOVERFLOW;
+
+ data = cpu_to_be32(msglen);
+ memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+ return 0;
+}
+
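set_msg_len() stores the CCM length field l(m) big-endian in the last csize
bytes of the B0 block and rejects messages that do not fit in that field. A
userspace mirror of the routine, usable to check the encoding (a sketch, not
kernel code):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	static int set_msg_len(uint8_t *block, unsigned int msglen, int csize)
	{
		uint32_t data;

		memset(block, 0, csize);
		block += csize;
		if (csize >= 4)
			csize = 4;
		else if (msglen > (unsigned int)(1 << (8 * csize)))
			return -1;	/* -EOVERFLOW in the kernel version */
		data = htonl(msglen);	/* cpu_to_be32() in the kernel */
		memcpy(block - csize, (uint8_t *)&data + 4 - csize, csize);
		return 0;
	}

	int main(void)
	{
		uint8_t len_field[4];
		int i;

		set_msg_len(len_field, 65536, 4);	/* L' = 3: 4-byte field */
		for (i = 0; i < 4; i++)
			printf("%02x ", len_field[i]);	/* 00 01 00 00 */
		printf("\n");
		return 0;
	}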
+static void generate_b0(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned short op_type)
+{
+ unsigned int l, lp, m;
+ int rc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ u8 *b0 = reqctx->scratch_pad;
+
+ m = crypto_aead_authsize(aead);
+
+ memcpy(b0, reqctx->iv, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+
+ /* set m, bits 3-5 */
+ *b0 |= (8 * ((m - 2) / 2));
+
+ /* set adata, bit 6, if associated data is used */
+ if (req->assoclen)
+ *b0 |= 64;
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
+}
+
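Worked example for the B0 flags byte assembled above (RFC 3610): b0[0]
arrives from the IV already holding L' = L - 1, so for an RFC 4309 request
(iv[0] = 3), a 16-byte tag (m = 16) and non-empty AAD:

	b0[0] = 0x03 | (8 * ((16 - 2) / 2)) | 0x40 = 0x03 | 0x38 | 0x40 = 0x7b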
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type,
+ unsigned short op_type)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int rc = 0;
+
+ if (req->assoclen > T5_MAX_AAD_SIZE) {
+ pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+ T5_MAX_AAD_SIZE);
+ return -EINVAL;
+ }
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ reqctx->iv[0] = 3;
+ memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ memset(reqctx->iv + 12, 0, 4);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen - 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 16);
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(req->assoclen);
+ }
+ generate_b0(req, aeadctx, op_type);
+ /* zero the ctr value */
+ memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+ unsigned int dst_size,
+ struct aead_request *req,
+ unsigned short op_type,
+ struct chcr_context *chcrctx)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+ unsigned int c_id = chcrctx->dev->tx_channel_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+ unsigned int assoclen;
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ assoclen = req->assoclen - 8;
+ else
+ assoclen = req->assoclen;
+ ccm_xtra = CCM_B0_SIZE +
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+ (assoclen + ivsize + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+ else
+ auth_offset = 0;
+ }
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+ 2, (ivsize ? (assoclen + 1) : 0) +
+ ccm_xtra);
+ sec_cpl->pldlen =
+ htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+ /* For CCM there will always be a B0 block, so the AAD start is always 1 */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ 1, assoclen + ccm_xtra, assoclen
+ + ivsize + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 :
+ crypto_aead_authsize(tfm));
+ sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+ cipher_mode, mac_mode, hmac_ctrl,
+ ivsize >> 1);
+
+ sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+ 1, dst_size);
+}
+
+int aead_ccm_validate_input(unsigned short op_type,
+ struct aead_request *req,
+ struct chcr_aead_ctx *aeadctx,
+ unsigned int sub_type)
+{
+ if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+ if (crypto_ccm_check_iv(req->iv)) {
+ pr_err("CCM: IV check fails\n");
+ return -EINVAL;
+ }
+ } else {
+ if (req->assoclen != 16 && req->assoclen != 20) {
+ pr_err("RFC4309: Invalid AAD length %d\n",
+ req->assoclen);
+ return -EINVAL;
+ }
+ }
+ if (aeadctx->enckey_len == 0) {
+ pr_err("CCM: Encryption key not set\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+unsigned int fill_aead_req_fields(struct sk_buff *skb,
+ struct aead_request *req,
+ struct scatterlist *src,
+ unsigned int ivsize,
+ struct chcr_aead_ctx *aeadctx)
+{
+ unsigned int frags = 0;
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ /* b0 and aad length(if available) */
+
+ write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+ (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
+ if (req->assoclen) {
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+ write_sg_to_skb(skb, &frags, req->src,
+ req->assoclen - 8);
+ else
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+ }
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+ if (req->cryptlen)
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+ return frags;
+}
+
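Together with ccm_format_packet() above, fill_aead_req_fields() lays the CCM
input out exactly as the hardware expects it:

	B0 (16 bytes) | AAD length (2 bytes, if AAD present) | AAD | IV | payload

where RFC 4309 requests drop the trailing 8 bytes of the associated data,
since those duplicate the explicit IV that is written separately.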
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned int sub_type;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+ sub_type = get_aead_subtype(tfm);
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err) {
+ pr_err("AAD copy to destination buffer fails\n");
+ return ERR_PTR(err);
+ }
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("CCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+ goto err;
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+
+ if (!skb)
+ goto err;
+
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+ goto dstmap_fail;
+
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+ frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+dstmap_fail:
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+ unsigned short qid,
+ int size,
+ unsigned short op_type)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct uld_ctx *u_ctx = ULD_CTX(ctx);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct sk_buff *skb = NULL;
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct phys_sge_parm sg_param;
+ struct scatterlist *src, *dst;
+ struct scatterlist src_sg[2], dst_sg[2];
+ unsigned int frags = 0, transhdr_len;
+ unsigned int ivsize = AES_BLOCK_SIZE;
+ unsigned int dst_size = 0, kctx_len;
+ unsigned char tag_offset = 0;
+ unsigned int crypt_len = 0;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ unsigned char hmac_ctrl = get_hmac(authsize);
+ int err = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+
+ /* validate key size */
+ if (aeadctx->enckey_len == 0)
+ goto err;
+
+ if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+ goto err;
+
+ if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+ goto err;
+
+ src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst) {
+ err = chcr_copy_assoc(req, aeadctx);
+ if (err)
+ return ERR_PTR(err);
+ dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+ }
+
+ if (!req->cryptlen)
+ /* a null payload is not supported by the hardware;
+ * software submits one cipher block instead
+ */
+ crypt_len = AES_BLOCK_SIZE;
+ else
+ crypt_len = req->cryptlen;
+ reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+ (op_type ? -authsize : authsize));
+ if (reqctx->dst_nents <= 0) {
+ pr_err("GCM:Invalid Destination sg entries\n");
+ goto err;
+ }
+
+
+ dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+ kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+ skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+ if (!skb)
+ goto err;
+
+ /* NIC driver is going to write the sge hdr. */
+ skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+ chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+ memset(chcr_req, 0, transhdr_len);
+
+ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+ req->assoclen -= 8;
+
+ tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+ ctx->dev->tx_channel_id, 2, (ivsize ?
+ (req->assoclen + 1) : 0));
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ req->assoclen ? 1 : 0, req->assoclen,
+ req->assoclen + ivsize + 1, 0);
+ if (req->cryptlen) {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+ tag_offset, tag_offset);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+ CHCR_ENCRYPT_OP) ? 1 : 0,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+ ivsize >> 1);
+ } else {
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+ (op_type == CHCR_ENCRYPT_OP) ?
+ 1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ 0, 0, ivsize >> 1);
+ }
+ chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 1, dst_size);
+ chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+ memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+ memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+ 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+ /* prepare a 16-byte IV: SALT | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ memcpy(reqctx->iv, aeadctx->salt, 4);
+ memcpy(reqctx->iv + 4, req->iv, 8);
+ } else {
+ memcpy(reqctx->iv, req->iv, 12);
+ }
+ *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ sg_param.nents = reqctx->dst_nents;
+ sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+ sg_param.qid = qid;
+ sg_param.align = 0;
+ if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+ &sg_param))
+ goto dstmap_fail;
+
+ skb_set_transport_header(skb, transhdr_len);
+
+ write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+ write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+ if (req->cryptlen) {
+ write_sg_to_skb(skb, &frags, src, req->cryptlen);
+ } else {
+ aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+ write_sg_to_skb(skb, &frags, dst, crypt_len);
+ }
+
+ create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+ sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+ reqctx->skb = skb;
+ skb_get(skb);
+ return skb;
+
+dstmap_fail:
+ /* ivmap_fail: */
+ kfree_skb(skb);
+ skb = NULL;
+err:
+ return skb;
+}
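The 16-byte IV assembled in create_gcm_wr() is GCM's initial counter block
J0 for a 96-bit nonce, J0 = IV || 0x00000001, with RFC 4106 taking the first
4 bytes from the salt that was peeled off the key and the next 8 from the
per-request explicit IV. Byte layout (illustrative, not from the patch):

	/* GCM:      | req->iv[0..11]               | 00 00 00 01 |
	 * RFC 4106: | salt[0..3] | req->iv[0..7]   | 00 00 00 01 |
	 */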
+
+
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+ aeadctx->null = crypto_get_default_null_skcipher();
+ if (IS_ERR(aeadctx->null))
+ return PTR_ERR(aeadctx->null);
+ return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+ crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+ aeadctx->mayverify = VERIFY_HW;
+ return 0;
+}
+
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* In IPsec the SHA1 authsize is 12, not maxauthsize / 2 (= 10),
+	 * so the authsize == 12 test must come before the
+	 * authsize == (maxauth >> 1) test.
+	 */
+ if (authsize == ICV_4) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_6) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_10) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_12) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == ICV_14) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == (maxauth >> 1)) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ } else if (authsize == maxauth) {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ } else {
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ }
+ return 0;
+}
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_13:
+ case ICV_15:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_SW;
+ break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+ switch (authsize) {
+ case ICV_4:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_6:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_8:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_10:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ default:
+ crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
+ return 0;
}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ unsigned char ck_size, mk_size;
+ int key_ctx_size = 0;
+
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return -EINVAL;
+	}
+	/* Copy the key only after its length has been validated. */
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) +
+		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+ key_ctx_size >> 4);
+ return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+ if (keylen < 3) {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+ }
+ keylen -= 3;
+ memcpy(aeadctx->salt, key + keylen, 3);
+ return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(aead);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+ struct blkcipher_desc h_desc;
+ struct scatterlist src[1];
+ unsigned int ck_size;
+ int ret = 0, key_ctx_size = 0;
+
+ if (get_aead_subtype(aead) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(aeadctx->salt, key + keylen, 4);
+ }
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ crypto_tfm_set_flags((struct crypto_tfm *)aead,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ aeadctx->enckey_len = 0;
+ pr_err("GCM: Invalid key length %d", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(aeadctx->key, key, keylen);
+ aeadctx->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+	/* Compute the GHASH subkey H = E_K(0^128): a single-block CBC
+	 * encrypt with a zero IV is a plain AES encrypt of the zero
+	 * block. H is placed in the key context after the cipher key.
+	 */
+ h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+ if (IS_ERR(h_desc.tfm)) {
+ aeadctx->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ h_desc.flags = 0;
+ ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+ if (ret) {
+ aeadctx->enckey_len = 0;
+ goto out1;
+ }
+ memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+ sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+ ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+ crypto_free_blkcipher(h_desc.tfm);
+out:
+ return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+ unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* keys holds both the authentication and the cipher key */
+ struct crypto_authenc_keys keys;
+ unsigned int bs;
+ unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+ int err = 0, i, key_ctx_len = 0;
+ unsigned char ck_size = 0;
+ unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+ struct crypto_shash *base_hash = NULL;
+ struct algo_param param;
+ int align;
+ u8 *o_ptr = NULL;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+
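+	/* Map the digest size onto the hardware auth mode and MAC key
+	 * size parameters.
+	 */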
+ if (get_alg_config(&param, max_authsize)) {
+ pr_err("chcr : Unsupported digest size\n");
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+
+	/* Copy only the encryption key. The auth key is consumed below
+	 * to derive h(ipad) and h(opad) and does not need to be stored.
+	 */
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+
+ base_hash = chcr_alloc_shash(max_authsize);
+ if (IS_ERR(base_hash)) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ {
+ SHASH_DESC_ON_STACK(shash, base_hash);
+ shash->tfm = base_hash;
+ shash->flags = crypto_shash_get_flags(base_hash);
+ bs = crypto_shash_blocksize(base_hash);
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ o_ptr = actx->h_iopad + param.result_size + align;
+
+ if (keys.authkeylen > bs) {
+ err = crypto_shash_digest(shash, keys.authkey,
+ keys.authkeylen,
+ o_ptr);
+ if (err) {
+ pr_err("chcr : Base driver cannot be loaded\n");
+ goto out;
+ }
+ keys.authkeylen = max_authsize;
+		} else {
+			memcpy(o_ptr, keys.authkey, keys.authkeylen);
+		}
+
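+		/* Precompute the HMAC inner and outer partial hashes,
+		 * H(K ^ ipad) and H(K ^ opad); the hardware resumes from
+		 * these saved states instead of seeing the raw auth key.
+		 */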
+		/* Compute the ipad digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= IPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+ max_authsize))
+ goto out;
+		/* Compute the opad digest */
+ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+ memcpy(pad, o_ptr, keys.authkeylen);
+ for (i = 0; i < bs >> 2; i++)
+ *((unsigned int *)pad + i) ^= OPAD_DATA;
+
+ if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+ goto out;
+
+ /* convert the ipad and opad digest to network order */
+ chcr_change_order(actx->h_iopad, param.result_size);
+ chcr_change_order(o_ptr, param.result_size);
+ key_ctx_len = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+ 0, 1, key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+ chcr_free_shash(base_hash);
+
+ return 0;
+ }
+out:
+ aeadctx->enckey_len = 0;
+ if (base_hash)
+ chcr_free_shash(base_hash);
+ return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct chcr_context *ctx = crypto_aead_ctx(authenc);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+ struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* keys holds both the authentication and the cipher key */
+	struct crypto_authenc_keys keys;
+	int key_ctx_len = 0;
+ unsigned char ck_size = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+ }
+ if (keys.enckeylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keys.enckeylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keys.enckeylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("chcr : Unsupported cipher key\n");
+ goto out;
+ }
+ memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+ aeadctx->enckey_len = keys.enckeylen;
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ key_ctx_len = sizeof(struct _key_ctx)
+ + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+ 0, key_ctx_len >> 4);
+ actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+ return 0;
+out:
+ aeadctx->enckey_len = 0;
+ return -EINVAL;
+}
+
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
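+	/* The tag is always generated in hardware on the encrypt path. */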
+ reqctx->verify = VERIFY_HW;
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ int size;
+
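+	/* If the requested ICV length cannot be verified in hardware,
+	 * ask for the full tag back and verify it in software.
+	 */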
+ if (aeadctx->mayverify == VERIFY_SW) {
+ size = crypto_aead_maxauthsize(tfm);
+ reqctx->verify = VERIFY_SW;
+ } else {
+ size = 0;
+ reqctx->verify = VERIFY_HW;
+ }
+
+ switch (get_aead_subtype(tfm)) {
+ case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_authenc_wr);
+ case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+ case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_aead_ccm_wr);
+ default:
+ return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+ create_gcm_wr);
+ }
+}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx;
+	struct sk_buff *skb;
+
+	if (!ctx->dev) {
+		pr_err("chcr : %s : No crypto device.\n", __func__);
+		return -ENXIO;
+	}
+	u_ctx = ULD_CTX(ctx);
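+	/* Honour back-pressure: a full queue fails fast unless the
+	 * caller allows backlogging.
+	 */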
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ ctx->tx_channel_id)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+ op_type);
+
+	if (IS_ERR_OR_NULL(skb)) {
+		pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+		return skb ? PTR_ERR(skb) : -ENOMEM;
+	}
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -1232,7 +2487,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "cbc(aes)",
- .cra_driver_name = "cbc(aes-chcr)",
+ .cra_driver_name = "cbc-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1259,7 +2514,7 @@ static struct chcr_alg_template driver_algs[] = {
.is_registered = 0,
.alg.crypto = {
.cra_name = "xts(aes)",
- .cra_driver_name = "xts(aes-chcr)",
+ .cra_driver_name = "xts-aes-chcr",
.cra_priority = CHCR_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_ASYNC,
@@ -1352,7 +2607,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA1_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac(sha1-chcr)",
+ .cra_driver_name = "hmac-sha1-chcr",
.cra_blocksize = SHA1_BLOCK_SIZE,
}
}
@@ -1364,7 +2619,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA224_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha224)",
- .cra_driver_name = "hmac(sha224-chcr)",
+ .cra_driver_name = "hmac-sha224-chcr",
.cra_blocksize = SHA224_BLOCK_SIZE,
}
}
@@ -1376,7 +2631,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA256_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac(sha256-chcr)",
+ .cra_driver_name = "hmac-sha256-chcr",
.cra_blocksize = SHA256_BLOCK_SIZE,
}
}
@@ -1388,7 +2643,7 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA384_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha384)",
- .cra_driver_name = "hmac(sha384-chcr)",
+ .cra_driver_name = "hmac-sha384-chcr",
.cra_blocksize = SHA384_BLOCK_SIZE,
}
}
@@ -1400,11 +2655,205 @@ static struct chcr_alg_template driver_algs[] = {
.halg.digestsize = SHA512_DIGEST_SIZE,
.halg.base = {
.cra_name = "hmac(sha512)",
- .cra_driver_name = "hmac(sha512-chcr)",
+ .cra_driver_name = "hmac-sha512-chcr",
.cra_blocksize = SHA512_BLOCK_SIZE,
}
}
},
+ /* Add AEAD Algorithms */
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+ },
+ .ivsize = 12,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_gcm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_gcm_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_gcm_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_ccm_setkey,
+ .setauthsize = chcr_ccm_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "rfc4309(ccm(aes))",
+ .cra_driver_name = "rfc4309-ccm-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx),
+
+ },
+ .ivsize = 8,
+ .maxauthsize = GHASH_DIGEST_SIZE,
+ .setkey = chcr_aead_rfc4309_setkey,
+ .setauthsize = chcr_4106_4309_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,cbc(aes))",
+ .cra_driver_name =
+ "authenc-digest_null-cbc-aes-chcr",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
};
/*
@@ -1422,6 +2871,11 @@ static int chcr_unregister_alg(void)
crypto_unregister_alg(
&driver_algs[i].alg.crypto);
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (driver_algs[i].is_registered)
+ crypto_unregister_aead(
+ &driver_algs[i].alg.aead);
+ break;
case CRYPTO_ALG_TYPE_AHASH:
if (driver_algs[i].is_registered)
crypto_unregister_ahash(
@@ -1456,6 +2910,19 @@ static int chcr_register_alg(void)
err = crypto_register_alg(&driver_algs[i].alg.crypto);
name = driver_algs[i].alg.crypto.cra_driver_name;
break;
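+		/* All chcr AEADs share the same ops; wire them up at
+		 * registration time.
+		 */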
+ case CRYPTO_ALG_TYPE_AEAD:
+ driver_algs[i].alg.aead.base.cra_priority =
+ CHCR_CRA_PRIORITY;
+ driver_algs[i].alg.aead.base.cra_flags =
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+ driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+ driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+ driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+ driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+ err = crypto_register_aead(&driver_algs[i].alg.aead);
+ name = driver_algs[i].alg.aead.base.cra_driver_name;
+ break;
case CRYPTO_ALG_TYPE_AHASH:
a_hash = &driver_algs[i].alg.hash;
a_hash->update = chcr_ahash_update;