author    Linus Torvalds <torvalds@linux-foundation.org>    2015-06-23 07:04:48 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-06-23 07:04:48 +0300
commit    44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a (patch)
tree      5146cf96cb0dbd7121176d484417ab942c92dcd4 /drivers/crypto/caam
parent    efdfce2b7ff3205ba0fba10270b92b80bbc6187d (diff)
parent    fe55dfdcdfabf160ab0c14617725c57c7a1facfc (diff)
download  linux-44d21c3f3a2ef2f58b18bda64c52c99e723f3f4a.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 4.2:

  API:
   - Convert RNG interface to new style.
   - New AEAD interface with one SG list for AD and plain/cipher text.
     All external AEAD users have been converted.
   - New asymmetric key interface (akcipher).

  Algorithms:
   - Chacha20, Poly1305 and RFC7539 support.
   - New RSA implementation.
   - Jitter RNG.
   - DRBG is now seeded with both /dev/random and Jitter RNG.  If kernel
     pool isn't ready then DRBG will be reseeded when it is.
   - DRBG is now the default crypto API RNG, replacing krng.
   - 842 compression (previously part of powerpc nx driver).

  Drivers:
   - Accelerated SHA-512 for arm64.
   - New Marvell CESA driver that supports DMA and more algorithms.
   - Updated powerpc nx 842 support.
   - Added support for SEC1 hardware to talitos"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (292 commits)
  crypto: marvell/cesa - remove COMPILE_TEST dependency
  crypto: algif_aead - Temporarily disable all AEAD algorithms
  crypto: af_alg - Forbid the use internal algorithms
  crypto: echainiv - Only hold RNG during initialisation
  crypto: seqiv - Add compatibility support without RNG
  crypto: eseqiv - Offer normal cipher functionality without RNG
  crypto: chainiv - Offer normal cipher functionality without RNG
  crypto: user - Add CRYPTO_MSG_DELRNG
  crypto: user - Move cryptouser.h to uapi
  crypto: rng - Do not free default RNG when it becomes unused
  crypto: skcipher - Allow givencrypt to be NULL
  crypto: sahara - propagate the error on clk_disable_unprepare() failure
  crypto: rsa - fix invalid select for AKCIPHER
  crypto: picoxcell - Update to the current clk API
  crypto: nx - Check for bogus firmware properties
  crypto: marvell/cesa - add DT bindings documentation
  crypto: marvell/cesa - add support for Kirkwood and Dove SoCs
  crypto: marvell/cesa - add support for Orion SoCs
  crypto: marvell/cesa - add allhwsupport module parameter
  crypto: marvell/cesa - add support for all armada SoCs
  ...
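The "one SG list for AD and plain/cipher text" item above is the interface change the caam diff below has to adapt to: the old entry points are renamed to old_aead_* (they still expect a separate req->assoc scatterlist), while new-style paths such as gcm_encrypt/gcm_decrypt are added alongside them. As a rough sketch only — not taken from this patch, and assuming a registered "gcm(aes)" implementation — a caller of the new interface lays associated data and payload out in a single scatterlist and declares the AD length with aead_request_set_ad():

/*
 * Hypothetical example of the new AEAD calling convention.
 * buf holds AD || plaintext, with room for the 16-byte GCM tag at the end.
 * Error paths and async completion handling are omitted for brevity;
 * a real caller would also set a completion callback with
 * aead_request_set_callback().
 */
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int gcm_encrypt_example(const u8 *key, unsigned int keylen,
			       u8 *buf, unsigned int assoclen,
			       unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* One scatterlist covers AD, plaintext and space for the tag */
	sg_init_one(&sg, buf, assoclen + cryptlen + 16);

	/* Tell the core how much of the scatterlist is associated data */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	err = crypto_aead_encrypt(req);	/* may return -EINPROGRESS if async */

	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}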
Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--   drivers/crypto/caam/Kconfig        |    5
-rw-r--r--   drivers/crypto/caam/caamalg.c      | 1371
-rw-r--r--   drivers/crypto/caam/caamhash.c     |    9
-rw-r--r--   drivers/crypto/caam/compat.h       |    2
-rw-r--r--   drivers/crypto/caam/ctrl.c         |    4
-rw-r--r--   drivers/crypto/caam/regs.h         |   38
-rw-r--r--   drivers/crypto/caam/sg_sw_sec4.h   |   50
7 files changed, 773 insertions, 706 deletions
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e7555ff4cafd..e286e285aa8a 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -45,7 +45,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
depends on CRYPTO_DEV_FSL_CAAM_JR
- default n
help
Enable the Job Ring's interrupt coalescing feature.
@@ -77,8 +76,9 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
default y
- select CRYPTO_ALGAPI
+ select CRYPTO_AEAD
select CRYPTO_AUTHENC
+ select CRYPTO_BLKCIPHER
help
Selecting this will offload crypto for users of the
scatterlist crypto API (such as the linux native IPSec
@@ -115,7 +115,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
- default n
help
Selecting this will enable printing of various debug
information in the CAAM driver.
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 29071a156cbe..daca933a82ec 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -65,6 +65,10 @@
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH 16
+#define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
+#define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 4)
+
/* length of descriptors text */
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
@@ -79,18 +83,16 @@
#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
-#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
@@ -98,8 +100,7 @@
#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
-#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
- CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
#ifdef DEBUG
@@ -258,7 +259,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
- struct aead_tfm *tfm = &aead->base.crt_aead;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
@@ -273,7 +274,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* aead_encrypt shared descriptor */
+ /* old_aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -362,7 +363,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
- /* aead_decrypt shared descriptor */
+ /* old_aead_decrypt shared descriptor */
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Skip if already shared */
@@ -383,7 +384,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* assoclen + cryptlen = seqinlen - ivsize - authsize */
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize);
+ ctx->authsize + ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -449,7 +450,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
static int aead_set_sh_desc(struct crypto_aead *aead)
{
- struct aead_tfm *tfm = &aead->base.crt_aead;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
const char *alg_name = crypto_tfm_alg_name(ctfm);
@@ -496,7 +497,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* aead_encrypt shared descriptor */
+ /* old_aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
/* Note: Context registers are saved. */
@@ -510,7 +511,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
/* assoclen + cryptlen = seqinlen - ivsize */
- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
@@ -518,7 +519,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* read assoc before reading payload */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+ aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -565,7 +566,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- /* aead_decrypt shared descriptor */
+ /* old_aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
/* Note: Context registers are saved. */
@@ -577,7 +578,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* assoclen + cryptlen = seqinlen - ivsize - authsize */
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize);
+ ctx->authsize + ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -586,7 +587,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
- aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+ aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
@@ -645,20 +646,20 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Generate IV */
geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
append_move(desc, MOVE_WAITCOMP |
MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
(ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (tfm->ivsize << MOVE_LEN_SHIFT));
+ (ivsize << MOVE_LEN_SHIFT));
append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
/* Copy IV to class 1 context */
append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
(ctx1_iv_off << MOVE_OFFSET_SHIFT) |
- (tfm->ivsize << MOVE_LEN_SHIFT));
+ (ivsize << MOVE_LEN_SHIFT));
/* Return to encryption */
append_operation(desc, ctx->class2_alg_type |
@@ -676,10 +677,10 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Copy iv from outfifo to class 2 fifo */
moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
- NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
/* Load Counter into CONTEXT1 reg */
@@ -698,7 +699,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
/* Not need to reload iv */
- append_seq_fifo_load(desc, tfm->ivsize,
+ append_seq_fifo_load(desc, ivsize,
FIFOLD_CLASS_SKIP);
/* Will read cryptlen */
@@ -738,7 +739,6 @@ static int aead_setauthsize(struct crypto_aead *authenc,
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
- struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
@@ -754,7 +754,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
@@ -777,34 +777,34 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+ /* if assoclen + cryptlen is ZERO, skip to ICV write */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
- /* assoclen + cryptlen = seqinlen - ivsize */
- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+ /* cryptlen = seqinlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
/* if cryptlen is ZERO jump to zero-payload commands */
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
JUMP_COND_MATH_Z);
- /* read IV */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-
- /* if assoclen is ZERO, skip reading the assoc data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
/* read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
/* write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -814,31 +814,17 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
/* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 7);
+ append_jump(desc, JUMP_TEST_ALL | 2);
/* zero-payload commands */
set_jump_tgt_here(desc, zero_payload_jump_cmd);
- /* if assoclen is ZERO, jump to IV reading - is the only input data */
- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
- /* read IV */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
-
/* read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
- /* jump to ICV writing */
- append_jump(desc, JUMP_TEST_ALL | 2);
-
- /* read IV - is the only input data */
+ /* There is no input data */
set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
- FIFOLD_TYPE_LAST1);
/* write ICV */
append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
@@ -862,7 +848,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
@@ -886,33 +872,30 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize);
-
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+ /* if assoclen is ZERO, skip reading the assoc data */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
- /* read IV */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* jump to zero-payload command if cryptlen is zero */
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
+ /* skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
- /* if asoclen is ZERO, skip reading assoc data */
- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
/* read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+ /* jump to zero-payload command if cryptlen is zero */
+ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+ JUMP_COND_MATH_Z);
+
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/* store encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -921,21 +904,9 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
- /* jump the zero-payload commands */
- append_jump(desc, JUMP_TEST_ALL | 4);
-
/* zero-payload command */
set_jump_tgt_here(desc, zero_payload_jump_cmd);
- /* if assoclen is ZERO, jump to ICV reading */
- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
- JUMP_COND_MATH_Z);
- /* read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
-
/* read ICV */
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
@@ -968,13 +939,11 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
- struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
- u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+ u32 *key_jump_cmd;
u32 *desc;
- u32 geniv;
if (!ctx->enckeylen || !ctx->authsize)
return 0;
@@ -984,7 +953,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
@@ -1007,29 +976,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* assoclen + cryptlen = seqinlen - ivsize */
- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
-
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
-
- /* Read Salt */
- append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
- 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
- /* Read AES-GCM-ESP IV */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ /* cryptlen = seqoutlen - assoclen */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
/* Will read cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
/* Write encrypted data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1083,30 +1044,21 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize);
-
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
-
- /* Will write cryptlen bytes */
- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
- /* Read Salt */
- append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
- 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
- /* Read AES-GCM-ESP IV */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
/* Read assoc data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+ /* Will write cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
/* Will read cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Store payload data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1132,107 +1084,6 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
- /*
- * Job Descriptor and Shared Descriptors
- * must all fit into the 64-word Descriptor h/w Buffer
- */
- keys_fit_inline = false;
- if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
- ctx->split_key_pad_len + ctx->enckeylen <=
- CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* rfc4106_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to OFIFO */
- write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* assoclen = seqinlen - (ivsize + cryptlen) */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
-
- /* Read Salt and generated IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
- FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
- /* Append Salt */
- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
- set_move_tgt_here(desc, move_cmd);
- set_move_tgt_here(desc, write_iv_cmd);
- /* Blank commands. Will be overwritten by generated IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
-
- /* No need to reload iv */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
-
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Store generated IV and encrypted data */
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
-
- /* Read payload data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
return 0;
}
@@ -1249,14 +1100,12 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
- struct aead_tfm *tfm = &aead->base.crt_aead;
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
- u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+ u32 *key_jump_cmd;
u32 *read_move_cmd, *write_move_cmd;
u32 *desc;
- u32 geniv;
if (!ctx->enckeylen || !ctx->authsize)
return 0;
@@ -1266,7 +1115,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
- if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
@@ -1289,48 +1138,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
- /* Load AES-GMAC ESP IV into Math1 register */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
- LDST_CLASS_DECO | tfm->ivsize);
-
- /* Wait the DMA transaction to finish */
- append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Overwrite blank immediate AES-GMAC ESP IV data */
- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Overwrite blank immediate AAD data */
- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* assoclen = (seqinlen - ivsize) - cryptlen */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* Read Salt and AES-GMAC ESP IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
- /* Append Salt */
- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
- set_move_tgt_here(desc, write_iv_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD);
-
- /* Will read cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-
- /* Will write cryptlen bytes */
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ /* assoclen + cryptlen = seqinlen */
+ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
/*
* MOVE_LEN opcode is not available in all SEC HW revisions,
@@ -1342,16 +1151,13 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
(0x8 << MOVE_LEN_SHIFT));
- /* Authenticate AES-GMAC ESP IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_AAD | tfm->ivsize);
- set_move_tgt_here(desc, write_aad_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read and write cryptlen bytes */
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+ /* Read and write assoclen + cryptlen bytes */
aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
set_move_tgt_here(desc, read_move_cmd);
@@ -1382,7 +1188,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
* must all fit into the 64-word Descriptor h/w Buffer
*/
keys_fit_inline = false;
- if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+ if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
@@ -1405,28 +1211,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
append_operation(desc, ctx->class1_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* Load AES-GMAC ESP IV into Math1 register */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
- LDST_CLASS_DECO | tfm->ivsize);
-
- /* Wait the DMA transaction to finish */
- append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
-
- /* Overwrite blank immediate AES-GMAC ESP IV data */
- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Overwrite blank immediate AAD data */
- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* assoclen = (assoclen + cryptlen) - cryptlen */
- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+ /* assoclen + cryptlen = seqoutlen */
+ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/*
* MOVE_LEN opcode is not available in all SEC HW revisions,
@@ -1438,40 +1224,16 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
(0x8 << MOVE_LEN_SHIFT));
- /* Read Salt and AES-GMAC ESP IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
- /* Append Salt */
- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
- set_move_tgt_here(desc, write_iv_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD);
-
- /* Will read cryptlen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
- /* Will write cryptlen bytes */
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
-
- /* Authenticate AES-GMAC ESP IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_AAD | tfm->ivsize);
- set_move_tgt_here(desc, write_aad_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
+ /* Will write assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
/* Store payload data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
- /* In-snoop cryptlen data */
+ /* In-snoop assoclen + cryptlen data */
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
@@ -1499,135 +1261,6 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc_bytes(desc), 1);
#endif
- /*
- * Job Descriptor and Shared Descriptors
- * must all fit into the 64-word Descriptor h/w Buffer
- */
- keys_fit_inline = false;
- if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = true;
-
- /* rfc4543_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip key loading if it is loaded due to sharing */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
- if (keys_fit_inline)
- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- else
- append_key(desc, ctx->key_dma, ctx->enckeylen,
- CLASS_1 | KEY_DEST_CLASS_REG);
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
- NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move generated IV to Math1 register */
- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
- (tfm->ivsize << MOVE_LEN_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Overwrite blank immediate AES-GMAC IV data */
- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Overwrite blank immediate AAD data */
- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Copy generated IV to OFIFO */
- append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
- (tfm->ivsize << MOVE_LEN_SHIFT));
-
- /* Class 1 operation */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
-
- /* ivsize + cryptlen = seqoutlen - authsize */
- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
-
- /* assoclen = seqinlen - (ivsize + cryptlen) */
- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
- /* Will write ivsize + cryptlen */
- append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
-
- /*
- * MOVE_LEN opcode is not available in all SEC HW revisions,
- * thus need to do some magic, i.e. self-patch the descriptor
- * buffer.
- */
- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
- (0x6 << MOVE_LEN_SHIFT));
- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
- (0x8 << MOVE_LEN_SHIFT));
-
- /* Read Salt and AES-GMAC generated IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
- /* Append Salt */
- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
- set_move_tgt_here(desc, write_iv_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
-
- /* No need to reload iv */
- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
-
- /* Read assoc data */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
- FIFOLD_TYPE_AAD);
-
- /* Will read cryptlen */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-
- /* Authenticate AES-GMAC IV */
- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
- FIFOLD_TYPE_AAD | tfm->ivsize);
- set_move_tgt_here(desc, write_aad_cmd);
- /* Blank commands. Will be overwritten by AES-GMAC IV. */
- append_cmd(desc, 0x00000000);
- append_cmd(desc, 0x00000000);
- /* End of blank commands */
-
- /* Read and write cryptlen bytes */
- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
-
- set_move_tgt_here(desc, read_move_cmd);
- set_move_tgt_here(desc, write_move_cmd);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- /* Move payload data to OFIFO */
- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
-
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-
return 0;
}
@@ -2100,7 +1733,7 @@ struct aead_edesc {
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
- u32 hw_desc[0];
+ u32 hw_desc[];
};
/*
@@ -2154,6 +1787,16 @@ static void aead_unmap(struct device *dev,
struct aead_edesc *edesc,
struct aead_request *req)
{
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+ edesc->dst_chained, 0, 0,
+ edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
+}
+
+static void old_aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
int ivsize = crypto_aead_ivsize(aead);
@@ -2184,6 +1827,28 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+
+#ifdef DEBUG
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ aead_unmap(jrdev, edesc, req);
+
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
+static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -2198,7 +1863,7 @@ static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- aead_unmap(jrdev, edesc, req);
+ old_aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
@@ -2223,6 +1888,34 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
{
struct aead_request *req = context;
struct aead_edesc *edesc;
+
+#ifdef DEBUG
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+
+ if (err)
+ caam_jr_strstatus(jrdev, err);
+
+ aead_unmap(jrdev, edesc, req);
+
+ /*
+ * verify hw auth check passed else return -EBADMSG
+ */
+ if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
+ err = -EBADMSG;
+
+ kfree(edesc);
+
+ aead_request_complete(req, err);
+}
+
+static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -2246,7 +1939,7 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
if (err)
caam_jr_strstatus(jrdev, err);
- aead_unmap(jrdev, edesc, req);
+ old_aead_unmap(jrdev, edesc, req);
/*
* verify hw auth check passed else return -EBADMSG
@@ -2342,10 +2035,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
/*
* Fill in aead job descriptor
*/
-static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
- struct aead_edesc *edesc,
- struct aead_request *req,
- bool all_contig, bool encrypt)
+static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -2425,6 +2118,97 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
}
/*
+ * Fill in aead job descriptor
+ */
+static void init_aead_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+ dma_addr_t ptr;
+ u32 *sh_desc;
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (all_contig) {
+ src_dma = sg_dma_address(req->src);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+
+ append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
+ in_options);
+
+ dst_dma = src_dma;
+ out_options = in_options;
+
+ if (unlikely(req->src != req->dst)) {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+ sec4_sg_index *
+ sizeof(struct sec4_sg_entry);
+ out_options = LDST_SGF;
+ }
+ }
+
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma,
+ req->assoclen + req->cryptlen + authsize,
+ out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma,
+ req->assoclen + req->cryptlen - authsize,
+ out_options);
+
+ /* REG3 = assoclen */
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+}
+
+static void init_gcm_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc = edesc->hw_desc;
+ bool generic_gcm = (ivsize == 12);
+ unsigned int last;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
+ /* BUG This should not be specific to generic GCM. */
+ last = 0;
+ if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
+ last = FIFOLD_TYPE_LAST1;
+
+ /* Read GCM IV */
+ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+ FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
+ /* Append Salt */
+ if (!generic_gcm)
+ append_data(desc, ctx->key + ctx->enckeylen, 4);
+ /* Append IV */
+ append_data(desc, req->iv, ivsize);
+ /* End of blank commands */
+}
+
+/*
* Fill in aead givencrypt job descriptor
*/
static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
@@ -2608,9 +2392,10 @@ static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
/*
* allocate and map the aead extended descriptor
*/
-static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
- int desc_bytes, bool *all_contig_ptr,
- bool encrypt)
+static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes,
+ bool *all_contig_ptr,
+ bool encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -2713,10 +2498,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index = 0;
if (!all_contig) {
if (!is_gcm) {
- sg_to_sec4_sg(req->assoc,
- assoc_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
+ sg_to_sec4_sg_len(req->assoc, req->assoclen,
+ edesc->sec4_sg + sec4_sg_index);
sec4_sg_index += assoc_nents;
}
@@ -2725,10 +2508,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_index += 1;
if (is_gcm) {
- sg_to_sec4_sg(req->assoc,
- assoc_nents,
- edesc->sec4_sg +
- sec4_sg_index, 0);
+ sg_to_sec4_sg_len(req->assoc, req->assoclen,
+ edesc->sec4_sg + sec4_sg_index);
sec4_sg_index += assoc_nents;
}
@@ -2752,7 +2533,124 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return edesc;
}
-static int aead_encrypt(struct aead_request *req)
+/*
+ * allocate and map the aead extended descriptor
+ */
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ int sgc;
+ bool all_contig = true;
+ bool src_chained = false, dst_chained = false;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+ unsigned int authsize = ctx->authsize;
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_count(req->src, req->assoclen + req->cryptlen,
+ &src_chained);
+ dst_nents = sg_count(req->dst,
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)),
+ &dst_chained);
+ } else {
+ src_nents = sg_count(req->src,
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0),
+ &src_chained);
+ }
+
+ /* Check if data are contiguous. */
+ all_contig = !src_nents;
+ if (!all_contig) {
+ src_nents = src_nents ? : 1;
+ sec4_sg_len = src_nents;
+ }
+
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map source\n");
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE, dst_chained);
+ if (unlikely(!sgc)) {
+ dev_err(jrdev, "unable to map destination\n");
+ dma_unmap_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE, src_chained);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->src_chained = src_chained;
+ edesc->dst_nents = dst_nents;
+ edesc->dst_chained = dst_chained;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ *all_contig_ptr = all_contig;
+
+ sec4_sg_index = 0;
+ if (!all_contig) {
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
+ }
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+
+ if (!sec4_sg_bytes)
+ return edesc;
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+ dev_err(jrdev, "unable to map S/G table\n");
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+ return edesc;
+}
+
+static int gcm_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2763,14 +2661,12 @@ static int aead_encrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, true);
+ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
- all_contig, true);
+ init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,7 +2685,7 @@ static int aead_encrypt(struct aead_request *req)
return ret;
}
-static int aead_decrypt(struct aead_request *req)
+static int old_aead_encrypt(struct aead_request *req)
{
struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2800,8 +2696,80 @@ static int aead_decrypt(struct aead_request *req)
int ret = 0;
/* allocate extended descriptor */
- edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
- CAAM_CMD_SZ, &all_contig, false);
+ edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ old_aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor*/
+ init_gcm_job(req, edesc, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int old_aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig, false);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -2812,8 +2780,8 @@ static int aead_decrypt(struct aead_request *req)
#endif
/* Create and submit job descriptor*/
- init_aead_job(ctx->sh_desc_dec,
- ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+ old_init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2821,11 +2789,11 @@ static int aead_decrypt(struct aead_request *req)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- aead_unmap(jrdev, edesc, req);
+ old_aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -2953,8 +2921,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_index = 0;
if (!(contig & GIV_SRC_CONTIG)) {
if (!is_gcm) {
- sg_to_sec4_sg(req->assoc, assoc_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
+ sg_to_sec4_sg_len(req->assoc, req->assoclen,
+ edesc->sec4_sg + sec4_sg_index);
sec4_sg_index += assoc_nents;
}
@@ -2963,8 +2931,8 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
sec4_sg_index += 1;
if (is_gcm) {
- sg_to_sec4_sg(req->assoc, assoc_nents,
- edesc->sec4_sg + sec4_sg_index, 0);
+ sg_to_sec4_sg_len(req->assoc, req->assoclen,
+ edesc->sec4_sg + sec4_sg_index);
sec4_sg_index += assoc_nents;
}
@@ -2999,7 +2967,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
return edesc;
}
-static int aead_givencrypt(struct aead_givcrypt_request *areq)
+static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
{
struct aead_request *req = &areq->areq;
struct aead_edesc *edesc;
@@ -3033,11 +3001,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- aead_unmap(jrdev, edesc, req);
+ old_aead_unmap(jrdev, edesc, req);
kfree(edesc);
}
@@ -3046,7 +3014,7 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
{
- return aead_encrypt(&areq->areq);
+ return old_aead_encrypt(&areq->areq);
}
/*
@@ -3379,11 +3347,7 @@ struct caam_alg_template {
u32 type;
union {
struct ablkcipher_alg ablkcipher;
- struct aead_alg aead;
- struct blkcipher_alg blkcipher;
- struct cipher_alg cipher;
- struct compress_alg compress;
- struct rng_alg rng;
+ struct old_aead_alg aead;
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
@@ -3400,8 +3364,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3419,8 +3383,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3438,8 +3402,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3458,8 +3422,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3478,8 +3442,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3498,8 +3462,8 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
.givencrypt = aead_null_givencrypt,
.geniv = "<built-in>",
.ivsize = NULL_IV_SIZE,
@@ -3518,9 +3482,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -3537,9 +3501,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -3556,9 +3520,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -3576,9 +3540,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -3596,9 +3560,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -3617,9 +3581,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -3637,9 +3601,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -3656,9 +3620,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -3675,9 +3639,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -3695,9 +3659,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -3715,9 +3679,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -3735,9 +3699,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -3755,9 +3719,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -3774,9 +3738,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -3793,9 +3757,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -3813,9 +3777,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -3833,9 +3797,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -3853,9 +3817,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -3873,9 +3837,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
@@ -3892,9 +3856,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -3911,9 +3875,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
@@ -3931,9 +3895,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -3951,9 +3915,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
@@ -3971,9 +3935,9 @@ static struct caam_alg_template driver_algs[] = {
.template_aead = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
+ .encrypt = old_aead_encrypt,
+ .decrypt = old_aead_decrypt,
+ .givencrypt = old_aead_givencrypt,
.geniv = "<built-in>",
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -3983,58 +3947,6 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
- {
- .name = "rfc4106(gcm(aes))",
- .driver_name = "rfc4106-gcm-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
- .setkey = rfc4106_setkey,
- .setauthsize = rfc4106_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- },
- {
- .name = "rfc4543(gcm(aes))",
- .driver_name = "rfc4543-gcm-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
- .setkey = rfc4543_setkey,
- .setauthsize = rfc4543_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = aead_givencrypt,
- .geniv = "<built-in>",
- .ivsize = 8,
- .maxauthsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- },
- /* Galois Counter Mode */
- {
- .name = "gcm(aes)",
- .driver_name = "gcm-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_AEAD,
- .template_aead = {
- .setkey = gcm_setkey,
- .setauthsize = gcm_setauthsize,
- .encrypt = aead_encrypt,
- .decrypt = aead_decrypt,
- .givencrypt = NULL,
- .geniv = "<built-in>",
- .ivsize = 12,
- .maxauthsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
- },
/* ablkcipher descriptor */
{
.name = "cbc(aes)",
@@ -4124,21 +4036,84 @@ static struct caam_alg_template driver_algs[] = {
}
};
-struct caam_crypto_alg {
- struct list_head entry;
+struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
int alg_op;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+};
+
+struct caam_crypto_alg {
struct crypto_alg crypto_alg;
+ struct list_head entry;
+ struct caam_alg_entry caam;
};
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
-
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
pr_err("Job Ring Device allocation for transform failed\n");
@@ -4146,17 +4121,35 @@ static int caam_cra_init(struct crypto_tfm *tfm)
}
/* copy descriptor header template value */
- ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
- ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
- ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;
+ ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
return 0;
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_tfm *tfm)
{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg =
+ container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg =
+ container_of(alg, struct caam_aead_alg, aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return caam_init_common(ctx, &caam_alg->caam);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
if (ctx->sh_desc_enc_dma &&
!dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
@@ -4179,10 +4172,28 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
caam_jr_free(ctx->jrdev);
}
+static void caam_cra_exit(struct crypto_tfm *tfm)
+{
+ caam_exit_common(crypto_tfm_ctx(tfm));
+}
+
+static void caam_aead_exit(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
static void __exit caam_algapi_exit(void)
{
struct caam_crypto_alg *t_alg, *n;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
if (!alg_list.next)
return;
@@ -4235,13 +4246,26 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
break;
}
- t_alg->class1_alg_type = template->class1_alg_type;
- t_alg->class2_alg_type = template->class2_alg_type;
- t_alg->alg_op = template->alg_op;
+ t_alg->caam.class1_alg_type = template->class1_alg_type;
+ t_alg->caam.class2_alg_type = template->class2_alg_type;
+ t_alg->caam.alg_op = template->alg_op;
return t_alg;
}
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_aead_init;
+ alg->exit = caam_aead_exit;
+}
+
static int __init caam_algapi_init(void)
{
struct device_node *dev_node;
@@ -4249,6 +4273,7 @@ static int __init caam_algapi_init(void)
struct device *ctrldev;
void *priv;
int i = 0, err = 0;
+ bool registered = false;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -4295,10 +4320,30 @@ static int __init caam_algapi_init(void)
pr_warn("%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
- } else
- list_add_tail(&t_alg->entry, &alg_list);
+ continue;
+ }
+
+ list_add_tail(&t_alg->entry, &alg_list);
+ registered = true;
}
- if (!list_empty(&alg_list))
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ pr_warn("%s alg registration failed\n",
+ t_alg->aead.base.cra_driver_name);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ if (registered)
pr_info("caam algorithms registered in /proc/crypto\n");
return err;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 332c8ef8dae2..dae1e8099969 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -835,17 +835,17 @@ static int ahash_update_ctx(struct ahash_request *req)
src_map_to_sec4_sg(jrdev, req->src, src_nents,
edesc->sec4_sg + sec4_sg_src_index,
chained);
- if (*next_buflen) {
+ if (*next_buflen)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
- }
} else {
(edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
SEC4_SG_LEN_FIN;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
@@ -1268,9 +1268,10 @@ static int ahash_update_no_ctx(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src,
to_hash - *buflen,
*next_buflen, 0);
- state->current_buf = !state->current_buf;
}
+ state->current_buf = !state->current_buf;
+
sh_len = desc_len(sh_desc);
desc = edesc->hw_desc;
init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
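The caamhash.c hunks hoist the current_buf toggle out of the conditional, so
the two bounce buffers swap roles on every update pass rather than only when
trailing bytes were copied into next_buf. A rough sketch of the ping-pong
pattern being kept consistent here; the buf_0/buf_1 field names are assumptions
about the surrounding driver state, not visible in this diff:

struct two_buf_state {
        u8 buf_0[64];
        u8 buf_1[64];
        u8 current_buf;
};

static void update_pass_example(struct two_buf_state *state)
{
        /* the buffer filled this pass is the one consumed next pass */
        u8 *buf      = state->current_buf ? state->buf_1 : state->buf_0;
        u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;

        /* ... hash from buf, stash any trailing bytes into next_buf ... */
        (void)buf;
        (void)next_buf;

        state->current_buf = !state->current_buf;
}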
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index acd7743e2603..f57f395db33f 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -32,7 +32,7 @@
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index efba4ccd4fac..efacab7539ef 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -301,7 +301,7 @@ static int caam_remove(struct platform_device *pdev)
#endif
/* Unmap controller region */
- iounmap(&ctrl);
+ iounmap(ctrl);
return ret;
}
@@ -496,7 +496,7 @@ static int caam_probe(struct platform_device *pdev)
sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
- iounmap(&ctrl);
+ iounmap(ctrl);
return -ENOMEM;
}
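The ctrl.c change is a straightforward fix: ctrl is already the __iomem cookie
obtained when the controller region was mapped, so iounmap() must be given the
pointer itself, not its address. A small sketch of the intended pairing,
assuming of_iomap() as the mapping call (probe_example() is illustrative):

#include <linux/io.h>
#include <linux/of_address.h>

static int probe_example(struct device_node *np)
{
        void __iomem *base = of_iomap(np, 0);   /* the cookie to keep */

        if (!base)
                return -ENOMEM;

        /* ... program the block through base ... */

        iounmap(base);          /* unmap with the cookie, not &base */
        return 0;
}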
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 378ddc17f60e..672c97489505 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -83,35 +83,35 @@
#endif
#endif
+/*
+ * The only users of these wr/rd_reg64 functions is the Job Ring (JR).
+ * The DMA address registers in the JR are a pair of 32-bit registers.
+ * The layout is:
+ *
+ * base + 0x0000 : most-significant 32 bits
+ * base + 0x0004 : least-significant 32 bits
+ *
+ * The 32-bit version of this core therefore has to write to base + 0x0004
+ * to set the 32-bit wide DMA address. This seems to be independent of the
+ * endianness of the written/read data.
+ */
+
#ifndef CONFIG_64BIT
-#ifdef __BIG_ENDIAN
-static inline void wr_reg64(u64 __iomem *reg, u64 data)
-{
- wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
- wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
-}
+#define REG64_MS32(reg) ((u32 __iomem *)(reg))
+#define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
-static inline u64 rd_reg64(u64 __iomem *reg)
-{
- return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
- ((u64)rd_reg32((u32 __iomem *)reg + 1));
-}
-#else
-#ifdef __LITTLE_ENDIAN
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
- wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
- wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+ wr_reg32(REG64_MS32(reg), data >> 32);
+ wr_reg32(REG64_LS32(reg), data);
}
static inline u64 rd_reg64(u64 __iomem *reg)
{
- return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
- ((u64)rd_reg32((u32 __iomem *)reg));
+ return ((u64)rd_reg32(REG64_MS32(reg)) << 32 |
+ (u64)rd_reg32(REG64_LS32(reg)));
}
#endif
-#endif
-#endif
/*
* jr_outentry
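To make the consolidated accessors above concrete: on a 32-bit build a 64-bit
job-ring register access is always split into two 32-bit accesses, the
most-significant word at base + 0x0 and the least-significant at base + 0x4,
independent of endianness. Illustrative only (the register pointer is a
stand-in, and regs.h is assumed to be included):

/* Shows how a 64-bit DMA address lands in the two halves via wr_reg64(). */
static void wr_reg64_example(u64 __iomem *reg)
{
        wr_reg64(reg, 0x0000000112345678ULL);
        /*
         * equivalent to:
         *   wr_reg32(REG64_MS32(reg), 0x00000001);   base + 0x0
         *   wr_reg32(REG64_LS32(reg), 0x12345678);   base + 0x4
         */
}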
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 3b918218aa4c..b68b74cc7b77 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -55,6 +55,21 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
}
+static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
+ struct scatterlist *sg, unsigned int total,
+ struct sec4_sg_entry *sec4_sg_ptr)
+{
+ do {
+ unsigned int len = min(sg_dma_len(sg), total);
+
+ dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
+ sec4_sg_ptr++;
+ sg = sg_next(sg);
+ total -= len;
+ } while (total);
+ return sec4_sg_ptr - 1;
+}
+
/* count number of elements in scatterlist */
static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
bool *chained)
@@ -85,34 +100,41 @@ static inline int sg_count(struct scatterlist *sg_list, int nbytes,
return sg_nents;
}
-static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
- unsigned int nents, enum dma_data_direction dir,
- bool chained)
+static inline void dma_unmap_sg_chained(
+ struct device *dev, struct scatterlist *sg, unsigned int nents,
+ enum dma_data_direction dir, bool chained)
{
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {
- dma_map_sg(dev, sg, 1, dir);
+ dma_unmap_sg(dev, sg, 1, dir);
sg = sg_next(sg);
}
- } else {
- dma_map_sg(dev, sg, nents, dir);
+ } else if (nents) {
+ dma_unmap_sg(dev, sg, nents, dir);
}
- return nents;
}
-static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
- unsigned int nents, enum dma_data_direction dir,
- bool chained)
+static inline int dma_map_sg_chained(
+ struct device *dev, struct scatterlist *sg, unsigned int nents,
+ enum dma_data_direction dir, bool chained)
{
+ struct scatterlist *first = sg;
+
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {
- dma_unmap_sg(dev, sg, 1, dir);
+ if (!dma_map_sg(dev, sg, 1, dir)) {
+ dma_unmap_sg_chained(dev, first, i, dir,
+ chained);
+ nents = 0;
+ break;
+ }
+
sg = sg_next(sg);
}
- } else {
- dma_unmap_sg(dev, sg, nents, dir);
- }
+ } else
+ nents = dma_map_sg(dev, sg, nents, dir);
+
return nents;
}
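The reworked dma_map_sg_chained() above also changes the contract for callers:
on a mapping failure it now unwinds whatever it had already mapped and returns
0 instead of unconditionally reporting nents. A minimal sketch of a caller
written against that contract (the function and its arguments are placeholders,
not a specific call site, and sg_sw_sec4.h is assumed to be included):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_src_example(struct device *jrdev, struct scatterlist *src,
                           int src_nents, bool chained)
{
        int mapped = dma_map_sg_chained(jrdev, src, src_nents,
                                        DMA_TO_DEVICE, chained);

        if (!mapped)
                return -ENOMEM;         /* nothing is left mapped on failure */

        /* ... build the sec4 S/G table, enqueue the job, await completion ... */

        dma_unmap_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
        return 0;
}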