author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 23:31:09 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 23:31:09 +0300
commit     64e7003c6b85626a533a67c1ba938b75a3db24e6 (patch)
tree       5e3e776d23a9520f51251b4838d4aa66d920dbff  /arch/arm64/crypto/sm4-ce-ccm-core.S
parent     48ea09cddae0b794cde2070f106ef676703dbcd3 (diff)
parent     453de3eb08c4b7e31b3019a4b0cc3ebce51a6219 (diff)
download   linux-64e7003c6b85626a533a67c1ba938b75a3db24e6.tar.xz
Merge tag 'v6.2-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"API:
- Optimise away self-test overhead when the self-tests are disabled
- Support symmetric encryption via keyring keys in af_alg
- Flip hwrng default_quality, the default is now maximum entropy

Algorithms:
- Add library version of aesgcm
- CFI fixes for assembly code
- Add arm/arm64 accelerated versions of sm3/sm4

Drivers:
- Remove assumption on arm64 that kmalloc is DMA-aligned
- Fix selftest failures in rockchip
- Add support for RK3328/RK3399 in rockchip
- Add deflate support in qat
- Merge ux500 into stm32
- Add support for TEE for PCI ID 0x14CA in ccp
- Add mt7986 support in mtk
- Add MaxLinear platform support in inside-secure
- Add NPCM8XX support in npcm"
* tag 'v6.2-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (184 commits)
crypto: ux500/cryp - delete driver
crypto: stm32/cryp - enable for use with Ux500
crypto: stm32 - enable drivers to be used on Ux500
dt-bindings: crypto: Let STM32 define Ux500 CRYP
hwrng: geode - Fix PCI device refcount leak
hwrng: amd - Fix PCI device refcount leak
crypto: qce - Set DMA alignment explicitly
crypto: octeontx2 - Set DMA alignment explicitly
crypto: octeontx - Set DMA alignment explicitly
crypto: keembay - Set DMA alignment explicitly
crypto: safexcel - Set DMA alignment explicitly
crypto: hisilicon/hpre - Set DMA alignment explicitly
crypto: chelsio - Set DMA alignment explicitly
crypto: ccree - Set DMA alignment explicitly
crypto: ccp - Set DMA alignment explicitly
crypto: cavium - Set DMA alignment explicitly
crypto: img-hash - Fix variable dereferenced before check 'hdev->req'
crypto: arm64/ghash-ce - use frame_push/pop macros consistently
crypto: arm64/crct10dif - use frame_push/pop macros consistently
crypto: arm64/aes-modes - use frame_push/pop macros consistently
...
Diffstat (limited to 'arch/arm64/crypto/sm4-ce-ccm-core.S')
-rw-r--r--   arch/arm64/crypto/sm4-ce-ccm-core.S   328
1 file changed, 328 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/crypto/sm4-ce-ccm-core.S b/arch/arm64/crypto/sm4-ce-ccm-core.S
new file mode 100644
index 000000000000..028207c4afd0
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-ccm-core.S
@@ -0,0 +1,328 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions
+ * as specified in rfc8998
+ * https://datatracker.ietf.org/doc/html/rfc8998
+ *
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "sm4-ce-asm.h"
+
+.arch   armv8-a+crypto
+
+.irp b, 0, 1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31
+        .set .Lv\b\().4s, \b
+.endr
+
+.macro sm4e, vd, vn
+        .inst 0xcec08400 | (.L\vn << 5) | .L\vd
+.endm
+
+/* Register macros */
+
+#define RMAC    v16
+
+/* Helper macros. */
+
+#define inc_le128(vctr)                                 \
+        mov             vctr.d[1], x8;                  \
+        mov             vctr.d[0], x7;                  \
+        adds            x8, x8, #1;                     \
+        rev64           vctr.16b, vctr.16b;             \
+        adc             x7, x7, xzr;
+
+
+.align 3
+SYM_FUNC_START(sm4_ce_cbcmac_update)
+        /* input:
+         *   x0: round key array, CTX
+         *   x1: mac
+         *   x2: src
+         *   w3: nblocks
+         */
+        SM4_PREPARE(x0)
+
+        ld1             {RMAC.16b}, [x1]
+
+.Lcbcmac_loop_4x:
+        cmp             w3, #4
+        blt             .Lcbcmac_loop_1x
+
+        sub             w3, w3, #4
+
+        ld1             {v0.16b-v3.16b}, [x2], #64
+
+        SM4_CRYPT_BLK(RMAC)
+        eor             RMAC.16b, RMAC.16b, v0.16b
+        SM4_CRYPT_BLK(RMAC)
+        eor             RMAC.16b, RMAC.16b, v1.16b
+        SM4_CRYPT_BLK(RMAC)
+        eor             RMAC.16b, RMAC.16b, v2.16b
+        SM4_CRYPT_BLK(RMAC)
+        eor             RMAC.16b, RMAC.16b, v3.16b
+
+        cbz             w3, .Lcbcmac_end
+        b               .Lcbcmac_loop_4x
+
+.Lcbcmac_loop_1x:
+        sub             w3, w3, #1
+
+        ld1             {v0.16b}, [x2], #16
+
+        SM4_CRYPT_BLK(RMAC)
+        eor             RMAC.16b, RMAC.16b, v0.16b
+
+        cbnz            w3, .Lcbcmac_loop_1x
+
+.Lcbcmac_end:
+        st1             {RMAC.16b}, [x1]
+        ret
+SYM_FUNC_END(sm4_ce_cbcmac_update)
+
+.align 3
+SYM_FUNC_START(sm4_ce_ccm_final)
+        /* input:
+         *   x0: round key array, CTX
+         *   x1: ctr0 (big endian, 128 bit)
+         *   x2: mac
+         */
+        SM4_PREPARE(x0)
+
+        ld1             {RMAC.16b}, [x2]
+        ld1             {v0.16b}, [x1]
+
+        SM4_CRYPT_BLK2(RMAC, v0)
+
+        /* en-/decrypt the mac with ctr0 */
+        eor             RMAC.16b, RMAC.16b, v0.16b
+        st1             {RMAC.16b}, [x2]
+
+        ret
+SYM_FUNC_END(sm4_ce_ccm_final)
+
+.align 3
+SYM_FUNC_START(sm4_ce_ccm_enc)
+        /* input:
+         *   x0: round key array, CTX
+         *   x1: dst
+         *   x2: src
+         *   x3: ctr (big endian, 128 bit)
+         *   w4: nbytes
+         *   x5: mac
+         */
+        SM4_PREPARE(x0)
+
+        ldp             x7, x8, [x3]
+        rev             x7, x7
+        rev             x8, x8
+
+        ld1             {RMAC.16b}, [x5]
+
+.Lccm_enc_loop_4x:
+        cmp             w4, #(4 * 16)
+        blt             .Lccm_enc_loop_1x
+
+        sub             w4, w4, #(4 * 16)
+
+        /* construct CTRs */
+        inc_le128(v8)                   /* +0 */
+        inc_le128(v9)                   /* +1 */
+        inc_le128(v10)                  /* +2 */
+        inc_le128(v11)                  /* +3 */
+
+        ld1             {v0.16b-v3.16b}, [x2], #64
+
+        SM4_CRYPT_BLK2(v8, RMAC)
+        eor             v8.16b, v8.16b, v0.16b
+        eor             RMAC.16b, RMAC.16b, v0.16b
+        SM4_CRYPT_BLK2(v9, RMAC)
+        eor             v9.16b, v9.16b, v1.16b
+        eor             RMAC.16b, RMAC.16b, v1.16b
+        SM4_CRYPT_BLK2(v10, RMAC)
+        eor             v10.16b, v10.16b, v2.16b
+        eor             RMAC.16b, RMAC.16b, v2.16b
+        SM4_CRYPT_BLK2(v11, RMAC)
+        eor             v11.16b, v11.16b, v3.16b
+        eor             RMAC.16b, RMAC.16b, v3.16b
+
+        st1             {v8.16b-v11.16b}, [x1], #64
+
+        cbz             w4, .Lccm_enc_end
+        b               .Lccm_enc_loop_4x
+
+.Lccm_enc_loop_1x:
+        cmp             w4, #16
+        blt             .Lccm_enc_tail
+
+        sub             w4, w4, #16
+
+        /* construct CTRs */
+        inc_le128(v8)
+
+        ld1             {v0.16b}, [x2], #16
+
+        SM4_CRYPT_BLK2(v8, RMAC)
+        eor             v8.16b, v8.16b, v0.16b
+        eor             RMAC.16b, RMAC.16b, v0.16b
+
+        st1             {v8.16b}, [x1], #16
+
+        cbz             w4, .Lccm_enc_end
+        b               .Lccm_enc_loop_1x
+
+.Lccm_enc_tail:
+        /* construct CTRs */
+        inc_le128(v8)
+
+        SM4_CRYPT_BLK2(RMAC, v8)
+
+        /* store new MAC */
+        st1             {RMAC.16b}, [x5]
+
+.Lccm_enc_tail_loop:
+        ldrb            w0, [x2], #1            /* get 1 byte from input */
+        umov            w9, v8.b[0]             /* get top crypted CTR byte */
+        umov            w6, RMAC.b[0]           /* get top MAC byte */
+
+        eor             w9, w9, w0              /* w9 = CTR ^ input */
+        eor             w6, w6, w0              /* w6 = MAC ^ input */
+
+        strb            w9, [x1], #1            /* store out byte */
+        strb            w6, [x5], #1            /* store MAC byte */
+
+        subs            w4, w4, #1
+        beq             .Lccm_enc_ret
+
+        /* shift out one byte */
+        ext             RMAC.16b, RMAC.16b, RMAC.16b, #1
+        ext             v8.16b, v8.16b, v8.16b, #1
+
+        b               .Lccm_enc_tail_loop
+
+.Lccm_enc_end:
+        /* store new MAC */
+        st1             {RMAC.16b}, [x5]
+
+        /* store new CTR */
+        rev             x7, x7
+        rev             x8, x8
+        stp             x7, x8, [x3]
+
+.Lccm_enc_ret:
+        ret
+SYM_FUNC_END(sm4_ce_ccm_enc)
+
+.align 3
+SYM_FUNC_START(sm4_ce_ccm_dec)
+        /* input:
+         *   x0: round key array, CTX
+         *   x1: dst
+         *   x2: src
+         *   x3: ctr (big endian, 128 bit)
+         *   w4: nbytes
+         *   x5: mac
+         */
+        SM4_PREPARE(x0)
+
+        ldp             x7, x8, [x3]
+        rev             x7, x7
+        rev             x8, x8
+
+        ld1             {RMAC.16b}, [x5]
+
+.Lccm_dec_loop_4x:
+        cmp             w4, #(4 * 16)
+        blt             .Lccm_dec_loop_1x
+
+        sub             w4, w4, #(4 * 16)
+
+        /* construct CTRs */
+        inc_le128(v8)                   /* +0 */
+        inc_le128(v9)                   /* +1 */
+        inc_le128(v10)                  /* +2 */
+        inc_le128(v11)                  /* +3 */
+
+        ld1             {v0.16b-v3.16b}, [x2], #64
+
+        SM4_CRYPT_BLK2(v8, RMAC)
+        eor             v8.16b, v8.16b, v0.16b
+        eor             RMAC.16b, RMAC.16b, v8.16b
+        SM4_CRYPT_BLK2(v9, RMAC)
+        eor             v9.16b, v9.16b, v1.16b
+        eor             RMAC.16b, RMAC.16b, v9.16b
+        SM4_CRYPT_BLK2(v10, RMAC)
+        eor             v10.16b, v10.16b, v2.16b
+        eor             RMAC.16b, RMAC.16b, v10.16b
+        SM4_CRYPT_BLK2(v11, RMAC)
+        eor             v11.16b, v11.16b, v3.16b
+        eor             RMAC.16b, RMAC.16b, v11.16b
+
+        st1             {v8.16b-v11.16b}, [x1], #64
+
+        cbz             w4, .Lccm_dec_end
+        b               .Lccm_dec_loop_4x
+
+.Lccm_dec_loop_1x:
+        cmp             w4, #16
+        blt             .Lccm_dec_tail
+
+        sub             w4, w4, #16
+
+        /* construct CTRs */
+        inc_le128(v8)
+
+        ld1             {v0.16b}, [x2], #16
+
+        SM4_CRYPT_BLK2(v8, RMAC)
+        eor             v8.16b, v8.16b, v0.16b
+        eor             RMAC.16b, RMAC.16b, v8.16b
+
+        st1             {v8.16b}, [x1], #16
+
+        cbz             w4, .Lccm_dec_end
+        b               .Lccm_dec_loop_1x
+
+.Lccm_dec_tail:
+        /* construct CTRs */
+        inc_le128(v8)
+
+        SM4_CRYPT_BLK2(RMAC, v8)
+
+        /* store new MAC */
+        st1             {RMAC.16b}, [x5]
+
+.Lccm_dec_tail_loop:
+        ldrb            w0, [x2], #1            /* get 1 byte from input */
+        umov            w9, v8.b[0]             /* get top crypted CTR byte */
+        umov            w6, RMAC.b[0]           /* get top MAC byte */
+
+        eor             w9, w9, w0              /* w9 = CTR ^ input */
+        eor             w6, w6, w9              /* w6 = MAC ^ output */
+
+        strb            w9, [x1], #1            /* store out byte */
+        strb            w6, [x5], #1            /* store MAC byte */
+
+        subs            w4, w4, #1
+        beq             .Lccm_dec_ret
+
+        /* shift out one byte */
+        ext             RMAC.16b, RMAC.16b, RMAC.16b, #1
+        ext             v8.16b, v8.16b, v8.16b, #1
+
+        b               .Lccm_dec_tail_loop
+
+.Lccm_dec_end:
+        /* store new MAC */
+        st1             {RMAC.16b}, [x5]
+
+        /* store new CTR */
+        rev             x7, x7
+        rev             x8, x8
+        stp             x7, x8, [x3]
+
+.Lccm_dec_ret:
+        ret
+SYM_FUNC_END(sm4_ce_ccm_dec)
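
For readers wiring this assembly into C, the register comments above fully describe the calling convention (x0 = round key array, w3/w4 = block/byte counts, and so on). The sketch below only illustrates how a glue layer might declare these entry points based on those comments; the parameter names and the use of "const u32 *" for the round key array are assumptions, since the actual glue source is not part of this diff.

/*
 * Hypothetical C declarations for the assembly routines above, derived
 * solely from the register comments in sm4-ce-ccm-core.S.  Parameter
 * names and exact types are assumptions, not taken from this merge.
 */
#include <linux/linkage.h>   /* asmlinkage */
#include <linux/types.h>     /* u8, u32 */

/* x0: round keys, x1: mac, x2: src, w3: number of 16-byte blocks */
asmlinkage void sm4_ce_cbcmac_update(const u32 *rkey, u8 *mac,
                                     const u8 *src, unsigned int nblocks);

/* x0: round keys, x1: dst, x2: src, x3: big-endian CTR, w4: bytes, x5: mac */
asmlinkage void sm4_ce_ccm_enc(const u32 *rkey, u8 *dst, const u8 *src,
                               u8 *ctr, unsigned int nbytes, u8 *mac);
asmlinkage void sm4_ce_ccm_dec(const u32 *rkey, u8 *dst, const u8 *src,
                               u8 *ctr, unsigned int nbytes, u8 *mac);

/* x0: round keys, x1: ctr0 (big endian), x2: mac */
asmlinkage void sm4_ce_ccm_final(const u32 *rkey, u8 *ctr0, u8 *mac);

Note that the CCM encrypt/decrypt routines take a byte count rather than a block count: the .Lccm_*_tail loops above consume any final partial block one byte at a time, XORing the keystream and updating the MAC byte-wise, so no separate partial-block helper is needed on the C side.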