-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-hpre | 7
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-sec | 7
-rw-r--r--  Documentation/ABI/testing/debugfs-hisi-zip | 7
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-qat_svn | 114
-rw-r--r--  Documentation/crypto/userspace-if.rst | 4
-rw-r--r--  Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml | 5
-rw-r--r--  Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml | 27
-rw-r--r--  Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml | 1
-rw-r--r--  MAINTAINERS | 24
-rw-r--r--  arch/s390/configs/debug_defconfig | 1
-rw-r--r--  arch/s390/configs/defconfig | 1
-rw-r--r--  arch/s390/crypto/Kconfig | 16
-rw-r--r--  arch/s390/crypto/Makefile | 1
-rw-r--r--  arch/s390/crypto/des_s390.c | 502
-rw-r--r--  arch/sparc/crypto/Kconfig | 14
-rw-r--r--  arch/sparc/crypto/Makefile | 2
-rw-r--r--  arch/sparc/crypto/des_asm.S | 419
-rw-r--r--  arch/sparc/crypto/des_glue.c | 482
-rw-r--r--  arch/x86/crypto/Kconfig | 14
-rw-r--r--  arch/x86/crypto/Makefile | 3
-rw-r--r--  arch/x86/crypto/des3_ede-asm_64.S | 831
-rw-r--r--  arch/x86/crypto/des3_ede_glue.c | 391
-rw-r--r--  crypto/Kconfig | 36
-rw-r--r--  crypto/af_alg.c | 4
-rw-r--r--  crypto/cryptd.c | 112
-rw-r--r--  crypto/dh.c | 8
-rw-r--r--  crypto/drbg.c | 2
-rw-r--r--  crypto/ecc.c | 11
-rw-r--r--  crypto/geniv.c | 10
-rw-r--r--  crypto/jitterentropy-kcapi.c | 14
-rw-r--r--  crypto/krb5enc.c | 5
-rw-r--r--  crypto/lrw.c | 2
-rw-r--r--  crypto/rng.c | 23
-rw-r--r--  crypto/simd.c | 239
-rw-r--r--  crypto/tcrypt.c | 17
-rw-r--r--  crypto/tea.c | 2
-rw-r--r--  crypto/testmgr.c | 53
-rw-r--r--  crypto/testmgr.h | 1764
-rw-r--r--  crypto/xts.c | 2
-rw-r--r--  drivers/crypto/Kconfig | 13
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/allwinner/Kconfig | 2
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace-hash.c | 3
-rw-r--r--  drivers/crypto/atmel-aes.c | 8
-rw-r--r--  drivers/crypto/atmel-ecc.c | 1
-rw-r--r--  drivers/crypto/atmel-i2c.c | 6
-rw-r--r--  drivers/crypto/atmel-sha.c | 17
-rw-r--r--  drivers/crypto/atmel-sha204a.c | 41
-rw-r--r--  drivers/crypto/atmel-tdes.c | 8
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 9
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 17
-rw-r--r--  drivers/crypto/caam/caamhash.c | 16
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 6
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-xts.c | 6
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c | 5
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c | 5
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-rsa.c | 6
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-sha.c | 5
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 27
-rw-r--r--  drivers/crypto/ccree/cc_hash.c | 1
-rw-r--r--  drivers/crypto/hifn_795x.c | 6
-rw-r--r--  drivers/crypto/hisilicon/debugfs.c | 76
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 12
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 18
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 16
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_algs.c | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec.h | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 2
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 13
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 19
-rw-r--r--  drivers/crypto/img-hash.c | 24
-rw-r--r--  drivers/crypto/inside-secure/eip93/Kconfig | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-aead.c | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-aead.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-aes.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-cipher.c | 4
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-cipher.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-common.c | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-common.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-des.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-hash.c | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-hash.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-main.c | 18
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-main.h | 2
-rw-r--r--  drivers/crypto/inside-secure/eip93/eip93-regs.h | 4
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 8
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 8
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 149
-rw-r--r--  drivers/crypto/intel/iaa/iaa_crypto_main.c | 2
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 17
-rw-r--r--  drivers/crypto/intel/qat/Kconfig | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 21
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 15
-rw-r--r--  drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c | 130
-rw-r--r--  drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h | 20
-rw-r--r--  drivers/crypto/intel/qat/qat_6xxx/adf_drv.c | 37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 8
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 70
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_anti_rb.c | 66
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_anti_rb.h | 37
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_config.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 9
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c | 133
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw.h | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 15
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 540
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_req.h | 9
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c | 165
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h | 13
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_compression.c | 23
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_hal.c | 27
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 25
-rw-r--r--  drivers/crypto/marvell/cesa/hash.c | 3
-rw-r--r--  drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c | 8
-rw-r--r--  drivers/crypto/nx/nx-842.c | 10
-rw-r--r--  drivers/crypto/nx/nx-842.h | 6
-rw-r--r--  drivers/crypto/omap-sham.c | 21
-rw-r--r--  drivers/crypto/qce/aead.c | 22
-rw-r--r--  drivers/crypto/qce/common.c | 12
-rw-r--r--  drivers/crypto/qce/sha.c | 6
-rw-r--r--  drivers/crypto/qce/skcipher.c | 6
-rw-r--r--  drivers/crypto/s5p-sss.c | 27
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 16
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 16
-rw-r--r--  drivers/crypto/talitos.c | 254
-rw-r--r--  drivers/crypto/tegra/tegra-se-aes.c | 9
-rw-r--r--  drivers/crypto/tegra/tegra-se-hash.c | 3
-rw-r--r--  drivers/crypto/ti/Kconfig | 4
-rw-r--r--  drivers/crypto/ti/dthev2-aes.c | 899
-rw-r--r--  drivers/crypto/ti/dthev2-common.c | 19
-rw-r--r--  drivers/crypto/ti/dthev2-common.h | 27
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 3
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 11
-rw-r--r--  include/crypto/acompress.h | 5
-rw-r--r--  include/crypto/cryptd.h | 33
-rw-r--r--  include/crypto/des.h | 8
-rw-r--r--  include/crypto/internal/acompress.h | 1
-rw-r--r--  include/crypto/internal/ecc.h | 22
-rw-r--r--  include/crypto/internal/geniv.h | 2
-rw-r--r--  include/crypto/internal/scompress.h | 1
-rw-r--r--  include/crypto/internal/simd.h | 19
-rw-r--r--  include/crypto/rng.h | 25
-rw-r--r--  include/crypto/skcipher.h | 1
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/hisi_acc_qm.h | 14
-rw-r--r--  include/linux/hw_random.h | 2
-rw-r--r--  include/linux/padata.h | 8
-rw-r--r--  include/linux/printk.h | 13
-rw-r--r--  include/linux/rhashtable.h | 13
-rw-r--r--  include/uapi/linux/psp-sev.h | 2
-rw-r--r--  kernel/padata.c | 130
-rw-r--r--  net/tipc/crypto.c | 13
166 files changed, 5166 insertions, 3799 deletions
diff --git a/Documentation/ABI/testing/debugfs-hisi-hpre b/Documentation/ABI/testing/debugfs-hisi-hpre
index 29fb7d5ffc69..5a137f701eea 100644
--- a/Documentation/ABI/testing/debugfs-hisi-hpre
+++ b/Documentation/ABI/testing/debugfs-hisi-hpre
@@ -50,6 +50,13 @@ Description: Dump debug registers from the QM.
Available for PF and VF in host. VF in guest currently only
has one debug register.
+What: /sys/kernel/debug/hisi_hpre/<bdf>/dev_usage
+Date: Mar 2026
+Contact: linux-crypto@vger.kernel.org
+Description: Query the real-time bandwidth usage of the device.
+ Returns the bandwidth usage of each channel on the device.
+ The returned values are percentages.
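+
+ Example usage (the output below is illustrative only; the
+ exact per-channel format is defined by the driver)::
+
+ # cat /sys/kernel/debug/hisi_hpre/<bdf>/dev_usage
+ 0 15%
+ 1 3%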
+
What: /sys/kernel/debug/hisi_hpre/<bdf>/qm/current_q
Date: Sep 2019
Contact: linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-hisi-sec b/Documentation/ABI/testing/debugfs-hisi-sec
index 82bf4a0dc7f7..676e2dc2de8d 100644
--- a/Documentation/ABI/testing/debugfs-hisi-sec
+++ b/Documentation/ABI/testing/debugfs-hisi-sec
@@ -24,6 +24,13 @@ Description: The <bdf> is related the function for PF and VF.
1/1000~1000/1000 of total QoS. The driver reading alg_qos to
get related QoS in the host and VM, Such as "cat alg_qos".
+What: /sys/kernel/debug/hisi_sec2/<bdf>/dev_usage
+Date: Mar 2026
+Contact: linux-crypto@vger.kernel.org
+Description: Query the real-time bandwidth usage of the device.
+ Returns the bandwidth usage of each channel on the device.
+ The returned values are percentages.
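+
+ Example usage (the output below is illustrative only; the
+ exact per-channel format is defined by the driver)::
+
+ # cat /sys/kernel/debug/hisi_sec2/<bdf>/dev_usage
+ 0 15%
+ 1 3%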
+
What: /sys/kernel/debug/hisi_sec2/<bdf>/qm/qm_regs
Date: Oct 2019
Contact: linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/debugfs-hisi-zip b/Documentation/ABI/testing/debugfs-hisi-zip
index 0abd65d27e9b..46bf47bf6b42 100644
--- a/Documentation/ABI/testing/debugfs-hisi-zip
+++ b/Documentation/ABI/testing/debugfs-hisi-zip
@@ -36,6 +36,13 @@ Description: The <bdf> is related the function for PF and VF.
1/1000~1000/1000 of total QoS. The driver reading alg_qos to
get related QoS in the host and VM, Such as "cat alg_qos".
+What: /sys/kernel/debug/hisi_zip/<bdf>/dev_usage
+Date: Mar 2026
+Contact: linux-crypto@vger.kernel.org
+Description: Query the real-time bandwidth usage of the device.
+ Returns the bandwidth usage of each channel on the device.
+ The returned values are percentages.
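+
+ Example usage (the output below is illustrative only; the
+ exact per-channel format is defined by the driver)::
+
+ # cat /sys/kernel/debug/hisi_zip/<bdf>/dev_usage
+ 0 15%
+ 1 3%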
+
What: /sys/kernel/debug/hisi_zip/<bdf>/qm/regs
Date: Nov 2018
Contact: linux-crypto@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-driver-qat_svn b/Documentation/ABI/testing/sysfs-driver-qat_svn
new file mode 100644
index 000000000000..3832b523dcb0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-qat_svn
@@ -0,0 +1,114 @@
+What: /sys/bus/pci/devices/<BDF>/qat_svn/
+Date: June 2026
+KernelVersion: 7.1
+Contact: qat-linux@intel.com
+Description: Directory containing Security Version Number (SVN) attributes for
+ the Anti-Rollback (ARB) feature. The ARB feature prevents downloading
+ older firmware versions to the acceleration device.
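+
+ Example usage (listing the attributes documented below, on a
+ device that supports Anti-Rollback)::
+
+ # ls /sys/bus/pci/devices/<BDF>/qat_svn/
+ active  commit  enforced_min  permanent_min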
+
+What: /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
+Date: June 2026
+KernelVersion: 7.1
+Contact: qat-linux@intel.com
+Description:
+ (RO) Reports the minimum allowed firmware SVN.
+
+ Returns an integer greater than zero. Firmware with SVN lower than
+ this value is rejected.
+
+ A write to qat_svn/commit will update this value. The update is not
+ persistent across reboot; on reboot, this value is reset from
+ qat_svn/permanent_min.
+
+ Example usage::
+
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
+ 2
+
+ This attribute is available only on devices that support
+ Anti-Rollback.
+
+What: /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
+Date: June 2026
+KernelVersion: 7.1
+Contact: qat-linux@intel.com
+Description:
+ (RO) Reports the persistent minimum SVN used to initialize
+ qat_svn/enforced_min on each reboot.
+
+ Returns an integer greater than zero. A write to qat_svn/commit
+ may update this value, depending on platform/BIOS settings.
+
+ Example usage::
+
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
+ 3
+
+ This attribute is available only on devices that support
+ Anti-Rollback.
+
+What: /sys/bus/pci/devices/<BDF>/qat_svn/active
+Date: June 2026
+KernelVersion: 7.1
+Contact: qat-linux@intel.com
+Description:
+ (RO) Reports the SVN of the currently active firmware image.
+
+ Returns an integer greater than zero.
+
+ Example usage::
+
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/active
+ 2
+
+ This attribute is available only on devices that support
+ Anti-Rollback.
+
+What: /sys/bus/pci/devices/<BDF>/qat_svn/commit
+Date: June 2026
+KernelVersion: 7.1
+Contact: qat-linux@intel.com
+Description:
+ (WO) Commits the currently active SVN as the minimum allowed SVN.
+
+ Writing 1 sets qat_svn/enforced_min to the value of qat_svn/active,
+ preventing future firmware loads with a lower SVN.
+
+ Depending on platform/BIOS settings, a commit may also update
+ qat_svn/permanent_min.
+
+ Note that on reboot, qat_svn/enforced_min reverts to
+ qat_svn/permanent_min.
+
+ Use this attribute with caution, and only when it is
+ necessary to set a new minimum SVN for the firmware.
+
+ Before committing an SVN update, check the current values of
+ qat_svn/active, qat_svn/enforced_min and qat_svn/permanent_min
+ to confirm that the commit will have the intended effect.
+
+ Writing any value other than '1' to the file results in an
+ error and has no effect.
+
+ Example usage::
+
+ ## Read current values
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
+ 2
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
+ 2
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/active
+ 3
+
+ ## Commit active SVN
+ # echo 1 > /sys/bus/pci/devices/<BDF>/qat_svn/commit
+
+ ## Read updated values
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/enforced_min
+ 3
+ # cat /sys/bus/pci/devices/<BDF>/qat_svn/permanent_min
+ 3
+
+ This attribute is available only on devices that support
+ Anti-Rollback.
diff --git a/Documentation/crypto/userspace-if.rst b/Documentation/crypto/userspace-if.rst
index 8158b363cd98..021759198fe7 100644
--- a/Documentation/crypto/userspace-if.rst
+++ b/Documentation/crypto/userspace-if.rst
@@ -23,7 +23,7 @@ user space, however. This includes the difference between synchronous
and asynchronous invocations. The user space API call is fully
synchronous.
-[1] https://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi/index.html
User Space API General Remarks
------------------------------
@@ -406,4 +406,4 @@ Please see [1] for libkcapi which provides an easy-to-use wrapper around
the aforementioned Netlink kernel interface. [1] also contains a test
application that invokes all libkcapi API calls.
-[1] https://www.chronox.de/libkcapi.html
+[1] https://www.chronox.de/libkcapi/index.html
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml
index 3dc6c5f89d32..a34d13e92c59 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml
+++ b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml
@@ -18,6 +18,7 @@ properties:
- items:
- enum:
- marvell,armada-3700-crypto
+ - mediatek,mt7981-crypto
- mediatek,mt7986-crypto
- const: inside-secure,safexcel-eip97ies
- const: inside-secure,safexcel-eip197b
@@ -80,7 +81,9 @@ allOf:
compatible:
not:
contains:
- const: mediatek,mt7986-crypto
+ enum:
+ - mediatek,mt7981-crypto
+ - mediatek,mt7986-crypto
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
index 061ff718b23d..876bf90ed96e 100644
--- a/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
+++ b/Documentation/devicetree/bindings/crypto/qcom,inline-crypto-engine.yaml
@@ -13,6 +13,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,eliza-inline-crypto-engine
- qcom,kaanapali-inline-crypto-engine
- qcom,milos-inline-crypto-engine
- qcom,qcs8300-inline-crypto-engine
@@ -31,6 +32,11 @@ properties:
clocks:
maxItems: 1
+ operating-points-v2: true
+
+ opp-table:
+ type: object
+
required:
- compatible
- reg
@@ -47,5 +53,26 @@ examples:
"qcom,inline-crypto-engine";
reg = <0x01d88000 0x8000>;
clocks = <&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
+
+ operating-points-v2 = <&ice_opp_table>;
+
+ ice_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ opp-100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-201500000 {
+ opp-hz = /bits/ 64 <201500000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-403000000 {
+ opp-hz = /bits/ 64 <403000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
};
...
diff --git a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
index f78614100ea8..3628251b8c51 100644
--- a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
+++ b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
@@ -19,6 +19,7 @@ properties:
- microchip,sam9x60-trng
- items:
- enum:
+ - microchip,lan9691-trng
- microchip,sama7g5-trng
- const: atmel,at91sam9g45-trng
- items:
diff --git a/MAINTAINERS b/MAINTAINERS
index 9ac45cdd7c43..447189411512 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2909,7 +2909,6 @@ F: include/linux/soc/ixp4xx/qmgr.h
ARM/INTEL KEEMBAY ARCHITECTURE
M: Paul J. Murphy <paul.j.murphy@intel.com>
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/arm/intel,keembay.yaml
F: arch/arm64/boot/dts/intel/keembay-evm.dts
@@ -6838,12 +6837,6 @@ L: linux-crypto@vger.kernel.org
S: Maintained
F: tools/crypto/tcrypt/tcrypt_speed_compare.py
-CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
-M: Neil Horman <nhorman@tuxdriver.com>
-L: linux-crypto@vger.kernel.org
-S: Maintained
-F: crypto/rng.c
-
CS3308 MEDIA DRIVER
M: Hans Verkuil <hverkuil@kernel.org>
L: linux-media@vger.kernel.org
@@ -12956,7 +12949,6 @@ F: drivers/dma/ioat*
INTEL IAA CRYPTO DRIVER
M: Kristen Accardi <kristen.c.accardi@intel.com>
M: Vinicius Costa Gomes <vinicius.gomes@intel.com>
-M: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst
@@ -13064,8 +13056,7 @@ F: Documentation/devicetree/bindings/display/intel,keembay-display.yaml
F: drivers/gpu/drm/kmb/
INTEL KEEM BAY OCS AES/SM4 CRYPTO DRIVER
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-aes.yaml
F: drivers/crypto/intel/keembay/Kconfig
F: drivers/crypto/intel/keembay/Makefile
@@ -13074,7 +13065,6 @@ F: drivers/crypto/intel/keembay/ocs-aes.c
F: drivers/crypto/intel/keembay/ocs-aes.h
INTEL KEEM BAY OCS ECC CRYPTO DRIVER
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Prabhjot Khurana <prabhjot.khurana@intel.com>
M: Mark Gross <mgross@linux.intel.com>
S: Maintained
@@ -13084,7 +13074,6 @@ F: drivers/crypto/intel/keembay/Makefile
F: drivers/crypto/intel/keembay/keembay-ocs-ecc.c
INTEL KEEM BAY OCS HCU CRYPTO DRIVER
-M: Daniele Alessandrelli <daniele.alessandrelli@intel.com>
M: Declan Murphy <declan.murphy@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/crypto/intel,keembay-ocs-hcu.yaml
@@ -17295,6 +17284,12 @@ S: Supported
F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml
F: drivers/spi/spi-at91-usart.c
+MICROCHIP ATSHA204A DRIVER
+M: Thorsten Blum <thorsten.blum@linux.dev>
+L: linux-crypto@vger.kernel.org
+S: Maintained
+F: drivers/crypto/atmel-sha204a.c
+
MICROCHIP AUDIO ASOC DRIVERS
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
M: Andrei Simion <andrei.simion@microchip.com>
@@ -17314,9 +17309,10 @@ F: Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
F: drivers/media/platform/microchip/microchip-csi2dc.c
MICROCHIP ECC DRIVER
+M: Thorsten Blum <thorsten.blum@linux.dev>
L: linux-crypto@vger.kernel.org
-S: Orphan
-F: drivers/crypto/atmel-ecc.*
+S: Maintained
+F: drivers/crypto/atmel-ecc.c
MICROCHIP EIC DRIVER
M: Claudiu Beznea <claudiu.beznea@tuxon.dev>
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 2ad83fab2b45..34b5ea7885f5 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -808,7 +808,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 5e3e2fe31b6b..d89c988f33ea 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -793,7 +793,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_HMAC_S390=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index ee83052dbc15..00051d27db95 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -21,22 +21,6 @@ config CRYPTO_AES_S390
key sizes and XTS mode is hardware accelerated for 256 and
512 bit keys.
-config CRYPTO_DES_S390
- tristate "Ciphers: DES and Triple DES EDE, modes: ECB, CBC, CTR"
- select CRYPTO_ALGAPI
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- help
- Block ciphers: DES (FIPS 46-2) cipher algorithm
- Block ciphers: Triple DES EDE (FIPS 46-3) cipher algorithm
- Length-preserving ciphers: DES with ECB, CBC, and CTR modes
- Length-preserving ciphers: Triple DES EDED with ECB, CBC, and CTR modes
-
- Architecture: s390
-
- As of z990 the ECB and CBC mode are hardware accelerated.
- As of z196 the CTR mode is hardware accelerated.
-
config CRYPTO_HMAC_S390
tristate "Keyed-hash message authentication code: HMAC"
select CRYPTO_HASH
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 4449c1b19ef5..48aeb0c0ffbd 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -3,7 +3,6 @@
# Cryptographic API
#
-obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
obj-$(CONFIG_S390_PRNG) += prng.o
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
deleted file mode 100644
index 8e75b83a5ddc..000000000000
--- a/arch/s390/crypto/des_s390.c
+++ /dev/null
@@ -1,502 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Cryptographic API.
- *
- * s390 implementation of the DES Cipher Algorithm.
- *
- * Copyright IBM Corp. 2003, 2011
- * Author(s): Thomas Spatzier
- * Jan Glauber (jan.glauber@de.ibm.com)
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cpufeature.h>
-#include <linux/crypto.h>
-#include <linux/fips.h>
-#include <linux/mutex.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-#include <asm/cpacf.h>
-
-#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
-
-static u8 *ctrblk;
-static DEFINE_MUTEX(ctrblk_lock);
-
-static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
-
-struct s390_des_ctx {
- u8 iv[DES_BLOCK_SIZE];
- u8 key[DES3_KEY_SIZE];
-};
-
-static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = crypto_des_verify_key(tfm, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, key_len);
- return 0;
-}
-
-static int des_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- return des_setkey(crypto_skcipher_tfm(tfm), key, key_len);
-}
-
-static void s390_des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cpacf_km(CPACF_KM_DEA, ctx->key, out, in, DES_BLOCK_SIZE);
-}
-
-static void s390_des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cpacf_km(CPACF_KM_DEA | CPACF_DECRYPT,
- ctx->key, out, in, DES_BLOCK_SIZE);
-}
-
-static struct crypto_alg des_alg = {
- .cra_name = "des",
- .cra_driver_name = "des-s390",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES_KEY_SIZE,
- .cia_max_keysize = DES_KEY_SIZE,
- .cia_setkey = des_setkey,
- .cia_encrypt = s390_des_encrypt,
- .cia_decrypt = s390_des_decrypt,
- }
- }
-};
-
-static int ecb_desall_crypt(struct skcipher_request *req, unsigned long fc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes, n;
- int ret;
-
- ret = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) != 0) {
- /* only use complete blocks */
- n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_km(fc, ctx->key, walk.dst.virt.addr,
- walk.src.virt.addr, n);
- ret = skcipher_walk_done(&walk, nbytes - n);
- }
- return ret;
-}
-
-static int cbc_desall_crypt(struct skcipher_request *req, unsigned long fc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes, n;
- int ret;
- struct {
- u8 iv[DES_BLOCK_SIZE];
- u8 key[DES3_KEY_SIZE];
- } param;
-
- ret = skcipher_walk_virt(&walk, req, false);
- if (ret)
- return ret;
- memcpy(param.iv, walk.iv, DES_BLOCK_SIZE);
- memcpy(param.key, ctx->key, DES3_KEY_SIZE);
- while ((nbytes = walk.nbytes) != 0) {
- /* only use complete blocks */
- n = nbytes & ~(DES_BLOCK_SIZE - 1);
- cpacf_kmc(fc, &param, walk.dst.virt.addr,
- walk.src.virt.addr, n);
- memcpy(walk.iv, param.iv, DES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes - n);
- }
- return ret;
-}
-
-static int ecb_des_encrypt(struct skcipher_request *req)
-{
- return ecb_desall_crypt(req, CPACF_KM_DEA);
-}
-
-static int ecb_des_decrypt(struct skcipher_request *req)
-{
- return ecb_desall_crypt(req, CPACF_KM_DEA | CPACF_DECRYPT);
-}
-
-static struct skcipher_alg ecb_des_alg = {
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-s390",
- .base.cra_priority = 400, /* combo: des + ecb */
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_setkey_skcipher,
- .encrypt = ecb_des_encrypt,
- .decrypt = ecb_des_decrypt,
-};
-
-static int cbc_des_encrypt(struct skcipher_request *req)
-{
- return cbc_desall_crypt(req, CPACF_KMC_DEA);
-}
-
-static int cbc_des_decrypt(struct skcipher_request *req)
-{
- return cbc_desall_crypt(req, CPACF_KMC_DEA | CPACF_DECRYPT);
-}
-
-static struct skcipher_alg cbc_des_alg = {
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-s390",
- .base.cra_priority = 400, /* combo: des + cbc */
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey_skcipher,
- .encrypt = cbc_des_encrypt,
- .decrypt = cbc_des_decrypt,
-};
-
-/*
- * RFC2451:
- *
- * For DES-EDE3, there is no known need to reject weak or
- * complementation keys. Any weakness is obviated by the use of
- * multiple keys.
- *
- * However, if the first two or last two independent 64-bit keys are
- * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
- * same as DES. Implementers MUST reject keys that exhibit this
- * property.
- *
- * In fips mode additionally check for all 3 keys are unique.
- *
- */
-static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- err = crypto_des3_ede_verify_key(tfm, key);
- if (err)
- return err;
-
- memcpy(ctx->key, key, key_len);
- return 0;
-}
-
-static int des3_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- return des3_setkey(crypto_skcipher_tfm(tfm), key, key_len);
-}
-
-static void des3_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cpacf_km(CPACF_KM_TDEA_192, ctx->key, dst, src, DES_BLOCK_SIZE);
-}
-
-static void des3_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
-
- cpacf_km(CPACF_KM_TDEA_192 | CPACF_DECRYPT,
- ctx->key, dst, src, DES_BLOCK_SIZE);
-}
-
-static struct crypto_alg des3_alg = {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-s390",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_des_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES3_KEY_SIZE,
- .cia_max_keysize = DES3_KEY_SIZE,
- .cia_setkey = des3_setkey,
- .cia_encrypt = des3_encrypt,
- .cia_decrypt = des3_decrypt,
- }
- }
-};
-
-static int ecb_des3_encrypt(struct skcipher_request *req)
-{
- return ecb_desall_crypt(req, CPACF_KM_TDEA_192);
-}
-
-static int ecb_des3_decrypt(struct skcipher_request *req)
-{
- return ecb_desall_crypt(req, CPACF_KM_TDEA_192 | CPACF_DECRYPT);
-}
-
-static struct skcipher_alg ecb_des3_alg = {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3_ede-s390",
- .base.cra_priority = 400, /* combo: des3 + ecb */
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .setkey = des3_setkey_skcipher,
- .encrypt = ecb_des3_encrypt,
- .decrypt = ecb_des3_decrypt,
-};
-
-static int cbc_des3_encrypt(struct skcipher_request *req)
-{
- return cbc_desall_crypt(req, CPACF_KMC_TDEA_192);
-}
-
-static int cbc_des3_decrypt(struct skcipher_request *req)
-{
- return cbc_desall_crypt(req, CPACF_KMC_TDEA_192 | CPACF_DECRYPT);
-}
-
-static struct skcipher_alg cbc_des3_alg = {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3_ede-s390",
- .base.cra_priority = 400, /* combo: des3 + cbc */
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey_skcipher,
- .encrypt = cbc_des3_encrypt,
- .decrypt = cbc_des3_decrypt,
-};
-
-static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
-{
- unsigned int i, n;
-
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
- memcpy(ctrptr, iv, DES_BLOCK_SIZE);
- for (i = (n / DES_BLOCK_SIZE) - 1; i > 0; i--) {
- memcpy(ctrptr + DES_BLOCK_SIZE, ctrptr, DES_BLOCK_SIZE);
- crypto_inc(ctrptr + DES_BLOCK_SIZE, DES_BLOCK_SIZE);
- ctrptr += DES_BLOCK_SIZE;
- }
- return n;
-}
-
-static int ctr_desall_crypt(struct skcipher_request *req, unsigned long fc)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct s390_des_ctx *ctx = crypto_skcipher_ctx(tfm);
- u8 buf[DES_BLOCK_SIZE], *ctrptr;
- struct skcipher_walk walk;
- unsigned int n, nbytes;
- int ret, locked;
-
- locked = mutex_trylock(&ctrblk_lock);
-
- ret = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = walk.nbytes) >= DES_BLOCK_SIZE) {
- n = DES_BLOCK_SIZE;
- if (nbytes >= 2*DES_BLOCK_SIZE && locked)
- n = __ctrblk_init(ctrblk, walk.iv, nbytes);
- ctrptr = (n > DES_BLOCK_SIZE) ? ctrblk : walk.iv;
- cpacf_kmctr(fc, ctx->key, walk.dst.virt.addr,
- walk.src.virt.addr, n, ctrptr);
- if (ctrptr == ctrblk)
- memcpy(walk.iv, ctrptr + n - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(walk.iv, DES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes - n);
- }
- if (locked)
- mutex_unlock(&ctrblk_lock);
- /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
- if (nbytes) {
- cpacf_kmctr(fc, ctx->key, buf, walk.src.virt.addr,
- DES_BLOCK_SIZE, walk.iv);
- memcpy(walk.dst.virt.addr, buf, nbytes);
- crypto_inc(walk.iv, DES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, 0);
- }
- return ret;
-}
-
-static int ctr_des_crypt(struct skcipher_request *req)
-{
- return ctr_desall_crypt(req, CPACF_KMCTR_DEA);
-}
-
-static struct skcipher_alg ctr_des_alg = {
- .base.cra_name = "ctr(des)",
- .base.cra_driver_name = "ctr-des-s390",
- .base.cra_priority = 400, /* combo: des + ctr */
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey_skcipher,
- .encrypt = ctr_des_crypt,
- .decrypt = ctr_des_crypt,
- .chunksize = DES_BLOCK_SIZE,
-};
-
-static int ctr_des3_crypt(struct skcipher_request *req)
-{
- return ctr_desall_crypt(req, CPACF_KMCTR_TDEA_192);
-}
-
-static struct skcipher_alg ctr_des3_alg = {
- .base.cra_name = "ctr(des3_ede)",
- .base.cra_driver_name = "ctr-des3_ede-s390",
- .base.cra_priority = 400, /* combo: des3 + ede */
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct s390_des_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_KEY_SIZE,
- .max_keysize = DES3_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_setkey_skcipher,
- .encrypt = ctr_des3_crypt,
- .decrypt = ctr_des3_crypt,
- .chunksize = DES_BLOCK_SIZE,
-};
-
-static struct crypto_alg *des_s390_algs_ptr[2];
-static int des_s390_algs_num;
-static struct skcipher_alg *des_s390_skciphers_ptr[6];
-static int des_s390_skciphers_num;
-
-static int des_s390_register_alg(struct crypto_alg *alg)
-{
- int ret;
-
- ret = crypto_register_alg(alg);
- if (!ret)
- des_s390_algs_ptr[des_s390_algs_num++] = alg;
- return ret;
-}
-
-static int des_s390_register_skcipher(struct skcipher_alg *alg)
-{
- int ret;
-
- ret = crypto_register_skcipher(alg);
- if (!ret)
- des_s390_skciphers_ptr[des_s390_skciphers_num++] = alg;
- return ret;
-}
-
-static void des_s390_exit(void)
-{
- while (des_s390_algs_num--)
- crypto_unregister_alg(des_s390_algs_ptr[des_s390_algs_num]);
- while (des_s390_skciphers_num--)
- crypto_unregister_skcipher(des_s390_skciphers_ptr[des_s390_skciphers_num]);
- if (ctrblk)
- free_page((unsigned long) ctrblk);
-}
-
-static int __init des_s390_init(void)
-{
- int ret;
-
- /* Query available functions for KM, KMC and KMCTR */
- cpacf_query(CPACF_KM, &km_functions);
- cpacf_query(CPACF_KMC, &kmc_functions);
- cpacf_query(CPACF_KMCTR, &kmctr_functions);
-
- if (cpacf_test_func(&km_functions, CPACF_KM_DEA)) {
- ret = des_s390_register_alg(&des_alg);
- if (ret)
- goto out_err;
- ret = des_s390_register_skcipher(&ecb_des_alg);
- if (ret)
- goto out_err;
- }
- if (cpacf_test_func(&kmc_functions, CPACF_KMC_DEA)) {
- ret = des_s390_register_skcipher(&cbc_des_alg);
- if (ret)
- goto out_err;
- }
- if (cpacf_test_func(&km_functions, CPACF_KM_TDEA_192)) {
- ret = des_s390_register_alg(&des3_alg);
- if (ret)
- goto out_err;
- ret = des_s390_register_skcipher(&ecb_des3_alg);
- if (ret)
- goto out_err;
- }
- if (cpacf_test_func(&kmc_functions, CPACF_KMC_TDEA_192)) {
- ret = des_s390_register_skcipher(&cbc_des3_alg);
- if (ret)
- goto out_err;
- }
-
- if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA) ||
- cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
- ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
- if (!ctrblk) {
- ret = -ENOMEM;
- goto out_err;
- }
- }
-
- if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_DEA)) {
- ret = des_s390_register_skcipher(&ctr_des_alg);
- if (ret)
- goto out_err;
- }
- if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_TDEA_192)) {
- ret = des_s390_register_skcipher(&ctr_des3_alg);
- if (ret)
- goto out_err;
- }
-
- return 0;
-out_err:
- des_s390_exit();
- return ret;
-}
-
-module_cpu_feature_match(S390_CPU_FEATURE_MSA, des_s390_init);
-module_exit(des_s390_exit);
-
-MODULE_ALIAS_CRYPTO("des");
-MODULE_ALIAS_CRYPTO("des3_ede");
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/sparc/crypto/Kconfig b/arch/sparc/crypto/Kconfig
index c1932ce46c7f..8db3f6eea5dc 100644
--- a/arch/sparc/crypto/Kconfig
+++ b/arch/sparc/crypto/Kconfig
@@ -2,20 +2,6 @@
menu "Accelerated Cryptographic Algorithms for CPU (sparc64)"
-config CRYPTO_DES_SPARC64
- tristate "Ciphers: DES and Triple DES EDE, modes: ECB/CBC"
- depends on SPARC64
- select CRYPTO_ALGAPI
- select CRYPTO_LIB_DES
- select CRYPTO_SKCIPHER
- help
- Block cipher: DES (FIPS 46-2) cipher algorithm
- Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
- Length-preserving ciphers: DES with ECB and CBC modes
- Length-preserving ciphers: Tripe DES EDE with ECB and CBC modes
-
- Architecture: sparc64
-
config CRYPTO_AES_SPARC64
tristate "Ciphers: AES, modes: ECB, CBC, CTR"
depends on SPARC64
diff --git a/arch/sparc/crypto/Makefile b/arch/sparc/crypto/Makefile
index cdf9f4b3efbb..ab4a7765babf 100644
--- a/arch/sparc/crypto/Makefile
+++ b/arch/sparc/crypto/Makefile
@@ -4,9 +4,7 @@
#
obj-$(CONFIG_CRYPTO_AES_SPARC64) += aes-sparc64.o
-obj-$(CONFIG_CRYPTO_DES_SPARC64) += des-sparc64.o
obj-$(CONFIG_CRYPTO_CAMELLIA_SPARC64) += camellia-sparc64.o
aes-sparc64-y := aes_glue.o
-des-sparc64-y := des_asm.o des_glue.o
camellia-sparc64-y := camellia_asm.o camellia_glue.o
diff --git a/arch/sparc/crypto/des_asm.S b/arch/sparc/crypto/des_asm.S
deleted file mode 100644
index d534446cbef9..000000000000
--- a/arch/sparc/crypto/des_asm.S
+++ /dev/null
@@ -1,419 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/opcodes.h>
-#include <asm/visasm.h>
-
- .align 32
-ENTRY(des_sparc64_key_expand)
- /* %o0=input_key, %o1=output_key */
- VISEntryHalf
- ld [%o0 + 0x00], %f0
- ld [%o0 + 0x04], %f1
- DES_KEXPAND(0, 0, 0)
- DES_KEXPAND(0, 1, 2)
- DES_KEXPAND(2, 3, 6)
- DES_KEXPAND(2, 2, 4)
- DES_KEXPAND(6, 3, 10)
- DES_KEXPAND(6, 2, 8)
- DES_KEXPAND(10, 3, 14)
- DES_KEXPAND(10, 2, 12)
- DES_KEXPAND(14, 1, 16)
- DES_KEXPAND(16, 3, 20)
- DES_KEXPAND(16, 2, 18)
- DES_KEXPAND(20, 3, 24)
- DES_KEXPAND(20, 2, 22)
- DES_KEXPAND(24, 3, 28)
- DES_KEXPAND(24, 2, 26)
- DES_KEXPAND(28, 1, 30)
- std %f0, [%o1 + 0x00]
- std %f2, [%o1 + 0x08]
- std %f4, [%o1 + 0x10]
- std %f6, [%o1 + 0x18]
- std %f8, [%o1 + 0x20]
- std %f10, [%o1 + 0x28]
- std %f12, [%o1 + 0x30]
- std %f14, [%o1 + 0x38]
- std %f16, [%o1 + 0x40]
- std %f18, [%o1 + 0x48]
- std %f20, [%o1 + 0x50]
- std %f22, [%o1 + 0x58]
- std %f24, [%o1 + 0x60]
- std %f26, [%o1 + 0x68]
- std %f28, [%o1 + 0x70]
- std %f30, [%o1 + 0x78]
- retl
- VISExitHalf
-ENDPROC(des_sparc64_key_expand)
-
- .align 32
-ENTRY(des_sparc64_crypt)
- /* %o0=key, %o1=input, %o2=output */
- VISEntry
- ldd [%o1 + 0x00], %f32
- ldd [%o0 + 0x00], %f0
- ldd [%o0 + 0x08], %f2
- ldd [%o0 + 0x10], %f4
- ldd [%o0 + 0x18], %f6
- ldd [%o0 + 0x20], %f8
- ldd [%o0 + 0x28], %f10
- ldd [%o0 + 0x30], %f12
- ldd [%o0 + 0x38], %f14
- ldd [%o0 + 0x40], %f16
- ldd [%o0 + 0x48], %f18
- ldd [%o0 + 0x50], %f20
- ldd [%o0 + 0x58], %f22
- ldd [%o0 + 0x60], %f24
- ldd [%o0 + 0x68], %f26
- ldd [%o0 + 0x70], %f28
- ldd [%o0 + 0x78], %f30
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- DES_ROUND(4, 6, 32, 32)
- DES_ROUND(8, 10, 32, 32)
- DES_ROUND(12, 14, 32, 32)
- DES_ROUND(16, 18, 32, 32)
- DES_ROUND(20, 22, 32, 32)
- DES_ROUND(24, 26, 32, 32)
- DES_ROUND(28, 30, 32, 32)
- DES_IIP(32, 32)
- std %f32, [%o2 + 0x00]
- retl
- VISExit
-ENDPROC(des_sparc64_crypt)
-
- .align 32
-ENTRY(des_sparc64_load_keys)
- /* %o0=key */
- VISEntry
- ldd [%o0 + 0x00], %f0
- ldd [%o0 + 0x08], %f2
- ldd [%o0 + 0x10], %f4
- ldd [%o0 + 0x18], %f6
- ldd [%o0 + 0x20], %f8
- ldd [%o0 + 0x28], %f10
- ldd [%o0 + 0x30], %f12
- ldd [%o0 + 0x38], %f14
- ldd [%o0 + 0x40], %f16
- ldd [%o0 + 0x48], %f18
- ldd [%o0 + 0x50], %f20
- ldd [%o0 + 0x58], %f22
- ldd [%o0 + 0x60], %f24
- ldd [%o0 + 0x68], %f26
- ldd [%o0 + 0x70], %f28
- retl
- ldd [%o0 + 0x78], %f30
-ENDPROC(des_sparc64_load_keys)
-
- .align 32
-ENTRY(des_sparc64_ecb_crypt)
- /* %o0=input, %o1=output, %o2=len */
-1: ldd [%o0 + 0x00], %f32
- add %o0, 0x08, %o0
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- DES_ROUND(4, 6, 32, 32)
- DES_ROUND(8, 10, 32, 32)
- DES_ROUND(12, 14, 32, 32)
- DES_ROUND(16, 18, 32, 32)
- DES_ROUND(20, 22, 32, 32)
- DES_ROUND(24, 26, 32, 32)
- DES_ROUND(28, 30, 32, 32)
- DES_IIP(32, 32)
- std %f32, [%o1 + 0x00]
- subcc %o2, 0x08, %o2
- bne,pt %icc, 1b
- add %o1, 0x08, %o1
- retl
- nop
-ENDPROC(des_sparc64_ecb_crypt)
-
- .align 32
-ENTRY(des_sparc64_cbc_encrypt)
- /* %o0=input, %o1=output, %o2=len, %o3=IV */
- ldd [%o3 + 0x00], %f32
-1: ldd [%o0 + 0x00], %f34
- fxor %f32, %f34, %f32
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- DES_ROUND(4, 6, 32, 32)
- DES_ROUND(8, 10, 32, 32)
- DES_ROUND(12, 14, 32, 32)
- DES_ROUND(16, 18, 32, 32)
- DES_ROUND(20, 22, 32, 32)
- DES_ROUND(24, 26, 32, 32)
- DES_ROUND(28, 30, 32, 32)
- DES_IIP(32, 32)
- std %f32, [%o1 + 0x00]
- add %o0, 0x08, %o0
- subcc %o2, 0x08, %o2
- bne,pt %icc, 1b
- add %o1, 0x08, %o1
- retl
- std %f32, [%o3 + 0x00]
-ENDPROC(des_sparc64_cbc_encrypt)
-
- .align 32
-ENTRY(des_sparc64_cbc_decrypt)
- /* %o0=input, %o1=output, %o2=len, %o3=IV */
- ldd [%o3 + 0x00], %f34
-1: ldd [%o0 + 0x00], %f36
- DES_IP(36, 32)
- DES_ROUND(0, 2, 32, 32)
- DES_ROUND(4, 6, 32, 32)
- DES_ROUND(8, 10, 32, 32)
- DES_ROUND(12, 14, 32, 32)
- DES_ROUND(16, 18, 32, 32)
- DES_ROUND(20, 22, 32, 32)
- DES_ROUND(24, 26, 32, 32)
- DES_ROUND(28, 30, 32, 32)
- DES_IIP(32, 32)
- fxor %f32, %f34, %f32
- fsrc2 %f36, %f34
- std %f32, [%o1 + 0x00]
- add %o0, 0x08, %o0
- subcc %o2, 0x08, %o2
- bne,pt %icc, 1b
- add %o1, 0x08, %o1
- retl
- std %f36, [%o3 + 0x00]
-ENDPROC(des_sparc64_cbc_decrypt)
-
- .align 32
-ENTRY(des3_ede_sparc64_crypt)
- /* %o0=key, %o1=input, %o2=output */
- VISEntry
- ldd [%o1 + 0x00], %f32
- ldd [%o0 + 0x00], %f0
- ldd [%o0 + 0x08], %f2
- ldd [%o0 + 0x10], %f4
- ldd [%o0 + 0x18], %f6
- ldd [%o0 + 0x20], %f8
- ldd [%o0 + 0x28], %f10
- ldd [%o0 + 0x30], %f12
- ldd [%o0 + 0x38], %f14
- ldd [%o0 + 0x40], %f16
- ldd [%o0 + 0x48], %f18
- ldd [%o0 + 0x50], %f20
- ldd [%o0 + 0x58], %f22
- ldd [%o0 + 0x60], %f24
- ldd [%o0 + 0x68], %f26
- ldd [%o0 + 0x70], %f28
- ldd [%o0 + 0x78], %f30
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- ldd [%o0 + 0x80], %f0
- ldd [%o0 + 0x88], %f2
- DES_ROUND(4, 6, 32, 32)
- ldd [%o0 + 0x90], %f4
- ldd [%o0 + 0x98], %f6
- DES_ROUND(8, 10, 32, 32)
- ldd [%o0 + 0xa0], %f8
- ldd [%o0 + 0xa8], %f10
- DES_ROUND(12, 14, 32, 32)
- ldd [%o0 + 0xb0], %f12
- ldd [%o0 + 0xb8], %f14
- DES_ROUND(16, 18, 32, 32)
- ldd [%o0 + 0xc0], %f16
- ldd [%o0 + 0xc8], %f18
- DES_ROUND(20, 22, 32, 32)
- ldd [%o0 + 0xd0], %f20
- ldd [%o0 + 0xd8], %f22
- DES_ROUND(24, 26, 32, 32)
- ldd [%o0 + 0xe0], %f24
- ldd [%o0 + 0xe8], %f26
- DES_ROUND(28, 30, 32, 32)
- ldd [%o0 + 0xf0], %f28
- ldd [%o0 + 0xf8], %f30
- DES_IIP(32, 32)
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- ldd [%o0 + 0x100], %f0
- ldd [%o0 + 0x108], %f2
- DES_ROUND(4, 6, 32, 32)
- ldd [%o0 + 0x110], %f4
- ldd [%o0 + 0x118], %f6
- DES_ROUND(8, 10, 32, 32)
- ldd [%o0 + 0x120], %f8
- ldd [%o0 + 0x128], %f10
- DES_ROUND(12, 14, 32, 32)
- ldd [%o0 + 0x130], %f12
- ldd [%o0 + 0x138], %f14
- DES_ROUND(16, 18, 32, 32)
- ldd [%o0 + 0x140], %f16
- ldd [%o0 + 0x148], %f18
- DES_ROUND(20, 22, 32, 32)
- ldd [%o0 + 0x150], %f20
- ldd [%o0 + 0x158], %f22
- DES_ROUND(24, 26, 32, 32)
- ldd [%o0 + 0x160], %f24
- ldd [%o0 + 0x168], %f26
- DES_ROUND(28, 30, 32, 32)
- ldd [%o0 + 0x170], %f28
- ldd [%o0 + 0x178], %f30
- DES_IIP(32, 32)
- DES_IP(32, 32)
- DES_ROUND(0, 2, 32, 32)
- DES_ROUND(4, 6, 32, 32)
- DES_ROUND(8, 10, 32, 32)
- DES_ROUND(12, 14, 32, 32)
- DES_ROUND(16, 18, 32, 32)
- DES_ROUND(20, 22, 32, 32)
- DES_ROUND(24, 26, 32, 32)
- DES_ROUND(28, 30, 32, 32)
- DES_IIP(32, 32)
-
- std %f32, [%o2 + 0x00]
- retl
- VISExit
-ENDPROC(des3_ede_sparc64_crypt)
-
- .align 32
-ENTRY(des3_ede_sparc64_load_keys)
- /* %o0=key */
- VISEntry
- ldd [%o0 + 0x00], %f0
- ldd [%o0 + 0x08], %f2
- ldd [%o0 + 0x10], %f4
- ldd [%o0 + 0x18], %f6
- ldd [%o0 + 0x20], %f8
- ldd [%o0 + 0x28], %f10
- ldd [%o0 + 0x30], %f12
- ldd [%o0 + 0x38], %f14
- ldd [%o0 + 0x40], %f16
- ldd [%o0 + 0x48], %f18
- ldd [%o0 + 0x50], %f20
- ldd [%o0 + 0x58], %f22
- ldd [%o0 + 0x60], %f24
- ldd [%o0 + 0x68], %f26
- ldd [%o0 + 0x70], %f28
- ldd [%o0 + 0x78], %f30
- ldd [%o0 + 0x80], %f32
- ldd [%o0 + 0x88], %f34
- ldd [%o0 + 0x90], %f36
- ldd [%o0 + 0x98], %f38
- ldd [%o0 + 0xa0], %f40
- ldd [%o0 + 0xa8], %f42
- ldd [%o0 + 0xb0], %f44
- ldd [%o0 + 0xb8], %f46
- ldd [%o0 + 0xc0], %f48
- ldd [%o0 + 0xc8], %f50
- ldd [%o0 + 0xd0], %f52
- ldd [%o0 + 0xd8], %f54
- ldd [%o0 + 0xe0], %f56
- retl
- ldd [%o0 + 0xe8], %f58
-ENDPROC(des3_ede_sparc64_load_keys)
-
-#define DES3_LOOP_BODY(X) \
- DES_IP(X, X) \
- DES_ROUND(0, 2, X, X) \
- DES_ROUND(4, 6, X, X) \
- DES_ROUND(8, 10, X, X) \
- DES_ROUND(12, 14, X, X) \
- DES_ROUND(16, 18, X, X) \
- ldd [%o0 + 0xf0], %f16; \
- ldd [%o0 + 0xf8], %f18; \
- DES_ROUND(20, 22, X, X) \
- ldd [%o0 + 0x100], %f20; \
- ldd [%o0 + 0x108], %f22; \
- DES_ROUND(24, 26, X, X) \
- ldd [%o0 + 0x110], %f24; \
- ldd [%o0 + 0x118], %f26; \
- DES_ROUND(28, 30, X, X) \
- ldd [%o0 + 0x120], %f28; \
- ldd [%o0 + 0x128], %f30; \
- DES_IIP(X, X) \
- DES_IP(X, X) \
- DES_ROUND(32, 34, X, X) \
- ldd [%o0 + 0x130], %f0; \
- ldd [%o0 + 0x138], %f2; \
- DES_ROUND(36, 38, X, X) \
- ldd [%o0 + 0x140], %f4; \
- ldd [%o0 + 0x148], %f6; \
- DES_ROUND(40, 42, X, X) \
- ldd [%o0 + 0x150], %f8; \
- ldd [%o0 + 0x158], %f10; \
- DES_ROUND(44, 46, X, X) \
- ldd [%o0 + 0x160], %f12; \
- ldd [%o0 + 0x168], %f14; \
- DES_ROUND(48, 50, X, X) \
- DES_ROUND(52, 54, X, X) \
- DES_ROUND(56, 58, X, X) \
- DES_ROUND(16, 18, X, X) \
- ldd [%o0 + 0x170], %f16; \
- ldd [%o0 + 0x178], %f18; \
- DES_IIP(X, X) \
- DES_IP(X, X) \
- DES_ROUND(20, 22, X, X) \
- ldd [%o0 + 0x50], %f20; \
- ldd [%o0 + 0x58], %f22; \
- DES_ROUND(24, 26, X, X) \
- ldd [%o0 + 0x60], %f24; \
- ldd [%o0 + 0x68], %f26; \
- DES_ROUND(28, 30, X, X) \
- ldd [%o0 + 0x70], %f28; \
- ldd [%o0 + 0x78], %f30; \
- DES_ROUND(0, 2, X, X) \
- ldd [%o0 + 0x00], %f0; \
- ldd [%o0 + 0x08], %f2; \
- DES_ROUND(4, 6, X, X) \
- ldd [%o0 + 0x10], %f4; \
- ldd [%o0 + 0x18], %f6; \
- DES_ROUND(8, 10, X, X) \
- ldd [%o0 + 0x20], %f8; \
- ldd [%o0 + 0x28], %f10; \
- DES_ROUND(12, 14, X, X) \
- ldd [%o0 + 0x30], %f12; \
- ldd [%o0 + 0x38], %f14; \
- DES_ROUND(16, 18, X, X) \
- ldd [%o0 + 0x40], %f16; \
- ldd [%o0 + 0x48], %f18; \
- DES_IIP(X, X)
-
- .align 32
-ENTRY(des3_ede_sparc64_ecb_crypt)
- /* %o0=key, %o1=input, %o2=output, %o3=len */
-1: ldd [%o1 + 0x00], %f60
- DES3_LOOP_BODY(60)
- std %f60, [%o2 + 0x00]
- add %o1, 0x08, %o1
- subcc %o3, 0x08, %o3
- bne,pt %icc, 1b
- add %o2, 0x08, %o2
- retl
- nop
-ENDPROC(des3_ede_sparc64_ecb_crypt)
-
- .align 32
-ENTRY(des3_ede_sparc64_cbc_encrypt)
- /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
- ldd [%o4 + 0x00], %f60
-1: ldd [%o1 + 0x00], %f62
- fxor %f60, %f62, %f60
- DES3_LOOP_BODY(60)
- std %f60, [%o2 + 0x00]
- add %o1, 0x08, %o1
- subcc %o3, 0x08, %o3
- bne,pt %icc, 1b
- add %o2, 0x08, %o2
- retl
- std %f60, [%o4 + 0x00]
-ENDPROC(des3_ede_sparc64_cbc_encrypt)
-
- .align 32
-ENTRY(des3_ede_sparc64_cbc_decrypt)
- /* %o0=key, %o1=input, %o2=output, %o3=len, %o4=IV */
- ldd [%o4 + 0x00], %f62
-1: ldx [%o1 + 0x00], %g1
- MOVXTOD_G1_F60
- DES3_LOOP_BODY(60)
- fxor %f62, %f60, %f60
- MOVXTOD_G1_F62
- std %f60, [%o2 + 0x00]
- add %o1, 0x08, %o1
- subcc %o3, 0x08, %o3
- bne,pt %icc, 1b
- add %o2, 0x08, %o2
- retl
- stx %g1, [%o4 + 0x00]
-ENDPROC(des3_ede_sparc64_cbc_decrypt)
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
deleted file mode 100644
index e50ec4cd57cd..000000000000
--- a/arch/sparc/crypto/des_glue.c
+++ /dev/null
@@ -1,482 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Glue code for DES encryption optimized for sparc64 crypto opcodes.
- *
- * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-
-#include <asm/fpumacro.h>
-#include <asm/opcodes.h>
-#include <asm/pstate.h>
-#include <asm/elf.h>
-
-struct des_sparc64_ctx {
- u64 encrypt_expkey[DES_EXPKEY_WORDS / 2];
- u64 decrypt_expkey[DES_EXPKEY_WORDS / 2];
-};
-
-struct des3_ede_sparc64_ctx {
- u64 encrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
- u64 decrypt_expkey[DES3_EDE_EXPKEY_WORDS / 2];
-};
-
-static void encrypt_to_decrypt(u64 *d, const u64 *e)
-{
- const u64 *s = e + (DES_EXPKEY_WORDS / 2) - 1;
- int i;
-
- for (i = 0; i < DES_EXPKEY_WORDS / 2; i++)
- *d++ = *s--;
-}
-
-extern void des_sparc64_key_expand(const u32 *input_key, u64 *key);
-
-static int des_set_key(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct des_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
- int err;
-
- /* Even though we have special instructions for key expansion,
- * we call des_verify_key() so that we don't have to write our own
- * weak key detection code.
- */
- err = crypto_des_verify_key(tfm, key);
- if (err)
- return err;
-
- des_sparc64_key_expand((const u32 *) key, &dctx->encrypt_expkey[0]);
- encrypt_to_decrypt(&dctx->decrypt_expkey[0], &dctx->encrypt_expkey[0]);
-
- return 0;
-}
-
-static int des_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- return des_set_key(crypto_skcipher_tfm(tfm), key, keylen);
-}
-
-extern void des_sparc64_crypt(const u64 *key, const u64 *input,
- u64 *output);
-
-static void sparc_des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
- const u64 *K = ctx->encrypt_expkey;
-
- des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
-}
-
-static void sparc_des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct des_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
- const u64 *K = ctx->decrypt_expkey;
-
- des_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
-}
-
-extern void des_sparc64_load_keys(const u64 *key);
-
-extern void des_sparc64_ecb_crypt(const u64 *input, u64 *output,
- unsigned int len);
-
-static int __ecb_crypt(struct skcipher_request *req, bool encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
-
- if (encrypt)
- des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
- else
- des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
- while ((nbytes = walk.nbytes) != 0) {
- des_sparc64_ecb_crypt(walk.src.virt.addr, walk.dst.virt.addr,
- round_down(nbytes, DES_BLOCK_SIZE));
- err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
- }
- fprs_write(0);
- return err;
-}
-
-static int ecb_encrypt(struct skcipher_request *req)
-{
- return __ecb_crypt(req, true);
-}
-
-static int ecb_decrypt(struct skcipher_request *req)
-{
- return __ecb_crypt(req, false);
-}
-
-extern void des_sparc64_cbc_encrypt(const u64 *input, u64 *output,
- unsigned int len, u64 *iv);
-
-extern void des_sparc64_cbc_decrypt(const u64 *input, u64 *output,
- unsigned int len, u64 *iv);
-
-static int __cbc_crypt(struct skcipher_request *req, bool encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct des_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
-
- if (encrypt)
- des_sparc64_load_keys(&ctx->encrypt_expkey[0]);
- else
- des_sparc64_load_keys(&ctx->decrypt_expkey[0]);
- while ((nbytes = walk.nbytes) != 0) {
- if (encrypt)
- des_sparc64_cbc_encrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes,
- DES_BLOCK_SIZE),
- walk.iv);
- else
- des_sparc64_cbc_decrypt(walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes,
- DES_BLOCK_SIZE),
- walk.iv);
- err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
- }
- fprs_write(0);
- return err;
-}
-
-static int cbc_encrypt(struct skcipher_request *req)
-{
- return __cbc_crypt(req, true);
-}
-
-static int cbc_decrypt(struct skcipher_request *req)
-{
- return __cbc_crypt(req, false);
-}
-
-static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
- u64 k1[DES_EXPKEY_WORDS / 2];
- u64 k2[DES_EXPKEY_WORDS / 2];
- u64 k3[DES_EXPKEY_WORDS / 2];
- int err;
-
- err = crypto_des3_ede_verify_key(tfm, key);
- if (err)
- return err;
-
- des_sparc64_key_expand((const u32 *)key, k1);
- key += DES_KEY_SIZE;
- des_sparc64_key_expand((const u32 *)key, k2);
- key += DES_KEY_SIZE;
- des_sparc64_key_expand((const u32 *)key, k3);
-
- memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1));
- encrypt_to_decrypt(&dctx->encrypt_expkey[DES_EXPKEY_WORDS / 2], &k2[0]);
- memcpy(&dctx->encrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
- &k3[0], sizeof(k3));
-
- encrypt_to_decrypt(&dctx->decrypt_expkey[0], &k3[0]);
- memcpy(&dctx->decrypt_expkey[DES_EXPKEY_WORDS / 2],
- &k2[0], sizeof(k2));
- encrypt_to_decrypt(&dctx->decrypt_expkey[(DES_EXPKEY_WORDS / 2) * 2],
- &k1[0]);
-
- return 0;
-}
-
-static int des3_ede_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- return des3_ede_set_key(crypto_skcipher_tfm(tfm), key, keylen);
-}
-
-extern void des3_ede_sparc64_crypt(const u64 *key, const u64 *input,
- u64 *output);
-
-static void sparc_des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
- const u64 *K = ctx->encrypt_expkey;
-
- des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
-}
-
-static void sparc_des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- struct des3_ede_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
- const u64 *K = ctx->decrypt_expkey;
-
- des3_ede_sparc64_crypt(K, (const u64 *) src, (u64 *) dst);
-}
-
-extern void des3_ede_sparc64_load_keys(const u64 *key);
-
-extern void des3_ede_sparc64_ecb_crypt(const u64 *expkey, const u64 *input,
- u64 *output, unsigned int len);
-
-static int __ecb3_crypt(struct skcipher_request *req, bool encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- const u64 *K;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
-
- if (encrypt)
- K = &ctx->encrypt_expkey[0];
- else
- K = &ctx->decrypt_expkey[0];
- des3_ede_sparc64_load_keys(K);
- while ((nbytes = walk.nbytes) != 0) {
- des3_ede_sparc64_ecb_crypt(K, walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes, DES_BLOCK_SIZE));
- err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
- }
- fprs_write(0);
- return err;
-}
-
-static int ecb3_encrypt(struct skcipher_request *req)
-{
- return __ecb3_crypt(req, true);
-}
-
-static int ecb3_decrypt(struct skcipher_request *req)
-{
- return __ecb3_crypt(req, false);
-}
-
-extern void des3_ede_sparc64_cbc_encrypt(const u64 *expkey, const u64 *input,
- u64 *output, unsigned int len,
- u64 *iv);
-
-extern void des3_ede_sparc64_cbc_decrypt(const u64 *expkey, const u64 *input,
- u64 *output, unsigned int len,
- u64 *iv);
-
-static int __cbc3_crypt(struct skcipher_request *req, bool encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct des3_ede_sparc64_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- const u64 *K;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
- if (err)
- return err;
-
- if (encrypt)
- K = &ctx->encrypt_expkey[0];
- else
- K = &ctx->decrypt_expkey[0];
- des3_ede_sparc64_load_keys(K);
- while ((nbytes = walk.nbytes) != 0) {
- if (encrypt)
- des3_ede_sparc64_cbc_encrypt(K, walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes,
- DES_BLOCK_SIZE),
- walk.iv);
- else
- des3_ede_sparc64_cbc_decrypt(K, walk.src.virt.addr,
- walk.dst.virt.addr,
- round_down(nbytes,
- DES_BLOCK_SIZE),
- walk.iv);
- err = skcipher_walk_done(&walk, nbytes % DES_BLOCK_SIZE);
- }
- fprs_write(0);
- return err;
-}
-
-static int cbc3_encrypt(struct skcipher_request *req)
-{
- return __cbc3_crypt(req, true);
-}
-
-static int cbc3_decrypt(struct skcipher_request *req)
-{
- return __cbc3_crypt(req, false);
-}
-
-static struct crypto_alg cipher_algs[] = {
- {
- .cra_name = "des",
- .cra_driver_name = "des-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES_KEY_SIZE,
- .cia_max_keysize = DES_KEY_SIZE,
- .cia_setkey = des_set_key,
- .cia_encrypt = sparc_des_encrypt,
- .cia_decrypt = sparc_des_decrypt
- }
- }
- }, {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-sparc64",
- .cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES3_EDE_KEY_SIZE,
- .cia_max_keysize = DES3_EDE_KEY_SIZE,
- .cia_setkey = des3_ede_set_key,
- .cia_encrypt = sparc_des3_ede_encrypt,
- .cia_decrypt = sparc_des3_ede_decrypt
- }
- }
- }
-};
-
-static struct skcipher_alg skcipher_algs[] = {
- {
- .base.cra_name = "ecb(des)",
- .base.cra_driver_name = "ecb-des-sparc64",
- .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_set_key_skcipher,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- }, {
- .base.cra_name = "cbc(des)",
- .base.cra_driver_name = "cbc-des-sparc64",
- .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des_sparc64_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_set_key_skcipher,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- }, {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3_ede-sparc64",
- .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_set_key_skcipher,
- .encrypt = ecb3_encrypt,
- .decrypt = ecb3_decrypt,
- }, {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3_ede-sparc64",
- .base.cra_priority = SPARC_CR_OPCODE_PRIORITY,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des3_ede_sparc64_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_set_key_skcipher,
- .encrypt = cbc3_encrypt,
- .decrypt = cbc3_decrypt,
- }
-};
-
-static bool __init sparc64_has_des_opcode(void)
-{
- unsigned long cfr;
-
- if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
- return false;
-
- __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
- if (!(cfr & CFR_DES))
- return false;
-
- return true;
-}
-
-static int __init des_sparc64_mod_init(void)
-{
- int err;
-
- if (!sparc64_has_des_opcode()) {
- pr_info("sparc64 des opcodes not available.\n");
- return -ENODEV;
- }
- pr_info("Using sparc64 des opcodes optimized DES implementation\n");
- err = crypto_register_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
- if (err)
- return err;
- err = crypto_register_skciphers(skcipher_algs,
- ARRAY_SIZE(skcipher_algs));
- if (err)
- crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
- return err;
-}
-
-static void __exit des_sparc64_mod_fini(void)
-{
- crypto_unregister_algs(cipher_algs, ARRAY_SIZE(cipher_algs));
- crypto_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
-}
-
-module_init(des_sparc64_mod_init);
-module_exit(des_sparc64_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
-
-MODULE_ALIAS_CRYPTO("des");
-MODULE_ALIAS_CRYPTO("des3_ede");
-
-#include "crop_devid.c"
diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig
index 822cbf414269..f65d7b83702f 100644
--- a/arch/x86/crypto/Kconfig
+++ b/arch/x86/crypto/Kconfig
@@ -99,20 +99,6 @@ config CRYPTO_CAST6_AVX_X86_64
Processes eight blocks in parallel.
-config CRYPTO_DES3_EDE_X86_64
- tristate "Ciphers: Triple DES EDE with modes: ECB, CBC"
- depends on 64BIT
- select CRYPTO_SKCIPHER
- select CRYPTO_LIB_DES
- imply CRYPTO_CTR
- help
- Block cipher: Triple DES EDE (FIPS 46-3) cipher algorithm
- Length-preserving ciphers: Triple DES EDE with ECB and CBC modes
-
- Architecture: x86_64
-
- Processes one or three blocks in parallel.
-
config CRYPTO_SERPENT_SSE2_X86_64
tristate "Ciphers: Serpent with modes: ECB, CBC (SSE2)"
depends on 64BIT
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 3d6d5087a65e..e04ff8718d6b 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -20,9 +20,6 @@ serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
-obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
-des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
-
obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
deleted file mode 100644
index cf21b998e77c..000000000000
--- a/arch/x86/crypto/des3_ede-asm_64.S
+++ /dev/null
@@ -1,831 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher
- *
- * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
- */
-
-#include <linux/linkage.h>
-
-.file "des3_ede-asm_64.S"
-.text
-
-#define s1 .L_s1
-#define s2 ((s1) + (64*8))
-#define s3 ((s2) + (64*8))
-#define s4 ((s3) + (64*8))
-#define s5 ((s4) + (64*8))
-#define s6 ((s5) + (64*8))
-#define s7 ((s6) + (64*8))
-#define s8 ((s7) + (64*8))
-
-/* register macros */
-#define CTX %rdi
-
-#define RL0 %r8
-#define RL1 %r9
-#define RL2 %r10
-
-#define RL0d %r8d
-#define RL1d %r9d
-#define RL2d %r10d
-
-#define RR0 %r11
-#define RR1 %r12
-#define RR2 %r13
-
-#define RR0d %r11d
-#define RR1d %r12d
-#define RR2d %r13d
-
-#define RW0 %rax
-#define RW1 %rbx
-#define RW2 %rcx
-
-#define RW0d %eax
-#define RW1d %ebx
-#define RW2d %ecx
-
-#define RW0bl %al
-#define RW1bl %bl
-#define RW2bl %cl
-
-#define RW0bh %ah
-#define RW1bh %bh
-#define RW2bh %ch
-
-#define RT0 %r15
-#define RT1 %rsi
-#define RT2 %r14
-#define RT3 %rdx
-
-#define RT0d %r15d
-#define RT1d %esi
-#define RT2d %r14d
-#define RT3d %edx
-
-/***********************************************************************
- * 1-way 3DES
- ***********************************************************************/
-#define do_permutation(a, b, offset, mask) \
- movl a, RT0d; \
- shrl $(offset), RT0d; \
- xorl b, RT0d; \
- andl $(mask), RT0d; \
- xorl RT0d, b; \
- shll $(offset), RT0d; \
- xorl RT0d, a;
-
-#define expand_to_64bits(val, mask) \
- movl val##d, RT0d; \
- rorl $4, RT0d; \
- shlq $32, RT0; \
- orq RT0, val; \
- andq mask, val;
-
-#define compress_to_64bits(val) \
- movq val, RT0; \
- shrq $32, RT0; \
- roll $4, RT0d; \
- orl RT0d, val##d;
-
-#define initial_permutation(left, right) \
- do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \
- do_permutation(left##d, right##d, 16, 0x0000ffff); \
- do_permutation(right##d, left##d, 2, 0x33333333); \
- do_permutation(right##d, left##d, 8, 0x00ff00ff); \
- movabs $0x3f3f3f3f3f3f3f3f, RT3; \
- movl left##d, RW0d; \
- roll $1, right##d; \
- xorl right##d, RW0d; \
- andl $0xaaaaaaaa, RW0d; \
- xorl RW0d, left##d; \
- xorl RW0d, right##d; \
- roll $1, left##d; \
- expand_to_64bits(right, RT3); \
- expand_to_64bits(left, RT3);
-
-#define final_permutation(left, right) \
- compress_to_64bits(right); \
- compress_to_64bits(left); \
- movl right##d, RW0d; \
- rorl $1, left##d; \
- xorl left##d, RW0d; \
- andl $0xaaaaaaaa, RW0d; \
- xorl RW0d, right##d; \
- xorl RW0d, left##d; \
- rorl $1, right##d; \
- do_permutation(right##d, left##d, 8, 0x00ff00ff); \
- do_permutation(right##d, left##d, 2, 0x33333333); \
- do_permutation(left##d, right##d, 16, 0x0000ffff); \
- do_permutation(left##d, right##d, 4, 0x0f0f0f0f);
-
-#define round1(n, from, to, load_next_key) \
- xorq from, RW0; \
- \
- movzbl RW0bl, RT0d; \
- movzbl RW0bh, RT1d; \
- shrq $16, RW0; \
- movzbl RW0bl, RT2d; \
- movzbl RW0bh, RT3d; \
- shrq $16, RW0; \
- leaq s8(%rip), RW1; \
- movq (RW1, RT0, 8), RT0; \
- leaq s6(%rip), RW1; \
- xorq (RW1, RT1, 8), to; \
- movzbl RW0bl, RL1d; \
- movzbl RW0bh, RT1d; \
- shrl $16, RW0d; \
- leaq s4(%rip), RW1; \
- xorq (RW1, RT2, 8), RT0; \
- leaq s2(%rip), RW1; \
- xorq (RW1, RT3, 8), to; \
- movzbl RW0bl, RT2d; \
- movzbl RW0bh, RT3d; \
- leaq s7(%rip), RW1; \
- xorq (RW1, RL1, 8), RT0; \
- leaq s5(%rip), RW1; \
- xorq (RW1, RT1, 8), to; \
- leaq s3(%rip), RW1; \
- xorq (RW1, RT2, 8), RT0; \
- load_next_key(n, RW0); \
- xorq RT0, to; \
- leaq s1(%rip), RW1; \
- xorq (RW1, RT3, 8), to; \
-
-#define load_next_key(n, RWx) \
- movq (((n) + 1) * 8)(CTX), RWx;
-
-#define dummy2(a, b) /*_*/
-
-#define read_block(io, left, right) \
- movl (io), left##d; \
- movl 4(io), right##d; \
- bswapl left##d; \
- bswapl right##d;
-
-#define write_block(io, left, right) \
- bswapl left##d; \
- bswapl right##d; \
- movl left##d, (io); \
- movl right##d, 4(io);
-
-SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
- /* input:
- * %rdi: round keys, CTX
- * %rsi: dst
- * %rdx: src
- */
- pushq %rbx;
- pushq %r12;
- pushq %r13;
- pushq %r14;
- pushq %r15;
-
- pushq %rsi; /* dst */
-
- read_block(%rdx, RL0, RR0);
- initial_permutation(RL0, RR0);
-
- movq (CTX), RW0;
-
- round1(0, RR0, RL0, load_next_key);
- round1(1, RL0, RR0, load_next_key);
- round1(2, RR0, RL0, load_next_key);
- round1(3, RL0, RR0, load_next_key);
- round1(4, RR0, RL0, load_next_key);
- round1(5, RL0, RR0, load_next_key);
- round1(6, RR0, RL0, load_next_key);
- round1(7, RL0, RR0, load_next_key);
- round1(8, RR0, RL0, load_next_key);
- round1(9, RL0, RR0, load_next_key);
- round1(10, RR0, RL0, load_next_key);
- round1(11, RL0, RR0, load_next_key);
- round1(12, RR0, RL0, load_next_key);
- round1(13, RL0, RR0, load_next_key);
- round1(14, RR0, RL0, load_next_key);
- round1(15, RL0, RR0, load_next_key);
-
- round1(16+0, RL0, RR0, load_next_key);
- round1(16+1, RR0, RL0, load_next_key);
- round1(16+2, RL0, RR0, load_next_key);
- round1(16+3, RR0, RL0, load_next_key);
- round1(16+4, RL0, RR0, load_next_key);
- round1(16+5, RR0, RL0, load_next_key);
- round1(16+6, RL0, RR0, load_next_key);
- round1(16+7, RR0, RL0, load_next_key);
- round1(16+8, RL0, RR0, load_next_key);
- round1(16+9, RR0, RL0, load_next_key);
- round1(16+10, RL0, RR0, load_next_key);
- round1(16+11, RR0, RL0, load_next_key);
- round1(16+12, RL0, RR0, load_next_key);
- round1(16+13, RR0, RL0, load_next_key);
- round1(16+14, RL0, RR0, load_next_key);
- round1(16+15, RR0, RL0, load_next_key);
-
- round1(32+0, RR0, RL0, load_next_key);
- round1(32+1, RL0, RR0, load_next_key);
- round1(32+2, RR0, RL0, load_next_key);
- round1(32+3, RL0, RR0, load_next_key);
- round1(32+4, RR0, RL0, load_next_key);
- round1(32+5, RL0, RR0, load_next_key);
- round1(32+6, RR0, RL0, load_next_key);
- round1(32+7, RL0, RR0, load_next_key);
- round1(32+8, RR0, RL0, load_next_key);
- round1(32+9, RL0, RR0, load_next_key);
- round1(32+10, RR0, RL0, load_next_key);
- round1(32+11, RL0, RR0, load_next_key);
- round1(32+12, RR0, RL0, load_next_key);
- round1(32+13, RL0, RR0, load_next_key);
- round1(32+14, RR0, RL0, load_next_key);
- round1(32+15, RL0, RR0, dummy2);
-
- final_permutation(RR0, RL0);
-
- popq %rsi /* dst */
- write_block(%rsi, RR0, RL0);
-
- popq %r15;
- popq %r14;
- popq %r13;
- popq %r12;
- popq %rbx;
-
- RET;
-SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
-
-/***********************************************************************
- * 3-way 3DES
- ***********************************************************************/
-#define expand_to_64bits(val, mask) \
- movl val##d, RT0d; \
- rorl $4, RT0d; \
- shlq $32, RT0; \
- orq RT0, val; \
- andq mask, val;
-
-#define compress_to_64bits(val) \
- movq val, RT0; \
- shrq $32, RT0; \
- roll $4, RT0d; \
- orl RT0d, val##d;
-
-#define initial_permutation3(left, right) \
- do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
- do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
- do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
- do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
- do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \
- do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
- \
- do_permutation(right##0d, left##0d, 2, 0x33333333); \
- do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
- do_permutation(right##1d, left##1d, 2, 0x33333333); \
- do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
- do_permutation(right##2d, left##2d, 2, 0x33333333); \
- do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
- \
- movabs $0x3f3f3f3f3f3f3f3f, RT3; \
- \
- movl left##0d, RW0d; \
- roll $1, right##0d; \
- xorl right##0d, RW0d; \
- andl $0xaaaaaaaa, RW0d; \
- xorl RW0d, left##0d; \
- xorl RW0d, right##0d; \
- roll $1, left##0d; \
- expand_to_64bits(right##0, RT3); \
- expand_to_64bits(left##0, RT3); \
- movl left##1d, RW1d; \
- roll $1, right##1d; \
- xorl right##1d, RW1d; \
- andl $0xaaaaaaaa, RW1d; \
- xorl RW1d, left##1d; \
- xorl RW1d, right##1d; \
- roll $1, left##1d; \
- expand_to_64bits(right##1, RT3); \
- expand_to_64bits(left##1, RT3); \
- movl left##2d, RW2d; \
- roll $1, right##2d; \
- xorl right##2d, RW2d; \
- andl $0xaaaaaaaa, RW2d; \
- xorl RW2d, left##2d; \
- xorl RW2d, right##2d; \
- roll $1, left##2d; \
- expand_to_64bits(right##2, RT3); \
- expand_to_64bits(left##2, RT3);
-
-#define final_permutation3(left, right) \
- compress_to_64bits(right##0); \
- compress_to_64bits(left##0); \
- movl right##0d, RW0d; \
- rorl $1, left##0d; \
- xorl left##0d, RW0d; \
- andl $0xaaaaaaaa, RW0d; \
- xorl RW0d, right##0d; \
- xorl RW0d, left##0d; \
- rorl $1, right##0d; \
- compress_to_64bits(right##1); \
- compress_to_64bits(left##1); \
- movl right##1d, RW1d; \
- rorl $1, left##1d; \
- xorl left##1d, RW1d; \
- andl $0xaaaaaaaa, RW1d; \
- xorl RW1d, right##1d; \
- xorl RW1d, left##1d; \
- rorl $1, right##1d; \
- compress_to_64bits(right##2); \
- compress_to_64bits(left##2); \
- movl right##2d, RW2d; \
- rorl $1, left##2d; \
- xorl left##2d, RW2d; \
- andl $0xaaaaaaaa, RW2d; \
- xorl RW2d, right##2d; \
- xorl RW2d, left##2d; \
- rorl $1, right##2d; \
- \
- do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \
- do_permutation(right##0d, left##0d, 2, 0x33333333); \
- do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \
- do_permutation(right##1d, left##1d, 2, 0x33333333); \
- do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \
- do_permutation(right##2d, left##2d, 2, 0x33333333); \
- \
- do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
- do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \
- do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
- do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \
- do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
- do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f);
-
-#define round3(n, from, to, load_next_key, do_movq) \
- xorq from##0, RW0; \
- movzbl RW0bl, RT3d; \
- movzbl RW0bh, RT1d; \
- shrq $16, RW0; \
- leaq s8(%rip), RT2; \
- xorq (RT2, RT3, 8), to##0; \
- leaq s6(%rip), RT2; \
- xorq (RT2, RT1, 8), to##0; \
- movzbl RW0bl, RT3d; \
- movzbl RW0bh, RT1d; \
- shrq $16, RW0; \
- leaq s4(%rip), RT2; \
- xorq (RT2, RT3, 8), to##0; \
- leaq s2(%rip), RT2; \
- xorq (RT2, RT1, 8), to##0; \
- movzbl RW0bl, RT3d; \
- movzbl RW0bh, RT1d; \
- shrl $16, RW0d; \
- leaq s7(%rip), RT2; \
- xorq (RT2, RT3, 8), to##0; \
- leaq s5(%rip), RT2; \
- xorq (RT2, RT1, 8), to##0; \
- movzbl RW0bl, RT3d; \
- movzbl RW0bh, RT1d; \
- load_next_key(n, RW0); \
- leaq s3(%rip), RT2; \
- xorq (RT2, RT3, 8), to##0; \
- leaq s1(%rip), RT2; \
- xorq (RT2, RT1, 8), to##0; \
- xorq from##1, RW1; \
- movzbl RW1bl, RT3d; \
- movzbl RW1bh, RT1d; \
- shrq $16, RW1; \
- leaq s8(%rip), RT2; \
- xorq (RT2, RT3, 8), to##1; \
- leaq s6(%rip), RT2; \
- xorq (RT2, RT1, 8), to##1; \
- movzbl RW1bl, RT3d; \
- movzbl RW1bh, RT1d; \
- shrq $16, RW1; \
- leaq s4(%rip), RT2; \
- xorq (RT2, RT3, 8), to##1; \
- leaq s2(%rip), RT2; \
- xorq (RT2, RT1, 8), to##1; \
- movzbl RW1bl, RT3d; \
- movzbl RW1bh, RT1d; \
- shrl $16, RW1d; \
- leaq s7(%rip), RT2; \
- xorq (RT2, RT3, 8), to##1; \
- leaq s5(%rip), RT2; \
- xorq (RT2, RT1, 8), to##1; \
- movzbl RW1bl, RT3d; \
- movzbl RW1bh, RT1d; \
- do_movq(RW0, RW1); \
- leaq s3(%rip), RT2; \
- xorq (RT2, RT3, 8), to##1; \
- leaq s1(%rip), RT2; \
- xorq (RT2, RT1, 8), to##1; \
- xorq from##2, RW2; \
- movzbl RW2bl, RT3d; \
- movzbl RW2bh, RT1d; \
- shrq $16, RW2; \
- leaq s8(%rip), RT2; \
- xorq (RT2, RT3, 8), to##2; \
- leaq s6(%rip), RT2; \
- xorq (RT2, RT1, 8), to##2; \
- movzbl RW2bl, RT3d; \
- movzbl RW2bh, RT1d; \
- shrq $16, RW2; \
- leaq s4(%rip), RT2; \
- xorq (RT2, RT3, 8), to##2; \
- leaq s2(%rip), RT2; \
- xorq (RT2, RT1, 8), to##2; \
- movzbl RW2bl, RT3d; \
- movzbl RW2bh, RT1d; \
- shrl $16, RW2d; \
- leaq s7(%rip), RT2; \
- xorq (RT2, RT3, 8), to##2; \
- leaq s5(%rip), RT2; \
- xorq (RT2, RT1, 8), to##2; \
- movzbl RW2bl, RT3d; \
- movzbl RW2bh, RT1d; \
- do_movq(RW0, RW2); \
- leaq s3(%rip), RT2; \
- xorq (RT2, RT3, 8), to##2; \
- leaq s1(%rip), RT2; \
- xorq (RT2, RT1, 8), to##2;
-
-#define __movq(src, dst) \
- movq src, dst;
-
-SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
- /* input:
- * %rdi: ctx, round keys
- * %rsi: dst (3 blocks)
- * %rdx: src (3 blocks)
- */
-
- pushq %rbx;
- pushq %r12;
- pushq %r13;
- pushq %r14;
- pushq %r15;
-
- pushq %rsi /* dst */
-
- /* load input */
- movl 0 * 4(%rdx), RL0d;
- movl 1 * 4(%rdx), RR0d;
- movl 2 * 4(%rdx), RL1d;
- movl 3 * 4(%rdx), RR1d;
- movl 4 * 4(%rdx), RL2d;
- movl 5 * 4(%rdx), RR2d;
-
- bswapl RL0d;
- bswapl RR0d;
- bswapl RL1d;
- bswapl RR1d;
- bswapl RL2d;
- bswapl RR2d;
-
- initial_permutation3(RL, RR);
-
- movq 0(CTX), RW0;
- movq RW0, RW1;
- movq RW0, RW2;
-
- round3(0, RR, RL, load_next_key, __movq);
- round3(1, RL, RR, load_next_key, __movq);
- round3(2, RR, RL, load_next_key, __movq);
- round3(3, RL, RR, load_next_key, __movq);
- round3(4, RR, RL, load_next_key, __movq);
- round3(5, RL, RR, load_next_key, __movq);
- round3(6, RR, RL, load_next_key, __movq);
- round3(7, RL, RR, load_next_key, __movq);
- round3(8, RR, RL, load_next_key, __movq);
- round3(9, RL, RR, load_next_key, __movq);
- round3(10, RR, RL, load_next_key, __movq);
- round3(11, RL, RR, load_next_key, __movq);
- round3(12, RR, RL, load_next_key, __movq);
- round3(13, RL, RR, load_next_key, __movq);
- round3(14, RR, RL, load_next_key, __movq);
- round3(15, RL, RR, load_next_key, __movq);
-
- round3(16+0, RL, RR, load_next_key, __movq);
- round3(16+1, RR, RL, load_next_key, __movq);
- round3(16+2, RL, RR, load_next_key, __movq);
- round3(16+3, RR, RL, load_next_key, __movq);
- round3(16+4, RL, RR, load_next_key, __movq);
- round3(16+5, RR, RL, load_next_key, __movq);
- round3(16+6, RL, RR, load_next_key, __movq);
- round3(16+7, RR, RL, load_next_key, __movq);
- round3(16+8, RL, RR, load_next_key, __movq);
- round3(16+9, RR, RL, load_next_key, __movq);
- round3(16+10, RL, RR, load_next_key, __movq);
- round3(16+11, RR, RL, load_next_key, __movq);
- round3(16+12, RL, RR, load_next_key, __movq);
- round3(16+13, RR, RL, load_next_key, __movq);
- round3(16+14, RL, RR, load_next_key, __movq);
- round3(16+15, RR, RL, load_next_key, __movq);
-
- round3(32+0, RR, RL, load_next_key, __movq);
- round3(32+1, RL, RR, load_next_key, __movq);
- round3(32+2, RR, RL, load_next_key, __movq);
- round3(32+3, RL, RR, load_next_key, __movq);
- round3(32+4, RR, RL, load_next_key, __movq);
- round3(32+5, RL, RR, load_next_key, __movq);
- round3(32+6, RR, RL, load_next_key, __movq);
- round3(32+7, RL, RR, load_next_key, __movq);
- round3(32+8, RR, RL, load_next_key, __movq);
- round3(32+9, RL, RR, load_next_key, __movq);
- round3(32+10, RR, RL, load_next_key, __movq);
- round3(32+11, RL, RR, load_next_key, __movq);
- round3(32+12, RR, RL, load_next_key, __movq);
- round3(32+13, RL, RR, load_next_key, __movq);
- round3(32+14, RR, RL, load_next_key, __movq);
- round3(32+15, RL, RR, dummy2, dummy2);
-
- final_permutation3(RR, RL);
-
- bswapl RR0d;
- bswapl RL0d;
- bswapl RR1d;
- bswapl RL1d;
- bswapl RR2d;
- bswapl RL2d;
-
- popq %rsi /* dst */
- movl RR0d, 0 * 4(%rsi);
- movl RL0d, 1 * 4(%rsi);
- movl RR1d, 2 * 4(%rsi);
- movl RL1d, 3 * 4(%rsi);
- movl RR2d, 4 * 4(%rsi);
- movl RL2d, 5 * 4(%rsi);
-
- popq %r15;
- popq %r14;
- popq %r13;
- popq %r12;
- popq %rbx;
-
- RET;
-SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
-
-.section .rodata, "a", @progbits
-.align 16
-.L_s1:
- .quad 0x0010100001010400, 0x0000000000000000
- .quad 0x0000100000010000, 0x0010100001010404
- .quad 0x0010100001010004, 0x0000100000010404
- .quad 0x0000000000000004, 0x0000100000010000
- .quad 0x0000000000000400, 0x0010100001010400
- .quad 0x0010100001010404, 0x0000000000000400
- .quad 0x0010000001000404, 0x0010100001010004
- .quad 0x0010000001000000, 0x0000000000000004
- .quad 0x0000000000000404, 0x0010000001000400
- .quad 0x0010000001000400, 0x0000100000010400
- .quad 0x0000100000010400, 0x0010100001010000
- .quad 0x0010100001010000, 0x0010000001000404
- .quad 0x0000100000010004, 0x0010000001000004
- .quad 0x0010000001000004, 0x0000100000010004
- .quad 0x0000000000000000, 0x0000000000000404
- .quad 0x0000100000010404, 0x0010000001000000
- .quad 0x0000100000010000, 0x0010100001010404
- .quad 0x0000000000000004, 0x0010100001010000
- .quad 0x0010100001010400, 0x0010000001000000
- .quad 0x0010000001000000, 0x0000000000000400
- .quad 0x0010100001010004, 0x0000100000010000
- .quad 0x0000100000010400, 0x0010000001000004
- .quad 0x0000000000000400, 0x0000000000000004
- .quad 0x0010000001000404, 0x0000100000010404
- .quad 0x0010100001010404, 0x0000100000010004
- .quad 0x0010100001010000, 0x0010000001000404
- .quad 0x0010000001000004, 0x0000000000000404
- .quad 0x0000100000010404, 0x0010100001010400
- .quad 0x0000000000000404, 0x0010000001000400
- .quad 0x0010000001000400, 0x0000000000000000
- .quad 0x0000100000010004, 0x0000100000010400
- .quad 0x0000000000000000, 0x0010100001010004
-.L_s2:
- .quad 0x0801080200100020, 0x0800080000000000
- .quad 0x0000080000000000, 0x0001080200100020
- .quad 0x0001000000100000, 0x0000000200000020
- .quad 0x0801000200100020, 0x0800080200000020
- .quad 0x0800000200000020, 0x0801080200100020
- .quad 0x0801080000100000, 0x0800000000000000
- .quad 0x0800080000000000, 0x0001000000100000
- .quad 0x0000000200000020, 0x0801000200100020
- .quad 0x0001080000100000, 0x0001000200100020
- .quad 0x0800080200000020, 0x0000000000000000
- .quad 0x0800000000000000, 0x0000080000000000
- .quad 0x0001080200100020, 0x0801000000100000
- .quad 0x0001000200100020, 0x0800000200000020
- .quad 0x0000000000000000, 0x0001080000100000
- .quad 0x0000080200000020, 0x0801080000100000
- .quad 0x0801000000100000, 0x0000080200000020
- .quad 0x0000000000000000, 0x0001080200100020
- .quad 0x0801000200100020, 0x0001000000100000
- .quad 0x0800080200000020, 0x0801000000100000
- .quad 0x0801080000100000, 0x0000080000000000
- .quad 0x0801000000100000, 0x0800080000000000
- .quad 0x0000000200000020, 0x0801080200100020
- .quad 0x0001080200100020, 0x0000000200000020
- .quad 0x0000080000000000, 0x0800000000000000
- .quad 0x0000080200000020, 0x0801080000100000
- .quad 0x0001000000100000, 0x0800000200000020
- .quad 0x0001000200100020, 0x0800080200000020
- .quad 0x0800000200000020, 0x0001000200100020
- .quad 0x0001080000100000, 0x0000000000000000
- .quad 0x0800080000000000, 0x0000080200000020
- .quad 0x0800000000000000, 0x0801000200100020
- .quad 0x0801080200100020, 0x0001080000100000
-.L_s3:
- .quad 0x0000002000000208, 0x0000202008020200
- .quad 0x0000000000000000, 0x0000200008020008
- .quad 0x0000002008000200, 0x0000000000000000
- .quad 0x0000202000020208, 0x0000002008000200
- .quad 0x0000200000020008, 0x0000000008000008
- .quad 0x0000000008000008, 0x0000200000020000
- .quad 0x0000202008020208, 0x0000200000020008
- .quad 0x0000200008020000, 0x0000002000000208
- .quad 0x0000000008000000, 0x0000000000000008
- .quad 0x0000202008020200, 0x0000002000000200
- .quad 0x0000202000020200, 0x0000200008020000
- .quad 0x0000200008020008, 0x0000202000020208
- .quad 0x0000002008000208, 0x0000202000020200
- .quad 0x0000200000020000, 0x0000002008000208
- .quad 0x0000000000000008, 0x0000202008020208
- .quad 0x0000002000000200, 0x0000000008000000
- .quad 0x0000202008020200, 0x0000000008000000
- .quad 0x0000200000020008, 0x0000002000000208
- .quad 0x0000200000020000, 0x0000202008020200
- .quad 0x0000002008000200, 0x0000000000000000
- .quad 0x0000002000000200, 0x0000200000020008
- .quad 0x0000202008020208, 0x0000002008000200
- .quad 0x0000000008000008, 0x0000002000000200
- .quad 0x0000000000000000, 0x0000200008020008
- .quad 0x0000002008000208, 0x0000200000020000
- .quad 0x0000000008000000, 0x0000202008020208
- .quad 0x0000000000000008, 0x0000202000020208
- .quad 0x0000202000020200, 0x0000000008000008
- .quad 0x0000200008020000, 0x0000002008000208
- .quad 0x0000002000000208, 0x0000200008020000
- .quad 0x0000202000020208, 0x0000000000000008
- .quad 0x0000200008020008, 0x0000202000020200
-.L_s4:
- .quad 0x1008020000002001, 0x1000020800002001
- .quad 0x1000020800002001, 0x0000000800000000
- .quad 0x0008020800002000, 0x1008000800000001
- .quad 0x1008000000000001, 0x1000020000002001
- .quad 0x0000000000000000, 0x0008020000002000
- .quad 0x0008020000002000, 0x1008020800002001
- .quad 0x1000000800000001, 0x0000000000000000
- .quad 0x0008000800000000, 0x1008000000000001
- .quad 0x1000000000000001, 0x0000020000002000
- .quad 0x0008000000000000, 0x1008020000002001
- .quad 0x0000000800000000, 0x0008000000000000
- .quad 0x1000020000002001, 0x0000020800002000
- .quad 0x1008000800000001, 0x1000000000000001
- .quad 0x0000020800002000, 0x0008000800000000
- .quad 0x0000020000002000, 0x0008020800002000
- .quad 0x1008020800002001, 0x1000000800000001
- .quad 0x0008000800000000, 0x1008000000000001
- .quad 0x0008020000002000, 0x1008020800002001
- .quad 0x1000000800000001, 0x0000000000000000
- .quad 0x0000000000000000, 0x0008020000002000
- .quad 0x0000020800002000, 0x0008000800000000
- .quad 0x1008000800000001, 0x1000000000000001
- .quad 0x1008020000002001, 0x1000020800002001
- .quad 0x1000020800002001, 0x0000000800000000
- .quad 0x1008020800002001, 0x1000000800000001
- .quad 0x1000000000000001, 0x0000020000002000
- .quad 0x1008000000000001, 0x1000020000002001
- .quad 0x0008020800002000, 0x1008000800000001
- .quad 0x1000020000002001, 0x0000020800002000
- .quad 0x0008000000000000, 0x1008020000002001
- .quad 0x0000000800000000, 0x0008000000000000
- .quad 0x0000020000002000, 0x0008020800002000
-.L_s5:
- .quad 0x0000001000000100, 0x0020001002080100
- .quad 0x0020000002080000, 0x0420001002000100
- .quad 0x0000000000080000, 0x0000001000000100
- .quad 0x0400000000000000, 0x0020000002080000
- .quad 0x0400001000080100, 0x0000000000080000
- .quad 0x0020001002000100, 0x0400001000080100
- .quad 0x0420001002000100, 0x0420000002080000
- .quad 0x0000001000080100, 0x0400000000000000
- .quad 0x0020000002000000, 0x0400000000080000
- .quad 0x0400000000080000, 0x0000000000000000
- .quad 0x0400001000000100, 0x0420001002080100
- .quad 0x0420001002080100, 0x0020001002000100
- .quad 0x0420000002080000, 0x0400001000000100
- .quad 0x0000000000000000, 0x0420000002000000
- .quad 0x0020001002080100, 0x0020000002000000
- .quad 0x0420000002000000, 0x0000001000080100
- .quad 0x0000000000080000, 0x0420001002000100
- .quad 0x0000001000000100, 0x0020000002000000
- .quad 0x0400000000000000, 0x0020000002080000
- .quad 0x0420001002000100, 0x0400001000080100
- .quad 0x0020001002000100, 0x0400000000000000
- .quad 0x0420000002080000, 0x0020001002080100
- .quad 0x0400001000080100, 0x0000001000000100
- .quad 0x0020000002000000, 0x0420000002080000
- .quad 0x0420001002080100, 0x0000001000080100
- .quad 0x0420000002000000, 0x0420001002080100
- .quad 0x0020000002080000, 0x0000000000000000
- .quad 0x0400000000080000, 0x0420000002000000
- .quad 0x0000001000080100, 0x0020001002000100
- .quad 0x0400001000000100, 0x0000000000080000
- .quad 0x0000000000000000, 0x0400000000080000
- .quad 0x0020001002080100, 0x0400001000000100
-.L_s6:
- .quad 0x0200000120000010, 0x0204000020000000
- .quad 0x0000040000000000, 0x0204040120000010
- .quad 0x0204000020000000, 0x0000000100000010
- .quad 0x0204040120000010, 0x0004000000000000
- .quad 0x0200040020000000, 0x0004040100000010
- .quad 0x0004000000000000, 0x0200000120000010
- .quad 0x0004000100000010, 0x0200040020000000
- .quad 0x0200000020000000, 0x0000040100000010
- .quad 0x0000000000000000, 0x0004000100000010
- .quad 0x0200040120000010, 0x0000040000000000
- .quad 0x0004040000000000, 0x0200040120000010
- .quad 0x0000000100000010, 0x0204000120000010
- .quad 0x0204000120000010, 0x0000000000000000
- .quad 0x0004040100000010, 0x0204040020000000
- .quad 0x0000040100000010, 0x0004040000000000
- .quad 0x0204040020000000, 0x0200000020000000
- .quad 0x0200040020000000, 0x0000000100000010
- .quad 0x0204000120000010, 0x0004040000000000
- .quad 0x0204040120000010, 0x0004000000000000
- .quad 0x0000040100000010, 0x0200000120000010
- .quad 0x0004000000000000, 0x0200040020000000
- .quad 0x0200000020000000, 0x0000040100000010
- .quad 0x0200000120000010, 0x0204040120000010
- .quad 0x0004040000000000, 0x0204000020000000
- .quad 0x0004040100000010, 0x0204040020000000
- .quad 0x0000000000000000, 0x0204000120000010
- .quad 0x0000000100000010, 0x0000040000000000
- .quad 0x0204000020000000, 0x0004040100000010
- .quad 0x0000040000000000, 0x0004000100000010
- .quad 0x0200040120000010, 0x0000000000000000
- .quad 0x0204040020000000, 0x0200000020000000
- .quad 0x0004000100000010, 0x0200040120000010
-.L_s7:
- .quad 0x0002000000200000, 0x2002000004200002
- .quad 0x2000000004000802, 0x0000000000000000
- .quad 0x0000000000000800, 0x2000000004000802
- .quad 0x2002000000200802, 0x0002000004200800
- .quad 0x2002000004200802, 0x0002000000200000
- .quad 0x0000000000000000, 0x2000000004000002
- .quad 0x2000000000000002, 0x0000000004000000
- .quad 0x2002000004200002, 0x2000000000000802
- .quad 0x0000000004000800, 0x2002000000200802
- .quad 0x2002000000200002, 0x0000000004000800
- .quad 0x2000000004000002, 0x0002000004200000
- .quad 0x0002000004200800, 0x2002000000200002
- .quad 0x0002000004200000, 0x0000000000000800
- .quad 0x2000000000000802, 0x2002000004200802
- .quad 0x0002000000200800, 0x2000000000000002
- .quad 0x0000000004000000, 0x0002000000200800
- .quad 0x0000000004000000, 0x0002000000200800
- .quad 0x0002000000200000, 0x2000000004000802
- .quad 0x2000000004000802, 0x2002000004200002
- .quad 0x2002000004200002, 0x2000000000000002
- .quad 0x2002000000200002, 0x0000000004000000
- .quad 0x0000000004000800, 0x0002000000200000
- .quad 0x0002000004200800, 0x2000000000000802
- .quad 0x2002000000200802, 0x0002000004200800
- .quad 0x2000000000000802, 0x2000000004000002
- .quad 0x2002000004200802, 0x0002000004200000
- .quad 0x0002000000200800, 0x0000000000000000
- .quad 0x2000000000000002, 0x2002000004200802
- .quad 0x0000000000000000, 0x2002000000200802
- .quad 0x0002000004200000, 0x0000000000000800
- .quad 0x2000000004000002, 0x0000000004000800
- .quad 0x0000000000000800, 0x2002000000200002
-.L_s8:
- .quad 0x0100010410001000, 0x0000010000001000
- .quad 0x0000000000040000, 0x0100010410041000
- .quad 0x0100000010000000, 0x0100010410001000
- .quad 0x0000000400000000, 0x0100000010000000
- .quad 0x0000000400040000, 0x0100000010040000
- .quad 0x0100010410041000, 0x0000010000041000
- .quad 0x0100010010041000, 0x0000010400041000
- .quad 0x0000010000001000, 0x0000000400000000
- .quad 0x0100000010040000, 0x0100000410000000
- .quad 0x0100010010001000, 0x0000010400001000
- .quad 0x0000010000041000, 0x0000000400040000
- .quad 0x0100000410040000, 0x0100010010041000
- .quad 0x0000010400001000, 0x0000000000000000
- .quad 0x0000000000000000, 0x0100000410040000
- .quad 0x0100000410000000, 0x0100010010001000
- .quad 0x0000010400041000, 0x0000000000040000
- .quad 0x0000010400041000, 0x0000000000040000
- .quad 0x0100010010041000, 0x0000010000001000
- .quad 0x0000000400000000, 0x0100000410040000
- .quad 0x0000010000001000, 0x0000010400041000
- .quad 0x0100010010001000, 0x0000000400000000
- .quad 0x0100000410000000, 0x0100000010040000
- .quad 0x0100000410040000, 0x0100000010000000
- .quad 0x0000000000040000, 0x0100010410001000
- .quad 0x0000000000000000, 0x0100010410041000
- .quad 0x0000000400040000, 0x0100000410000000
- .quad 0x0100000010040000, 0x0100010010001000
- .quad 0x0100010410001000, 0x0000000000000000
- .quad 0x0100010410041000, 0x0000010000041000
- .quad 0x0000010000041000, 0x0000010400001000
- .quad 0x0000010400001000, 0x0000000400040000
- .quad 0x0100000010000000, 0x0100010010041000
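The do_permutation macro near the top of this file is a classic masked delta swap, the building block from which the DES initial and final permutations are composed. A C rendering of the same data flow (a sketch, not kernel source):

	#include <stdint.h>

	/*
	 * Equivalent of do_permutation(a, b, offset, mask): exchange the
	 * bits of b and of (a >> offset) that are selected by mask.
	 */
	static inline void delta_swap(uint32_t *a, uint32_t *b,
				      unsigned int offset, uint32_t mask)
	{
		uint32_t t = ((*a >> offset) ^ *b) & mask;

		*b ^= t;
		*a ^= t << offset;
	}
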
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
deleted file mode 100644
index 34600f90d8a6..000000000000
--- a/arch/x86/crypto/des3_ede_glue.c
+++ /dev/null
@@ -1,391 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Glue Code for assembler optimized version of 3DES
- *
- * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
- *
- * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-
-struct des3_ede_x86_ctx {
- struct des3_ede_ctx enc;
- struct des3_ede_ctx dec;
-};
-
-/* regular block cipher functions */
-asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
- const u8 *src);
-
-/* 3-way parallel cipher functions */
-asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
- const u8 *src);
-
-static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- u32 *enc_ctx = ctx->enc.expkey;
-
- des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
-}
-
-static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- u32 *dec_ctx = ctx->dec.expkey;
-
- des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
-}
-
-static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
- const u8 *src)
-{
- u32 *dec_ctx = ctx->dec.expkey;
-
- des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
-}
-
-static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
-}
-
-static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
-{
- des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
-}
-
-static int ecb_crypt(struct skcipher_request *req, const u32 *expkey)
-{
- const unsigned int bsize = DES3_EDE_BLOCK_SIZE;
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while ((nbytes = walk.nbytes)) {
- const u8 *wsrc = walk.src.virt.addr;
- u8 *wdst = walk.dst.virt.addr;
-
-		/* Process three-block batches */
- if (nbytes >= bsize * 3) {
- do {
- des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
- wsrc);
-
- wsrc += bsize * 3;
- wdst += bsize * 3;
- nbytes -= bsize * 3;
- } while (nbytes >= bsize * 3);
-
- if (nbytes < bsize)
- goto done;
- }
-
- /* Handle leftovers */
- do {
- des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
-
- wsrc += bsize;
- wdst += bsize;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
-done:
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int ecb_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return ecb_crypt(req, ctx->enc.expkey);
-}
-
-static int ecb_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- return ecb_crypt(req, ctx->dec.expkey);
-}
-
-static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx,
- struct skcipher_walk *walk)
-{
- unsigned int bsize = DES3_EDE_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 *iv = (u64 *)walk->iv;
-
- do {
- *dst = *src ^ *iv;
- des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
- iv = dst;
-
- src += 1;
- dst += 1;
- nbytes -= bsize;
- } while (nbytes >= bsize);
-
- *(u64 *)walk->iv = *iv;
- return nbytes;
-}
-
-static int cbc_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- nbytes = __cbc_encrypt(ctx, &walk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx,
- struct skcipher_walk *walk)
-{
- unsigned int bsize = DES3_EDE_BLOCK_SIZE;
- unsigned int nbytes = walk->nbytes;
- u64 *src = (u64 *)walk->src.virt.addr;
- u64 *dst = (u64 *)walk->dst.virt.addr;
- u64 ivs[3 - 1];
- u64 last_iv;
-
- /* Start of the last block. */
- src += nbytes / bsize - 1;
- dst += nbytes / bsize - 1;
-
- last_iv = *src;
-
-	/* Process three-block batches */
- if (nbytes >= bsize * 3) {
- do {
- nbytes -= bsize * 3 - bsize;
- src -= 3 - 1;
- dst -= 3 - 1;
-
- ivs[0] = src[0];
- ivs[1] = src[1];
-
- des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
-
- dst[1] ^= ivs[0];
- dst[2] ^= ivs[1];
-
- nbytes -= bsize;
- if (nbytes < bsize)
- goto done;
-
- *dst ^= *(src - 1);
- src -= 1;
- dst -= 1;
- } while (nbytes >= bsize * 3);
- }
-
- /* Handle leftovers */
- for (;;) {
- des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
-
- nbytes -= bsize;
- if (nbytes < bsize)
- break;
-
- *dst ^= *(src - 1);
- src -= 1;
- dst -= 1;
- }
-
-done:
- *dst ^= *(u64 *)walk->iv;
- *(u64 *)walk->iv = last_iv;
-
- return nbytes;
-}
-
-static int cbc_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- unsigned int nbytes;
- int err;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes) {
- nbytes = __cbc_decrypt(ctx, &walk);
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
- u32 i, j, tmp;
- int err;
-
- err = des3_ede_expand_key(&ctx->enc, key, keylen);
- if (err == -ENOKEY) {
- if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
- err = -EINVAL;
- else
- err = 0;
- }
-
- if (err) {
- memset(ctx, 0, sizeof(*ctx));
- return err;
- }
-
- /* Fix encryption context for this implementation and form decryption
- * context. */
- j = DES3_EDE_EXPKEY_WORDS - 2;
- for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
- tmp = ror32(ctx->enc.expkey[i + 1], 4);
- ctx->enc.expkey[i + 1] = tmp;
-
- ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0];
- ctx->dec.expkey[j + 1] = tmp;
- }
-
- return 0;
-}
-
-static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm,
- const u8 *key,
- unsigned int keylen)
-{
- return des3_ede_x86_setkey(&tfm->base, key, keylen);
-}
-
-static struct crypto_alg des3_ede_cipher = {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-asm",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = DES3_EDE_KEY_SIZE,
- .cia_max_keysize = DES3_EDE_KEY_SIZE,
- .cia_setkey = des3_ede_x86_setkey,
- .cia_encrypt = des3_ede_x86_encrypt,
- .cia_decrypt = des3_ede_x86_decrypt,
- }
- }
-};
-
-static struct skcipher_alg des3_ede_skciphers[] = {
- {
- .base.cra_name = "ecb(des3_ede)",
- .base.cra_driver_name = "ecb-des3_ede-asm",
- .base.cra_priority = 300,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_x86_setkey_skcipher,
- .encrypt = ecb_encrypt,
- .decrypt = ecb_decrypt,
- }, {
- .base.cra_name = "cbc(des3_ede)",
- .base.cra_driver_name = "cbc-des3_ede-asm",
- .base.cra_priority = 300,
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .base.cra_module = THIS_MODULE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = des3_ede_x86_setkey_skcipher,
- .encrypt = cbc_encrypt,
- .decrypt = cbc_decrypt,
- }
-};
-
-static bool is_blacklisted_cpu(void)
-{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- return false;
-
- if (boot_cpu_data.x86 == 0x0f) {
- /*
-		 * On Pentium 4, des3_ede-x86_64 is slower than the generic C
-		 * implementation because it uses 64-bit rotates (which are
-		 * really slow on P4). Therefore blacklist P4s.
- */
- return true;
- }
-
- return false;
-}
-
-static int force;
-module_param(force, int, 0);
-MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
-
-static int __init des3_ede_x86_init(void)
-{
- int err;
-
- if (!force && is_blacklisted_cpu()) {
- pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
- return -ENODEV;
- }
-
- err = crypto_register_alg(&des3_ede_cipher);
- if (err)
- return err;
-
- err = crypto_register_skciphers(des3_ede_skciphers,
- ARRAY_SIZE(des3_ede_skciphers));
- if (err)
- crypto_unregister_alg(&des3_ede_cipher);
-
- return err;
-}
-
-static void __exit des3_ede_x86_fini(void)
-{
- crypto_unregister_alg(&des3_ede_cipher);
- crypto_unregister_skciphers(des3_ede_skciphers,
- ARRAY_SIZE(des3_ede_skciphers));
-}
-
-module_init(des3_ede_x86_init);
-module_exit(des3_ede_x86_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
-MODULE_ALIAS_CRYPTO("des3_ede");
-MODULE_ALIAS_CRYPTO("des3_ede-asm");
-MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 05f54c9332c3..b54a1bef6ade 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -25,7 +25,7 @@ menu "Crypto core or helper"
config CRYPTO_FIPS
bool "FIPS 200 compliance"
- depends on CRYPTO_DRBG && CRYPTO_SELFTESTS
+ depends on CRYPTO_DRBG=y && CRYPTO_SELFTESTS
depends on (MODULE_SIG || !MODULES)
help
This option enables the fips boot option which is
@@ -109,10 +109,6 @@ config CRYPTO_RNG2
tristate
select CRYPTO_ALGAPI2
-config CRYPTO_RNG_DEFAULT
- tristate
- select CRYPTO_DRBG_MENU
-
config CRYPTO_AKCIPHER2
tristate
select CRYPTO_ALGAPI2
@@ -151,19 +147,20 @@ config CRYPTO_MANAGER
config CRYPTO_MANAGER2
def_tristate CRYPTO_MANAGER || (CRYPTO_MANAGER!=n && CRYPTO_ALGAPI=y)
- select CRYPTO_ACOMP2
- select CRYPTO_AEAD2
- select CRYPTO_AKCIPHER2
- select CRYPTO_SIG2
- select CRYPTO_HASH2
- select CRYPTO_KPP2
- select CRYPTO_RNG2
- select CRYPTO_SKCIPHER2
+ select CRYPTO_ACOMP2 if CRYPTO_SELFTESTS
+ select CRYPTO_AEAD2 if CRYPTO_SELFTESTS
+ select CRYPTO_AKCIPHER2 if CRYPTO_SELFTESTS
+ select CRYPTO_SIG2 if CRYPTO_SELFTESTS
+ select CRYPTO_HASH2 if CRYPTO_SELFTESTS
+ select CRYPTO_KPP2 if CRYPTO_SELFTESTS
+ select CRYPTO_RNG2 if CRYPTO_SELFTESTS
+ select CRYPTO_SKCIPHER2 if CRYPTO_SELFTESTS
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
depends on NET
select CRYPTO_MANAGER
+ select CRYPTO_RNG
help
Userspace configuration for cryptographic instantiations such as
cbc(aes).
@@ -218,6 +215,7 @@ config CRYPTO_PCRYPT
config CRYPTO_CRYPTD
tristate "Software async crypto daemon"
+ select CRYPTO_AEAD
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_MANAGER
@@ -251,7 +249,10 @@ config CRYPTO_KRB5ENC
config CRYPTO_BENCHMARK
tristate "Crypto benchmarking module"
depends on m || EXPERT
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
select CRYPTO_MANAGER
+ select CRYPTO_SKCIPHER
help
Quick & dirty crypto benchmarking module.
@@ -261,10 +262,16 @@ config CRYPTO_BENCHMARK
config CRYPTO_SIMD
tristate
+ select CRYPTO_AEAD
select CRYPTO_CRYPTD
config CRYPTO_ENGINE
tristate
+ select CRYPTO_AEAD
+ select CRYPTO_AKCIPHER
+ select CRYPTO_HASH
+ select CRYPTO_KPP
+ select CRYPTO_SKCIPHER
endmenu
@@ -290,7 +297,6 @@ config CRYPTO_DH
config CRYPTO_DH_RFC7919_GROUPS
bool "RFC 7919 FFDHE groups"
depends on CRYPTO_DH
- select CRYPTO_RNG_DEFAULT
help
FFDHE (Finite-Field-based Diffie-Hellman Ephemeral) groups
defined in RFC7919.
@@ -302,7 +308,6 @@ config CRYPTO_DH_RFC7919_GROUPS
config CRYPTO_ECC
tristate
- select CRYPTO_RNG_DEFAULT
config CRYPTO_ECDH
tristate "ECDH (Elliptic Curve Diffie-Hellman)"
@@ -800,7 +805,6 @@ config CRYPTO_GENIV
tristate
select CRYPTO_AEAD
select CRYPTO_MANAGER
- select CRYPTO_RNG_DEFAULT
config CRYPTO_SEQIV
tristate "Sequence Number IV Generator"
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index dd0e5be4d8c0..5a00c18eb145 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -324,15 +324,13 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
return PTR_ERR(ret);
}
- key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
+ key_data = sock_kmemdup(&ask->sk, ret, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
key_put(key);
return -ENOMEM;
}
- memcpy(key_data, ret, key_datalen);
-
up_read(&key->sem);
key_put(key);
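The sock_kmemdup() conversion above folds a two-step allocate-then-copy into a single call. As a rough plain-C analogy (hypothetical helper, not the kernel API):

	#include <stdlib.h>
	#include <string.h>

	/* Allocate-and-copy in one step, NULL on allocation failure. */
	static void *memdup_sketch(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}
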
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index cd38f4676176..aba9fe0f23b4 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -646,7 +646,8 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct shash_desc *desc = cryptd_shash_desc(req);
+ struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct shash_desc *desc = &rctx->desc;
desc->tfm = ctx->child;
@@ -952,115 +953,6 @@ static struct crypto_template cryptd_tmpl = {
.module = THIS_MODULE,
};
-struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
- u32 type, u32 mask)
-{
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct cryptd_skcipher_ctx *ctx;
- struct crypto_skcipher *tfm;
-
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
-
- tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
-
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_skcipher(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- ctx = crypto_skcipher_ctx(tfm);
- refcount_set(&ctx->refcnt, 1);
-
- return container_of(tfm, struct cryptd_skcipher, base);
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
-
-struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
-{
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
-
-bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
-{
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
-
- return refcount_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
-
-void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
-{
- struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
-
- if (refcount_dec_and_test(&ctx->refcnt))
- crypto_free_skcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
-
-struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct cryptd_hash_ctx *ctx;
- struct crypto_ahash *tfm;
-
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- ctx = crypto_ahash_ctx(tfm);
- refcount_set(&ctx->refcnt, 1);
-
- return __cryptd_ahash_cast(tfm);
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
-
-struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
-{
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ahash_child);
-
-struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
-{
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->desc;
-}
-EXPORT_SYMBOL_GPL(cryptd_shash_desc);
-
-bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
-{
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return refcount_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
-
-void cryptd_free_ahash(struct cryptd_ahash *tfm)
-{
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- if (refcount_dec_and_test(&ctx->refcnt))
- crypto_free_ahash(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ahash);
-
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
u32 type, u32 mask)
{
diff --git a/crypto/dh.c b/crypto/dh.c
index 8250eeeebd0f..7ad4768716c8 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -388,13 +388,7 @@ static void *dh_safe_prime_gen_privkey(const struct dh_safe_prime *safe_prime,
* 5.6.1.1.3, step 3 (and implicitly step 4): obtain N + 64
* random bits and interpret them as a big endian integer.
*/
- err = -EFAULT;
- if (crypto_get_default_rng())
- goto out_err;
-
- err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)key,
- oversampling_size);
- crypto_put_default_rng();
+ err = crypto_stdrng_get_bytes(key, oversampling_size);
if (err)
goto out_err;
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 1ed209e5d5dd..9204e6edb426 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1780,7 +1780,7 @@ static inline int __init drbg_healthcheck_sanity(void)
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
- /* overflow addtllen with additonal info string */
+ /* overflow addtllen with additional info string */
len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
BUG_ON(0 < len);
/* overflow max_bits */
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 08150b14e17e..43b0def3a225 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1533,16 +1533,11 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
- * This condition is met by the default RNG because it selects a favored
- * DRBG with a security strength of 256.
+ * This condition is met by stdrng because it selects a favored DRBG
+ * with a security strength of 256.
*/
- if (crypto_get_default_rng())
- return -EFAULT;
-
/* Step 3: obtain N returned_bits from the DRBG. */
- err = crypto_rng_get_bytes(crypto_default_rng,
- (u8 *)private_key, nbytes);
- crypto_put_default_rng();
+ err = crypto_stdrng_get_bytes(private_key, nbytes);
if (err)
return err;
diff --git a/crypto/geniv.c b/crypto/geniv.c
index 42eff6a7387c..04befe3a7f44 100644
--- a/crypto/geniv.c
+++ b/crypto/geniv.c
@@ -112,15 +112,7 @@ int aead_init_geniv(struct crypto_aead *aead)
struct crypto_aead *child;
int err;
- spin_lock_init(&ctx->lock);
-
- err = crypto_get_default_rng();
- if (err)
- goto out;
-
- err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
- crypto_aead_ivsize(aead));
- crypto_put_default_rng();
+ err = crypto_stdrng_get_bytes(ctx->salt, crypto_aead_ivsize(aead));
if (err)
goto out;
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 4ad729357441..652852649a31 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -41,6 +41,7 @@
#include <linux/fips.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <crypto/internal/rng.h>
@@ -172,7 +173,7 @@ void jent_read_random_block(struct sha3_ctx *hash_state, char *dst,
***************************************************************************/
struct jitterentropy {
- spinlock_t jent_lock;
+ struct mutex jent_lock;
struct rand_data *entropy_collector;
struct sha3_ctx hash_state;
};
@@ -181,14 +182,14 @@ static void jent_kcapi_cleanup(struct crypto_tfm *tfm)
{
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
- spin_lock(&rng->jent_lock);
+ mutex_lock(&rng->jent_lock);
memzero_explicit(&rng->hash_state, sizeof(rng->hash_state));
if (rng->entropy_collector)
jent_entropy_collector_free(rng->entropy_collector);
rng->entropy_collector = NULL;
- spin_unlock(&rng->jent_lock);
+ mutex_unlock(&rng->jent_lock);
}
static int jent_kcapi_init(struct crypto_tfm *tfm)
@@ -196,7 +197,7 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
struct jitterentropy *rng = crypto_tfm_ctx(tfm);
int ret = 0;
- spin_lock_init(&rng->jent_lock);
+ mutex_init(&rng->jent_lock);
/* Use SHA3-256 as conditioner */
sha3_256_init(&rng->hash_state);
@@ -208,7 +209,6 @@ static int jent_kcapi_init(struct crypto_tfm *tfm)
goto err;
}
- spin_lock_init(&rng->jent_lock);
return 0;
err:
@@ -223,7 +223,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
struct jitterentropy *rng = crypto_rng_ctx(tfm);
int ret = 0;
- spin_lock(&rng->jent_lock);
+ mutex_lock(&rng->jent_lock);
ret = jent_read_entropy(rng->entropy_collector, rdata, dlen);
@@ -249,7 +249,7 @@ static int jent_kcapi_random(struct crypto_rng *tfm,
ret = -EINVAL;
}
- spin_unlock(&rng->jent_lock);
+ mutex_unlock(&rng->jent_lock);
return ret;
}
diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c
index a1de55994d92..1bfe8370cf94 100644
--- a/crypto/krb5enc.c
+++ b/crypto/krb5enc.c
@@ -154,7 +154,7 @@ static int krb5enc_dispatch_encrypt(struct aead_request *req,
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
skcipher_request_set_tfm(skreq, enc);
- skcipher_request_set_callback(skreq, aead_request_flags(req),
+ skcipher_request_set_callback(skreq, flags,
krb5enc_encrypt_done, req);
skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv);
@@ -192,7 +192,8 @@ static void krb5enc_encrypt_ahash_done(void *data, int err)
krb5enc_insert_checksum(req, ahreq->result);
- err = krb5enc_dispatch_encrypt(req, 0);
+ err = krb5enc_dispatch_encrypt(req,
+ aead_request_flags(req) & ~CRYPTO_TFM_REQ_MAY_SLEEP);
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}
diff --git a/crypto/lrw.c b/crypto/lrw.c
index dd403b800513..aa31ab03a597 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -134,7 +134,7 @@ static int lrw_next_index(u32 *counter)
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
- * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the lrw_next_index() calls again.
*/
static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass)
diff --git a/crypto/rng.c b/crypto/rng.c
index c6165c8eb387..1d4b9177bad4 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -24,8 +24,7 @@
#include "internal.h"
static DEFINE_MUTEX(crypto_default_rng_lock);
-struct crypto_rng *crypto_default_rng;
-EXPORT_SYMBOL_GPL(crypto_default_rng);
+static struct crypto_rng *crypto_default_rng;
static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
@@ -106,7 +105,7 @@ struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alloc_rng);
-int crypto_get_default_rng(void)
+static int crypto_get_default_rng(void)
{
struct crypto_rng *rng;
int err;
@@ -135,15 +134,27 @@ unlock:
return err;
}
-EXPORT_SYMBOL_GPL(crypto_get_default_rng);
-void crypto_put_default_rng(void)
+static void crypto_put_default_rng(void)
{
mutex_lock(&crypto_default_rng_lock);
crypto_default_rng_refcnt--;
mutex_unlock(&crypto_default_rng_lock);
}
-EXPORT_SYMBOL_GPL(crypto_put_default_rng);
+
+int __crypto_stdrng_get_bytes(void *buf, unsigned int len)
+{
+ int err;
+
+ err = crypto_get_default_rng();
+ if (err)
+ return err;
+
+ err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
+ crypto_put_default_rng();
+ return err;
+}
+EXPORT_SYMBOL_GPL(__crypto_stdrng_get_bytes);
#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
int crypto_del_default_rng(void)
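
With crypto_get_default_rng()/crypto_put_default_rng() made static, outside users go through the single exported helper, which acquires, uses and releases the default DRBG in one call. A minimal usage sketch, assuming the declaration lands in <crypto/rng.h> alongside the other RNG helpers:

#include <crypto/rng.h>

static int example_gen_nonce(u8 *nonce, unsigned int len)
{
	/* Returns 0 on success, or an error if the default RNG could
	 * not be instantiated or failed to produce bytes. */
	return __crypto_stdrng_get_bytes(nonce, len);
}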
diff --git a/crypto/simd.c b/crypto/simd.c
index f71c4a334c7d..4e29f797709b 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -13,11 +13,11 @@
/*
* Shared crypto SIMD helpers. These functions dynamically create and register
- * an skcipher or AEAD algorithm that wraps another, internal algorithm. The
- * wrapper ensures that the internal algorithm is only executed in a context
- * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
- * If SIMD is already usable, the wrapper directly calls the internal algorithm.
- * Otherwise it defers execution to a workqueue via cryptd.
+ * an AEAD algorithm that wraps another, internal algorithm. The wrapper
+ * ensures that the internal algorithm is only executed in a context where SIMD
+ * instructions are usable, i.e. where may_use_simd() returns true. If SIMD is
+ * already usable, the wrapper directly calls the internal algorithm. Otherwise
+ * it defers execution to a workqueue via cryptd.
*
* This is an alternative to the internal algorithm implementing a fallback for
* the !may_use_simd() case itself.
@@ -30,232 +30,11 @@
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
-#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>
-/* skcipher support */
-
-struct simd_skcipher_alg {
- const char *ialg_name;
- struct skcipher_alg alg;
-};
-
-struct simd_skcipher_ctx {
- struct cryptd_skcipher *cryptd_tfm;
-};
-
-static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
-
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
- return crypto_skcipher_setkey(child, key, key_len);
-}
-
-static int simd_skcipher_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_request *subreq;
- struct crypto_skcipher *child;
-
- subreq = skcipher_request_ctx(req);
- *subreq = *req;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
- child = &ctx->cryptd_tfm->base;
- else
- child = cryptd_skcipher_child(ctx->cryptd_tfm);
-
- skcipher_request_set_tfm(subreq, child);
-
- return crypto_skcipher_encrypt(subreq);
-}
-
-static int simd_skcipher_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_request *subreq;
- struct crypto_skcipher *child;
-
- subreq = skcipher_request_ctx(req);
- *subreq = *req;
-
- if (!crypto_simd_usable() ||
- (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
- child = &ctx->cryptd_tfm->base;
- else
- child = cryptd_skcipher_child(ctx->cryptd_tfm);
-
- skcipher_request_set_tfm(subreq, child);
-
- return crypto_skcipher_decrypt(subreq);
-}
-
-static void simd_skcipher_exit(struct crypto_skcipher *tfm)
-{
- struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- cryptd_free_skcipher(ctx->cryptd_tfm);
-}
-
-static int simd_skcipher_init(struct crypto_skcipher *tfm)
-{
- struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct cryptd_skcipher *cryptd_tfm;
- struct simd_skcipher_alg *salg;
- struct skcipher_alg *alg;
- unsigned reqsize;
-
- alg = crypto_skcipher_alg(tfm);
- salg = container_of(alg, struct simd_skcipher_alg, alg);
-
- cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- ctx->cryptd_tfm = cryptd_tfm;
-
- reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
- reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
- reqsize += sizeof(struct skcipher_request);
-
- crypto_skcipher_set_reqsize(tfm, reqsize);
-
- return 0;
-}
-
-struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
- const char *algname,
- const char *drvname,
- const char *basename)
-{
- struct simd_skcipher_alg *salg;
- struct skcipher_alg *alg;
- int err;
-
- salg = kzalloc_obj(*salg);
- if (!salg) {
- salg = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- salg->ialg_name = basename;
- alg = &salg->alg;
-
- err = -ENAMETOOLONG;
- if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
- CRYPTO_MAX_ALG_NAME)
- goto out_free_salg;
-
- if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- drvname) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_salg;
-
- alg->base.cra_flags = CRYPTO_ALG_ASYNC |
- (ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
- alg->base.cra_priority = ialg->base.cra_priority;
- alg->base.cra_blocksize = ialg->base.cra_blocksize;
- alg->base.cra_alignmask = ialg->base.cra_alignmask;
- alg->base.cra_module = ialg->base.cra_module;
- alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
-
- alg->ivsize = ialg->ivsize;
- alg->chunksize = ialg->chunksize;
- alg->min_keysize = ialg->min_keysize;
- alg->max_keysize = ialg->max_keysize;
-
- alg->init = simd_skcipher_init;
- alg->exit = simd_skcipher_exit;
-
- alg->setkey = simd_skcipher_setkey;
- alg->encrypt = simd_skcipher_encrypt;
- alg->decrypt = simd_skcipher_decrypt;
-
- err = crypto_register_skcipher(alg);
- if (err)
- goto out_free_salg;
-
-out:
- return salg;
-
-out_free_salg:
- kfree(salg);
- salg = ERR_PTR(err);
- goto out;
-}
-EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
-
-void simd_skcipher_free(struct simd_skcipher_alg *salg)
-{
- crypto_unregister_skcipher(&salg->alg);
- kfree(salg);
-}
-EXPORT_SYMBOL_GPL(simd_skcipher_free);
-
-int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs)
-{
- int err;
- int i;
- const char *algname;
- const char *drvname;
- const char *basename;
- struct simd_skcipher_alg *simd;
-
- err = crypto_register_skciphers(algs, count);
- if (err)
- return err;
-
- for (i = 0; i < count; i++) {
- WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
- WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
- algname = algs[i].base.cra_name + 2;
- drvname = algs[i].base.cra_driver_name + 2;
- basename = algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto err_unregister;
- simd_algs[i] = simd;
- }
- return 0;
-
-err_unregister:
- simd_unregister_skciphers(algs, count, simd_algs);
- return err;
-}
-EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
-
-void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs)
-{
- int i;
-
- crypto_unregister_skciphers(algs, count);
-
- for (i = 0; i < count; i++) {
- if (simd_algs[i]) {
- simd_skcipher_free(simd_algs[i]);
- simd_algs[i] = NULL;
- }
- }
-}
-EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
-
-/* AEAD support */
-
struct simd_aead_alg {
const char *ialg_name;
struct aead_alg alg;
@@ -437,13 +216,17 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
const char *basename;
struct simd_aead_alg *simd;
+ for (i = 0; i < count; i++) {
+ if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) ||
+ strncmp(algs[i].base.cra_driver_name, "__", 2)))
+ return -EINVAL;
+ }
+
err = crypto_register_aeads(algs, count);
if (err)
return err;
for (i = 0; i < count; i++) {
- WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
- WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
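
With skcipher support gone, simd.c keeps only the AEAD wrapper, and the "__" name check now runs before registration and fails hard with -EINVAL instead of warning after the fact. The dispatch rule from the file comment can be condensed as below; struct simd_ctx is a hypothetical layout, crypto_simd_usable() and cryptd_aead_child() are the real helpers, and the real code additionally keeps falling back to cryptd while requests are still queued there, to preserve ordering:

#include <crypto/cryptd.h>
#include <crypto/internal/simd.h>

struct simd_ctx {			/* hypothetical context layout */
	struct cryptd_aead *cryptd_tfm;
};

static struct crypto_aead *simd_pick_child(struct simd_ctx *ctx)
{
	/* SIMD usable: run the internal algorithm synchronously. */
	if (crypto_simd_usable())
		return cryptd_aead_child(ctx->cryptd_tfm);
	/* Otherwise defer to cryptd's workqueue. */
	return &ctx->cryptd_tfm->base;
}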
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index e43ba59e7627..e54517605f5f 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -911,8 +911,14 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
break;
}
- if (klen)
- crypto_ahash_setkey(tfm, tvmem[0], klen);
+ if (klen) {
+ ret = crypto_ahash_setkey(tfm, tvmem[0], klen);
+ if (ret) {
+ pr_err("setkey() failed flags=%x: %d\n",
+ crypto_ahash_get_flags(tfm), ret);
+ break;
+ }
+ }
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
@@ -2795,6 +2801,11 @@ static int __init tcrypt_mod_init(void)
goto err_free_tv;
}
+ if (!num_mb) {
+ pr_warn("num_mb must be at least 1; forcing to 1\n");
+ num_mb = 1;
+ }
+
err = do_test(alg, type, mask, mode, num_mb);
if (err) {
@@ -2804,7 +2815,7 @@ static int __init tcrypt_mod_init(void)
pr_debug("all tests passed\n");
}
- /* We intentionaly return -EAGAIN to prevent keeping the module,
+ /* We intentionally return -EAGAIN to prevent keeping the module,
* unless we're running in fips mode. It does all its work from
* init() and doesn't offer any runtime functionality, but in
* the fips case, checking for a successful load is helpful.
diff --git a/crypto/tea.c b/crypto/tea.c
index cb05140e3470..7c66efcb5caa 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -2,7 +2,7 @@
/*
* Cryptographic API.
*
- * TEA, XTEA, and XETA crypto alogrithms
+ * TEA, XTEA, and XETA crypto algorithms
*
* The TEA and Xtended TEA algorithms were developed by David Wheeler
* and Roger Needham at the Computer Laboratory of Cambridge University.
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a8079cff7755..4d86efae65b2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4080,6 +4080,20 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = __VECS(aegis128_tv_template)
}
}, {
+ .alg = "authenc(hmac(md5),cbc(aes))",
+ .generic_driver = "authenc(hmac-md5-lib,cbc(aes-lib))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(hmac_md5_aes_cbc_tv_temp)
+ }
+ }, {
+ .alg = "authenc(hmac(md5),cbc(des))",
+ .generic_driver = "authenc(hmac-md5-lib,cbc(des-generic))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(hmac_md5_des_cbc_tv_temp)
+ }
+ }, {
.alg = "authenc(hmac(md5),cbc(des3_ede))",
.generic_driver = "authenc(hmac-md5-lib,cbc(des3_ede-generic))",
.test = alg_test_aead,
@@ -4094,6 +4108,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
}
}, {
+ .alg = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .generic_driver = "authenc(hmac-md5-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
+ .suite = {
+ .aead = __VECS(hmac_md5_aes_ctr_rfc3686_tv_temp)
+ }
+ }, {
.alg = "authenc(hmac(sha1),cbc(aes))",
.generic_driver = "authenc(hmac-sha1-lib,cbc(aes-lib))",
.test = alg_test_aead,
@@ -4128,12 +4149,17 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
- .test = alg_test_null,
+ .generic_driver = "authenc(hmac-sha1-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
.fips_allowed = 1,
+ .suite = {
+ .aead = __VECS(hmac_sha1_aes_ctr_rfc3686_tv_temp)
+ }
}, {
.alg = "authenc(hmac(sha224),cbc(aes))",
.generic_driver = "authenc(hmac-sha224-lib,cbc(aes-lib))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha224_aes_cbc_tv_temp)
}
@@ -4153,8 +4179,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
- .test = alg_test_null,
+ .generic_driver = "authenc(hmac-sha224-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
.fips_allowed = 1,
+ .suite = {
+ .aead = __VECS(hmac_sha224_aes_ctr_rfc3686_tv_temp)
+ }
}, {
.alg = "authenc(hmac(sha256),cbc(aes))",
.generic_driver = "authenc(hmac-sha256-lib,cbc(aes-lib))",
@@ -4190,12 +4220,17 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
- .test = alg_test_null,
+ .generic_driver = "authenc(hmac-sha256-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
.fips_allowed = 1,
+ .suite = {
+ .aead = __VECS(hmac_sha256_aes_ctr_rfc3686_tv_temp)
+ }
}, {
.alg = "authenc(hmac(sha384),cbc(aes))",
.generic_driver = "authenc(hmac-sha384-lib,cbc(aes-lib))",
.test = alg_test_aead,
+ .fips_allowed = 1,
.suite = {
.aead = __VECS(hmac_sha384_aes_cbc_tv_temp)
}
@@ -4226,8 +4261,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
- .test = alg_test_null,
+ .generic_driver = "authenc(hmac-sha384-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
.fips_allowed = 1,
+ .suite = {
+ .aead = __VECS(hmac_sha384_aes_ctr_rfc3686_tv_temp)
+ }
}, {
.alg = "authenc(hmac(sha512),cbc(aes))",
.generic_driver = "authenc(hmac-sha512-lib,cbc(aes-lib))",
@@ -4256,8 +4295,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
- .test = alg_test_null,
+ .generic_driver = "authenc(hmac-sha512-lib,rfc3686(ctr(aes-lib)))",
+ .test = alg_test_aead,
.fips_allowed = 1,
+ .suite = {
+ .aead = __VECS(hmac_sha512_aes_ctr_rfc3686_tv_temp)
+ }
}, {
.alg = "blake2b-160",
.generic_driver = "blake2b-160-lib",
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index efbc707bb8e3..9b4d7e11c9fd 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14453,6 +14453,261 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
},
};
+static const struct aead_testvec hmac_md5_aes_cbc_tv_temp[] = {
+ { /* RFC 3602 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
+ "\x51\x2e\x03\xd5\x34\x12\x00\x06",
+ .klen = 8 + 16 + 16,
+ .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+ "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+ "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ .alen = 16,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
+ "\x27\x08\x94\x2d\xbe\x77\x18\x1a"
+ "\x22\x10\xf2\x25\x7f\xe9\x0d\x92"
+ "\xfc\x00\x55\xb1\xd0\xb5\x3a\x74",
+ .clen = 16 + 16,
+ }, { /* RFC 3602 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
+ "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
+ .klen = 8 + 16 + 16,
+ .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+ "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+ .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+ "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+ .alen = 16,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a"
+ "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a"
+ "\x75\x86\x60\x2d\x25\x3c\xff\xf9"
+ "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1"
+ "\x31\xef\xd1\x5e\x2d\x83\xde\x59"
+ "\x5c\x63\x6c\xd6\x6e\x96\x8c\x5b",
+ .clen = 32 + 16,
+ }, { /* RFC 3602 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x6c\x3e\xa0\x47\x76\x30\xce\x21"
+ "\xa2\xce\x33\x4a\xa7\x46\xc2\xcd",
+ .klen = 8 + 16 + 16,
+ .iv = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+ "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+ .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+ "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+ .alen = 16,
+ .ptext = "This is a 48-byte message (exactly 3 AES blocks)",
+ .plen = 48,
+ .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
+ "\xd4\x93\x66\x5d\x33\xf0\xe8\x86"
+ "\x2d\xea\x54\xcd\xb2\x93\xab\xc7"
+ "\x50\x69\x39\x27\x67\x72\xf8\xd5"
+ "\x02\x1c\x19\x21\x6b\xad\x52\x5c"
+ "\x85\x79\x69\x5d\x83\xba\x26\x84"
+ "\xa1\x9e\xc5\x65\x43\xc5\x51\x70"
+ "\xb5\xc8\x38\xce\xbb\x3b\xc6\x0f",
+ .clen = 48 + 16,
+ }, { /* RFC 3602 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x56\xe4\x7a\x38\xc5\x59\x89\x74"
+ "\xbc\x46\x90\x3d\xba\x29\x03\x49",
+ .klen = 8 + 16 + 16,
+ .iv = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+ "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+ .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+ "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+ .alen = 16,
+ .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
+ "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf",
+ .plen = 64,
+ .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e"
+ "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa"
+ "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6"
+ "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e"
+ "\x35\x90\x7a\xa6\x32\xc3\xff\xdf"
+ "\x86\x8b\xb7\xb2\x9d\x3d\x46\xad"
+ "\x83\xce\x9f\x9a\x10\x2e\xe9\x9d"
+ "\x49\xa5\x3e\x87\xf4\xc3\xda\x55"
+ "\x19\x90\xcc\x2c\x6d\x76\x0f\xd6"
+ "\x6c\x54\x09\xb1\x3e\x98\x0c\x11",
+ .clen = 64 + 16,
+ }, { /* RFC 3602 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x10" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x90\xd3\x82\xb4\x10\xee\xba\x7a"
+ "\xd9\x38\xc4\x6c\xec\x1a\x82\xbf",
+ .klen = 8 + 16 + 16,
+ .iv = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+ "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+ .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
+ "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+ "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+ .alen = 24,
+ .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
+ "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x01\x02\x03\x04\x05\x06\x07\x08"
+ "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01",
+ .plen = 80,
+ .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6"
+ "\xa9\x45\x3e\x19\x4e\x12\x08\x49"
+ "\xa4\x87\x0b\x66\xcc\x6b\x99\x65"
+ "\x33\x00\x13\xb4\x89\x8d\xc8\x56"
+ "\xa4\x69\x9e\x52\x3a\x55\xdb\x08"
+ "\x0b\x59\xec\x3a\x8e\x4b\x7e\x52"
+ "\x77\x5b\x07\xd1\xdb\x34\xed\x9c"
+ "\x53\x8a\xb5\x0c\x55\x1b\x87\x4a"
+ "\xa2\x69\xad\xd0\x47\xad\x2d\x59"
+ "\x13\xac\x19\xb7\xcf\xba\xd4\xa6"
+ "\x9f\x6f\xa4\x85\x28\xf1\xc9\xea"
+ "\xe1\xd0\x7d\x30\x4a\xd0\x81\x12",
+ .clen = 80 + 16,
+ }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x18" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 8 + 16 + 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .alen = 16,
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .plen = 64,
+ .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d"
+ "\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
+ "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4"
+ "\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
+ "\x57\x1b\x24\x20\x12\xfb\x7a\xe0"
+ "\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
+ "\x08\xb0\xe2\x79\x88\x59\x88\x81"
+ "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd"
+ "\xc3\x46\xe5\x2c\x07\x27\x50\xca"
+ "\x50\x4a\x83\x5f\x72\xd9\x76\x8d",
+ .clen = 64 + 16,
+ }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x20" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 8 + 16 + 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .alen = 16,
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .plen = 64,
+ .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba"
+ "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
+ "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d"
+ "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
+ "\x39\xf2\x33\x69\xa9\xd9\xba\xcf"
+ "\xa5\x30\xe2\x63\x04\x23\x14\x61"
+ "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc"
+ "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b"
+ "\x59\x62\x06\x71\x57\xdf\x18\x15"
+ "\x32\x02\xfa\xce\x2c\xd2\x1a\x8d",
+ .clen = 64 + 16,
+ },
+};
+
static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -14764,6 +15019,227 @@ static const struct aead_testvec hmac_sha1_aes_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_sha1_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 20 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\x70\xdc\x6b\x62\x43\xa1\x2f\x08"
+ "\xf1\xec\x93\x7d\x69\xb2\x8e\x1f"
+ "\x0a\x97\x39\x86",
+ .clen = 16 + 20,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 20 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\x6b\x7b\x4d\x39\x36\x1c\x12\x5f"
+ "\x72\xd2\x88\xb2\x26\xa6\xa6\xb5"
+ "\x1d\x3a\x49\xa6",
+ .clen = 32 + 20,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 20 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\x2c\x86\xa0\x90\x8e\xc1\x02\x1d"
+ "\x51\xdc\xd6\x21\xc7\x30\xcc\x32"
+ "\x38\x55\x47\x64",
+ .clen = 36 + 20,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 20 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\xe9\x4e\x49\xf0\x6b\x8d\x58\x2b"
+ "\x26\x7f\xf3\xab\xeb\x2f\x74\x2f"
+ "\x45\x43\x64\xc1",
+ .clen = 16 + 20,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 20 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\xab\xc4\xfa\x6d\x20\xe1\xce\x72"
+ "\x0e\x92\x4e\x97\xaa\x4d\x30\x84"
+ "\xb6\xd8\x4d\x3b",
+ .clen = 32 + 20,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 20 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\x3d\x6c\x23\x27\xda\x0e\x7f\x29"
+ "\xfd\x8d\x3c\x1b\xf7\x7a\x63\xd9"
+ "\x7e\x0f\xe9\xf6",
+ .clen = 16 + 20,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 20 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\xe7\xee\x22\xa4\xdd\xbf\x5d\x44"
+ "\x3b\x43\x1c\x69\x55\x11\xd5\xad"
+ "\x14\x5f\x44\xa6",
+ .clen = 32 + 20,
+ },
+};
+
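
In the rfc3686 vectors above (and in the sha224/sha256/sha384/sha512 variants that follow), the encryption-key portion is four bytes longer than the AES key itself because the rfc3686() template consumes a trailing 32-bit nonce as part of its key material; the "\x00\x00\x00\x14" enc-key-length field is 0x14 == 20 == 16 + 4 for exactly that reason:

#include <crypto/ctr.h>		/* CTR_RFC3686_NONCE_SIZE == 4 */

/*
 * .klen = 8 + 20 + 20 decodes as:
 *   8  -> RTA_SPACE() of the authenc key parameter header
 *   20 -> HMAC-SHA1 authentication key
 *   20 -> AES-128 key (16) + RFC 3686 nonce (CTR_RFC3686_NONCE_SIZE)
 */
#define RFC3686_AES128_ENCKEYLEN (16 + CTR_RFC3686_NONCE_SIZE)	/* 0x14 */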
static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -15095,6 +15571,241 @@ static const struct aead_testvec hmac_sha224_aes_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_sha224_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 28 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\x36\xb4\x3b\x9c\x62\xed\xcf\x77"
+ "\xdc\x19\x27\x3f\x92\x80\x52\xce"
+ "\x8f\xad\x01\x0b\x79\xda\x04\x83"
+ "\xcb\x45\x1a\x52",
+ .clen = 16 + 28,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 28 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\x7f\xe4\x8f\xa7\x06\x71\xe9\xe5"
+ "\x16\x79\xef\xf9\x7e\x5c\x93\x4d"
+ "\xa0\xf8\x3b\x3a\xaa\x1c\xc0\xd9"
+ "\x6b\x48\x49\x01",
+ .clen = 32 + 28,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 28 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\xb0\x19\x45\xee\xa7\x31\xd9\xd0"
+ "\x74\x6b\xb8\xb1\x67\x61\x2f\x8c"
+ "\x68\xde\xe3\xc9\x3b\x0c\x72\xda"
+ "\x48\xba\x1b\x51",
+ .clen = 36 + 28,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 28 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\xfd\xf5\x35\x26\x50\x3d\xdf\x80"
+ "\x6e\xbe\xba\x8d\x56\xf3\x03\xb7"
+ "\x27\xb8\x13\xe8\x72\x8f\xc9\x52"
+ "\x4a\xb7\xc3\x3a",
+ .clen = 16 + 28,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 28 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\x72\x89\xa8\x04\xa5\xac\x8f\x29"
+ "\xe6\xb8\x58\xe8\xcf\x6a\x91\x89"
+ "\xd3\x66\x3b\xdc\xce\x43\x23\xb7"
+ "\x6a\xdd\x9d\xbd",
+ .clen = 32 + 28,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 28 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\xfe\xdf\x6f\x62\x8a\x79\xb5\x34"
+ "\xd0\x6f\x32\xaf\x31\x50\x5b\x1f"
+ "\xe0\x6d\x0b\xbc\x02\x25\xee\x74"
+ "\x7a\xdf\x97\x3c",
+ .clen = 16 + 28,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 28 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\x19\x1e\x9c\x2c\x6d\x4e\x21\xda"
+ "\x6c\x4d\x88\x90\xf8\x5f\xa5\x9d"
+ "\xb4\xd4\x40\xad\xfa\x67\x3f\x0e"
+ "\x11\x12\xd6\x10",
+ .clen = 32 + 28,
+ },
+};
+
static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -15378,6 +16089,241 @@ static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_sha256_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 32 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\x9b\xa2\x34\x62\xe5\xb3\xe8\x2d"
+ "\x6d\xdb\x93\x64\xa5\x08\x2e\x77"
+ "\x72\x1f\x21\x94\xc7\xbe\x14\xa6"
+ "\xcd\xea\x96\xa1\x29\x8f\x30\xc3",
+ .clen = 16 + 32,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 32 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\x22\xf7\x95\xa8\xbb\xcd\x19\xf4"
+ "\x58\x16\x54\x28\x2b\xf4\x52\xe7"
+ "\x5c\x6c\xe1\x44\x0b\xd5\x10\x6e"
+ "\xe1\xf7\x04\xc4\x2c\xab\x93\xdd",
+ .clen = 32 + 32,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 32 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\x1d\x05\x5f\x77\x3b\x4f\x5c\x21"
+ "\x29\xea\xf1\xa8\x71\x49\x7b\x0b"
+ "\x66\x0d\xff\x18\x81\x63\xfc\xc3"
+ "\x91\xb6\x38\xc8\xcd\x2d\x39\x83",
+ .clen = 36 + 32,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 32 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\x8d\x03\x77\xb2\x1c\xc9\xe0\xac"
+ "\xde\x69\xbe\x8a\xef\x5b\x13\x74"
+ "\x1d\x39\xbc\xdc\x95\xa4\xbf\xc3"
+ "\xd5\xc6\xd1\xda\xda\x3b\xca\x78",
+ .clen = 16 + 32,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 32 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\x34\x06\x2b\x3d\xf1\xa8\x3d\xf1"
+ "\xa6\x5e\x5c\x1a\xdb\x0c\xb5\x1e"
+ "\x8f\xdb\xf4\xca\x7d\x09\x5e\x81"
+ "\xdb\x32\x07\x4a\x1d\x1c\x6d\x83",
+ .clen = 32 + 32,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 32 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\xc3\xb4\x5f\xb0\xbf\xf5\x1b\xff"
+ "\x7c\xf1\x79\x00\x63\x50\xdd\x77"
+ "\xc0\x4a\xba\xcd\xdc\x47\x05\x2a"
+ "\x5d\x85\x2d\x83\x44\xca\x79\x2c",
+ .clen = 16 + 32,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 32 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\xc8\x59\x5d\xe1\xba\xac\x13\x82"
+ "\xfd\x21\x7c\x8c\x23\x31\x04\x02"
+ "\x9e\x69\x5b\x57\xa8\x13\xe7\x21"
+ "\x60\x0c\x24\xc2\x80\x4a\x93\x6e",
+ .clen = 32 + 32,
+ },
+};
+
static const struct aead_testvec hmac_sha384_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -15689,6 +16635,269 @@ static const struct aead_testvec hmac_sha384_aes_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_sha384_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 48 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\x45\x51\x59\x72\x16\xd3\xc6\x15"
+ "\x25\x1e\xe8\x92\x2e\x47\x52\xcc"
+ "\x91\x9c\x24\xef\x11\xb2\x53\x00"
+ "\x10\x20\x43\x06\xe2\x35\x88\x9e"
+ "\x18\x32\x5a\x79\x7d\x73\x7e\x89"
+ "\xfe\xa1\xda\xa4\x86\xc4\x2a\x04",
+ .clen = 16 + 48,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 48 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\x83\x65\x32\x1e\x6b\x60\xe6\x4a"
+ "\xe2\xab\x52\x2b\xa6\x70\x3a\xfa"
+ "\xd2\xec\x83\xe4\x31\x0c\x28\x40"
+ "\x9b\x5e\x18\xa4\xdc\x48\xb8\x56"
+ "\x33\xab\x7f\x2b\xaf\xe4\x3a\xe3"
+ "\x8a\x61\xf6\x22\xb4\x6b\xfe\x7d",
+ .clen = 32 + 48,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 48 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\x4a\xaa\xad\x3b\x3b\xb6\x9a\xba"
+ "\xa1\x7b\xc6\xce\x96\xc3\xff\x67"
+ "\xf3\x0c\x33\x57\xf0\x51\x24\x08"
+ "\xed\x4f\x6a\x9c\x22\x42\xbd\x18"
+ "\x97\x74\x68\x36\x00\xf1\x69\x3a"
+ "\x18\x77\x40\xf0\x56\xba\xba\xe0",
+ .clen = 36 + 48,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 48 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\x36\xd6\xc7\x55\xac\xb6\x0b\x14"
+ "\x95\x71\xf9\x86\x30\xe3\x96\xc3"
+ "\x76\x85\x6d\xa5\x06\xed\x6f\x34"
+ "\xcc\x1f\xcc\x2d\x88\x06\xb0\x1d"
+ "\xbe\xd9\xa2\xd3\x64\xf1\x33\x03"
+ "\x13\x50\x8f\xae\x61\x2d\x82\xb8",
+ .clen = 16 + 48,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 48 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\x80\x12\x67\x22\xf2\x4d\x9b\xbf"
+ "\xdc\x38\xd3\xaa\x12\xc0\x58\x1a"
+ "\x9a\x62\x6e\x42\x3d\x44\x63\xdd"
+ "\xee\x7e\xe3\xa3\xdf\x2a\x65\x05"
+ "\xd0\xc1\xd2\x54\x55\x35\x5c\xc7"
+ "\xb0\xb5\xb1\x36\xe0\x0b\xaf\x72",
+ .clen = 32 + 48,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 48 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\xb1\x7b\xb1\xec\xca\x94\x55\xc4"
+ "\x3f\x2b\xb1\x70\x04\x91\xf5\x9d"
+ "\x1a\xc0\xe1\x2a\x93\x5f\x96\x2a"
+ "\x12\x85\x38\x36\xe1\xb2\xe9\xf0"
+ "\xf2\x6e\x5d\x81\xcc\x49\x07\x9c"
+ "\x5b\x88\xc8\xcc\xc4\x21\x4f\x32",
+ .clen = 16 + 48,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 48 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\xd6\x96\xbb\x12\x39\xc4\x4d\xe2"
+ "\x4c\x02\xe7\x1f\xdc\xb2\xb1\x57"
+ "\x38\x0d\xdd\x13\xb3\x89\x57\x9e"
+ "\x1f\xb5\x48\x32\xc4\xd3\x9d\x1f"
+ "\x68\xab\x8d\xc6\xa8\x05\x3a\xc2"
+ "\x87\xaf\x23\xb3\xe4\x1b\xde\xb3",
+ .clen = 32 + 48,
+ },
+};
+
static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = {
{ /* RFC 3602 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -16028,6 +17237,354 @@ static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_md5_des_cbc_tv_temp[] = {
+ { /*Generated with cryptopp*/
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x08" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
+ .klen = 8 + 16 + 8,
+ .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+ .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01"
+ "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+ .alen = 16,
+ .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
+ "\x53\x20\x63\x65\x65\x72\x73\x74"
+ "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
+ "\x20\x79\x65\x53\x72\x63\x74\x65"
+ "\x20\x73\x6f\x54\x20\x6f\x61\x4d"
+ "\x79\x6e\x53\x20\x63\x65\x65\x72"
+ "\x73\x74\x54\x20\x6f\x6f\x4d\x20"
+ "\x6e\x61\x20\x79\x65\x53\x72\x63"
+ "\x74\x65\x20\x73\x6f\x54\x20\x6f"
+ "\x61\x4d\x79\x6e\x53\x20\x63\x65"
+ "\x65\x72\x73\x74\x54\x20\x6f\x6f"
+ "\x4d\x20\x6e\x61\x20\x79\x65\x53"
+ "\x72\x63\x74\x65\x20\x73\x6f\x54"
+ "\x20\x6f\x61\x4d\x79\x6e\x53\x20"
+ "\x63\x65\x65\x72\x73\x74\x54\x20"
+ "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79",
+ .plen = 128,
+ .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8"
+ "\x54\x31\x85\x37\xed\x6b\x01\x8d"
+ "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1"
+ "\x41\xaa\x33\x91\xa7\x7d\x99\x88"
+ "\x4d\x85\x6e\x2f\xa3\x69\xf5\x82"
+ "\x3a\x6f\x25\xcb\x7d\x58\x1f\x9b"
+ "\xaa\x9c\x11\xd5\x76\x67\xce\xde"
+ "\x56\xd7\x5a\x80\x69\xea\x3a\x02"
+ "\xf0\xc7\x7c\xe3\xcb\x40\xe5\x52"
+ "\xd1\x10\x92\x78\x0b\x8e\x5b\xf1"
+ "\xe3\x26\x1f\xe1\x15\x41\xc7\xba"
+ "\x99\xdb\x08\x51\x1c\xd3\x01\xf4"
+ "\x87\x47\x39\xb8\xd2\xdd\xbd\xfb"
+ "\x66\x13\xdf\x1c\x01\x44\xf0\x7a"
+ "\x1a\x6b\x13\xf5\xd5\x0b\xb8\xba"
+ "\x53\xba\xe1\x76\xe3\x82\x07\x86"
+ "\x95\x9e\x7d\x37\x1e\x60\xaf\x7c"
+ "\x53\x12\x61\x68\xef\xb4\x47\xa6",
+ .clen = 128 + 16,
+ },
+};
+
+static const struct aead_testvec hmac_sha512_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 64 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\xa4\x45\x3a\x44\x9c\xe5\x1c\xd9"
+ "\x10\x43\x51\x2e\x76\x5e\xf8\x9d"
+ "\x03\x12\x1a\x31\x00\x33\x10\xb4"
+ "\x94\x4b\x70\x84\x6c\xda\xb1\x46"
+ "\x24\xb6\x3b\x2a\xec\xd5\x67\xb8"
+ "\x65\xa2\xbd\xac\x18\xe2\xf8\x55"
+ "\xc6\x91\xb0\x92\x84\x2d\x74\x44"
+ "\xa7\xee\xc3\x44\xa0\x07\x0e\x62",
+ .clen = 16 + 64,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 64 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\xec\x67\x0d\xb3\xbd\x98\x13\x01"
+ "\x2b\x04\x9b\xe6\x06\x67\x3c\x76"
+ "\xcd\x41\xb7\xcc\x70\x6c\x7f\xc8"
+ "\x67\xbd\x22\x39\xb2\xaa\xe8\x88"
+ "\xe0\x4f\x81\x52\xdf\xc9\xc3\xd6"
+ "\x44\xf4\x66\x33\x87\x64\x61\x02"
+ "\x02\xa2\x64\x15\x2b\xe9\x0b\x3d"
+ "\x4c\xea\xa1\xa5\xa7\xc9\xd3\x1b",
+ .clen = 32 + 64,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x22\x33\x44\x55\x66\x77\x88\x99"
+ "\xaa\xbb\xcc\xdd\xee\xff\x11\x22"
+ "\x33\x44\x55\x66\x77\x88\x99\xaa"
+ "\xbb\xcc\xdd\xee\xff\x11\x22\x33"
+ "\x44\x55\x66\x77\x88\x99\xaa\xbb"
+ "\xcc\xdd\xee\xff\x11\x22\x33\x44"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 64 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\x6f\x90\xb6\xa3\x35\x43\x59\xff"
+ "\x1e\x32\xd6\xfe\xfa\x33\xf9\xf0"
+ "\x31\x2f\x03\x2d\x88\x1d\xab\xbf"
+ "\x0e\x19\x16\xd9\xf3\x98\x3e\xdd"
+ "\x0c\xec\xfe\xe8\x89\x13\x91\x15"
+ "\xf6\x61\x65\x5c\x1b\x7d\xde\xc0"
+ "\xe4\xba\x6d\x27\xe2\x89\x23\x24"
+ "\x15\x82\x37\x3d\x48\xd3\xc9\x32",
+ .clen = 36 + 64,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 64 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\x25\xea\xdc\xad\x52\xb8\x0f\x70"
+ "\xe7\x39\x83\x80\x10\x3f\x18\xc4"
+ "\xf8\x59\x14\x25\x5f\xba\x20\x87"
+ "\x0b\x04\x5e\xf7\xde\x41\x39\xff"
+ "\xa2\xee\x84\x3f\x9d\x38\xfd\x17"
+ "\xc0\x66\x5e\x74\x39\xe3\xd3\xd7"
+ "\x3d\xbc\xe3\x99\x2f\xe7\xef\x37"
+ "\x61\x03\xf3\x9e\x01\xaf\xba\x9d",
+ .clen = 16 + 64,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 64 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\x51\xa3\xe6\x1d\x23\x7d\xd1\x18"
+ "\x55\x9c\x1c\x92\x2b\xc2\xcd\xfe"
+ "\x8a\xa8\xa5\x96\x65\x2e\x9d\xdb"
+ "\x06\xd2\x1c\x57\x2b\x76\xb5\x9c"
+ "\xd4\x3e\x8b\x61\x54\x2d\x08\xe5"
+ "\xb2\xf8\x88\x20\x0c\xad\xe8\x85"
+ "\x61\x8e\x5c\xa4\x96\x2c\xe2\x7d"
+ "\x4f\xb6\x1d\xb2\x8c\xd7\xe3\x38",
+ .clen = 32 + 64,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 64 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\x6b\x68\x0b\x99\x9a\x4d\xc8\xb9"
+ "\x35\xea\xcd\x56\x3f\x40\xa2\xb6"
+ "\x68\xda\x59\xd8\xa0\x89\xcd\x52"
+ "\xb1\x6e\xed\xc1\x42\x10\xa5\x0f"
+ "\x88\x0b\x80\xce\xc4\x67\xf0\x45"
+ "\x5d\xb2\x9e\xde\x1c\x79\x52\x0d"
+ "\xff\x75\x36\xd5\x0f\x52\x8e\xe5"
+ "\x31\x85\xcf\x1d\x31\xf8\x62\x67",
+ .clen = 16 + 64,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
+ "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x57"
+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 64 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\x9a\xac\x38\xbd\xf3\xcf\xd5\xd0"
+ "\x09\x07\xa6\xe1\x7f\xd6\x79\x98"
+ "\x4e\x90\x0e\xc0\x3d\xa0\xf2\x12"
+ "\x52\x79\x9c\x17\xff\xb9\xb8\xe3"
+ "\x2f\x31\xcb\xbd\x63\x70\x72\x7b"
+ "\x4e\x1e\xd1\xde\xb5\x6b\x7d\x54"
+ "\x68\x56\xdd\xe5\x53\xee\x29\xd2"
+ "\x85\xa1\x73\x61\x00\xa9\x26\x8f",
+ .clen = 32 + 64,
+ },
+};
+
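/*
 * Editor's note (not part of the patch): a minimal sketch of how the
 * .key blobs in these authenc test vectors are laid out, assuming the
 * crypto_authenc_extractkeys() format from <crypto/authenc.h>: an
 * rtattr header (length 8, type CRYPTO_AUTHENC_KEYA_PARAM == 1, stored
 * in host endianness -- hence the #ifdef __LITTLE_ENDIAN blocks above),
 * a big-endian enckeylen, the HMAC key, then the cipher key.  For
 * rfc3686(ctr(aes)) the cipher key carries the 4-byte CTR nonce at the
 * end, so enckeylen is 0x14/0x1c/0x24 (16/24/32 + 4).
 */
#include <crypto/authenc.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>

static unsigned int build_authenc_key(u8 *buf, const u8 *authkey,
				      unsigned int authkeylen,
				      const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param = RTA_DATA(rta);

	rta->rta_len = RTA_LENGTH(sizeof(*param));	/* 8 */
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;	/* 1 */
	param->enckeylen = cpu_to_be32(enckeylen);
	memcpy(buf + RTA_LENGTH(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_LENGTH(sizeof(*param)) + authkeylen, enckey, enckeylen);
	return RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen; /* == .klen */
}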
static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
@@ -16341,6 +17898,213 @@ static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = {
},
};
+static const struct aead_testvec hmac_md5_aes_ctr_rfc3686_tv_temp[] = {
+ { /* RFC 3686 Case 1 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\xae\x68\x52\xf8\x12\x10\x67\xcc"
+ "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
+ "\x00\x00\x00\x30",
+ .klen = 8 + 16 + 20,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\xe4\x09\x5d\x4f\xb7\xa7\xb3\x79"
+ "\x2d\x61\x75\xa3\x26\x13\x11\xb8"
+ "\xdd\x5f\xea\x13\x2a\xf2\xb0\xf1"
+ "\x91\x79\x46\x40\x62\x6c\x87\x5b",
+ .clen = 16 + 16,
+ }, { /* RFC 3686 Case 2 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x7e\x24\x06\x78\x17\xfa\xe0\xd7"
+ "\x43\xd6\xce\x1f\x32\x53\x91\x63"
+ "\x00\x6c\xb6\xdb",
+ .klen = 8 + 16 + 20,
+ .iv = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .assoc = "\xc0\x54\x3b\x59\xda\x48\xd9\x0b",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x51\x04\xa1\x06\x16\x8a\x72\xd9"
+ "\x79\x0d\x41\xee\x8e\xda\xd3\x88"
+ "\xeb\x2e\x1e\xfc\x46\xda\x57\xc8"
+ "\xfc\xe6\x30\xdf\x91\x41\xbe\x28"
+ "\x03\x39\x23\xcd\x22\x5f\x1b\x8b"
+ "\x93\x70\xbc\x45\xf3\xba\xde\x2e",
+ .clen = 32 + 16,
+ }, { /* RFC 3686 Case 3 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x14" /* enc key length */
+ "\x11\x22\x33\x44\x55\x66\x77\x88"
+ "\x99\xaa\xbb\xcc\xdd\xee\xff\x11"
+ "\x76\x91\xbe\x03\x5e\x50\x20\xa8"
+ "\xac\x6e\x61\x85\x29\xf9\xa0\xdc"
+ "\x00\xe0\x01\x7b",
+ .klen = 8 + 16 + 20,
+ .iv = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .assoc = "\x27\x77\x7f\x3f\x4a\x17\x86\xf0",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+ "\x20\x21\x22\x23",
+ .plen = 36,
+ .ctext = "\xc1\xcf\x48\xa8\x9f\x2f\xfd\xd9"
+ "\xcf\x46\x52\xe9\xef\xdb\x72\xd7"
+ "\x45\x40\xa4\x2b\xde\x6d\x78\x36"
+ "\xd5\x9a\x5c\xea\xae\xf3\x10\x53"
+ "\x25\xb2\x07\x2f"
+ "\xb4\x40\x0c\x7b\x4c\x55\x8a\x4b"
+ "\x04\xf7\x48\x9e\x0f\x9a\xae\x73",
+ .clen = 36 + 16,
+ }, { /* RFC 3686 Case 4 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x16\xaf\x5b\x14\x5f\xc9\xf5\x79"
+ "\xc1\x75\xf9\x3e\x3b\xfb\x0e\xed"
+ "\x86\x3d\x06\xcc\xfd\xb7\x85\x15"
+ "\x00\x00\x00\x48",
+ .klen = 8 + 16 + 28,
+ .iv = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .assoc = "\x36\x73\x3c\x14\x7d\x6d\x93\xcb",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x4b\x55\x38\x4f\xe2\x59\xc9\xc8"
+ "\x4e\x79\x35\xa0\x03\xcb\xe9\x28"
+ "\xc4\x5d\xa1\x16\x6c\x2d\xa5\x43"
+ "\x60\x7b\x58\x98\x11\x9b\x50\x06",
+ .clen = 16 + 16,
+ }, { /* RFC 3686 Case 5 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x1c" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\x7c\x5c\xb2\x40\x1b\x3d\xc3\x3c"
+ "\x19\xe7\x34\x08\x19\xe0\xf6\x9c"
+ "\x67\x8c\x3d\xb8\xe6\xf6\xa9\x1a"
+ "\x00\x96\xb0\x3b",
+ .klen = 8 + 16 + 28,
+ .iv = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .assoc = "\x02\x0c\x6e\xad\xc2\xcb\x50\x0d",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\x45\x32\x43\xfc\x60\x9b\x23\x32"
+ "\x7e\xdf\xaa\xfa\x71\x31\xcd\x9f"
+ "\x84\x90\x70\x1c\x5a\xd4\xa7\x9c"
+ "\xfc\x1f\xe0\xff\x42\xf4\xfb\x00"
+ "\xc5\xec\x47\x33\xae\x05\x28\x49"
+ "\xd5\x2b\x08\xad\x10\x98\x24\x01",
+ .clen = 32 + 16,
+ }, { /* RFC 3686 Case 7 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x77\x6b\xef\xf2\x85\x1d\xb0\x6f"
+ "\x4c\x8a\x05\x42\xc8\x69\x6f\x6c"
+ "\x6a\x81\xaf\x1e\xec\x96\xb4\xd3"
+ "\x7f\xc1\xd6\x89\xe6\xc1\xc1\x04"
+ "\x00\x00\x00\x60",
+ .klen = 8 + 16 + 36,
+ .iv = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .assoc = "\xdb\x56\x72\xc9\x7a\xa8\xf0\xb2",
+ .alen = 8,
+ .ptext = "Single block msg",
+ .plen = 16,
+ .ctext = "\x14\x5a\xd0\x1d\xbf\x82\x4e\xc7"
+ "\x56\x08\x63\xdc\x71\xe3\xe0\xc0"
+ "\xc6\x26\xb2\x27\x0d\x21\xd4\x40"
+ "\x6c\x4f\x53\xea\x19\x75\xda\x8e",
+ .clen = 16 + 16,
+ }, { /* RFC 3686 Case 8 */
+#ifdef __LITTLE_ENDIAN
+ .key = "\x08\x00" /* rta length */
+ "\x01\x00" /* rta type */
+#else
+ .key = "\x00\x08" /* rta length */
+ "\x00\x01" /* rta type */
+#endif
+ "\x00\x00\x00\x24" /* enc key length */
+ "\x20\x21\x22\x23\x24\x25\x26\x27"
+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
+ "\xf6\xd6\x6d\x6b\xd5\x2d\x59\xbb"
+ "\x07\x96\x36\x58\x79\xef\xf8\x86"
+ "\xc6\x6d\xd5\x1a\x5b\x6a\x99\x74"
+ "\x4b\x50\x59\x0c\x87\xa2\x38\x84"
+ "\x00\xfa\xac\x24",
+ .klen = 8 + 16 + 36,
+ .iv = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .assoc = "\xc1\x58\x5e\xf1\x5a\x43\xd8\x75",
+ .alen = 8,
+ .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .plen = 32,
+ .ctext = "\xf0\x5e\x23\x1b\x38\x94\x61\x2c"
+ "\x49\xee\x00\x0b\x80\x4e\xb2\xa9"
+ "\xb8\x30\x6b\x50\x8f\x83\x9d\x6a"
+ "\x55\x30\x83\x1d\x93\x44\xaf\x1c"
+ "\x8c\x4d\x2a\x8d\x23\x47\x59\x6f"
+ "\x1e\x74\x62\x39\xed\x14\x50\x6c",
+ .clen = 32 + 16,
+ },
+};
+
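/*
 * Editor's note (not part of the patch): in the vectors above, .clen is
 * always .plen plus the full HMAC tag (16 bytes for MD5, 64 for
 * SHA-512), and .klen is the 8-byte rta header plus the HMAC key plus
 * the CTR key-with-nonce -- see the layout sketch after the SHA-512
 * vectors above.
 */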
static const struct aead_testvec hmac_md5_des3_ede_cbc_tv_temp[] = {
{ /*Generated with cryptopp*/
#ifdef __LITTLE_ENDIAN
diff --git a/crypto/xts.c b/crypto/xts.c
index 3da8f5e053d6..ad97c8091582 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -76,7 +76,7 @@ static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
- * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
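/*
 * Editor's note (not part of the patch): recomputing the tweaks is
 * cheap because advancing the XTS tweak by one block is a single
 * 128-bit shift plus a conditional XOR with the reduction polynomial.
 * A sketch modelled on the kernel's gf128mul_x_ble() -- the le128
 * field layout is assumed here, with `a` holding the high 64 bits:
 */
static inline void gf128mul_x_ble_sketch(le128 *r, const le128 *x)
{
	u64 a = le64_to_cpu(x->a);
	u64 b = le64_to_cpu(x->b);
	/* if bit 127 shifts out, fold in x^7 + x^2 + x + 1 (0x87) */
	u64 tt = (u64)((s64)a >> 63) & 0x87;

	r->a = cpu_to_le64((a << 1) | (b >> 63));
	r->b = cpu_to_le64((b << 1) ^ tt);
}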
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 9960100e6066..d23b58b81ca3 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -301,6 +301,7 @@ config CRYPTO_DEV_PPC4XX
select CRYPTO_CCM
select CRYPTO_CTR
select CRYPTO_GCM
+ select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
This option allows you to have support for AMCC crypto acceleration.
@@ -490,7 +491,7 @@ config CRYPTO_DEV_ATMEL_ECC
select CRYPTO_ECDH
select CRC16
help
- Microhip / Atmel ECC hw accelerator.
+ Microchip / Atmel ECC hw accelerator.
Select this if you want to use the Microchip / Atmel module for
ECDH algorithm.
@@ -504,7 +505,7 @@ config CRYPTO_DEV_ATMEL_SHA204A
select HW_RANDOM
select CRC16
help
- Microhip / Atmel SHA accelerator and RNG.
+ Microchip / Atmel SHA accelerator and RNG.
Select this if you want to use the Microchip / Atmel SHA204A
module as a random number generator. (Other functions of the
chip are currently not exposed by this driver)
@@ -667,14 +668,6 @@ config CRYPTO_DEV_QCOM_RNG
To compile this driver as a module, choose M here. The
module will be called qcom-rng. If unsure, say N.
-#config CRYPTO_DEV_VMX
-# bool "Support for VMX cryptographic acceleration instructions"
-# depends on PPC64 && VSX
-# help
-# Support for VMX cryptographic acceleration instructions.
-#
-#source "drivers/crypto/vmx/Kconfig"
-
config CRYPTO_DEV_IMGTEC_HASH
tristate "Imagination Technologies hardware hash accelerator"
depends on MIPS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 322ae8854e3e..283bbc650b5b 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,7 +38,6 @@ obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
-#obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
index b8e75210a0e3..7270e5fbc573 100644
--- a/drivers/crypto/allwinner/Kconfig
+++ b/drivers/crypto/allwinner/Kconfig
@@ -14,6 +14,7 @@ config CRYPTO_DEV_SUN4I_SS
select CRYPTO_SHA1
select CRYPTO_AES
select CRYPTO_LIB_DES
+ select CRYPTO_RNG
select CRYPTO_SKCIPHER
help
Some Allwinner SoC have a crypto accelerator named
@@ -49,6 +50,7 @@ config CRYPTO_DEV_SUN8I_CE
select CRYPTO_CBC
select CRYPTO_AES
select CRYPTO_DES
+ select CRYPTO_RNG
depends on CRYPTO_DEV_ALLWINNER
depends on PM
help
diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index f8f37c9d5f3c..6f0d03cfbefc 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -182,8 +182,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
final = true;
} else
length -= remain;
- scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
- rctx->offset, length, 0);
+ memcpy_from_sglist(hash_engine->ahash_src_addr, rctx->src_sg, rctx->offset, length);
aspeed_ahash_update_counter(rctx, length);
if (final)
length += aspeed_ahash_fill_padding(
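/*
 * Editor's note (not part of the patch): memcpy_from_sglist(buf, sg,
 * offset, len) is the <linux/scatterlist.h> helper equivalent to
 * scatterwalk_map_and_copy(buf, sg, offset, len, 0) -- the trailing 0
 * selected the copy-out direction, which the new name states outright.
 */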
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index bc0c40f10944..b393689400b4 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2131,7 +2131,7 @@ static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
- free_page((unsigned long)dd->buf);
+ free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
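/*
 * Editor's note (not part of the patch): dd->buf is allocated with
 * order ATMEL_AES_BUFFER_ORDER, i.e. potentially several pages, so the
 * old free_page() released only the first page and leaked the rest;
 * free_pages() with the matching order returns the whole block.
 */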
@@ -2270,10 +2270,12 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
/* i = ARRAY_SIZE(aes_authenc_algs); */
err_aes_authenc_alg:
crypto_unregister_aeads(aes_authenc_algs, i);
- crypto_unregister_skcipher(&aes_xts_alg);
+ if (dd->caps.has_xts)
+ crypto_unregister_skcipher(&aes_xts_alg);
#endif
err_aes_xts_alg:
- crypto_unregister_aead(&aes_gcm_alg);
+ if (dd->caps.has_gcm)
+ crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index b6a77c8d439c..9c380351d2f9 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -261,6 +261,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
if (IS_ERR(fallback)) {
dev_err(&ctx->client->dev, "Failed to allocate transformation for '%s': %ld\n",
alg, PTR_ERR(fallback));
+ atmel_ecc_i2c_client_free(ctx->client);
return PTR_ERR(fallback);
}
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index da3cd986b1eb..0e275dbdc8c5 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -72,8 +72,8 @@ EXPORT_SYMBOL(atmel_i2c_init_read_config_cmd);
int atmel_i2c_init_read_otp_cmd(struct atmel_i2c_cmd *cmd, u16 addr)
{
- if (addr < 0 || addr > OTP_ZONE_SIZE)
- return -1;
+ if (addr >= OTP_ZONE_SIZE / 4)
+ return -EINVAL;
cmd->word_addr = COMMAND;
cmd->opcode = OPCODE_READ;
@@ -370,7 +370,7 @@ int atmel_i2c_probe(struct i2c_client *client)
}
}
- if (bus_clk_rate > 1000000L) {
+ if (bus_clk_rate > I2C_MAX_FAST_MODE_PLUS_FREQ) {
dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
bus_clk_rate);
return -EINVAL;
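/*
 * Editor's note (not part of the patch): the OTP zone is addressed in
 * 4-byte words, so the valid word indices are 0 .. OTP_ZONE_SIZE / 4 - 1.
 * The old check compared the word index against the byte size, and its
 * `addr < 0` half could never fire on an unsigned u16.  Illustrative
 * form of the corrected bound:
 */
static bool otp_word_addr_valid(u16 addr)
{
	return addr < OTP_ZONE_SIZE / 4;
}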
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 1f1341a16c42..002b62902553 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -404,20 +404,13 @@ static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
- struct atmel_sha_dev *dd = NULL;
- struct atmel_sha_dev *tmp;
+ struct atmel_sha_dev *dd;
spin_lock_bh(&atmel_sha.lock);
- if (!tctx->dd) {
- list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
- dd = tmp;
- break;
- }
- tctx->dd = dd;
- } else {
- dd = tctx->dd;
- }
-
+ if (!tctx->dd)
+ tctx->dd = list_first_entry_or_null(&atmel_sha.dev_list,
+ struct atmel_sha_dev, list);
+ dd = tctx->dd;
spin_unlock_bh(&atmel_sha.lock);
return dd;
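/*
 * Editor's note (not part of the patch): list_first_entry_or_null()
 * from <linux/list.h> returns the first entry, or NULL on an empty
 * list -- exactly what the removed open-coded loop computed.  The same
 * simplification is applied to img-hash further down.
 */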
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 98d1023007e3..dbb39ed0cea1 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "atmel-i2c.h"
@@ -95,19 +96,24 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
{
struct atmel_i2c_cmd cmd;
- int ret = -1;
+ int ret;
- if (atmel_i2c_init_read_otp_cmd(&cmd, addr) < 0) {
+ ret = atmel_i2c_init_read_otp_cmd(&cmd, addr);
+ if (ret < 0) {
dev_err(&client->dev, "failed, invalid otp address %04X\n",
addr);
return ret;
}
ret = atmel_i2c_send_receive(client, &cmd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read otp at %04X\n", addr);
+ return ret;
+ }
if (cmd.data[0] == 0xff) {
dev_err(&client->dev, "failed, device not ready\n");
- return -EINVAL;
+ return -EIO;
}
memcpy(otp, cmd.data+1, 4);
@@ -120,21 +126,22 @@ static ssize_t otp_show(struct device *dev,
{
u16 addr;
u8 otp[OTP_ZONE_SIZE];
- char *str = buf;
struct i2c_client *client = to_i2c_client(dev);
- int i;
+ ssize_t len = 0;
+ int i, ret;
- for (addr = 0; addr < OTP_ZONE_SIZE/4; addr++) {
- if (atmel_sha204a_otp_read(client, addr, otp + addr * 4) < 0) {
+ for (addr = 0; addr < OTP_ZONE_SIZE / 4; addr++) {
+ ret = atmel_sha204a_otp_read(client, addr, otp + addr * 4);
+ if (ret < 0) {
dev_err(dev, "failed to read otp zone\n");
- break;
+ return ret;
}
}
- for (i = 0; i < addr*2; i++)
- str += sprintf(str, "%02X", otp[i]);
- str += sprintf(str, "\n");
- return str - buf;
+ for (i = 0; i < OTP_ZONE_SIZE; i++)
+ len += sysfs_emit_at(buf, len, "%02X", otp[i]);
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
}
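/*
 * Editor's note (not part of the patch): sysfs_emit_at(buf, at, fmt, ...)
 * is the bounds-checked replacement for sprintf() into a sysfs buffer --
 * it refuses to write past PAGE_SIZE and returns the number of bytes
 * emitted, so a running length both advances the cursor and becomes the
 * show() return value.  Minimal usage sketch:
 */
static ssize_t hexdump_show(char *buf, const u8 *data, unsigned int n)
{
	ssize_t len = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		len += sysfs_emit_at(buf, len, "%02X", data[i]);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}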
static DEVICE_ATTR_RO(otp);
@@ -174,10 +181,6 @@ static int atmel_sha204a_probe(struct i2c_client *client)
if (ret)
dev_warn(&client->dev, "failed to register RNG (%d)\n", ret);
- /* otp read out */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
ret = sysfs_create_group(&client->dev.kobj, &atmel_sha204a_groups);
if (ret) {
dev_err(&client->dev, "failed to register sysfs entry\n");
@@ -191,10 +194,8 @@ static void atmel_sha204a_remove(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client);
- if (atomic_read(&i2c_priv->tfm_count)) {
- dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n");
- return;
- }
+ devm_hwrng_unregister(&client->dev, &i2c_priv->hwrng);
+ atmel_i2c_flush_queue();
sysfs_remove_group(&client->dev.kobj, &atmel_sha204a_groups);
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 278c0df3c92f..643e507f9c02 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -294,8 +294,8 @@ static int atmel_tdes_crypt_pdc_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
@@ -619,8 +619,8 @@ static int atmel_tdes_crypt_dma_stop(struct atmel_tdes_dev *dd)
dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
} else {
- dma_sync_single_for_device(dd->dev, dd->dma_addr_out,
- dd->dma_size, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+ dd->dma_size, DMA_FROM_DEVICE);
/* copy data */
count = atmel_tdes_sg_copy(&dd->out_sg, &dd->out_offset,
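/*
 * Editor's note (not part of the patch): a DMA direction fix -- the
 * driver is about to *read* the DMA output buffer on the CPU side, so
 * ownership must be handed back with dma_sync_single_for_cpu();
 * dma_sync_single_for_device() is the opposite handoff, made before
 * the device touches the buffer.
 */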
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index b04d6379244a..a4793b76300c 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1323,7 +1323,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
artpec6_crypto_init_dma_operation(common);
- /* Upload HMAC key, must be first the first packet */
+ /* Upload HMAC key, it must be the first packet */
if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
if (variant == ARTPEC6_CRYPTO) {
req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
@@ -1333,11 +1333,8 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
a7_regk_crypto_dlkey);
}
- /* Copy and pad up the key */
- memcpy(req_ctx->key_buffer, ctx->hmac_key,
- ctx->hmac_key_length);
- memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
- blocksize - ctx->hmac_key_length);
+ memcpy_and_pad(req_ctx->key_buffer, blocksize, ctx->hmac_key,
+ ctx->hmac_key_length, 0);
error = artpec6_crypto_setup_out_descr(common,
(void *)&req_ctx->key_md,
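/*
 * Editor's note (not part of the patch): memcpy_and_pad(dst, dst_size,
 * src, src_size, pad) from <linux/string.h> performs the copy and the
 * trailing fill in one call -- the same memcpy()+memset() pair the old
 * code spelled out, with no chance of miscomputing the pad length.
 */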
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 78964e1712e5..bf10c3dda745 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -3269,7 +3270,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
- print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -3289,7 +3290,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
/* in progress */
wait_for_completion(&result.completion);
ret = result.err;
- print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -4645,16 +4646,12 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
alg = &halg->halg.base;
if (keyed) {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_driver_name);
+ strscpy(alg->cra_name, template->hmac_name);
+ strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ strscpy(alg->cra_name, template->name);
+ strscpy(alg->cra_driver_name, template->driver_name);
t_alg->ahash_alg.setkey = NULL;
t_alg->is_hmac = false;
}
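/*
 * Editor's note (not part of the patch): the two-argument strscpy()
 * infers the destination size from the array type and always
 * NUL-terminates, so it replaces the snprintf(.., "%s", ..) idiom here
 * and in the ccp hunks below; the hifn_795x hunk further down also
 * uses its return value, which is -E2BIG on truncation, to keep the
 * old `>= CRYPTO_MAX_ALG_NAME` overflow check.
 */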
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 44122208f70c..ddb2a35aec2d 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
LDST_SRCDST_BYTE_CONTEXT);
- print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
+ print_hex_dump_devel("key_in@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
wait_for_completion(&result.completion);
ret = result.err;
- print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
+ print_hex_dump_devel("digested key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
@@ -1914,16 +1914,12 @@ caam_hash_alloc(struct caam_hash_template *template,
alg = &halg->halg.base;
if (keyed) {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->hmac_driver_name);
+ strscpy(alg->cra_name, template->hmac_name);
+ strscpy(alg->cra_driver_name, template->hmac_driver_name);
t_alg->is_hmac = true;
} else {
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
+ strscpy(alg->cra_name, template->name);
+ strscpy(alg->cra_driver_name, template->driver_name);
halg->setkey = NULL;
t_alg->is_hmac = false;
}
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index 6c8d1b87d60d..fc14c2e73ccd 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/algapi.h>
@@ -223,9 +224,8 @@ static int ccp_register_aes_aead(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_aead->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
ret = crypto_register_aead(alg);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index c7e26ce71156..8e59137284b7 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
@@ -239,9 +240,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
alg = &ccp_alg->alg;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 01d298350b92..94bccc5d6c78 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -305,9 +305,8 @@ static int ccp_register_aes_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c20b5a6a340a..e26b431a5993 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -193,9 +193,8 @@ static int ccp_register_des3_alg(struct list_head *head,
/* Copy the defaults and override as necessary */
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
alg->base.cra_blocksize = def->blocksize;
alg->ivsize = def->ivsize;
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 090adacaaf93..287d7f62026d 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/internal/rsa.h>
@@ -257,9 +258,8 @@ static int ccp_register_rsa_alg(struct list_head *head,
alg = &ccp_alg->alg;
*alg = *def->alg_defaults;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->driver_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->driver_name);
ret = crypto_register_akcipher(alg);
if (ret) {
pr_err("%s akcipher algorithm registration error (%d)\n",
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 286b2d716236..85058a89f35b 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -484,9 +484,8 @@ static int ccp_register_sha_alg(struct list_head *head,
halg->statesize = sizeof(struct ccp_sha_exp_ctx);
base = &halg->base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(base->cra_name, def->name);
+ strscpy(base->cra_driver_name, def->drv_name);
base->cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY |
CRYPTO_ALG_KERN_DRIVER_ONLY |
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 939fa8aa155c..450d491379d4 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1965,11 +1965,11 @@ static int sev_get_firmware(struct device *dev,
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
- struct sev_data_download_firmware *data;
+ struct sev_data_download_firmware data;
const struct firmware *firmware;
int ret, error, order;
struct page *p;
- u64 data_size;
+ void *fw_blob;
if (!sev_version_greater_or_equal(0, 15)) {
dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
@@ -1981,16 +1981,7 @@ static int sev_update_firmware(struct device *dev)
return -1;
}
- /*
- * SEV FW expects the physical address given to it to be 32
- * byte aligned. Memory allocated has structure placed at the
- * beginning followed by the firmware being passed to the SEV
- * FW. Allocate enough memory for data structure + alignment
- * padding + SEV FW.
- */
- data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
-
- order = get_order(firmware->size + data_size);
+ order = get_order(firmware->size);
p = alloc_pages(GFP_KERNEL, order);
if (!p) {
ret = -1;
@@ -2001,20 +1992,20 @@ static int sev_update_firmware(struct device *dev)
* Copy firmware data to a kernel allocated contiguous
* memory region.
*/
- data = page_address(p);
- memcpy(page_address(p) + data_size, firmware->data, firmware->size);
+ fw_blob = page_address(p);
+ memcpy(fw_blob, firmware->data, firmware->size);
- data->address = __psp_pa(page_address(p) + data_size);
- data->len = firmware->size;
+ data.address = __psp_pa(fw_blob);
+ data.len = firmware->size;
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
/*
* A quirk for fixing the committed TCB version, when upgrading from
* earlier firmware version than 1.50.
*/
if (!ret && !sev_version_greater_or_equal(1, 50))
- ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
+ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, &data, &error);
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index c6d085c8ff79..73179bf725a7 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1448,6 +1448,7 @@ static int cc_mac_digest(struct ahash_request *req)
if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
req->nbytes, 1, flags)) {
dev_err(dev, "map_ahash_request_final() failed\n");
+ cc_unmap_result(dev, state, digestsize, req->result);
cc_unmap_req(dev, state, ctx);
return -ENOMEM;
}
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a897541f897b..2da0894f31fd 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
@@ -2256,8 +2257,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat
alg->alg.init = hifn_init_tfm;
err = -EINVAL;
- if (snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s", t->name) >= CRYPTO_MAX_ALG_NAME)
+ if (strscpy(alg->alg.base.cra_name, t->name) < 0)
goto out_free_alg;
if (snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s-%s", t->drv_name, dev->name) >= CRYPTO_MAX_ALG_NAME)
@@ -2367,7 +2367,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->alg_list);
- snprintf(dev->name, sizeof(dev->name), "%s", name);
+ strscpy(dev->name, name);
spin_lock_init(&dev->lock);
for (i = 0; i < 3; ++i) {
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 32e9f8350289..3ee6de16e3f1 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -45,8 +45,8 @@ struct qm_dfx_item {
struct qm_cmd_dump_item {
const char *cmd;
- char *info_name;
- int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name);
+ const char *info_name;
+ int (*dump_fn)(struct hisi_qm *qm, char *cmd, const char *info_name);
};
static struct qm_dfx_item qm_dfx_files[] = {
@@ -151,7 +151,7 @@ static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
}
static void dump_show(struct hisi_qm *qm, void *info,
- unsigned int info_size, char *info_name)
+ unsigned int info_size, const char *info_name)
{
struct device *dev = &qm->pdev->dev;
u8 *info_curr = info;
@@ -165,7 +165,7 @@ static void dump_show(struct hisi_qm *qm, void *info,
}
}
-static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_sqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_sqc sqc;
@@ -202,7 +202,7 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_cqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_cqc cqc;
@@ -239,7 +239,7 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
struct qm_aeqc aeqc;
@@ -305,7 +305,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
ret = kstrtou32(presult, 0, e_id);
if (ret || *e_id >= q_depth) {
- dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
+ dev_err(dev, "Please input sqe num (0-%d)", q_depth - 1);
return -EINVAL;
}
@@ -317,7 +317,7 @@ static int q_dump_param_parse(struct hisi_qm *qm, char *s,
return 0;
}
-static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_sq_dump(struct hisi_qm *qm, char *s, const char *name)
{
u16 sq_depth = qm->qp_array->sq_depth;
struct hisi_qp *qp;
@@ -345,7 +345,7 @@ static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_cq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct qm_cqe *cqe_curr;
struct hisi_qp *qp;
@@ -363,7 +363,7 @@ static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name)
return 0;
}
-static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
+static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, const char *name)
{
struct device *dev = &qm->pdev->dev;
u16 xeq_depth;
@@ -388,7 +388,7 @@ static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name)
}
if (xeqe_id >= xeq_depth) {
- dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1);
+ dev_err(dev, "Please input eqe or aeqe num (0-%d)", xeq_depth - 1);
return -EINVAL;
}
@@ -1040,6 +1040,57 @@ void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm)
}
}
+static int qm_usage_percent(struct hisi_qm *qm, int chan_num)
+{
+ u32 val, used_bw, total_bw;
+
+ val = readl(qm->io_base + QM_CHANNEL_USAGE_OFFSET +
+ chan_num * QM_CHANNEL_ADDR_INTRVL);
+ used_bw = lower_16_bits(val);
+ total_bw = upper_16_bits(val);
+ if (!total_bw)
+ return -EIO;
+
+ if (total_bw <= used_bw)
+ return QM_MAX_DEV_USAGE;
+
+ return (used_bw * QM_DEV_USAGE_RATE) / total_bw;
+}
+
+static int qm_usage_show(struct seq_file *s, void *unused)
+{
+ struct hisi_qm *qm = s->private;
+ bool dev_is_active = true;
+ int i, ret;
+
+ /* If the device is suspended, usage is reported as 0. */
+ ret = hisi_qm_get_dfx_access(qm);
+ if (ret == -EAGAIN) {
+ dev_is_active = false;
+ } else if (ret) {
+ dev_err(&qm->pdev->dev, "failed to get dfx access for usage_show!\n");
+ return ret;
+ }
+
+ ret = 0;
+ for (i = 0; i < qm->channel_data.channel_num; i++) {
+ if (dev_is_active) {
+ ret = qm_usage_percent(qm, i);
+ if (ret < 0) {
+ hisi_qm_put_dfx_access(qm);
+ return ret;
+ }
+ }
+ seq_printf(s, "%s: %d\n", qm->channel_data.channel_name[i], ret);
+ }
+
+ if (dev_is_active)
+ hisi_qm_put_dfx_access(qm);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(qm_usage);
+
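/*
 * Editor's note (not part of the patch): worked example for
 * qm_usage_percent(), with the register layout taken from the code
 * (used bandwidth in bits 15:0, total in bits 31:16).  A read of
 * val == 0x00400010 gives used_bw = 0x10 and total_bw = 0x40, so with
 * QM_DEV_USAGE_RATE == 100 (assumed -- the constant is defined outside
 * this hunk) the channel reports 0x10 * 100 / 0x40 == 25 percent.
 */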
static int qm_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1159,6 +1210,9 @@ void hisi_qm_debug_init(struct hisi_qm *qm)
debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
qm, &qm_diff_regs_fops);
+ if (qm->ver >= QM_HW_V5)
+ debugfs_create_file("dev_usage", 0444, qm->debug.debug_root, qm, &qm_usage_fops);
+
debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 839c1f677143..09077abbf6ad 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1327,17 +1327,9 @@ static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
struct device *dev = ctx->dev;
int ret;
- ret = crypto_get_default_rng();
- if (ret) {
- dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
- return ret;
- }
-
- ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
- params->key_size);
- crypto_put_default_rng();
+ ret = crypto_stdrng_get_bytes(params->key, params->key_size);
if (ret)
- dev_err(dev, "failed to get rng, ret = %d!\n", ret);
+ dev_err(dev, "failed to get random bytes, ret = %d!\n", ret);
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 884d5d0afaf4..357ab5e5887e 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -121,6 +121,8 @@
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43
+#define HPRE_MAX_CHANNEL_NUM 2
+
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -370,6 +372,11 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
+static const char *hpre_channel_name[HPRE_MAX_CHANNEL_NUM] = {
+ "RSA",
+ "ECC",
+};
+
static const struct hisi_qm_err_ini hpre_err_ini;
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
@@ -1234,6 +1241,16 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void hpre_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+ int i;
+
+ channel_data->channel_num = HPRE_MAX_CHANNEL_NUM;
+ for (i = 0; i < HPRE_MAX_CHANNEL_NUM; i++)
+ channel_data->channel_name[i] = hpre_channel_name[i];
+}
+
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1267,6 +1284,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ hpre_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = hpre_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d1626685ed9f..3ca47e2a9719 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -472,6 +472,8 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
+static void hisi_qm_stop_qp(struct hisi_qp *qp);
+
int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
unsigned int device)
{
@@ -2262,7 +2264,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
* After this function, qp can receive request from user. Return 0 if
* successful, negative error code if failed.
*/
-int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
+static int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
struct hisi_qm *qm = qp->qm;
int ret;
@@ -2273,7 +2275,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
/**
* qp_stop_fail_cb() - call request cb.
@@ -2418,13 +2419,12 @@ static void qm_stop_qp_nolock(struct hisi_qp *qp)
*
* This function is reverse of hisi_qm_start_qp.
*/
-void hisi_qm_stop_qp(struct hisi_qp *qp)
+static void hisi_qm_stop_qp(struct hisi_qp *qp)
{
down_write(&qp->qm->qps_lock);
qm_stop_qp_nolock(qp);
up_write(&qp->qm->qps_lock);
}
-EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
/**
* hisi_qp_send() - Queue up a task in the hardware queue.
@@ -3381,7 +3381,7 @@ static int __hisi_qm_start(struct hisi_qm *qm)
int hisi_qm_start(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- int ret = 0;
+ int ret;
down_write(&qm->qps_lock);
@@ -3917,8 +3917,8 @@ back_func_qos:
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
{
- u64 cir_u = 0, cir_b = 0, cir_s = 0;
u64 shaper_vft, ir_calc, ir;
+ u64 cir_u, cir_b, cir_s;
unsigned int val;
u32 error_rate;
int ret;
@@ -4278,8 +4278,8 @@ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
return hisi_qm_sriov_disable(pdev, false);
- else
- return hisi_qm_sriov_enable(pdev, num_vfs);
+
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 54e24fd7b9be..85eecbb40e7e 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -844,7 +844,7 @@ err_free_elements:
if (crypto_skcipher_ivsize(atfm))
dma_unmap_single(info->dev, sec_req->dma_iv,
crypto_skcipher_ivsize(atfm),
- DMA_BIDIRECTIONAL);
+ DMA_TO_DEVICE);
err_unmap_out_sg:
if (split)
sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 0710977861f3..adf95795dffe 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -285,7 +285,5 @@ enum sec_cap_table_type {
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-int sec_register_to_crypto(struct hisi_qm *qm);
-void sec_unregister_from_crypto(struct hisi_qm *qm);
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 15174216d8c4..2471a4dd0b50 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -230,7 +230,7 @@ static int qp_send_message(struct sec_req *req)
spin_unlock_bh(&qp_ctx->req_lock);
- atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt);
+ atomic64_inc(&qp_ctx->ctx->sec->debug.dfx.send_cnt);
return -EINPROGRESS;
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index efda8646fc60..056bd8f4da5a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -133,6 +133,8 @@
#define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
GENMASK_ULL(45, 43))
+#define SEC_MAX_CHANNEL_NUM 1
+
struct sec_hw_error {
u32 int_msk;
const char *msg;
@@ -907,7 +909,7 @@ static int sec_debugfs_atomic64_set(void *data, u64 val)
}
DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
- sec_debugfs_atomic64_set, "%lld\n");
+ sec_debugfs_atomic64_set, "%llu\n");
static int sec_regs_show(struct seq_file *s, void *unused)
{
@@ -1288,6 +1290,14 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void sec_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+
+ channel_data->channel_num = SEC_MAX_CHANNEL_NUM;
+ channel_data->channel_name[0] = "SEC";
+}
+
static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1325,6 +1335,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ sec_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = sec_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 85b26ef17548..44df9c859bd8 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -122,6 +122,8 @@
#define HZIP_LIT_LEN_EN_OFFSET 0x301204
#define HZIP_LIT_LEN_EN_EN BIT(4)
+#define HZIP_MAX_CHANNEL_NUM 3
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -359,6 +361,12 @@ static struct dfx_diff_registers hzip_diff_regs[] = {
},
};
+static const char *zip_channel_name[HZIP_MAX_CHANNEL_NUM] = {
+ "COMPRESS",
+ "DECOMPRESS",
+ "DAE"
+};
+
static int hzip_diff_regs_show(struct seq_file *s, void *unused)
{
struct hisi_qm *qm = s->private;
@@ -1400,6 +1408,16 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
return 0;
}
+static void zip_set_channels(struct hisi_qm *qm)
+{
+ struct qm_channel *channel_data = &qm->channel_data;
+ int i;
+
+ channel_data->channel_num = HZIP_MAX_CHANNEL_NUM;
+ for (i = 0; i < HZIP_MAX_CHANNEL_NUM; i++)
+ channel_data->channel_name[i] = zip_channel_name[i];
+}
+
static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
u64 alg_msk;
@@ -1438,6 +1456,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
return ret;
}
+ zip_set_channels(qm);
/* Fetch and save the value of capability registers */
ret = zip_pre_store_cap_reg(qm);
if (ret) {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 7195c37dd102..c0467185ee42 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -629,24 +629,14 @@ static int img_hash_digest(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
- struct img_hash_dev *hdev = NULL;
- struct img_hash_dev *tmp;
- int err;
spin_lock(&img_hash.lock);
- if (!tctx->hdev) {
- list_for_each_entry(tmp, &img_hash.dev_list, list) {
- hdev = tmp;
- break;
- }
- tctx->hdev = hdev;
-
- } else {
- hdev = tctx->hdev;
- }
-
+ if (!tctx->hdev)
+ tctx->hdev = list_first_entry_or_null(&img_hash.dev_list,
+ struct img_hash_dev, list);
+ ctx->hdev = tctx->hdev;
spin_unlock(&img_hash.lock);
- ctx->hdev = hdev;
+
ctx->flags = 0;
ctx->digsize = crypto_ahash_digestsize(tfm);
@@ -675,9 +665,7 @@ static int img_hash_digest(struct ahash_request *req)
ctx->sgfirst = req->src;
ctx->nents = sg_nents(ctx->sg);
- err = img_hash_handle_queue(tctx->hdev, req);
-
- return err;
+ return img_hash_handle_queue(ctx->hdev, req);
}
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig
index 8353d3d7ec9b..29523f6927dd 100644
--- a/drivers/crypto/inside-secure/eip93/Kconfig
+++ b/drivers/crypto/inside-secure/eip93/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CRYPTO_DEV_EIP93
tristate "Support for EIP93 crypto HW accelerators"
- depends on SOC_MT7621 || ARCH_AIROHA ||COMPILE_TEST
+ depends on SOC_MT7621 || ARCH_AIROHA || ECONET || COMPILE_TEST
select CRYPTO_LIB_AES
select CRYPTO_LIB_DES
select CRYPTO_SKCIPHER
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c
index 1a08aed5de13..2bbd0af7b0e0 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aead.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aead.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h
index e2fa8fd39c50..d933a8fbdf04 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aead.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AEAD_H_
#define _EIP93_AEAD_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h
index 1d83d39cab2a..82064cc8f5c7 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-aes.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_AES_H_
#define _EIP93_AES_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
index 0713c71ab458..4dd7ab7503e8 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-cipher.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
@@ -320,7 +320,7 @@ struct eip93_alg_template eip93_alg_ecb_des = {
.ivsize = 0,
.base = {
.cra_name = "ecb(des)",
- .cra_driver_name = "ebc(des-eip93)",
+ .cra_driver_name = "ecb(des-eip93)",
.cra_priority = EIP93_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
index 6e2545ebd879..47e4e84ff14e 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-cipher.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_CIPHER_H_
#define _EIP93_CIPHER_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c b/drivers/crypto/inside-secure/eip93/eip93-common.c
index f4ad6beff15e..6f147014f996 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-common.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/aes.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h
index 80964cfa34df..41c43782eb5c 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-common.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-common.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_COMMON_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h
index 74748df04acf..53ffe0f341b8 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-des.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-des.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_DES_H_
#define _EIP93_DES_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c
index 2705855475b2..84d3ff2d3836 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2024
*
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <crypto/sha1.h>
diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h
index 556f22fc1dd0..29da18d78894 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-hash.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_HASH_H_
#define _EIP93_HASH_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c
index b7fd9795062d..7dccfdeb7b11 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-main.c
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.c
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#include <linux/atomic.h>
@@ -36,6 +36,14 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_cbc_aes,
&eip93_alg_ctr_aes,
&eip93_alg_rfc3686_aes,
+ &eip93_alg_md5,
+ &eip93_alg_sha1,
+ &eip93_alg_sha224,
+ &eip93_alg_sha256,
+ &eip93_alg_hmac_md5,
+ &eip93_alg_hmac_sha1,
+ &eip93_alg_hmac_sha224,
+ &eip93_alg_hmac_sha256,
&eip93_alg_authenc_hmac_md5_cbc_des,
&eip93_alg_authenc_hmac_sha1_cbc_des,
&eip93_alg_authenc_hmac_sha224_cbc_des,
@@ -52,14 +60,6 @@ static struct eip93_alg_template *eip93_algs[] = {
&eip93_alg_authenc_hmac_sha1_rfc3686_aes,
&eip93_alg_authenc_hmac_sha224_rfc3686_aes,
&eip93_alg_authenc_hmac_sha256_rfc3686_aes,
- &eip93_alg_md5,
- &eip93_alg_sha1,
- &eip93_alg_sha224,
- &eip93_alg_sha256,
- &eip93_alg_hmac_md5,
- &eip93_alg_hmac_sha1,
- &eip93_alg_hmac_sha224,
- &eip93_alg_hmac_sha256,
};
inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask)
diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h
index 79b078f0e5da..990c2401b7ce 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-main.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-main.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef _EIP93_MAIN_H_
#define _EIP93_MAIN_H_
diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h
index 0490b8d15131..96285ca6fbbe 100644
--- a/drivers/crypto/inside-secure/eip93/eip93-regs.h
+++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h
@@ -3,7 +3,7 @@
* Copyright (C) 2019 - 2021
*
* Richard van Schagen <vschagen@icloud.com>
- * Christian Marangi <ansuelsmth@gmail.com
+ * Christian Marangi <ansuelsmth@gmail.com>
*/
#ifndef REG_EIP93_H
#define REG_EIP93_H
@@ -109,7 +109,7 @@
#define EIP93_REG_PE_BUF_THRESH 0x10c
#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16)
#define EIP93_PE_INBUF_THRESH GENMASK(7, 0)
-#define EIP93_REG_PE_INBUF_COUNT 0x100
+#define EIP93_REG_PE_INBUF_COUNT 0x110
#define EIP93_REG_PE_OUTBUF_COUNT 0x114
#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 660f45ab8647..fb4936e7afa2 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1204,12 +1204,13 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha256,
&safexcel_alg_hmac_sha384,
&safexcel_alg_hmac_sha512,
+ &safexcel_alg_authenc_hmac_md5_cbc_aes,
&safexcel_alg_authenc_hmac_sha1_cbc_aes,
&safexcel_alg_authenc_hmac_sha224_cbc_aes,
&safexcel_alg_authenc_hmac_sha256_cbc_aes,
&safexcel_alg_authenc_hmac_sha384_cbc_aes,
&safexcel_alg_authenc_hmac_sha512_cbc_aes,
- &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_md5_ctr_aes,
&safexcel_alg_authenc_hmac_sha1_ctr_aes,
&safexcel_alg_authenc_hmac_sha224_ctr_aes,
&safexcel_alg_authenc_hmac_sha256_ctr_aes,
@@ -1241,11 +1242,14 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sha3_256,
&safexcel_alg_hmac_sha3_384,
&safexcel_alg_hmac_sha3_512,
- &safexcel_alg_authenc_hmac_sha1_cbc_des,
+ &safexcel_alg_authenc_hmac_md5_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
+ &safexcel_alg_authenc_hmac_md5_cbc_des,
+ &safexcel_alg_authenc_hmac_sha1_cbc_des,
&safexcel_alg_authenc_hmac_sha256_cbc_des,
&safexcel_alg_authenc_hmac_sha224_cbc_des,
&safexcel_alg_authenc_hmac_sha512_cbc_des,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0f27367a85fa..52fd460c0e9b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -945,12 +945,13 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha512;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_aes;
-extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_ctr_aes;
@@ -982,11 +983,14 @@ extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
-extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des;
+extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 27b180057417..a8349b684693 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -17,6 +17,7 @@
#include <crypto/internal/des.h>
#include <crypto/gcm.h>
#include <crypto/ghash.h>
+#include <crypto/md5.h>
#include <crypto/poly1305.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -462,6 +463,9 @@ static int safexcel_aead_setkey(struct crypto_aead *ctfm, const u8 *key,
/* Auth key */
switch (ctx->hash_alg) {
+ case CONTEXT_CONTROL_CRYPTO_ALG_MD5:
+ alg = "safexcel-md5";
+ break;
case CONTEXT_CONTROL_CRYPTO_ALG_SHA1:
alg = "safexcel-sha1";
break;
@@ -1662,6 +1666,42 @@ static int safexcel_aead_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int safexcel_aead_md5_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_cra_init(tfm);
+ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ ctx->state_sz = MD5_DIGEST_SIZE;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
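
Each of the new MD5 AEAD variants follows the same init-chaining pattern visible above: a cipher-specific cra_init calls the generic MD5 init and then overrides only the fields that differ (cipher algorithm, block size, IV handling). A toy re-creation of the pattern, with made-up field values purely for illustration:

	/* Sketch of the init-chaining used by the MD5 variants in this
	 * file: inherit the base settings, then override per cipher. */
	struct toy_ctx { int hash, cipher, blocksz; };

	static void base_init(struct toy_ctx *c)
	{
		c->hash = 0; c->cipher = 0; c->blocksz = 16;
	}

	static void md5_init(struct toy_ctx *c)
	{
		base_init(c);
		c->hash = 1;		/* MD5 instead of the default */
	}

	static void md5_des3_init(struct toy_ctx *c)
	{
		md5_init(c);		/* inherit the MD5 settings */
		c->cipher = 2;		/* override default: 3DES */
		c->blocksz = 8;		/* DES3_EDE_BLOCK_SIZE */
	}
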
@@ -1842,6 +1882,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_aes = {
},
};
+static int safexcel_aead_md5_des3_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
+ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des3_ede = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des3_ede",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_des3_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_des3_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2027,6 +2104,43 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
},
};
+static int safexcel_aead_md5_des_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES; /* override default */
+ ctx->blocksz = DES_BLOCK_SIZE;
+ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_cbc_des = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-cbc-des",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_des_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2212,6 +2326,41 @@ struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
},
};
+static int safexcel_aead_md5_ctr_cra_init(struct crypto_tfm *tfm)
+{
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ safexcel_aead_md5_cra_init(tfm);
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD; /* override default */
+ return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_authenc_hmac_md5_ctr_aes = {
+ .type = SAFEXCEL_ALG_TYPE_AEAD,
+ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_MD5,
+ .alg.aead = {
+ .setkey = safexcel_aead_setkey,
+ .encrypt = safexcel_aead_encrypt,
+ .decrypt = safexcel_aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ .base = {
+ .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+ .cra_driver_name = "safexcel-authenc-hmac-md5-ctr-aes",
+ .cra_priority = SAFEXCEL_CRA_PRIORITY,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+ .cra_alignmask = 0,
+ .cra_init = safexcel_aead_md5_ctr_cra_init,
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
+ },
+};
+
static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 547abf453d4a..f62b994e18e5 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -906,8 +906,8 @@ static void rebalance_wq_table(void)
return;
}
+ cpu = 0;
for_each_node_with_cpus(node) {
- cpu = 0;
node_cpus = cpumask_of_node(node);
for_each_cpu(node_cpu, node_cpus) {
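
The hunk above hoists cpu = 0 out of the node loop: the counter indexes a single global per-CPU WQ table, so resetting it for every NUMA node made each node overwrite the entries written for the previous one while later slots stayed stale. A standalone toy model of the two behaviors (node and CPU counts are made up):

	#include <stdio.h>

	/* Toy model: 2 nodes x 2 CPUs filling one global table. */
	static int wq_table[4];

	static void fill(int reset_per_node)
	{
		int cpu = 0;

		for (int node = 0; node < 2; node++) {
			if (reset_per_node)
				cpu = 0;	/* old, buggy behavior */
			for (int i = 0; i < 2; i++)
				wq_table[cpu++] = node;	/* record owning node */
		}
	}

	int main(void)
	{
		fill(1);	/* slots 0..1 overwritten, 2..3 never written */
		printf("%d %d %d %d\n", wq_table[0], wq_table[1], wq_table[2], wq_table[3]);
		fill(0);	/* each slot written exactly once */
		printf("%d %d %d %d\n", wq_table[0], wq_table[1], wq_table[2], wq_table[3]);
		return 0;
	}

With the per-node reset this prints "1 1 0 0"; with the fix, "0 0 1 1".
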
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index 59308926399d..e61a95f66a0c 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -230,12 +230,7 @@ static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
int rc = 0;
/* Generate random nbytes for Simple and Differential SCA protection. */
- rc = crypto_get_default_rng();
- if (rc)
- return rc;
-
- rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
- crypto_put_default_rng();
+ rc = crypto_stdrng_get_bytes(sca, nbytes);
if (rc)
return rc;
@@ -509,14 +504,10 @@ static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
- * This condition is met by the default RNG because it selects a favored
- * DRBG with a security strength of 256.
+ * This condition is met by stdrng because it selects a favored DRBG
+ * with a security strength of 256.
*/
- if (crypto_get_default_rng())
- return -EFAULT;
-
- rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
- crypto_put_default_rng();
+ rc = crypto_stdrng_get_bytes(priv, nbytes);
if (rc)
goto cleanup;
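
Both keembay call sites now use a single helper instead of open-coding the get/use/put sequence on the default RNG. The helper itself is added in crypto/rng.c (see the diffstat); judging only from the lines removed here, it plausibly wraps the old three-step pattern roughly as below — a sketch, not the exact crypto/rng.c implementation:

	/* Sketch inferred from the removed call sites. */
	int crypto_stdrng_get_bytes(u8 *buf, unsigned int len)
	{
		int err;

		err = crypto_get_default_rng();	/* take a ref on "stdrng" */
		if (err)
			return err;

		err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
		crypto_put_default_rng();	/* drop the ref even on error */

		return err;
	}
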
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 4b4861460dd4..9d6e6f52d2dc 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config CRYPTO_DEV_QAT
tristate
+ select CRYPTO_ACOMP
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_SKCIPHER
@@ -11,6 +12,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_LIB_SHA1
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
+ select CRYPTO_ZSTD
select FW_LOADER
select CRC8
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 35105213d40c..19f9f738630b 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -97,9 +97,25 @@ static struct adf_hw_device_class adf_420xx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses[ADF_FUSECTL4];
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_420XX_ACCELENGINES_MASK;
- return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ if (test_bit(12, &fuses))
+ mask &= ~ADF_AE_GROUP_3;
+
+ if (test_bit(16, &fuses))
+ mask &= ~ADF_AE_GROUP_4;
+
+ return mask;
}
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
@@ -472,6 +488,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_420XX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
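
The new get_ae_mask() here (and its 4xxx twin just below) decodes FUSECTL4 one group-disable bit at a time instead of treating the whole fuse word as a per-engine disable mask: only bits 0/4/8/... carry meaning, so the old ~me_disable & MASK could clear engines that were never fused off. A user-space re-creation of the decode — the group mask values are taken from the 6xxx header shown further down (GROUP_0 = GENMASK(3, 0), GROUP_1 = GENMASK(7, 4), GROUP_2 = BIT(8)); the 420xx/4xxx values are not visible in this diff and are assumed analogous:

	#include <stdint.h>
	#include <stdio.h>

	#define AE_GROUP_0 0x00fu	/* GENMASK(3, 0), assumed */
	#define AE_GROUP_1 0x0f0u	/* GENMASK(7, 4), assumed */
	#define AE_GROUP_2 0x100u	/* BIT(8), assumed */
	#define ACCELENGINES_MASK 0x1ffu

	static uint32_t get_ae_mask(uint32_t fuses)
	{
		uint32_t mask = ACCELENGINES_MASK;

		if (fuses & (1u << 0))	/* test_bit(0, &fuses) */
			mask &= ~AE_GROUP_0;
		if (fuses & (1u << 4))
			mask &= ~AE_GROUP_1;
		if (fuses & (1u << 8))
			mask &= ~AE_GROUP_2;

		return mask;
	}

	int main(void)
	{
		/* A stray fuse bit outside 0/4/8 no longer disables anything. */
		printf("0x%03x\n", get_ae_mask(1u << 2));	/* 0x1ff */
		printf("0x%03x\n", get_ae_mask(1u << 4));	/* 0x10f */
		return 0;
	}
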
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 740f68a36ac5..49b425be34c8 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -100,9 +100,19 @@ static struct adf_hw_device_class adf_4xxx_class = {
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
- u32 me_disable = self->fuses[ADF_FUSECTL4];
+ unsigned long fuses = self->fuses[ADF_FUSECTL4];
+ u32 mask = ADF_4XXX_ACCELENGINES_MASK;
- return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
+ if (test_bit(0, &fuses))
+ mask &= ~ADF_AE_GROUP_0;
+
+ if (test_bit(4, &fuses))
+ mask &= ~ADF_AE_GROUP_1;
+
+ if (test_bit(8, &fuses))
+ mask &= ~ADF_AE_GROUP_2;
+
+ return mask;
}
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
@@ -463,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->services_supported = adf_gen4_services_supported;
hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
index bed88d3ce8ca..205680797e2c 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c
@@ -33,6 +33,8 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
+#define ASB_MULTIPLIER 9
+
struct adf_ring_config {
u32 ring_mask;
enum adf_cfg_service_type ring_type;
@@ -82,10 +84,15 @@ static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
};
+static const unsigned long thrd_mask_wcy[ADF_6XXX_MAX_ACCELENGINES] = {
+ 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x7F, 0x00
+};
+
static const char *const adf_6xxx_fw_objs[] = {
[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
[ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
+ [ADF_FW_WCY_OBJ] = ADF_6XXX_WCY_OBJ,
};
static const struct adf_fw_config adf_default_fw_config[] = {
@@ -94,6 +101,12 @@ static const struct adf_fw_config adf_default_fw_config[] = {
{ ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
};
+static const struct adf_fw_config adf_wcy_fw_config[] = {
+ { ADF_AE_GROUP_1, ADF_FW_WCY_OBJ },
+ { ADF_AE_GROUP_0, ADF_FW_WCY_OBJ },
+ { ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
+};
+
static struct adf_hw_device_class adf_6xxx_class = {
.name = ADF_6XXX_DEVICE_NAME,
.type = DEV_6XXX,
@@ -118,6 +131,12 @@ static bool services_supported(unsigned long mask)
}
}
+static bool wcy_services_supported(unsigned long mask)
+{
+ /* The wireless SKU supports only the symmetric crypto service */
+ return mask == BIT(SVC_SYM);
+}
+
static int get_service(unsigned long *mask)
{
if (test_and_clear_bit(SVC_ASYM, mask))
@@ -155,8 +174,12 @@ static enum adf_cfg_service_type get_ring_type(unsigned int service)
}
}
-static const unsigned long *get_thrd_mask(unsigned int service)
+static const unsigned long *get_thrd_mask(struct adf_accel_dev *accel_dev,
+ unsigned int service)
{
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ return (service == SVC_SYM) ? thrd_mask_wcy : NULL;
+
switch (service) {
case SVC_SYM:
return thrd_mask_sym;
@@ -194,7 +217,7 @@ static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config
return service;
rp_config[i].ring_type = get_ring_type(service);
- rp_config[i].thrd_mask = get_thrd_mask(service);
+ rp_config[i].thrd_mask = get_thrd_mask(accel_dev, service);
/*
* If there is only one service enabled, use all ring pairs for
@@ -386,6 +409,8 @@ static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWCPL_OFFSET, ADF_SSMWDTWCPH_OFFSET, val);
+ ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTWATL_OFFSET, ADF_SSMWDTWATH_OFFSET, val);
/* Enable watchdog timer for pke */
ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
@@ -439,6 +464,21 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number)
return 0;
}
+static bool adf_anti_rb_enabled(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+
+ return !!(hw_data->fuses[0] & ADF_GEN6_ANTI_RB_FUSE_BIT);
+}
+
+static void adf_gen6_init_anti_rb(struct adf_anti_rb_hw_data *anti_rb_data)
+{
+ anti_rb_data->anti_rb_enabled = adf_anti_rb_enabled;
+ anti_rb_data->svncheck_offset = ADF_GEN6_SVNCHECK_CSR_MSG;
+ anti_rb_data->svncheck_retry = 0;
+ anti_rb_data->sysfs_added = false;
+}
+
static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -471,6 +511,9 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
break;
+ case QAT_ZSTD:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS;
+ break;
default:
return -EINVAL;
}
@@ -481,6 +524,13 @@ static int build_comp_block(void *ctx, enum adf_dc_algo algo)
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+ /*
+ * Store Auto Select Best (ASB) multiplier in the request template.
+ * This will be used in the data path to set the actual threshold
+ * value based on the input data size.
+ */
+ req_tmpl->u3.asb_threshold.asb_value = ASB_MULTIPLIER;
+
return 0;
}
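
The block comment above states the contract but not the computation: the request template stores a constant multiplier (ASB_MULTIPLIER = 9) and the data path later derives the actual Auto Select Best threshold from it and the input size. That derivation is not part of this hunk; the function below is a purely hypothetical stand-in, included only to make the two-phase flow concrete:

	/* HYPOTHETICAL: the real threshold computation lives in the QAT
	 * data path (not shown in this diff) and is almost certainly
	 * different in detail. */
	static unsigned int asb_threshold(unsigned int asb_value, size_t input_len)
	{
		if (!asb_value)		/* decompression path: ASB disabled */
			return 0;
		return asb_value * (unsigned int)(input_len / 10);	/* placeholder scale */
	}
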
@@ -494,12 +544,16 @@ static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
break;
+ case QAT_ZSTD:
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS;
+ break;
default:
return -EINVAL;
}
cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
+ req_tmpl->u3.asb_threshold.asb_value = 0;
return 0;
}
@@ -631,6 +685,12 @@ static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
return set_vc_config(accel_dev);
}
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ? adf_wcy_fw_config :
+ adf_default_fw_config;
+}
+
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
unsigned long fuses = self->fuses[ADF_FUSECTL4];
@@ -653,6 +713,38 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return mask;
}
+static u32 get_accel_cap_wcy(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym;
+ u32 fuse;
+
+ fuse = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ ICP_ACCEL_CAPABILITIES_5G |
+ ICP_ACCEL_CAPABILITIES_ZUC |
+ ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+
+ if (fuse & ICP_ACCEL_GEN6_MASK_EIA3_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ }
+ if (fuse & ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+
+ if (fuse & ICP_ACCEL_GEN6_MASK_5G_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_5G;
+
+ if (adf_get_service_enabled(accel_dev) == SVC_SYM)
+ return capabilities_sym;
+
+ return 0;
+}
+
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
u32 capabilities_sym, capabilities_asym;
@@ -661,6 +753,9 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 caps = 0;
u32 fusectl1;
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ return get_accel_cap_wcy(accel_dev);
+
fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
/* Read accelerator capabilities mask */
@@ -733,15 +828,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
- return ARRAY_SIZE(adf_default_fw_config);
+ return adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)) ?
+ ARRAY_SIZE(adf_wcy_fw_config) :
+ ARRAY_SIZE(adf_default_fw_config);
}
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
{
int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
+ const struct adf_fw_config *fw_config;
int id;
- id = adf_default_fw_config[obj_num].obj;
+ fw_config = get_fw_config(accel_dev);
+ id = fw_config[obj_num].obj;
if (id >= num_fw_objs)
return NULL;
@@ -755,15 +854,22 @@ static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
+ const struct adf_fw_config *fw_config;
+
if (obj_num >= uof_get_num_objs(accel_dev))
return -EINVAL;
- return adf_default_fw_config[obj_num].obj;
+ fw_config = get_fw_config(accel_dev);
+
+ return fw_config[obj_num].obj;
}
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
- return adf_default_fw_config[obj_num].ae_mask;
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ return fw_config[obj_num].ae_mask;
}
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
@@ -873,6 +979,14 @@ static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
init_num_svc_aes(rl_data);
}
+static void adf_gen6_init_services_supported(struct adf_hw_device_data *hw_data)
+{
+ if (adf_6xxx_is_wcy(hw_data))
+ hw_data->services_supported = wcy_services_supported;
+ else
+ hw_data->services_supported = services_supported;
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_6xxx_class;
@@ -929,11 +1043,12 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
hw_data->stop_timer = adf_timer_stop;
hw_data->init_device = adf_init_device;
hw_data->enable_pm = enable_pm;
- hw_data->services_supported = services_supported;
hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
+ hw_data->accel_capabilities_ext_mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD;
+ adf_gen6_init_services_supported(hw_data);
adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen6_init_dc_ops(&hw_data->dc_ops);
@@ -941,6 +1056,7 @@ void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
adf_gen6_init_ras_ops(&hw_data->ras_ops);
adf_gen6_init_tl_data(&hw_data->tl_data);
adf_gen6_init_rl_data(&hw_data->rl_data);
+ adf_gen6_init_anti_rb(&hw_data->anti_rb_data);
}
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
index d822911fe68c..e4d433bdd379 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.h
@@ -53,6 +53,12 @@
#define ADF_GEN6_ADMINMSGLR_OFFSET 0x500578
#define ADF_GEN6_MAILBOX_BASE_OFFSET 0x600970
+/* Anti-rollback */
+#define ADF_GEN6_SVNCHECK_CSR_MSG 0x640004
+
+/* Fuse bits */
+#define ADF_GEN6_ANTI_RB_FUSE_BIT BIT(24)
+
/*
* Watchdog timers
* Timeout is in cycles. Clock speed may vary across products but this
@@ -64,10 +70,14 @@
#define ADF_SSMWDTATHH_OFFSET 0x520C
#define ADF_SSMWDTCNVL_OFFSET 0x5408
#define ADF_SSMWDTCNVH_OFFSET 0x540C
+#define ADF_SSMWDTWCPL_OFFSET 0x5608
+#define ADF_SSMWDTWCPH_OFFSET 0x560C
#define ADF_SSMWDTUCSL_OFFSET 0x5808
#define ADF_SSMWDTUCSH_OFFSET 0x580C
#define ADF_SSMWDTDCPRL_OFFSET 0x5A08
#define ADF_SSMWDTDCPRH_OFFSET 0x5A0C
+#define ADF_SSMWDTWATL_OFFSET 0x5C08
+#define ADF_SSMWDTWATH_OFFSET 0x5C0C
#define ADF_SSMWDTPKEL_OFFSET 0x5E08
#define ADF_SSMWDTPKEH_OFFSET 0x5E0C
@@ -139,6 +149,7 @@
#define ADF_6XXX_CY_OBJ "qat_6xxx_cy.bin"
#define ADF_6XXX_DC_OBJ "qat_6xxx_dc.bin"
#define ADF_6XXX_ADMIN_OBJ "qat_6xxx_admin.bin"
+#define ADF_6XXX_WCY_OBJ "qat_6xxx_wcy.bin"
/* RL constants */
#define ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV 100
@@ -159,9 +170,18 @@ enum icp_qat_gen6_slice_mask {
ICP_ACCEL_GEN6_MASK_PKE_SLICE = BIT(2),
ICP_ACCEL_GEN6_MASK_CPR_SLICE = BIT(3),
ICP_ACCEL_GEN6_MASK_DCPRZ_SLICE = BIT(4),
+ ICP_ACCEL_GEN6_MASK_EIA3_SLICE = BIT(5),
ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE = BIT(6),
+ ICP_ACCEL_GEN6_MASK_ZUC_256_SLICE = BIT(7),
+ ICP_ACCEL_GEN6_MASK_5G_SLICE = BIT(8),
};
+/* Return true if the device is a wireless crypto (WCY) SKU */
+static inline bool adf_6xxx_is_wcy(struct adf_hw_device_data *hw_data)
+{
+ return !(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE);
+}
+
void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data);
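
adf_6xxx_is_wcy() classifies the SKU purely from a fuse: on wireless parts the WCP/WAT slice bit in FUSECTL1 is *clear* (hence the negation), and every dispatch point in the .c file — firmware config table, capability mask, thread masks, services_supported — keys off this single predicate. A standalone sketch of the fuse test plus table selection:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MASK_WCP_WAT_SLICE (1u << 6)	/* ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE */

	struct fw_config { const char *obj; };

	static const struct fw_config default_cfg[] = { { "qat_6xxx_cy.bin" } };
	static const struct fw_config wcy_cfg[]     = { { "qat_6xxx_wcy.bin" } };

	static bool is_wcy(uint32_t fusectl1)
	{
		/* Slice fused *out* means wireless SKU, hence the negation. */
		return !(fusectl1 & MASK_WCP_WAT_SLICE);
	}

	static const struct fw_config *get_fw_config(uint32_t fusectl1)
	{
		return is_wcy(fusectl1) ? wcy_cfg : default_cfg;
	}

	int main(void)
	{
		printf("%s\n", get_fw_config(0)->obj);			/* wcy */
		printf("%s\n", get_fw_config(MASK_WCP_WAT_SLICE)->obj);	/* default */
		return 0;
	}
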
diff --git a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
index c1dc9c56fdf5..c52462a48c34 100644
--- a/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_6xxx/adf_drv.c
@@ -16,6 +16,7 @@
#include "adf_gen6_shared.h"
#include "adf_6xxx_hw_data.h"
+#include "adf_heartbeat.h"
static int bar_map[] = {
0, /* SRAM */
@@ -53,6 +54,35 @@ static void adf_devmgr_remove(void *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
+static int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ /*
+ * Wireless SKU - symmetric crypto service only
+ * Non-wireless SKU - crypto service for even devices and compression for odd devices
+ */
+ if (adf_6xxx_is_wcy(GET_HW_DATA(accel_dev)))
+ config = ADF_CFG_SYM;
+ else
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_pci *accel_pci_dev;
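
Per the comment in adf_gen6_cfg_dev_init() above, non-wireless devices alternate between crypto and compression by accelerator id, while wireless SKUs always get the symmetric service. A condensed sketch of the selection rule — the ADF_CFG_* macro string values are not visible in this diff, so the literals below are placeholders:

	/* Sketch of the service-selection rule; strings are placeholders. */
	static const char *default_services(bool wcy_sku, unsigned int accel_id)
	{
		if (wcy_sku)
			return "sym";			/* ADF_CFG_SYM */
		return accel_id % 2 ? "dc" : "cy";	/* ADF_CFG_DC / ADF_CFG_CY */
	}
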
@@ -91,9 +121,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL0_OFFSET, &hw_data->fuses[ADF_FUSECTL0]);
pci_read_config_dword(pdev, ADF_GEN6_FUSECTL1_OFFSET, &hw_data->fuses[ADF_FUSECTL1]);
- if (!(hw_data->fuses[ADF_FUSECTL1] & ICP_ACCEL_GEN6_MASK_WCP_WAT_SLICE))
- return dev_err_probe(dev, -EFAULT, "Wireless mode is not supported.\n");
-
/* Enable PCI device */
ret = pcim_enable_device(pdev);
if (ret)
@@ -182,8 +209,10 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = adf_dev_up(accel_dev, true);
- if (ret)
+ if (ret) {
+ adf_dev_down(accel_dev);
return ret;
+ }
ret = devm_add_action_or_reset(dev, adf_device_down, accel_dev);
if (ret)
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 89845754841b..9478111c8437 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -4,6 +4,7 @@ ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"'
intel_qat-y := adf_accel_engine.o \
adf_admin.o \
adf_aer.o \
+ adf_anti_rb.o \
adf_bank_state.o \
adf_cfg.o \
adf_cfg_services.o \
@@ -29,6 +30,7 @@ intel_qat-y := adf_accel_engine.o \
adf_rl_admin.o \
adf_rl.o \
adf_sysfs.o \
+ adf_sysfs_anti_rb.o \
adf_sysfs_ras_counters.o \
adf_sysfs_rl.o \
adf_timer.o \
@@ -39,6 +41,7 @@ intel_qat-y := adf_accel_engine.o \
qat_bl.o \
qat_comp_algs.o \
qat_compression.o \
+ qat_comp_zstd_utils.o \
qat_crypto.o \
qat_hal.o \
qat_mig_dev.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 9fe3239f0114..03a4e9690208 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
+#include "adf_anti_rb.h"
#include "adf_cfg_common.h"
#include "adf_dc.h"
#include "adf_rl.h"
@@ -58,6 +59,11 @@ enum adf_accel_capabilities {
ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
+enum adf_accel_capabilities_ext {
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S = BIT(0),
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD = BIT(1),
+};
+
enum adf_fuses {
ADF_FUSECTL0,
ADF_FUSECTL1,
@@ -328,12 +334,14 @@ struct adf_hw_device_data {
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
struct adf_tl_hw_data tl_data;
+ struct adf_anti_rb_hw_data anti_rb_data;
struct qat_migdev_ops vfmig_ops;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses[ADF_MAX_FUSES];
u32 straps;
u32 accel_capabilities_mask;
+ u32 accel_capabilities_ext_mask;
u32 extended_dc_capabilities;
u16 fw_capabilities;
u32 clock_frequency;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index f9f1018a2823..09d4f547e082 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
+#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include "adf_cfg.h"
@@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev)
static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
{
struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+ unsigned long reset_delay;
qat_hal_reset(loader_data->fw_loader);
+
+ reset_delay = loader_data->fw_loader->chip_info->reset_delay_us;
+ if (reset_delay)
+ fsleep(reset_delay);
+
if (qat_hal_clr_reset(loader_data->fw_loader))
return -EFAULT;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 573388c37100..841aa802c79e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -6,8 +6,10 @@
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_admin.h"
+#include "adf_anti_rb.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_heartbeat.h"
@@ -19,6 +21,7 @@
#define ADF_ADMIN_POLL_DELAY_US 20
#define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_ONE_AE 1
+#define ADF_ADMIN_RETRY_MAX 60
static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -536,6 +539,73 @@ int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+static int adf_send_admin_retry(struct adf_accel_dev *accel_dev, u8 cmd_id,
+ struct icp_qat_fw_init_admin_resp *resp,
+ unsigned int sleep_ms)
+{
+ u32 admin_ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
+ struct icp_qat_fw_init_admin_req req = { };
+ unsigned int retries = ADF_ADMIN_RETRY_MAX;
+ int ret;
+
+ req.cmd_id = cmd_id;
+
+ do {
+ ret = adf_send_admin(accel_dev, &req, resp, admin_ae_mask);
+ if (!ret)
+ return 0;
+
+ if (resp->status != ICP_QAT_FW_INIT_RESP_STATUS_RETRY)
+ return ret;
+
+ msleep(sleep_ms);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int adf_send_admin_svn(struct adf_accel_dev *accel_dev, u8 cmd_id,
+ struct icp_qat_fw_init_admin_resp *resp)
+{
+ return adf_send_admin_retry(accel_dev, cmd_id, resp, ADF_SVN_RETRY_MS);
+}
+
+int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+ int ret;
+
+ ret = adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_READ, &resp);
+ if (ret)
+ return ret;
+
+ switch (cmd) {
+ case ARB_ENFORCED_MIN_SVN:
+ *svn = resp.enforced_min_svn;
+ break;
+ case ARB_PERMANENT_MIN_SVN:
+ *svn = resp.permanent_min_svn;
+ break;
+ case ARB_ACTIVE_SVN:
+ *svn = resp.active_svn;
+ break;
+ default:
+ *svn = 0;
+ dev_err(&GET_DEV(accel_dev),
+ "Unknown secure version number request\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev)
+{
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+ return adf_send_admin_svn(accel_dev, ICP_QAT_FW_SVN_COMMIT, &resp);
+}
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
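
adf_send_admin_retry() above retries only while the firmware explicitly reports RETRY, up to ADF_ADMIN_RETRY_MAX attempts with a caller-chosen sleep; any other failure propagates immediately. A standalone re-creation of the bounded-retry idiom with a simulated firmware that needs a few attempts:

	#include <stdio.h>

	#define RETRY_MAX 60
	#define STATUS_RETRY 2

	/* Stand-in for adf_send_admin(): reports RETRY the first n calls. */
	static int calls_left = 3;
	static int send_admin(int *status)
	{
		if (calls_left-- > 0) {
			*status = STATUS_RETRY;
			return -1;
		}
		*status = 0;
		return 0;
	}

	static int send_admin_retry(void)
	{
		int retries = RETRY_MAX;
		int status, ret;

		do {
			ret = send_admin(&status);
			if (!ret)
				return 0;
			if (status != STATUS_RETRY)
				return ret;	/* hard failure: don't spin */
			/* msleep(sleep_ms) in the kernel version */
		} while (--retries);

		return -110;	/* -ETIMEDOUT after the retry budget */
	}

	int main(void)
	{
		printf("%d\n", send_admin_retry());	/* 0 after 3 retries */
		return 0;
	}
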
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
index 647c8e196752..9704219f2eb7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -27,5 +27,7 @@ int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
struct icp_qat_fw_init_admin_slice_cnt *slice_count);
int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev);
+int adf_send_admin_arb_query(struct adf_accel_dev *accel_dev, int cmd, u8 *svn);
+int adf_send_admin_arb_commit(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c
new file mode 100644
index 000000000000..2c19a82d89ad
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kstrtox.h>
+
+#include "adf_accel_devices.h"
+#include "adf_admin.h"
+#include "adf_anti_rb.h"
+#include "adf_common_drv.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_SVN_RETRY_MAX 60
+
+int adf_anti_rb_commit(struct adf_accel_dev *accel_dev)
+{
+ return adf_send_admin_arb_commit(accel_dev);
+}
+
+int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn)
+{
+ return adf_send_admin_arb_query(accel_dev, cmd, svn);
+}
+
+int adf_anti_rb_check(struct pci_dev *pdev)
+{
+ struct adf_anti_rb_hw_data *anti_rb;
+ u32 svncheck_sts, cfc_svncheck_sts;
+ struct adf_accel_dev *accel_dev;
+ void __iomem *pmisc_addr;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+ if (!accel_dev)
+ return -EINVAL;
+
+ anti_rb = GET_ANTI_RB_DATA(accel_dev);
+ if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev))
+ return 0;
+
+ pmisc_addr = adf_get_pmisc_base(accel_dev);
+
+ cfc_svncheck_sts = ADF_CSR_RD(pmisc_addr, anti_rb->svncheck_offset);
+
+ svncheck_sts = FIELD_GET(ADF_SVN_STS_MASK, cfc_svncheck_sts);
+ switch (svncheck_sts) {
+ case ADF_SVN_NO_STS:
+ return 0;
+ case ADF_SVN_PASS_STS:
+ anti_rb->svncheck_retry = 0;
+ return 0;
+ case ADF_SVN_FAIL_STS:
+ dev_err(&GET_DEV(accel_dev), "Security Version Number failure\n");
+ return -EIO;
+ case ADF_SVN_RETRY_STS:
+ if (anti_rb->svncheck_retry++ >= ADF_SVN_RETRY_MAX) {
+ anti_rb->svncheck_retry = 0;
+ return -ETIMEDOUT;
+ }
+ msleep(ADF_SVN_RETRY_MS);
+ return -EAGAIN;
+ default:
+ dev_err(&GET_DEV(accel_dev), "Invalid SVN check status\n");
+ return -EINVAL;
+ }
+}
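
adf_anti_rb_check() extracts the low status byte of the SVN-check CSR with FIELD_GET(GENMASK(7, 0), ...) and maps the four firmware states onto errno values, keeping a bounded retry budget in svncheck_retry across calls. A user-space re-creation of the decode (errno numbers hard-coded for portability of the sketch):

	#include <stdint.h>
	#include <stdio.h>

	#define SVN_STS_MASK 0xffu	/* GENMASK(7, 0) */
	#define SVN_NO_STS    0x00
	#define SVN_PASS_STS  0x01
	#define SVN_RETRY_STS 0x02
	#define SVN_FAIL_STS  0x03
	#define SVN_RETRY_MAX 60

	static unsigned int svncheck_retry;

	static int svn_check(uint32_t csr)
	{
		switch (csr & SVN_STS_MASK) {	/* FIELD_GET with a bit-0 mask */
		case SVN_NO_STS:
			return 0;
		case SVN_PASS_STS:
			svncheck_retry = 0;
			return 0;
		case SVN_FAIL_STS:
			return -5;	/* -EIO: rollback check failed */
		case SVN_RETRY_STS:
			if (svncheck_retry++ >= SVN_RETRY_MAX) {
				svncheck_retry = 0;
				return -110;	/* -ETIMEDOUT */
			}
			return -11;	/* -EAGAIN: caller should re-poll */
		default:
			return -22;	/* -EINVAL */
		}
	}

	int main(void)
	{
		printf("%d %d %d\n", svn_check(0x01), svn_check(0x02), svn_check(0x03));
		return 0;	/* prints: 0 -11 -5 */
	}
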
diff --git a/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h
new file mode 100644
index 000000000000..531af41a3db8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_anti_rb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef ADF_ANTI_RB_H_
+#define ADF_ANTI_RB_H_
+
+#include <linux/types.h>
+
+#define GET_ANTI_RB_DATA(accel_dev) (&(accel_dev)->hw_device->anti_rb_data)
+
+#define ADF_SVN_NO_STS 0x00
+#define ADF_SVN_PASS_STS 0x01
+#define ADF_SVN_RETRY_STS 0x02
+#define ADF_SVN_FAIL_STS 0x03
+#define ADF_SVN_RETRY_MS 250
+#define ADF_SVN_STS_MASK GENMASK(7, 0)
+
+enum anti_rb {
+ ARB_ENFORCED_MIN_SVN,
+ ARB_PERMANENT_MIN_SVN,
+ ARB_ACTIVE_SVN,
+};
+
+struct adf_accel_dev;
+struct pci_dev;
+
+struct adf_anti_rb_hw_data {
+ bool (*anti_rb_enabled)(struct adf_accel_dev *accel_dev);
+ u32 svncheck_offset;
+ u32 svncheck_retry;
+ bool sysfs_added;
+};
+
+int adf_anti_rb_commit(struct adf_accel_dev *accel_dev);
+int adf_anti_rb_query(struct adf_accel_dev *accel_dev, enum anti_rb cmd, u8 *svn);
+int adf_anti_rb_check(struct pci_dev *pdev);
+
+#endif /* ADF_ANTI_RB_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 6cf3a95489e8..7b8b295ac459 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -111,12 +111,12 @@ void qat_algs_unregister(void);
int qat_asym_algs_register(void);
void qat_asym_algs_unregister(void);
-struct qat_compression_instance *qat_compression_get_instance_node(int node);
+struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg);
void qat_compression_put_instance(struct qat_compression_instance *inst);
int qat_compression_register(void);
int qat_compression_unregister(void);
-int qat_comp_algs_register(void);
-void qat_comp_algs_unregister(void);
+int qat_comp_algs_register(u32 caps);
+void qat_comp_algs_unregister(u32 caps);
void qat_comp_alg_callback(void *resp);
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
index 78957fa900b7..d5c578e3fd8d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -9,6 +9,7 @@ enum adf_fw_objs {
ADF_FW_DC_OBJ,
ADF_FW_ADMIN_OBJ,
ADF_FW_CY_OBJ,
+ ADF_FW_WCY_OBJ,
};
struct adf_fw_config {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 349fdb323763..f4a58f04071a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -504,14 +504,20 @@ static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
switch (algo) {
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
+ hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
+ break;
+ case QAT_LZ4S:
+ header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS;
+ hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_LZ4S;
+ hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_DISABLED;
+ hw_comp_lower_csr.abd = ICP_QAT_HW_COMP_20_ABD_ABD_DISABLED;
break;
default:
return -EINVAL;
}
- hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
- hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
- hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
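
Moving the algo/lllbd/skip_ctrl assignments into the switch lets LZ4S program a different slice format and explicitly disable LLLBD and ABD, while DEFLATE keeps the previous settings; only the genuinely common fields remain below the switch. A condensed sketch of the shape, with shortened names and illustrative values:

	/* Condensed sketch of the per-algorithm split above. */
	enum dc_algo { DEFLATE, LZ4S };

	struct comp_csr { int algo, lllbd, abd, skip_ctrl, sd; };

	static int build_comp_csr(enum dc_algo algo, struct comp_csr *csr)
	{
		switch (algo) {
		case DEFLATE:		/* keeps the old ILZ77 + LLLBD settings */
			csr->algo = 0;	/* ..._FORMAT_ILZ77 */
			csr->lllbd = 1;	/* enabled */
			csr->skip_ctrl = 3;
			break;
		case LZ4S:		/* new: other format, LLLBD/ABD off */
			csr->algo = 1;	/* ..._FORMAT_LZ4S */
			csr->lllbd = 0;
			csr->abd = 0;
			break;
		default:
			return -22;	/* -EINVAL */
		}
		csr->sd = 1;		/* common settings stay below the switch */
		return 0;
	}
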
@@ -538,12 +544,16 @@ static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
switch (algo) {
case QAT_DEFLATE:
header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
+ break;
+ case QAT_LZ4S:
+ header->service_cmd_id = ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS;
+ hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_LZ4S;
break;
default:
return -EINVAL;
}
- hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);
cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
index c9b151006dca..ffe4525a1e69 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.c
@@ -31,12 +31,6 @@ void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
}
EXPORT_SYMBOL_GPL(adf_gen6_init_hw_csr_ops);
-int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
- return adf_gen4_cfg_dev_init(accel_dev);
-}
-EXPORT_SYMBOL_GPL(adf_gen6_cfg_dev_init);
-
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev)
{
return adf_comp_dev_config(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
index fc6fad029a70..072115a531e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_shared.h
@@ -10,7 +10,6 @@ struct adf_pfvf_ops;
void adf_gen6_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
void adf_gen6_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
-int adf_gen6_cfg_dev_init(struct adf_accel_dev *accel_dev);
int adf_gen6_comp_dev_config(struct adf_accel_dev *accel_dev);
int adf_gen6_no_dev_config(struct adf_accel_dev *accel_dev);
void adf_gen6_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 46491048e0bb..f8088388cf12 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -10,6 +10,7 @@
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"
#include "adf_rl.h"
+#include "adf_sysfs_anti_rb.h"
#include "adf_sysfs_ras_counters.h"
#include "adf_telemetry.h"
@@ -179,6 +180,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
+ u32 caps;
int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -252,7 +254,8 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);
- if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
+ caps = hw_data->accel_capabilities_ext_mask;
+ if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register(caps)) {
dev_err(&GET_DEV(accel_dev),
"Failed to register compression algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -263,6 +266,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
adf_dbgfs_add(accel_dev);
adf_sysfs_start_ras(accel_dev);
+ adf_sysfs_start_arb(accel_dev);
return 0;
}
@@ -292,6 +296,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
+ adf_sysfs_stop_arb(accel_dev);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -305,7 +310,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!list_empty(&accel_dev->compression_list) &&
test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
- qat_comp_algs_unregister();
+ qat_comp_algs_unregister(hw_data->accel_capabilities_ext_mask);
clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
list_for_each_entry(service, &service_table, list) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c
new file mode 100644
index 000000000000..789341ad1bdc
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "adf_anti_rb.h"
+#include "adf_common_drv.h"
+#include "adf_sysfs_anti_rb.h"
+
+static ssize_t enforced_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_ENFORCED_MIN_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(enforced_min);
+
+static ssize_t active_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_ACTIVE_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(active);
+
+static ssize_t permanent_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ int err;
+ u8 svn;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = adf_anti_rb_query(accel_dev, ARB_PERMANENT_MIN_SVN, &svn);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", svn);
+}
+static DEVICE_ATTR_RO(permanent_min);
+
+static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ bool val;
+ int err;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ err = kstrtobool(buf, &val);
+ if (err)
+ return err;
+
+ if (!val)
+ return -EINVAL;
+
+ err = adf_anti_rb_commit(accel_dev);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_WO(commit);
+
+static struct attribute *qat_svn_attrs[] = {
+ &dev_attr_commit.attr,
+ &dev_attr_active.attr,
+ &dev_attr_enforced_min.attr,
+ &dev_attr_permanent_min.attr,
+ NULL
+};
+
+static const struct attribute_group qat_svn_group = {
+ .attrs = qat_svn_attrs,
+ .name = "qat_svn",
+};
+
+void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev);
+
+ if (!anti_rb->anti_rb_enabled || !anti_rb->anti_rb_enabled(accel_dev))
+ return;
+
+ if (device_add_group(&GET_DEV(accel_dev), &qat_svn_group)) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Failed to create qat_svn attribute group\n");
+ return;
+ }
+
+ anti_rb->sysfs_added = true;
+}
+
+void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev)
+{
+ struct adf_anti_rb_hw_data *anti_rb = GET_ANTI_RB_DATA(accel_dev);
+
+ if (!anti_rb->sysfs_added)
+ return;
+
+ device_remove_group(&GET_DEV(accel_dev), &qat_svn_group);
+
+ anti_rb->sysfs_added = false;
+ anti_rb->svncheck_retry = 0;
+}
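
The attribute group lands as qat_svn/ under the PCI device's sysfs directory (matching the Documentation/ABI/testing/sysfs-driver-qat_svn entry in the diffstat), exposing three read-only SVN views plus a write-only commit trigger. A small user-space reader; the BDF in the path below is a placeholder:

	#include <stdio.h>

	/* Illustrative path; substitute the real device's BDF. */
	#define QAT_SVN_DIR "/sys/bus/pci/devices/0000:6b:00.0/qat_svn"

	static int read_svn(const char *attr, unsigned int *val)
	{
		char path[256];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path), "%s/%s", QAT_SVN_DIR, attr);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = fscanf(f, "%u", val) == 1 ? 0 : -1;
		fclose(f);
		return ok;
	}

	int main(void)
	{
		unsigned int svn;

		if (!read_svn("active", &svn))
			printf("active SVN: %u\n", svn);
		/* Committing: write 1 to .../qat_svn/commit (root only). */
		return 0;
	}
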
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h
new file mode 100644
index 000000000000..f0c2b6e464f7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_anti_rb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef ADF_SYSFS_ANTI_RB_H_
+#define ADF_SYSFS_ANTI_RB_H_
+
+struct adf_accel_dev;
+
+void adf_sysfs_start_arb(struct adf_accel_dev *accel_dev);
+void adf_sysfs_stop_arb(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_SYSFS_ANTI_RB_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
index e97c67c87b3c..ef1420199210 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
@@ -13,14 +13,14 @@ static ssize_t errors_correctable_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t errors_nonfatal_show(struct device *dev,
@@ -28,14 +28,14 @@ static ssize_t errors_nonfatal_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t errors_fatal_show(struct device *dev,
@@ -43,14 +43,14 @@ static ssize_t errors_fatal_show(struct device *dev,
char *buf)
{
struct adf_accel_dev *accel_dev;
- unsigned long counter;
+ int counter;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
return -EINVAL;
counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+ return sysfs_emit(buf, "%d\n", counter);
}
static ssize_t reset_error_counters_store(struct device *dev,
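
sysfs_emit() is the preferred replacement for scnprintf(buf, PAGE_SIZE, ...) in show() callbacks: it checks that buf is the page-aligned buffer sysfs handed in and caps output at PAGE_SIZE, so the explicit length argument disappears from every call site. (The %ld-to-%d format switch simply follows the counter type change above.) The resulting handler shape, as a kernel-style sketch with a hypothetical read_counter() helper:

	/* Kernel-style sketch; read_counter() is a hypothetical helper. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		int counter = read_counter(dev);

		return sysfs_emit(buf, "%d\n", counter);	/* was scnprintf(buf, PAGE_SIZE, ...) */
	}
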
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
index f31556beed8b..89bfd8761d75 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -321,7 +321,7 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr,
{
struct adf_rl_interface_data *data;
struct adf_accel_dev *accel_dev;
- int ret, rem_cap;
+ int rem_cap;
accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
if (!accel_dev)
@@ -336,23 +336,19 @@ static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr,
if (rem_cap < 0)
return rem_cap;
- ret = sysfs_emit(buf, "%u\n", rem_cap);
-
- return ret;
+ return sysfs_emit(buf, "%u\n", rem_cap);
}
static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned int val;
int ret;
ret = sysfs_match_string(rl_services, buf);
if (ret < 0)
return ret;
- val = ret;
- ret = set_param_u(dev, CAP_REM_SRV, val);
+ ret = set_param_u(dev, CAP_REM_SRV, ret);
if (ret)
return ret;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
index c141160421e1..2fea30a78340 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw.h
@@ -151,6 +151,13 @@ struct icp_qat_fw_comn_resp {
ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+#define ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS 4
+#define ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_ST_BLK_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_ST_BLK_FLAG_MASK)
+
#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_SET(hdr_t, val) \
QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
index 81969c515a17..2526053ee630 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h
@@ -8,6 +8,8 @@ enum icp_qat_fw_comp_cmd_id {
ICP_QAT_FW_COMP_CMD_STATIC = 0,
ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ ICP_QAT_FW_COMP_CMD_ZSTD_COMPRESS = 10,
+ ICP_QAT_FW_COMP_CMD_ZSTD_DECOMPRESS = 11,
ICP_QAT_FW_COMP_CMD_DELIMITER
};
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 63cf18e2a4e5..6b0f0d100cb9 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -31,11 +31,15 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_RL_REMOVE = 136,
ICP_QAT_FW_TL_START = 137,
ICP_QAT_FW_TL_STOP = 138,
+ ICP_QAT_FW_SVN_READ = 146,
+ ICP_QAT_FW_SVN_COMMIT = 147,
};
enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
- ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+ ICP_QAT_FW_INIT_RESP_STATUS_FAIL = 1,
+ ICP_QAT_FW_INIT_RESP_STATUS_RETRY = 2,
+ ICP_QAT_FW_INIT_RESP_STATUS_UNSUPPORTED = 4,
};
struct icp_qat_fw_init_admin_tl_rp_indexes {
@@ -159,6 +163,15 @@ struct icp_qat_fw_init_admin_resp {
};
struct icp_qat_fw_init_admin_slice_cnt slices;
__u16 fw_capabilities;
+ struct {
+ __u8 enforced_min_svn;
+ __u8 permanent_min_svn;
+ __u8 active_svn;
+ __u8 resrvd9;
+ __u16 svn_status;
+ __u16 resrvd10;
+ __u64 resrvd11;
+ };
};
} __packed;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
index 6887930c7995..e74cafa95f1c 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
@@ -27,6 +27,7 @@ struct icp_qat_fw_loader_chip_info {
int mmp_sram_size;
bool nn;
bool lm2lm3;
+ u16 reset_delay_us;
u32 lm_size;
u32 icp_rst_csr;
u32 icp_rst_mask;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index b8f1c4ffb8b5..16ef6d98fa42 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -94,7 +94,8 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_AUTHENTICATION = BIT(3),
ICP_ACCEL_CAPABILITIES_RESERVED_1 = BIT(4),
ICP_ACCEL_CAPABILITIES_COMPRESSION = BIT(5),
- /* Bits 6-7 are currently reserved */
+ /* Bit 6 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_5G = BIT(7),
ICP_ACCEL_CAPABILITIES_ZUC = BIT(8),
ICP_ACCEL_CAPABILITIES_SHA3 = BIT(9),
/* Bits 10-11 are currently reserved */
@@ -335,7 +336,8 @@ enum icp_qat_hw_compression_delayed_match {
enum icp_qat_hw_compression_algo {
ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
- ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+ ICP_QAT_HW_COMPRESSION_ALGO_ZSTD = 2,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER
};
enum icp_qat_hw_compression_depth {
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
index 7ea8962272f2..d28732225c9e 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw_20_comp.h
@@ -3,6 +3,8 @@
#ifndef _ICP_QAT_HW_20_COMP_H_
#define _ICP_QAT_HW_20_COMP_H_
+#include <linux/swab.h>
+
#include "icp_qat_hw_20_comp_defs.h"
#include "icp_qat_fw.h"
@@ -54,7 +56,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_comp_20_config_csr_lower
QAT_FIELD_SET(val32, csr.abd, ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_ABD_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_comp_20_config_csr_upper {
@@ -106,7 +108,7 @@ ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_comp_20_config_csr_upper
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_BITPOS,
ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_lower {
@@ -138,7 +140,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(struct icp_qat_hw_decomp_20_config_csr_l
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_LZ4_BLOCK_CHECKSUM_PRESENT_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
struct icp_qat_hw_decomp_20_config_csr_upper {
@@ -158,7 +160,7 @@ ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_UPPER(struct icp_qat_hw_decomp_20_config_csr_u
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_BITPOS,
ICP_QAT_HW_DECOMP_20_CONFIG_CSR_MINI_CAM_CONTROL_MASK);
- return __builtin_bswap32(val32);
+ return swab32(val32);
}
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index 8b123472b71c..e0d003b50358 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -6,6 +6,7 @@
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
+#include <linux/zstd.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_dc.h"
@@ -13,9 +14,104 @@
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"
+#include "qat_comp_zstd_utils.h"
+
+#define QAT_ZSTD_SCRATCH_SIZE 524288
+#define QAT_ZSTD_MAX_BLOCK_SIZE 65535
+#define QAT_ZSTD_MAX_CONTENT_SIZE 4096
+#define QAT_LZ4S_MIN_INPUT_SIZE 8192
+#define QAT_LZ4S_MAX_OUTPUT_SIZE QAT_ZSTD_SCRATCH_SIZE
+#define QAT_MAX_SEQUENCES (128 * 1024)
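+
+/*
+ * Scratch sizing (a sketch of the rationale, inferred from the allocations
+ * below): the 512 KiB QAT_ZSTD_SCRATCH_SIZE bounds both the LZ4s
+ * intermediate buffer and the literal pool, while QAT_MAX_SEQUENCES caps
+ * the ZSTD_Sequence array handed to the ZSTD sequence compressor.
+ */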
static DEFINE_MUTEX(algs_lock);
-static unsigned int active_devs;
+static unsigned int active_devs_deflate;
+static unsigned int active_devs_lz4s;
+static unsigned int active_devs_zstd;
+
+struct qat_zstd_scratch {
+ size_t cctx_buffer_size;
+ void *lz4s;
+ void *literals;
+ void *out_seqs;
+ void *workspace;
+ ZSTD_CCtx *ctx;
+};
+
+static void *qat_zstd_alloc_scratch(void)
+{
+ struct qat_zstd_scratch *scratch;
+ ZSTD_parameters params;
+ size_t cctx_size;
+ ZSTD_CCtx *ctx;
+ size_t zret;
+ int ret;
+
+ ret = -ENOMEM;
+ scratch = kzalloc_obj(*scratch);
+ if (!scratch)
+ return ERR_PTR(ret);
+
+ scratch->lz4s = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL);
+ if (!scratch->lz4s)
+ goto error;
+
+ scratch->literals = kvmalloc(QAT_ZSTD_SCRATCH_SIZE, GFP_KERNEL);
+ if (!scratch->literals)
+ goto error;
+
+ scratch->out_seqs = kvcalloc(QAT_MAX_SEQUENCES, sizeof(ZSTD_Sequence),
+ GFP_KERNEL);
+ if (!scratch->out_seqs)
+ goto error;
+
+ params = zstd_get_params(zstd_max_clevel(), QAT_ZSTD_SCRATCH_SIZE);
+ cctx_size = zstd_cctx_workspace_bound(&params.cParams);
+
+ scratch->workspace = kvmalloc(cctx_size, GFP_KERNEL | __GFP_ZERO);
+ if (!scratch->workspace)
+ goto error;
+
+ ret = -EINVAL;
+ ctx = zstd_init_cctx(scratch->workspace, cctx_size);
+ if (!ctx)
+ goto error;
+
+ scratch->ctx = ctx;
+ scratch->cctx_buffer_size = cctx_size;
+
+ zret = zstd_cctx_set_param(ctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
+ if (zstd_is_error(zret))
+ goto error;
+
+ return scratch;
+
+error:
+ kvfree(scratch->lz4s);
+ kvfree(scratch->literals);
+ kvfree(scratch->out_seqs);
+ kvfree(scratch->workspace);
+ kfree(scratch);
+ return ERR_PTR(ret);
+}
+
+static void qat_zstd_free_scratch(void *ctx)
+{
+ struct qat_zstd_scratch *scratch = ctx;
+
+ if (!scratch)
+ return;
+
+ kvfree(scratch->lz4s);
+ kvfree(scratch->literals);
+ kvfree(scratch->out_seqs);
+ kvfree(scratch->workspace);
+ kfree(scratch);
+}
+
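+/*
+ * Per-stream scratch contexts for the LZ4s-to-ZSTD path; callers take a
+ * stream with crypto_acomp_lock_stream_bh() and release it with
+ * crypto_acomp_unlock_stream_bh() (see qat_comp_lz4s_zstd_callback()).
+ */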
+static struct crypto_acomp_streams qat_zstd_streams = {
+ .alloc_ctx = qat_zstd_alloc_scratch,
+ .free_ctx = qat_zstd_free_scratch,
+};
enum direction {
DECOMPRESSION = 0,
@@ -24,10 +120,18 @@ enum direction {
struct qat_compression_req;
+struct qat_callback_params {
+ unsigned int produced;
+ unsigned int dlen;
+ bool plain;
+};
+
struct qat_compression_ctx {
u8 comp_ctx[QAT_COMP_CTX_SIZE];
struct qat_compression_instance *inst;
- int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
+ int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp,
+ struct qat_callback_params *params);
+ struct crypto_acomp *ftfm;
};
struct qat_compression_req {
@@ -62,6 +166,7 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
struct qat_compression_instance *inst = ctx->inst;
+ struct qat_callback_params params = { };
int consumed, produced;
s8 cmp_err, xlt_err;
int res = -EBADMSG;
@@ -76,6 +181,10 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
consumed = qat_comp_get_consumed_ctr(resp);
produced = qat_comp_get_produced_ctr(resp);
+	/* Cache parameters for the algorithm-specific callback */
+ params.produced = produced;
+ params.dlen = areq->dlen;
+
dev_dbg(&GET_DEV(accel_dev),
"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
@@ -83,16 +192,20 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
status ? "ERR" : "OK ",
areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);
- areq->dlen = 0;
+ if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) {
+ if (cmp_err == ERR_CODE_OVERFLOW_ERROR || xlt_err == ERR_CODE_OVERFLOW_ERROR)
+ res = -E2BIG;
- if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ areq->dlen = 0;
goto end;
+ }
if (qat_req->dir == COMPRESSION) {
cnv = qat_comp_get_cmp_cnv_flag(resp);
if (unlikely(!cnv)) {
dev_err(&GET_DEV(accel_dev),
"Verified compression not supported\n");
+ areq->dlen = 0;
goto end;
}
@@ -102,38 +215,41 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
dev_dbg(&GET_DEV(accel_dev),
"Actual buffer overflow: produced=%d, dlen=%d\n",
produced, qat_req->actual_dlen);
+
+ res = -E2BIG;
+ areq->dlen = 0;
goto end;
}
+
+ params.plain = !!qat_comp_get_cmp_uncomp_flag(resp);
}
res = 0;
areq->dlen = produced;
if (ctx->qat_comp_callback)
- res = ctx->qat_comp_callback(qat_req, resp);
+ res = ctx->qat_comp_callback(qat_req, resp, &params);
end:
qat_bl_free_bufl(accel_dev, &qat_req->buf);
acomp_request_complete(areq, res);
+ qat_alg_send_backlog(qat_req->alg_req.backlog);
}
void qat_comp_alg_callback(void *resp)
{
struct qat_compression_req *qat_req =
(void *)(__force long)qat_comp_get_opaque(resp);
- struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_comp_generic_callback(qat_req, resp);
-
- qat_alg_send_backlog(backlog);
}
-static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
+static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm, int alg)
{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst;
- int node;
+ int node, ret;
if (tfm->node == NUMA_NO_NODE)
node = numa_node_id();
@@ -141,18 +257,28 @@ static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
node = tfm->node;
memset(ctx, 0, sizeof(*ctx));
- inst = qat_compression_get_instance_node(node);
+ inst = qat_compression_get_instance_node(node, alg);
if (!inst)
return -EINVAL;
ctx->inst = inst;
- return qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, QAT_DEFLATE);
+ ret = qat_comp_build_ctx(inst->accel_dev, ctx->comp_ctx, alg);
+ if (ret) {
+ qat_compression_put_instance(inst);
+ memset(ctx, 0, sizeof(*ctx));
+ }
+
+ return ret;
+}
+
+static int qat_comp_alg_deflate_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ return qat_comp_alg_init_tfm(acomp_tfm, QAT_DEFLATE);
}
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
qat_compression_put_instance(ctx->inst);
memset(ctx, 0, sizeof(*ctx));
@@ -164,8 +290,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi
{
struct qat_compression_req *qat_req = acomp_request_ctx(areq);
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
struct qat_compression_instance *inst = ctx->inst;
gfp_t f = qat_algs_alloc_flags(&areq->base);
struct qat_sgl_to_bufl_params params = {0};
@@ -233,7 +358,234 @@ static int qat_comp_alg_decompress(struct acomp_req *req)
return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}
-static struct acomp_alg qat_acomp[] = { {
+static int qat_comp_alg_zstd_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ zstd_frame_header header;
+ void *buffer;
+ size_t zret;
+ int ret;
+
+ buffer = kmap_local_page(sg_page(req->src)) + req->src->offset;
+ zret = zstd_get_frame_header(&header, buffer, req->src->length);
+ kunmap_local(buffer);
+ if (zret) {
+ dev_err(&GET_DEV(ctx->inst->accel_dev),
+ "ZSTD-compressed data has an incomplete frame header\n");
+ return -EINVAL;
+ }
+
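+	/*
+	 * Frames whose window or content size exceeds what the offload path
+	 * handles are punted to the software zstd fallback transform
+	 * allocated at init time.
+	 */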
+ if (header.windowSize > QAT_ZSTD_MAX_BLOCK_SIZE ||
+ header.frameContentSize >= QAT_ZSTD_MAX_CONTENT_SIZE) {
+ dev_dbg(&GET_DEV(ctx->inst->accel_dev), "Window size=0x%llx\n",
+ header.windowSize);
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_decompress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+ }
+
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_lz4s_zstd_callback(struct qat_compression_req *qat_req, void *resp,
+ struct qat_callback_params *params)
+{
+ struct qat_compression_ctx *qat_ctx = qat_req->qat_compression_ctx;
+ struct acomp_req *areq = qat_req->acompress_req;
+ struct qat_zstd_scratch *scratch;
+ struct crypto_acomp_stream *s;
+ unsigned int lit_len = 0;
+ ZSTD_Sequence *out_seqs;
+ void *lz4s, *zstd;
+ size_t comp_size;
+ ZSTD_CCtx *ctx;
+ void *literals;
+ int seq_count;
+ int ret = 0;
+
+ if (params->produced + QAT_ZSTD_LIT_COPY_LEN > QAT_ZSTD_SCRATCH_SIZE) {
+ dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev),
+			"LZ4s-ZSTD: produced size (%u) + QAT_ZSTD_LIT_COPY_LEN > QAT_ZSTD_SCRATCH_SIZE (%u)\n",
+ params->produced, QAT_ZSTD_SCRATCH_SIZE);
+ areq->dlen = 0;
+ return -E2BIG;
+ }
+
+ s = crypto_acomp_lock_stream_bh(&qat_zstd_streams);
+ scratch = s->ctx;
+
+ lz4s = scratch->lz4s;
+	zstd = lz4s; /* Output buffer is the same as the lz4s buffer */
+ out_seqs = scratch->out_seqs;
+ ctx = scratch->ctx;
+ literals = scratch->literals;
+
+ if (likely(!params->plain)) {
+ if (likely(sg_nents(areq->dst) == 1)) {
+ zstd = sg_virt(areq->dst);
+ lz4s = zstd;
+ } else {
+ memcpy_from_sglist(lz4s, areq->dst, 0, params->produced);
+ }
+
+ seq_count = qat_alg_dec_lz4s(out_seqs, QAT_MAX_SEQUENCES, lz4s,
+ params->produced, literals, &lit_len);
+ if (seq_count < 0) {
+ ret = seq_count;
+ comp_size = 0;
+ goto out;
+ }
+ } else {
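+		/*
+		 * Stored (uncompressed) block: emit a single literals-only
+		 * sequence covering the whole input.
+		 */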
+ out_seqs[0].litLength = areq->slen;
+ out_seqs[0].offset = 0;
+ out_seqs[0].matchLength = 0;
+
+ seq_count = 1;
+ }
+
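+	/*
+	 * Re-encode the recovered sequences and literals as a ZSTD frame in
+	 * a single pass; params->dlen bounds the destination buffer.
+	 */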
+ comp_size = zstd_compress_sequences_and_literals(ctx, zstd, params->dlen,
+ out_seqs, seq_count,
+ literals, lit_len,
+ QAT_ZSTD_SCRATCH_SIZE,
+ areq->slen);
+ if (zstd_is_error(comp_size)) {
+ if (comp_size == ZSTD_error_cannotProduce_uncompressedBlock)
+ ret = -E2BIG;
+ else
+ ret = -EOPNOTSUPP;
+
+ comp_size = 0;
+ goto out;
+ }
+
+ if (comp_size > params->dlen) {
+ dev_dbg(&GET_DEV(qat_ctx->inst->accel_dev),
+ "LZ4s-ZSTD: compressed_size (%u) > output buffer size (%u)\n",
+ (unsigned int)comp_size, params->dlen);
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ if (unlikely(sg_nents(areq->dst) != 1))
+ memcpy_to_sglist(areq->dst, 0, zstd, comp_size);
+
+out:
+ areq->dlen = comp_size;
+ crypto_acomp_unlock_stream_bh(s);
+
+ return ret;
+}
+
+static int qat_comp_alg_lz4s_zstd_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ int reqsize;
+ int ret;
+
+ /* qat_comp_alg_init_tfm() wipes out the ctx */
+ ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_LZ4S);
+ if (ret)
+ return ret;
+
+ ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK,
+ tfm->node);
+ if (IS_ERR(ctx->ftfm)) {
+ qat_comp_alg_exit_tfm(acomp_tfm);
+ return PTR_ERR(ctx->ftfm);
+ }
+
+ reqsize = max(sizeof(struct qat_compression_req),
+ sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm));
+
+ acomp_tfm->reqsize = reqsize;
+
+ ctx->qat_comp_callback = qat_comp_lz4s_zstd_callback;
+
+ return 0;
+}
+
+static int qat_comp_alg_zstd_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ int reqsize;
+ int ret;
+
+ /* qat_comp_alg_init_tfm() wipes out the ctx */
+ ret = qat_comp_alg_init_tfm(acomp_tfm, QAT_ZSTD);
+ if (ret)
+ return ret;
+
+ ctx->ftfm = crypto_alloc_acomp_node("zstd", 0, CRYPTO_ALG_NEED_FALLBACK,
+ tfm->node);
+ if (IS_ERR(ctx->ftfm)) {
+ qat_comp_alg_exit_tfm(acomp_tfm);
+ return PTR_ERR(ctx->ftfm);
+ }
+
+ reqsize = max(sizeof(struct qat_compression_req),
+ sizeof(struct acomp_req) + crypto_acomp_reqsize(ctx->ftfm));
+
+ acomp_tfm->reqsize = reqsize;
+
+ return 0;
+}
+
+static void qat_comp_alg_zstd_exit_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+
+ if (ctx->ftfm)
+ crypto_free_acomp(ctx->ftfm);
+
+ qat_comp_alg_exit_tfm(acomp_tfm);
+}
+
+static int qat_comp_alg_lz4s_zstd_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ int ret;
+
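+	/*
+	 * Only buffers with both lengths inside
+	 * [QAT_LZ4S_MIN_INPUT_SIZE, QAT_LZ4S_MAX_OUTPUT_SIZE] are offloaded;
+	 * everything else takes the software fallback below.
+	 */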
+ if (req->slen >= QAT_LZ4S_MIN_INPUT_SIZE && req->dlen >= QAT_LZ4S_MIN_INPUT_SIZE &&
+ req->slen <= QAT_LZ4S_MAX_OUTPUT_SIZE && req->dlen <= QAT_LZ4S_MAX_OUTPUT_SIZE)
+ return qat_comp_alg_compress(req);
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_compress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+}
+
+static int qat_comp_alg_sw_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct qat_compression_ctx *ctx = acomp_tfm_ctx(acomp_tfm);
+ struct acomp_req *nreq = acomp_request_ctx(req);
+ int ret;
+
+ memcpy(nreq, req, sizeof(*req));
+ acomp_request_set_tfm(nreq, ctx->ftfm);
+
+ ret = crypto_acomp_decompress(nreq);
+ req->dlen = nreq->dlen;
+
+ return ret;
+}
+
+static struct acomp_alg qat_acomp_deflate[] = { {
.base = {
.cra_name = "deflate",
.cra_driver_name = "qat_deflate",
@@ -243,27 +595,165 @@ static struct acomp_alg qat_acomp[] = { {
.cra_reqsize = sizeof(struct qat_compression_req),
.cra_module = THIS_MODULE,
},
- .init = qat_comp_alg_init_tfm,
+ .init = qat_comp_alg_deflate_init_tfm,
.exit = qat_comp_alg_exit_tfm,
.compress = qat_comp_alg_compress,
.decompress = qat_comp_alg_decompress,
}};
-int qat_comp_algs_register(void)
+static struct acomp_alg qat_acomp_zstd_lz4s = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "qat_zstd",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_reqsize = sizeof(struct qat_compression_req),
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_lz4s_zstd_init_tfm,
+ .exit = qat_comp_alg_zstd_exit_tfm,
+ .compress = qat_comp_alg_lz4s_zstd_compress,
+ .decompress = qat_comp_alg_sw_decompress,
+};
+
+static struct acomp_alg qat_acomp_zstd_native = {
+ .base = {
+ .cra_name = "zstd",
+ .cra_driver_name = "qat_zstd",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_reqsize = sizeof(struct qat_compression_req),
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_zstd_init_tfm,
+ .exit = qat_comp_alg_zstd_exit_tfm,
+ .compress = qat_comp_alg_compress,
+ .decompress = qat_comp_alg_zstd_decompress,
+};
+
+static int qat_comp_algs_register_deflate(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs_deflate == 1) {
+ ret = crypto_register_acomps(qat_acomp_deflate,
+ ARRAY_SIZE(qat_acomp_deflate));
+ if (ret)
+ active_devs_deflate--;
+ }
+ mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+static void qat_comp_algs_unregister_deflate(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs_deflate == 0)
+ crypto_unregister_acomps(qat_acomp_deflate, ARRAY_SIZE(qat_acomp_deflate));
+ mutex_unlock(&algs_lock);
+}
+
+static int qat_comp_algs_register_lz4s(void)
{
int ret = 0;
mutex_lock(&algs_lock);
- if (++active_devs == 1)
- ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ if (++active_devs_lz4s == 1) {
+ ret = crypto_acomp_alloc_streams(&qat_zstd_streams);
+ if (ret) {
+ active_devs_lz4s--;
+ goto unlock;
+ }
+
+ ret = crypto_register_acomp(&qat_acomp_zstd_lz4s);
+ if (ret) {
+ crypto_acomp_free_streams(&qat_zstd_streams);
+ active_devs_lz4s--;
+ }
+ }
+unlock:
mutex_unlock(&algs_lock);
+
return ret;
}
-void qat_comp_algs_unregister(void)
+static void qat_comp_algs_unregister_lz4s(void)
{
mutex_lock(&algs_lock);
- if (--active_devs == 0)
- crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
+ if (--active_devs_lz4s == 0) {
+ crypto_unregister_acomp(&qat_acomp_zstd_lz4s);
+ crypto_acomp_free_streams(&qat_zstd_streams);
+ }
+ mutex_unlock(&algs_lock);
+}
+
+static int qat_comp_algs_register_zstd(void)
+{
+ int ret = 0;
+
+ mutex_lock(&algs_lock);
+ if (++active_devs_zstd == 1) {
+ ret = crypto_register_acomp(&qat_acomp_zstd_native);
+ if (ret)
+ active_devs_zstd--;
+ }
mutex_unlock(&algs_lock);
+
+ return ret;
+}
+
+static void qat_comp_algs_unregister_zstd(void)
+{
+ mutex_lock(&algs_lock);
+ if (--active_devs_zstd == 0)
+ crypto_unregister_acomp(&qat_acomp_zstd_native);
+ mutex_unlock(&algs_lock);
+}
+
+int qat_comp_algs_register(u32 caps)
+{
+ int ret;
+
+ ret = qat_comp_algs_register_deflate();
+ if (ret)
+ return ret;
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S) {
+ ret = qat_comp_algs_register_lz4s();
+ if (ret)
+ goto err_unregister_deflate;
+ }
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD) {
+ ret = qat_comp_algs_register_zstd();
+ if (ret)
+ goto err_unregister_lz4s;
+ }
+
+ return ret;
+
+err_unregister_lz4s:
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S)
+ qat_comp_algs_unregister_lz4s();
+err_unregister_deflate:
+ qat_comp_algs_unregister_deflate();
+
+ return ret;
+}
+
+void qat_comp_algs_unregister(u32 caps)
+{
+ qat_comp_algs_unregister_deflate();
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S)
+ qat_comp_algs_unregister_lz4s();
+
+ if (caps & ADF_ACCEL_CAPABILITIES_EXT_ZSTD)
+ qat_comp_algs_unregister_zstd();
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
index 18a1f33a6db9..f165d28aaaf4 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h
@@ -23,6 +23,7 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen,
fw_req->comn_mid.opaque_data = opaque;
req_pars->comp_len = slen;
req_pars->out_buffer_sz = dlen;
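+	/*
+	 * Scale the auto-select-best threshold by the input size in 16-byte
+	 * units (an assumption inferred from the arithmetic: the template
+	 * value appears to be a per-16-byte ratio).
+	 */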
+ fw_req->u3.asb_threshold.asb_value *= slen >> 4;
}
static inline void qat_comp_create_compression_req(void *ctx, void *req,
@@ -110,4 +111,12 @@ static inline u8 qat_comp_get_cmp_cnv_flag(void *resp)
return ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(flags);
}
+static inline u8 qat_comp_get_cmp_uncomp_flag(void *resp)
+{
+ struct icp_qat_fw_comp_resp *qat_resp = resp;
+ u8 flags = qat_resp->comn_resp.hdr_flags;
+
+ return ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(flags);
+}
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c
new file mode 100644
index 000000000000..62ec2d5c3ab8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2026 Intel Corporation */
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+#include <linux/unaligned.h>
+#include <linux/zstd.h>
+
+#include "qat_comp_zstd_utils.h"
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
+#define RUN_BITS (8 - ML_BITS)
+#define RUN_MASK ((1U << RUN_BITS) - 1)
+#define LZ4S_MINMATCH 2
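+
+/*
+ * LZ4s token layout, as decoded by qat_alg_dec_lz4s() below: the upper
+ * RUN_BITS of each token hold the literal length and the lower ML_BITS the
+ * match length, each extended by 255-valued continuation bytes when
+ * saturated; a 2-byte little-endian match offset follows the literals, and
+ * a non-zero match length is biased by LZ4S_MINMATCH.
+ */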
+
+/*
+ * ZSTD blocks can decompress to at most min(windowSize, 128KB) bytes.
+ * Insert explicit block delimiters to keep blocks within this limit.
+ */
+#define QAT_ZSTD_BLOCK_MAX ZSTD_BLOCKSIZE_MAX
+
+static int emit_delimiter(ZSTD_Sequence *out_seqs, size_t *seqs_idx,
+ size_t out_seqs_capacity, unsigned int lz4s_buff_size)
+{
+ if (*seqs_idx >= out_seqs_capacity - 1) {
+ pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n",
+ *seqs_idx, out_seqs_capacity, lz4s_buff_size);
+ return -EOVERFLOW;
+ }
+
+ out_seqs[*seqs_idx].offset = 0;
+ out_seqs[*seqs_idx].litLength = 0;
+ out_seqs[*seqs_idx].matchLength = 0;
+ (*seqs_idx)++;
+
+ return 0;
+}
+
+int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity,
+ unsigned char *lz4s_buff, unsigned int lz4s_buff_size,
+ unsigned char *literals, unsigned int *lit_len)
+{
+ unsigned char *end_ip = lz4s_buff + lz4s_buff_size;
+ unsigned char *start, *dest, *dest_end;
+ unsigned int hist_literal_len = 0;
+ unsigned char *ip = lz4s_buff;
+ size_t block_decomp_size = 0;
+ size_t seqs_idx = 0;
+ int ret;
+
+ *lit_len = 0;
+
+ if (!lz4s_buff_size)
+ return 0;
+
+ while (ip < end_ip) {
+ size_t literal_len = 0, match_len = 0;
+ const unsigned int token = *ip++;
+ size_t length = 0;
+ size_t offset = 0;
+
+ /* Get literal length */
+ length = token >> ML_BITS;
+ if (length == RUN_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+ length += s;
+ } while (s == 255);
+ }
+
+ literal_len = length;
+
+ start = ip;
+ dest = literals;
+ dest_end = literals + length;
+
+ do {
+ memcpy(dest, start, QAT_ZSTD_LIT_COPY_LEN);
+ dest += QAT_ZSTD_LIT_COPY_LEN;
+ start += QAT_ZSTD_LIT_COPY_LEN;
+ } while (dest < dest_end);
+
+ literals += length;
+ *lit_len += length;
+
+ ip += length;
+ if (ip == end_ip) {
+ literal_len += hist_literal_len;
+ /*
+ * If adding trailing literals would overflow the
+ * current block, close it first.
+ */
+ if (block_decomp_size + literal_len > QAT_ZSTD_BLOCK_MAX) {
+ ret = emit_delimiter(out_seqs, &seqs_idx,
+ out_seqs_capacity,
+ lz4s_buff_size);
+ if (ret)
+ return ret;
+ }
+ out_seqs[seqs_idx].litLength = literal_len;
+ out_seqs[seqs_idx].offset = offset;
+ out_seqs[seqs_idx].matchLength = match_len;
+ break;
+ }
+
+ offset = get_unaligned_le16(ip);
+ ip += 2;
+
+ length = token & ML_MASK;
+ if (length == ML_MASK) {
+ unsigned int s;
+
+ do {
+ s = *ip++;
+ length += s;
+ } while (s == 255);
+ }
+ if (length != 0) {
+ length += LZ4S_MINMATCH;
+ match_len = (unsigned short)length;
+ literal_len += hist_literal_len;
+
+ /*
+ * If this sequence would push the current block past
+ * the ZSTD maximum, close the block first.
+ */
+ if (block_decomp_size + literal_len + match_len > QAT_ZSTD_BLOCK_MAX) {
+ ret = emit_delimiter(out_seqs, &seqs_idx,
+ out_seqs_capacity,
+ lz4s_buff_size);
+ if (ret)
+ return ret;
+
+ block_decomp_size = 0;
+ }
+
+ out_seqs[seqs_idx].offset = offset;
+ out_seqs[seqs_idx].litLength = literal_len;
+ out_seqs[seqs_idx].matchLength = match_len;
+ hist_literal_len = 0;
+ seqs_idx++;
+ if (seqs_idx >= out_seqs_capacity - 1) {
+ pr_debug("QAT ZSTD: sequence overflow (seqs_idx:%zu, capacity:%zu, lz4s_size:%u)\n",
+ seqs_idx, out_seqs_capacity, lz4s_buff_size);
+ return -EOVERFLOW;
+ }
+
+ block_decomp_size += literal_len + match_len;
+ } else {
+ if (literal_len > 0) {
+ /*
+			 * A zero match length means this token carries only
+			 * literals; stash the literal length and fold it into
+			 * the next emitted sequence.
+ */
+ hist_literal_len += literal_len;
+ }
+ }
+ }
+
+ return seqs_idx + 1;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h
new file mode 100644
index 000000000000..55c7a1b9b848
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_zstd_utils.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2026 Intel Corporation */
+#ifndef QAT_COMP_ZSTD_UTILS_H_
+#define QAT_COMP_ZSTD_UTILS_H_
+#include <linux/zstd_lib.h>
+
+#define QAT_ZSTD_LIT_COPY_LEN 8
+
+int qat_alg_dec_lz4s(ZSTD_Sequence *out_seqs, size_t out_seqs_capacity,
+ unsigned char *lz4s_buff, unsigned int lz4s_buff_size,
+ unsigned char *literals, unsigned int *lit_len);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/qat_compression.c b/drivers/crypto/intel/qat/qat_common/qat_compression.c
index 53a4db5507ec..1424d7a9bcd3 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_compression.c
@@ -46,12 +46,14 @@ static int qat_compression_free_instances(struct adf_accel_dev *accel_dev)
return 0;
}
-struct qat_compression_instance *qat_compression_get_instance_node(int node)
+struct qat_compression_instance *qat_compression_get_instance_node(int node, int alg)
{
struct qat_compression_instance *inst = NULL;
+ struct adf_hw_device_data *hw_data = NULL;
struct adf_accel_dev *accel_dev = NULL;
unsigned long best = ~0;
struct list_head *itr;
+ u32 caps, mask;
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
@@ -61,6 +63,15 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
tmp_dev_node = dev_to_node(&GET_DEV(tmp_dev));
+ if (alg == QAT_ZSTD || alg == QAT_LZ4S) {
+ hw_data = tmp_dev->hw_device;
+ caps = hw_data->accel_capabilities_ext_mask;
+ mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD |
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
+ if (!(caps & mask))
+ continue;
+ }
+
if ((node == tmp_dev_node || tmp_dev_node < 0) &&
adf_dev_started(tmp_dev) && !list_empty(&tmp_dev->compression_list)) {
ctr = atomic_read(&tmp_dev->ref_count);
@@ -78,6 +89,16 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
struct adf_accel_dev *tmp_dev;
tmp_dev = list_entry(itr, struct adf_accel_dev, list);
+
+ if (alg == QAT_ZSTD || alg == QAT_LZ4S) {
+ hw_data = tmp_dev->hw_device;
+ caps = hw_data->accel_capabilities_ext_mask;
+ mask = ADF_ACCEL_CAPABILITIES_EXT_ZSTD |
+ ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S;
+ if (!(caps & mask))
+ continue;
+ }
+
if (adf_dev_started(tmp_dev) &&
!list_empty(&tmp_dev->compression_list)) {
accel_dev = tmp_dev;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index 7a6ba6f22e3e..1c3d1311f1c7 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -9,17 +9,18 @@
#include "icp_qat_hal.h"
#include "icp_qat_uclo.h"
-#define BAD_REGADDR 0xffff
-#define MAX_RETRY_TIMES 10000
-#define INIT_CTX_ARB_VALUE 0x0
-#define INIT_CTX_ENABLE_VALUE 0x0
-#define INIT_PC_VALUE 0x0
-#define INIT_WAKEUP_EVENTS_VALUE 0x1
-#define INIT_SIG_EVENTS_VALUE 0x1
-#define INIT_CCENABLE_VALUE 0x2000
-#define RST_CSR_QAT_LSB 20
-#define RST_CSR_AE_LSB 0
-#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+#define BAD_REGADDR 0xffff
+#define MAX_RETRY_TIMES 10000
+#define INIT_CTX_ARB_VALUE 0x0
+#define INIT_CTX_ENABLE_VALUE 0x0
+#define INIT_PC_VALUE 0x0
+#define INIT_WAKEUP_EVENTS_VALUE 0x1
+#define INIT_SIG_EVENTS_VALUE 0x1
+#define INIT_CCENABLE_VALUE 0x2000
+#define RST_CSR_QAT_LSB 20
+#define RST_CSR_AE_LSB 0
+#define MC_TIMESTAMP_ENABLE (0x1 << 7)
+#define MIN_RESET_DELAY_US 3
#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
(~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
@@ -713,8 +714,10 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
handle->chip_info->wakeup_event_val = 0x80000000;
handle->chip_info->fw_auth = true;
handle->chip_info->css_3k = true;
- if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
+ if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) {
handle->chip_info->dual_sign = true;
+ handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US;
+ }
handle->chip_info->tgroup_share_ustore = true;
handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index e61a367b0d17..a00ca2a0900f 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -12,6 +12,7 @@
#include <linux/pci_ids.h>
#include <linux/wordpart.h>
#include "adf_accel_devices.h"
+#include "adf_anti_rb.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
@@ -1230,10 +1231,11 @@ static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_fw_auth_desc *desc)
{
- u32 fcu_sts, retry = 0;
+ unsigned int retries = FW_AUTH_MAX_RETRY;
u32 fcu_ctl_csr, fcu_sts_csr;
u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
u64 bus_addr;
+ u32 fcu_sts;
bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
- sizeof(struct icp_qat_auth_chunk);
@@ -1248,17 +1250,32 @@ static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
do {
+ int arb_ret;
+
msleep(FW_AUTH_WAIT_PERIOD);
fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
+
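+		/*
+		 * Anti-rollback arbitration: -EAGAIN means a verification
+		 * failure may be transient, so re-issue the AUTH command and
+		 * retry; any other error aborts authentication.
+		 */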
+ arb_ret = adf_anti_rb_check(handle->pci_dev);
+ if (arb_ret == -EAGAIN) {
+ if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) {
+ SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
+ continue;
+ }
+ } else if (arb_ret) {
+ goto auth_fail;
+ }
+
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
goto auth_fail;
+
if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
return 0;
- } while (retry++ < FW_AUTH_MAX_RETRY);
+ } while (--retries);
+
auth_fail:
- pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n",
- fcu_sts & FCU_AUTH_STS_MASK, retry);
+ pr_err("authentication error (FCU_STATUS = 0x%x)\n", fcu_sts & FCU_AUTH_STS_MASK);
+
return -EINVAL;
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index 5103d36cdfdb..2f203042d9bd 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -847,8 +847,7 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
*len = creq->len;
memcpy(hash, creq->state, digsize);
- memset(cache, 0, blocksize);
- memcpy(cache, creq->cache, creq->cache_ptr);
+ memcpy_and_pad(cache, blocksize, creq->cache, creq->cache_ptr, 0);
return 0;
}
diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
index 09e6a8474d1a..e0f38d32bc93 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c
@@ -10,6 +10,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
@@ -509,13 +510,12 @@ EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
char *buf, int size)
{
- if (eng_grp->mirror.is_ena) {
+ if (eng_grp->mirror.is_ena)
scnprintf(buf, size, "%s (shared with engine_group%d)",
eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
eng_grp->mirror.idx);
- } else {
- scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
- }
+ else
+ strscpy(buf, eng_grp->ucode[0].ver_str, size);
}
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index b61f2545e165..a61208cbcd27 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -115,10 +115,7 @@ void *nx842_crypto_alloc_ctx(struct nx842_driver *driver)
ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
- kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
- kfree(ctx);
+ nx842_crypto_free_ctx(ctx);
return ERR_PTR(-ENOMEM);
}
@@ -131,8 +128,9 @@ void nx842_crypto_free_ctx(void *p)
struct nx842_crypto_ctx *ctx = p;
kfree(ctx->wmem);
- free_page((unsigned long)ctx->sbounce);
- free_page((unsigned long)ctx->dbounce);
+ free_pages((unsigned long)ctx->sbounce, BOUNCE_BUFFER_ORDER);
+ free_pages((unsigned long)ctx->dbounce, BOUNCE_BUFFER_ORDER);
+ kfree(ctx);
}
EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx);
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index f5e2c82ba876..c401cdf1a453 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -159,15 +159,15 @@ struct nx842_crypto_header_group {
struct nx842_crypto_header {
/* New members MUST be added within the struct_group() macro below. */
- struct_group_tagged(nx842_crypto_header_hdr, hdr,
+ __struct_group(nx842_crypto_header_hdr, hdr, __packed,
__be16 magic; /* NX842_CRYPTO_MAGIC */
__be16 ignore; /* decompressed end bytes to ignore */
u8 groups; /* total groups in this header */
);
- struct nx842_crypto_header_group group[];
+ struct nx842_crypto_header_group group[] __counted_by(groups);
} __packed;
static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
- "struct member likely outside of struct_group_tagged()");
+ "struct member likely outside of __struct_group()");
#define NX842_CRYPTO_GROUP_MAX (0x20)
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6a3c7f9277cf..b8c416c5ee70 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -147,7 +147,6 @@ struct omap_sham_reqctx {
u8 digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
size_t digcnt;
size_t bufcnt;
- size_t buflen;
/* walk state */
struct scatterlist *sg;
@@ -156,7 +155,7 @@ struct omap_sham_reqctx {
int sg_len;
unsigned int total; /* total request */
- u8 buffer[] OMAP_ALIGNED;
+ u8 buffer[BUFLEN] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
@@ -891,7 +890,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
if (hash_later < 0)
hash_later = 0;
- if (hash_later && hash_later <= rctx->buflen) {
+ if (hash_later && hash_later <= sizeof(rctx->buffer)) {
scatterwalk_map_and_copy(rctx->buffer,
req->src,
req->nbytes - hash_later,
@@ -902,7 +901,7 @@ static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
rctx->bufcnt = 0;
}
- if (hash_later > rctx->buflen)
+ if (hash_later > sizeof(rctx->buffer))
set_bit(FLAGS_HUGE, &rctx->dd->flags);
rctx->total = min(nbytes, rctx->total);
@@ -987,7 +986,6 @@ static int omap_sham_init(struct ahash_request *req)
ctx->digcnt = 0;
ctx->total = 0;
ctx->offset = 0;
- ctx->buflen = BUFLEN;
if (tctx->flags & BIT(FLAGS_HMAC)) {
if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
@@ -1200,7 +1198,7 @@ static int omap_sham_update(struct ahash_request *req)
if (!req->nbytes)
return 0;
- if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
+ if (ctx->bufcnt + req->nbytes <= sizeof(ctx->buffer)) {
scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
0, req->nbytes, 0);
ctx->bufcnt += req->nbytes;
@@ -1333,7 +1331,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct omap_sham_reqctx) + BUFLEN);
+ sizeof(struct omap_sham_reqctx));
if (alg_base) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -1404,7 +1402,8 @@ static int omap_sham_export(struct ahash_request *req, void *out)
{
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
- memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
+ memcpy(out, rctx, offsetof(struct omap_sham_reqctx, buffer) +
+ rctx->bufcnt);
return 0;
}
@@ -1414,7 +1413,8 @@ static int omap_sham_import(struct ahash_request *req, const void *in)
struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
const struct omap_sham_reqctx *ctx_in = in;
- memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
+ memcpy(rctx, in, offsetof(struct omap_sham_reqctx, buffer) +
+ ctx_in->bufcnt);
return 0;
}
@@ -2146,8 +2146,7 @@ static int omap_sham_probe(struct platform_device *pdev)
alg = &ealg->base;
alg->export = omap_sham_export;
alg->import = omap_sham_import;
- alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
- BUFLEN;
+ alg->halg.statesize = sizeof(struct omap_sham_reqctx);
err = crypto_engine_register_ahash(ealg);
if (err)
goto err_algs;
diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c
index 846e1d42775d..9cb11fada2c4 100644
--- a/drivers/crypto/qce/aead.c
+++ b/drivers/crypto/qce/aead.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/string.h>
#include <crypto/gcm.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
@@ -35,7 +36,6 @@ static void qce_aead_done(void *data)
u32 status;
unsigned int totallen;
unsigned char tag[SHA256_DIGEST_SIZE] = {0};
- int ret = 0;
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
@@ -79,8 +79,7 @@ static void qce_aead_done(void *data)
} else if (!IS_CCM(rctx->flags)) {
totallen = req->cryptlen + req->assoclen - ctx->authsize;
scatterwalk_map_and_copy(tag, req->src, totallen, ctx->authsize, 0);
- ret = memcmp(result_buf->auth_iv, tag, ctx->authsize);
- if (ret) {
+ if (memcmp(result_buf->auth_iv, tag, ctx->authsize)) {
pr_err("Bad message error\n");
error = -EBADMSG;
}
@@ -144,16 +143,12 @@ qce_aead_prepare_dst_buf(struct aead_request *req)
sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->adata_sg,
rctx->assoclen);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ if (IS_ERR(sg))
goto dst_tbl_free;
- }
/* dst buffer */
sg = qce_sgtable_add(&rctx->dst_tbl, msg_sg, rctx->cryptlen);
- if (IS_ERR(sg)) {
- ret = PTR_ERR(sg);
+ if (IS_ERR(sg))
goto dst_tbl_free;
- }
totallen = rctx->cryptlen + rctx->assoclen;
} else {
if (totallen) {
@@ -642,8 +637,8 @@ static int qce_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int
memcpy(ctx->enc_key, authenc_keys.enckey, authenc_keys.enckeylen);
- memset(ctx->auth_key, 0, sizeof(ctx->auth_key));
- memcpy(ctx->auth_key, authenc_keys.authkey, authenc_keys.authkeylen);
+ memcpy_and_pad(ctx->auth_key, sizeof(ctx->auth_key),
+ authenc_keys.authkey, authenc_keys.authkeylen, 0);
return crypto_aead_setkey(ctx->fallback, key, keylen);
}
@@ -768,9 +763,8 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi
alg = &tmpl->alg.aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_blocksize = def->blocksize;
alg->chunksize = def->chunksize;
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index 04253a8d3340..54a78a57f630 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -280,17 +280,17 @@ static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
- u8 swap[QCE_AES_IV_LENGTH];
- u32 i, j;
+ u8 swap[QCE_AES_IV_LENGTH] = {0};
+ unsigned int i, offset;
if (ivsize > QCE_AES_IV_LENGTH)
return;
- memset(swap, 0, QCE_AES_IV_LENGTH);
+ offset = QCE_AES_IV_LENGTH - ivsize;
- for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
- i < QCE_AES_IV_LENGTH; i++, j--)
- swap[i] = src[j];
+ /* Reverse and right-align IV bytes. */
+ for (i = 0; i < ivsize; i++)
+ swap[offset + i] = src[ivsize - 1 - i];
qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 402e4e64347d..1b37121cbcdc 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/string.h>
#include <crypto/internal/hash.h>
#include "common.h"
@@ -489,9 +490,8 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base->cra_module = THIS_MODULE;
base->cra_init = qce_ahash_cra_init;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(base->cra_name, def->name);
+ strscpy(base->cra_driver_name, def->drv_name);
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 4ad3a1702010..db0b648a56eb 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
@@ -446,9 +447,8 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
alg = &tmpl->alg.skcipher;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- def->drv_name);
+ strscpy(alg->base.cra_name, def->name);
+ strscpy(alg->base.cra_driver_name, def->drv_name);
alg->base.cra_blocksize = def->blocksize;
alg->chunksize = def->chunksize;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index eece1ff6c62f..bdda7b39af85 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2131,7 +2131,7 @@ static struct skcipher_alg algs[] = {
static int s5p_aes_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- int i, j, err;
+ int i, err;
const struct samsung_aes_variant *variant;
struct s5p_aes_dev *pdata;
struct resource *res;
@@ -2237,8 +2237,11 @@ static int s5p_aes_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++) {
err = crypto_register_skcipher(&algs[i]);
- if (err)
+ if (err) {
+ dev_err(dev, "can't register '%s': %d\n",
+ algs[i].base.cra_name, err);
goto err_algs;
+ }
}
if (pdata->use_hash) {
@@ -2265,20 +2268,12 @@ static int s5p_aes_probe(struct platform_device *pdev)
return 0;
err_hash:
- for (j = hash_i - 1; j >= 0; j--)
- crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
-
+ crypto_unregister_ahashes(algs_sha1_md5_sha256, hash_i);
tasklet_kill(&pdata->hash_tasklet);
res->end -= 0x300;
err_algs:
- if (i < ARRAY_SIZE(algs))
- dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
- err);
-
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&algs[j]);
-
+ crypto_unregister_skciphers(algs, i);
tasklet_kill(&pdata->tasklet);
err_irq:
@@ -2294,15 +2289,13 @@ err_clk:
static void s5p_aes_remove(struct platform_device *pdev)
{
struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- crypto_unregister_skcipher(&algs[i]);
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
tasklet_kill(&pdata->tasklet);
if (pdata->use_hash) {
- for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
- crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
+ crypto_unregister_ahashes(algs_sha1_md5_sha256,
+ ARRAY_SIZE(algs_sha1_md5_sha256));
pdata->res->end -= 0x300;
tasklet_kill(&pdata->hash_tasklet);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 3c9b3f679461..b79877099942 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -361,19 +361,13 @@ static int stm32_cryp_it_start(struct stm32_cryp *cryp);
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
- struct stm32_cryp *tmp, *cryp = NULL;
+ struct stm32_cryp *cryp;
spin_lock_bh(&cryp_list.lock);
- if (!ctx->cryp) {
- list_for_each_entry(tmp, &cryp_list.dev_list, list) {
- cryp = tmp;
- break;
- }
- ctx->cryp = cryp;
- } else {
- cryp = ctx->cryp;
- }
-
+ if (!ctx->cryp)
+ ctx->cryp = list_first_entry_or_null(&cryp_list.dev_list,
+ struct stm32_cryp, list);
+ cryp = ctx->cryp;
spin_unlock_bh(&cryp_list.lock);
return cryp;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index d60147a7594e..dada5951082c 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -792,19 +792,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
- struct stm32_hash_dev *hdev = NULL, *tmp;
+ struct stm32_hash_dev *hdev;
spin_lock_bh(&stm32_hash.lock);
- if (!ctx->hdev) {
- list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
- hdev = tmp;
- break;
- }
- ctx->hdev = hdev;
- } else {
- hdev = ctx->hdev;
- }
-
+ if (!ctx->hdev)
+ ctx->hdev = list_first_entry_or_null(&stm32_hash.dev_list,
+ struct stm32_hash_dev, list);
+ hdev = ctx->hdev;
spin_unlock_bh(&stm32_hash.lock);
return hdev;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index e8c0db687c57..bc61d0fe3514 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -12,6 +12,7 @@
* All rights reserved.
*/
+#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -868,20 +869,28 @@ struct talitos_ahash_req_ctx {
u8 buf[2][HASH_MAX_BLOCK_SIZE];
int buf_idx;
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
+ unsigned int last_request;
unsigned int to_hash_later;
unsigned int nbuf;
struct scatterlist bufsl[2];
struct scatterlist *psrc;
+
+ struct scatterlist request_bufsl[2];
+ struct ahash_request *areq;
+ struct scatterlist *request_sl;
+ unsigned int remaining_ahash_request_bytes;
+ unsigned int current_ahash_request_bytes;
+ struct work_struct sec1_ahash_process_remaining;
};
struct talitos_export_state {
u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
u8 buf[HASH_MAX_BLOCK_SIZE];
unsigned int swinit;
- unsigned int first;
- unsigned int last;
+ unsigned int first_desc;
+ unsigned int last_desc;
unsigned int to_hash_later;
unsigned int nbuf;
};
@@ -1713,7 +1722,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
if (desc->next_desc &&
desc->ptr[5].ptr != desc2->ptr[5].ptr)
unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
memcpy(areq->result, req_ctx->hw_context,
crypto_ahash_digestsize(tfm));
@@ -1750,7 +1759,7 @@ static void ahash_done(struct device *dev,
container_of(desc, struct talitos_edesc, desc);
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- if (!req_ctx->last && req_ctx->to_hash_later) {
+ if (!req_ctx->last_desc && req_ctx->to_hash_later) {
/* Position any partial block for next update/final/finup */
req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
req_ctx->nbuf = req_ctx->to_hash_later;
@@ -1759,7 +1768,20 @@ static void ahash_done(struct device *dev,
kfree(edesc);
- ahash_request_complete(areq, err);
+ if (err) {
+ ahash_request_complete(areq, err);
+ return;
+ }
+
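+	/*
+	 * SEC1 limits a single descriptor to TALITOS1_MAX_DATA_LEN bytes, so
+	 * oversized requests are hashed in chunks: account for the chunk
+	 * that just completed and, if data remains, schedule the next one
+	 * from process context.
+	 */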
+ req_ctx->remaining_ahash_request_bytes -=
+ req_ctx->current_ahash_request_bytes;
+
+ if (!req_ctx->remaining_ahash_request_bytes) {
+ ahash_request_complete(areq, 0);
+ return;
+ }
+
+ schedule_work(&req_ctx->sec1_ahash_process_remaining);
}
/*
@@ -1803,7 +1825,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* first DWORD empty */
/* hash context in */
- if (!req_ctx->first || req_ctx->swinit) {
+ if (!req_ctx->first_desc || req_ctx->swinit) {
map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1811,7 +1833,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
}
/* Indicate next op is not the first. */
- req_ctx->first = 0;
+ req_ctx->first_desc = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1844,7 +1866,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* fifth DWORD empty */
/* hash/HMAC out -or- hash context out */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr(dev, &desc->ptr[5],
crypto_ahash_digestsize(tfm),
req_ctx->hw_context, DMA_FROM_DEVICE);
@@ -1886,7 +1908,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
if (sg_count > 1)
sync_needed = true;
copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
req_ctx->hw_context_size,
req_ctx->hw_context,
@@ -1925,60 +1947,7 @@ static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
nbytes, 0, 0, 0, areq->base.flags, false);
}
-static int ahash_init(struct ahash_request *areq)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
- struct device *dev = ctx->dev;
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- unsigned int size;
- dma_addr_t dma;
-
- /* Initialize the context */
- req_ctx->buf_idx = 0;
- req_ctx->nbuf = 0;
- req_ctx->first = 1; /* first indicates h/w must init its context */
- req_ctx->swinit = 0; /* assume h/w init of context */
- size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
- ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
- : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
- req_ctx->hw_context_size = size;
-
- dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
-
- return 0;
-}
-
-/*
- * on h/w without explicit sha224 support, we initialize h/w context
- * manually with sha224 constants, and tell it to run sha256.
- */
-static int ahash_init_sha224_swinit(struct ahash_request *areq)
-{
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
-
- req_ctx->hw_context[0] = SHA224_H0;
- req_ctx->hw_context[1] = SHA224_H1;
- req_ctx->hw_context[2] = SHA224_H2;
- req_ctx->hw_context[3] = SHA224_H3;
- req_ctx->hw_context[4] = SHA224_H4;
- req_ctx->hw_context[5] = SHA224_H5;
- req_ctx->hw_context[6] = SHA224_H6;
- req_ctx->hw_context[7] = SHA224_H7;
-
- /* init 64-bit count */
- req_ctx->hw_context[8] = 0;
- req_ctx->hw_context[9] = 0;
-
- ahash_init(areq);
- req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
-
- return 0;
-}
-
-static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+static int ahash_process_req_one(struct ahash_request *areq, unsigned int nbytes)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
@@ -1995,14 +1964,14 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
bool is_sec1 = has_ftr_sec1(priv);
u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
- if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
+ if (!req_ctx->last_desc && (nbytes + req_ctx->nbuf <= blocksize)) {
/* Buffer up to one whole block */
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, nbytes);
req_ctx->nbuf += nbytes;
return 0;
@@ -2012,7 +1981,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
nbytes_to_hash = nbytes + req_ctx->nbuf;
to_hash_later = nbytes_to_hash & (blocksize - 1);
- if (req_ctx->last)
+ if (req_ctx->last_desc)
to_hash_later = 0;
else if (to_hash_later)
/* There is a partial block. Hash the full block(s) now */
@@ -2029,7 +1998,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
sg_init_table(req_ctx->bufsl, nsg);
sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
if (nsg > 1)
- sg_chain(req_ctx->bufsl, 2, areq->src);
+ sg_chain(req_ctx->bufsl, 2, req_ctx->request_sl);
req_ctx->psrc = req_ctx->bufsl;
} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
int offset;
@@ -2038,26 +2007,26 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
offset = blocksize - req_ctx->nbuf;
else
offset = nbytes_to_hash - req_ctx->nbuf;
- nents = sg_nents_for_len(areq->src, offset);
+ nents = sg_nents_for_len(req_ctx->request_sl, offset);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_copy_to_buffer(areq->src, nents,
+ sg_copy_to_buffer(req_ctx->request_sl, nents,
ctx_buf + req_ctx->nbuf, offset);
req_ctx->nbuf += offset;
- req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+ req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, req_ctx->request_sl,
offset);
} else
- req_ctx->psrc = areq->src;
+ req_ctx->psrc = req_ctx->request_sl;
if (to_hash_later) {
- nents = sg_nents_for_len(areq->src, nbytes);
+ nents = sg_nents_for_len(req_ctx->request_sl, nbytes);
if (nents < 0) {
dev_err(dev, "Invalid number of src SG.\n");
return nents;
}
- sg_pcopy_to_buffer(areq->src, nents,
+ sg_pcopy_to_buffer(req_ctx->request_sl, nents,
req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
to_hash_later,
nbytes - to_hash_later);
@@ -2065,36 +2034,145 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
req_ctx->to_hash_later = to_hash_later;
/* Allocate extended descriptor */
- edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
+ edesc = ahash_edesc_alloc(req_ctx->areq, nbytes_to_hash);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
edesc->desc.hdr = ctx->desc_hdr_template;
/* On last one, request SEC to pad; otherwise continue */
- if (req_ctx->last)
+ if (req_ctx->last_desc)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
else
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
/* request SEC to INIT hash. */
- if (req_ctx->first && !req_ctx->swinit)
+ if (req_ctx->first_desc && !req_ctx->swinit)
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
/* When the tfm context has a keylen, it's an HMAC.
* A first or last (ie. not middle) descriptor must request HMAC.
*/
- if (ctx->keylen && (req_ctx->first || req_ctx->last))
+ if (ctx->keylen && (req_ctx->first_desc || req_ctx->last_desc))
edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
- return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
+ return common_nonsnoop_hash(edesc, req_ctx->areq, nbytes_to_hash, ahash_done);
+}
+
+static void sec1_ahash_process_remaining(struct work_struct *work)
+{
+ struct talitos_ahash_req_ctx *req_ctx =
+ container_of(work, struct talitos_ahash_req_ctx,
+ sec1_ahash_process_remaining);
+ int err = 0;
+
+ req_ctx->request_sl = scatterwalk_ffwd(req_ctx->request_bufsl,
+ req_ctx->request_sl, TALITOS1_MAX_DATA_LEN);
+
+ if (req_ctx->remaining_ahash_request_bytes > TALITOS1_MAX_DATA_LEN) {
+ req_ctx->current_ahash_request_bytes = TALITOS1_MAX_DATA_LEN;
+ } else {
+ req_ctx->current_ahash_request_bytes =
+ req_ctx->remaining_ahash_request_bytes;
+
+ if (req_ctx->last_request)
+ req_ctx->last_desc = 1;
+ }
+
+ err = ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+
+ if (err != -EINPROGRESS)
+ ahash_request_complete(req_ctx->areq, err);
+}
+
+static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+
+ req_ctx->areq = areq;
+ req_ctx->request_sl = areq->src;
+ req_ctx->remaining_ahash_request_bytes = nbytes;
+
+ if (is_sec1) {
+ if (nbytes > TALITOS1_MAX_DATA_LEN)
+ nbytes = TALITOS1_MAX_DATA_LEN;
+ else if (req_ctx->last_request)
+ req_ctx->last_desc = 1;
+ }
+
+ req_ctx->current_ahash_request_bytes = nbytes;
+
+ return ahash_process_req_one(req_ctx->areq,
+ req_ctx->current_ahash_request_bytes);
+}
+
+static int ahash_init(struct ahash_request *areq)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct device *dev = ctx->dev;
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+ unsigned int size;
+ dma_addr_t dma;
+
+ /* Initialize the context */
+ req_ctx->buf_idx = 0;
+ req_ctx->nbuf = 0;
+ req_ctx->first_desc = 1; /* first_desc indicates h/w must init its context */
+ req_ctx->swinit = 0; /* assume h/w init of context */
+ size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+ ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+ : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+ req_ctx->hw_context_size = size;
+ req_ctx->last_request = 0;
+ req_ctx->last_desc = 0;
+ INIT_WORK(&req_ctx->sec1_ahash_process_remaining, sec1_ahash_process_remaining);
+
+ dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+/*
+ * on h/w without explicit sha224 support, we initialize h/w context
+ * manually with sha224 constants, and tell it to run sha256.
+ */
+static int ahash_init_sha224_swinit(struct ahash_request *areq)
+{
+ struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+
+ req_ctx->hw_context[0] = SHA224_H0;
+ req_ctx->hw_context[1] = SHA224_H1;
+ req_ctx->hw_context[2] = SHA224_H2;
+ req_ctx->hw_context[3] = SHA224_H3;
+ req_ctx->hw_context[4] = SHA224_H4;
+ req_ctx->hw_context[5] = SHA224_H5;
+ req_ctx->hw_context[6] = SHA224_H6;
+ req_ctx->hw_context[7] = SHA224_H7;
+
+ /* init 64-bit count */
+ req_ctx->hw_context[8] = 0;
+ req_ctx->hw_context[9] = 0;
+
+ ahash_init(areq);
+ req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
+
+ return 0;
}
static int ahash_update(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 0;
+ req_ctx->last_request = 0;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2103,7 +2181,7 @@ static int ahash_final(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, 0);
}
@@ -2112,7 +2190,7 @@ static int ahash_finup(struct ahash_request *areq)
{
struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- req_ctx->last = 1;
+ req_ctx->last_request = 1;
return ahash_process_req(areq, areq->nbytes);
}
@@ -2146,8 +2224,8 @@ static int ahash_export(struct ahash_request *areq, void *out)
req_ctx->hw_context_size);
memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
export->swinit = req_ctx->swinit;
- export->first = req_ctx->first;
- export->last = req_ctx->last;
+ export->first_desc = req_ctx->first_desc;
+ export->last_desc = req_ctx->last_desc;
export->to_hash_later = req_ctx->to_hash_later;
export->nbuf = req_ctx->nbuf;
@@ -2172,8 +2250,8 @@ static int ahash_import(struct ahash_request *areq, const void *in)
memcpy(req_ctx->hw_context, export->hw_context, size);
memcpy(req_ctx->buf[0], export->buf, export->nbuf);
req_ctx->swinit = export->swinit;
- req_ctx->first = export->first;
- req_ctx->last = export->last;
+ req_ctx->first_desc = export->first_desc;
+ req_ctx->last_desc = export->last_desc;
req_ctx->to_hash_later = export->to_hash_later;
req_ctx->nbuf = export->nbuf;
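The net effect of the talitos rework above is that one ahash request is now split into several hardware descriptors whenever it exceeds the SEC1 per-descriptor data limit, with the remainder resubmitted from a work item. A minimal sketch of the chunking arithmetic, assuming the driver's TALITOS1_MAX_DATA_LEN limit; submit_one_desc() is a hypothetical stand-in for ahash_process_req_one(), not part of the patch:

#include <linux/minmax.h>

/* Illustrative only: walk an nbytes-long request in SEC1-sized chunks. */
static void process_in_chunks(unsigned int nbytes, bool last_request)
{
	unsigned int remaining = nbytes;

	while (remaining) {
		unsigned int chunk = min_t(unsigned int, remaining,
					   TALITOS1_MAX_DATA_LEN);
		/* only the final chunk of the final request pads the hash */
		bool last_desc = last_request && chunk == remaining;

		submit_one_desc(chunk, last_desc);	/* hypothetical helper */
		remaining -= chunk;
	}
}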
diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c
index 9210cceb4b7b..30c78afe3dea 100644
--- a/drivers/crypto/tegra/tegra-se-aes.c
+++ b/drivers/crypto/tegra/tegra-se-aes.c
@@ -4,6 +4,7 @@
* Crypto driver to handle block cipher algorithms using NVIDIA Security Engine.
*/
+#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -333,7 +334,9 @@ out:
tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
out_finalize:
+ local_bh_disable();
crypto_finalize_skcipher_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1262,7 +1265,9 @@ out_free_inbuf:
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
+ local_bh_disable();
crypto_finalize_aead_request(ctx->se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1348,7 +1353,9 @@ out_free_inbuf:
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
+ local_bh_disable();
crypto_finalize_aead_request(ctx->se->engine, req, ret);
+ local_bh_enable();
return 0;
}
@@ -1746,7 +1753,9 @@ out:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
+ local_bh_disable();
crypto_finalize_hash_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
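All of the tegra hunks above apply the same pattern: crypto_finalize_*_request() can invoke the user's completion callback, and these paths may be reached from task context, so the finalization is bracketed with local_bh_disable()/local_bh_enable() to give the callback the softirq-like context it expects. A generic sketch of the pattern (hypothetical wrapper around real APIs):

#include <crypto/engine.h>
#include <linux/bottom_half.h>

/* Hypothetical wrapper showing the finalize-under-BH-disable pattern. */
static void finalize_skcipher_bh(struct crypto_engine *engine,
				 struct skcipher_request *req, int err)
{
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
}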
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index 06bb5bf0fa33..23d549801612 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -4,6 +4,7 @@
* Crypto driver to handle HASH algorithms using NVIDIA Security Engine.
*/
+#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
@@ -546,7 +547,9 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
+ local_bh_disable();
crypto_finalize_hash_request(se->engine, req, ret);
+ local_bh_enable();
return 0;
}
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
index a3692ceec49b..1a3a571ac8ce 100644
--- a/drivers/crypto/ti/Kconfig
+++ b/drivers/crypto/ti/Kconfig
@@ -6,7 +6,11 @@ config CRYPTO_DEV_TI_DTHEV2
select CRYPTO_SKCIPHER
select CRYPTO_ECB
select CRYPTO_CBC
+ select CRYPTO_CTR
select CRYPTO_XTS
+ select CRYPTO_GCM
+ select CRYPTO_CCM
+ select SG_SPLIT
help
This enables support for the TI DTHE V2 hw cryptography engine
which can be found on TI K3 SOCs. Selecting this enables use
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
index 156729ccc50e..eb5cd902dfb5 100644
--- a/drivers/crypto/ti/dthev2-aes.c
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -10,15 +10,18 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "dthev2-common.h"
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/scatterlist.h>
/* Registers */
@@ -53,6 +56,7 @@
#define DTHE_P_AES_C_LENGTH_1 0x0058
#define DTHE_P_AES_AUTH_LENGTH 0x005C
#define DTHE_P_AES_DATA_IN_OUT 0x0060
+#define DTHE_P_AES_TAG_OUT 0x0070
#define DTHE_P_AES_SYSCONFIG 0x0084
#define DTHE_P_AES_IRQSTATUS 0x008C
@@ -63,7 +67,10 @@
enum aes_ctrl_mode_masks {
AES_CTRL_ECB_MASK = 0x00,
AES_CTRL_CBC_MASK = BIT(5),
+ AES_CTRL_CTR_MASK = BIT(6),
AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
+ AES_CTRL_GCM_MASK = BIT(17) | BIT(16) | BIT(6),
+ AES_CTRL_CCM_MASK = BIT(18) | BIT(6),
};
#define DTHE_AES_CTRL_MODE_CLEAR_MASK ~GENMASK(28, 5)
@@ -74,6 +81,13 @@ enum aes_ctrl_mode_masks {
#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+#define DTHE_AES_CTRL_CTR_WIDTH_128B (BIT(7) | BIT(8))
+
+#define DTHE_AES_CCM_L_FROM_IV_MASK GENMASK(2, 0)
+#define DTHE_AES_CCM_M_BITS GENMASK(2, 0)
+#define DTHE_AES_CTRL_CCM_L_FIELD_MASK GENMASK(21, 19)
+#define DTHE_AES_CTRL_CCM_M_FIELD_MASK GENMASK(24, 22)
+
#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
@@ -88,6 +102,10 @@ enum aes_ctrl_mode_masks {
#define AES_IV_SIZE AES_BLOCK_SIZE
#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
#define AES_IV_WORDS AES_BLOCK_WORDS
+#define DTHE_AES_GCM_AAD_MAXLEN (BIT_ULL(32) - 1)
+#define DTHE_AES_CCM_AAD_MAXLEN (BIT(16) - BIT(8))
+#define DTHE_AES_CCM_CRYPT_MAXLEN (BIT_ULL(61) - 1)
+#define POLL_TIMEOUT_INTERVAL HZ
static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
{
@@ -100,25 +118,27 @@ static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
return 0;
}
-static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+static int dthe_cipher_init_tfm_fallback(struct crypto_skcipher *tfm)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct dthe_data *dev_data = dthe_get_dev(ctx);
+ const char *alg_name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
ctx->dev_data = dev_data;
ctx->keylen = 0;
- ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+ ctx->skcipher_fb = crypto_alloc_sync_skcipher(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->skcipher_fb)) {
- dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+ dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n",
+ alg_name);
return PTR_ERR(ctx->skcipher_fb);
}
return 0;
}
-static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+static void dthe_cipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -156,6 +176,24 @@ static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig
return dthe_aes_setkey(tfm, key, keylen);
}
+static int dthe_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret = dthe_aes_setkey(tfm, key, keylen);
+
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_CTR;
+
+ crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -171,8 +209,8 @@ static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsig
crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
- crypto_skcipher_get_flags(tfm) &
- CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
}
@@ -236,9 +274,23 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
case DTHE_AES_CBC:
ctrl_val |= AES_CTRL_CBC_MASK;
break;
+ case DTHE_AES_CTR:
+ ctrl_val |= AES_CTRL_CTR_MASK;
+ ctrl_val |= DTHE_AES_CTRL_CTR_WIDTH_128B;
+ break;
case DTHE_AES_XTS:
ctrl_val |= AES_CTRL_XTS_MASK;
break;
+ case DTHE_AES_GCM:
+ ctrl_val |= AES_CTRL_GCM_MASK;
+ break;
+ case DTHE_AES_CCM:
+ ctrl_val |= AES_CTRL_CCM_MASK;
+ ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_L_FIELD_MASK,
+ (iv_in[0] & DTHE_AES_CCM_L_FROM_IV_MASK));
+ ctrl_val |= FIELD_PREP(DTHE_AES_CTRL_CCM_M_FIELD_MASK,
+ ((ctx->authsize - 2) >> 1) & DTHE_AES_CCM_M_BITS);
+ break;
}
if (iv_in) {
@@ -251,6 +303,22 @@ static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
}
+static int dthe_aes_do_fallback(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+ skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+}
+
static void dthe_aes_dma_in_callback(void *data)
{
struct skcipher_request *req = (struct skcipher_request *)data;
@@ -271,7 +339,7 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
struct scatterlist *dst = req->dst;
int src_nents = sg_nents_for_len(src, len);
- int dst_nents;
+ int dst_nents = sg_nents_for_len(dst, len);
int src_mapped_nents;
int dst_mapped_nents;
@@ -305,25 +373,62 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
dst_dir = DMA_FROM_DEVICE;
}
+ /*
+ * CTR mode can operate on any input length, but the hardware
+ * requires the input length to be a multiple of the block size,
+ * so the driver must pad the final partial block itself.
+ */
+ if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) {
+ unsigned int pad_size = AES_BLOCK_SIZE - (req->cryptlen % AES_BLOCK_SIZE);
+ u8 *pad_buf = rctx->padding;
+ struct scatterlist *sg;
+
+ len += pad_size;
+ src_nents++;
+ dst_nents++;
+
+ src = kmalloc_array(src_nents, sizeof(*src), GFP_ATOMIC);
+ if (!src) {
+ ret = -ENOMEM;
+ goto aes_ctr_src_alloc_err;
+ }
+
+ sg_init_table(src, src_nents);
+ sg = dthe_copy_sg(src, req->src, req->cryptlen);
+ memzero_explicit(pad_buf, AES_BLOCK_SIZE);
+ sg_set_buf(sg, pad_buf, pad_size);
+
+ if (diff_dst) {
+ dst = kmalloc_array(dst_nents, sizeof(*dst), GFP_ATOMIC);
+ if (!dst) {
+ ret = -ENOMEM;
+ goto aes_ctr_dst_alloc_err;
+ }
+
+ sg_init_table(dst, dst_nents);
+ sg = dthe_copy_sg(dst, req->dst, req->cryptlen);
+ sg_set_buf(sg, pad_buf, pad_size);
+ } else {
+ dst = src;
+ }
+ }
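+ /*
+ * Example (illustrative): req->cryptlen == 20 gives pad_size == 12,
+ * so len becomes 32 and the engine processes two full blocks, while
+ * only the first 20 bytes of output are meaningful to the caller.
+ */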
+
tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
if (src_mapped_nents == 0) {
ret = -EINVAL;
- goto aes_err;
+ goto aes_map_src_err;
}
if (!diff_dst) {
- dst_nents = src_nents;
dst_mapped_nents = src_mapped_nents;
} else {
- dst_nents = sg_nents_for_len(dst, len);
dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
if (dst_mapped_nents == 0) {
- dma_unmap_sg(tx_dev, src, src_nents, src_dir);
ret = -EINVAL;
- goto aes_err;
+ goto aes_map_dst_err;
}
}
@@ -353,8 +458,8 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
else
dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
- writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
- writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+ writel_relaxed(lower_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(len), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
dmaengine_submit(desc_in);
dmaengine_submit(desc_out);
@@ -386,11 +491,26 @@ static int dthe_aes_run(struct crypto_engine *engine, void *areq)
}
aes_prep_err:
- dma_unmap_sg(tx_dev, src, src_nents, src_dir);
if (dst_dir != DMA_BIDIRECTIONAL)
dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+aes_map_dst_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+
+aes_map_src_err:
+ if (ctx->aes_mode == DTHE_AES_CTR && req->cryptlen % AES_BLOCK_SIZE) {
+ memzero_explicit(rctx->padding, AES_BLOCK_SIZE);
+ if (diff_dst)
+ kfree(dst);
+aes_ctr_dst_alloc_err:
+ kfree(src);
+aes_ctr_src_alloc_err:
+ /* Fall back to software if allocation failed with -ENOMEM */
+ if (ret == -ENOMEM)
+ ret = dthe_aes_do_fallback(req);
+ }
-aes_err:
local_bh_disable();
crypto_finalize_skcipher_request(dev_data->engine, req, ret);
local_bh_enable();
@@ -400,7 +520,6 @@ aes_err:
static int dthe_aes_crypt(struct skcipher_request *req)
{
struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
struct dthe_data *dev_data = dthe_get_dev(ctx);
struct crypto_engine *engine;
@@ -408,20 +527,14 @@ static int dthe_aes_crypt(struct skcipher_request *req)
* If data is not a multiple of AES_BLOCK_SIZE:
* - need to return -EINVAL for ECB, CBC as they are block ciphers
* - need to fall back to software as H/W doesn't support Ciphertext Stealing for XTS
+ * - do nothing for CTR
*/
if (req->cryptlen % AES_BLOCK_SIZE) {
- if (ctx->aes_mode == DTHE_AES_XTS) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
-
- skcipher_request_set_callback(subreq, skcipher_request_flags(req),
- req->base.complete, req->base.data);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ if (ctx->aes_mode == DTHE_AES_XTS)
+ return dthe_aes_do_fallback(req);
- return rctx->enc ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- }
- return -EINVAL;
+ if (ctx->aes_mode != DTHE_AES_CTR)
+ return -EINVAL;
}
/*
@@ -454,6 +567,642 @@ static int dthe_aes_decrypt(struct skcipher_request *req)
return dthe_aes_crypt(req);
}
+static int dthe_aead_init_tfm(struct crypto_aead *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ const char *alg_name = crypto_tfm_alg_name(crypto_aead_tfm(tfm));
+
+ ctx->dev_data = dev_data;
+
+ ctx->aead_fb = crypto_alloc_sync_aead(alg_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->aead_fb)) {
+ dev_err(dev_data->dev, "fallback driver %s couldn't be loaded\n",
+ alg_name);
+ return PTR_ERR(ctx->aead_fb);
+ }
+
+ return 0;
+}
+
+static void dthe_aead_exit_tfm(struct crypto_aead *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ crypto_free_sync_aead(ctx->aead_fb);
+}
+
+/**
+ * dthe_aead_prep_aad - Prepare AAD scatterlist from input request
+ * @sg: Input scatterlist containing AAD
+ * @assoclen: Length of AAD
+ * @pad_buf: Buffer to hold AAD padding if needed
+ *
+ * Description:
+ * Creates a scatterlist containing only the AAD portion with padding
+ * to align to AES_BLOCK_SIZE. This simplifies DMA handling by allowing
+ * AAD to be sent separately via TX-only DMA.
+ *
+ * Return:
+ * Pointer to the AAD scatterlist, or ERR_PTR(error) on failure.
+ * The calling function needs to free the returned scatterlist when done.
+ **/
+static struct scatterlist *dthe_aead_prep_aad(struct scatterlist *sg,
+ unsigned int assoclen,
+ u8 *pad_buf)
+{
+ struct scatterlist *aad_sg;
+ struct scatterlist *to_sg;
+ int aad_nents;
+
+ if (assoclen == 0)
+ return NULL;
+
+ aad_nents = sg_nents_for_len(sg, assoclen);
+ if (assoclen % AES_BLOCK_SIZE)
+ aad_nents++;
+
+ aad_sg = kmalloc_array(aad_nents, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!aad_sg)
+ return ERR_PTR(-ENOMEM);
+
+ sg_init_table(aad_sg, aad_nents);
+ to_sg = dthe_copy_sg(aad_sg, sg, assoclen);
+ if (assoclen % AES_BLOCK_SIZE) {
+ unsigned int pad_len = AES_BLOCK_SIZE - (assoclen % AES_BLOCK_SIZE);
+
+ memset(pad_buf, 0, pad_len);
+ sg_set_buf(to_sg, pad_buf, pad_len);
+ }
+
+ return aad_sg;
+}
+
+/**
+ * dthe_aead_prep_crypt - Prepare crypt scatterlist from req->src/req->dst
+ * @sg: Input req->src/req->dst scatterlist
+ * @assoclen: Length of AAD (to skip)
+ * @cryptlen: Length of ciphertext/plaintext (minus the size of TAG in decryption)
+ * @pad_buf: Zeroed buffer to hold crypt padding if needed
+ *
+ * Description:
+ * Creates a scatterlist containing only the ciphertext/plaintext portion
+ * (skipping AAD) with padding to align to AES_BLOCK_SIZE.
+ *
+ * Return:
+ * Pointer to the ciphertext scatterlist, or ERR_PTR(error) on failure.
+ * The calling function needs to free the returned scatterlist when done.
+ **/
+static struct scatterlist *dthe_aead_prep_crypt(struct scatterlist *sg,
+ unsigned int assoclen,
+ unsigned int cryptlen,
+ u8 *pad_buf)
+{
+ struct scatterlist *out_sg[1];
+ struct scatterlist *crypt_sg;
+ struct scatterlist *to_sg;
+ size_t split_sizes[1] = {cryptlen};
+ int out_mapped_nents[1];
+ int crypt_nents;
+ int err;
+
+ if (cryptlen == 0)
+ return NULL;
+
+ /* Skip AAD, extract ciphertext portion */
+ err = sg_split(sg, 0, assoclen, 1, split_sizes, out_sg, out_mapped_nents, GFP_ATOMIC);
+ if (err)
+ goto dthe_aead_prep_crypt_split_err;
+
+ crypt_nents = sg_nents_for_len(out_sg[0], cryptlen);
+ if (cryptlen % AES_BLOCK_SIZE)
+ crypt_nents++;
+
+ crypt_sg = kmalloc_array(crypt_nents, sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!crypt_sg) {
+ err = -ENOMEM;
+ goto dthe_aead_prep_crypt_mem_err;
+ }
+
+ sg_init_table(crypt_sg, crypt_nents);
+ to_sg = dthe_copy_sg(crypt_sg, out_sg[0], cryptlen);
+ if (cryptlen % AES_BLOCK_SIZE) {
+ unsigned int pad_len = AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE);
+
+ sg_set_buf(to_sg, pad_buf, pad_len);
+ }
+
+dthe_aead_prep_crypt_mem_err:
+ kfree(out_sg[0]);
+
+dthe_aead_prep_crypt_split_err:
+ if (err)
+ return ERR_PTR(err);
+ return crypt_sg;
+}
+
+static int dthe_aead_read_tag(struct dthe_tfm_ctx *ctx, u32 *tag)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(aes_base_reg + DTHE_P_AES_CTRL, val,
+ (val & DTHE_AES_CTRL_SAVED_CTX_READY),
+ 0, POLL_TIMEOUT_INTERVAL);
+ if (ret)
+ return ret;
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ tag[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_TAG_OUT +
+ DTHE_REG_SIZE * i);
+ return 0;
+}
+
+static int dthe_aead_enc_get_tag(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ u32 tag[AES_BLOCK_WORDS];
+ int nents;
+ int ret;
+
+ ret = dthe_aead_read_tag(ctx, tag);
+ if (ret)
+ return ret;
+
+ nents = sg_nents_for_len(req->dst, req->cryptlen + req->assoclen + ctx->authsize);
+
+ sg_pcopy_from_buffer(req->dst, nents, tag, ctx->authsize,
+ req->assoclen + req->cryptlen);
+
+ return 0;
+}
+
+static int dthe_aead_dec_verify_tag(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ u32 tag_out[AES_BLOCK_WORDS];
+ u32 tag_in[AES_BLOCK_WORDS];
+ int nents;
+ int ret;
+
+ ret = dthe_aead_read_tag(ctx, tag_out);
+ if (ret)
+ return ret;
+
+ nents = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+
+ sg_pcopy_to_buffer(req->src, nents, tag_in, ctx->authsize,
+ req->assoclen + req->cryptlen - ctx->authsize);
+
+ if (crypto_memneq(tag_in, tag_out, ctx->authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int dthe_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ crypto_sync_aead_clear_flags(ctx->aead_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_aead_set_flags(ctx->aead_fb,
+ crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_aead_setkey(ctx->aead_fb, key, keylen);
+}
+
+static int dthe_gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = dthe_aead_setkey(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_GCM;
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return ret;
+}
+
+static int dthe_ccm_aes_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ ret = dthe_aead_setkey(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ ctx->aes_mode = DTHE_AES_CCM;
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return ret;
+}
+
+static int dthe_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ /* Invalid auth size will be handled by crypto_aead_setauthsize() */
+ ctx->authsize = authsize;
+
+ return crypto_sync_aead_setauthsize(ctx->aead_fb, authsize);
+}
+
+static int dthe_aead_do_fallback(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ SYNC_AEAD_REQUEST_ON_STACK(subreq, ctx->aead_fb);
+
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return rctx->enc ? crypto_aead_encrypt(subreq) :
+ crypto_aead_decrypt(subreq);
+}
+
+static void dthe_aead_dma_in_callback(void *data)
+{
+ struct aead_request *req = (struct aead_request *)data;
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aead_run(struct crypto_engine *engine, void *areq)
+{
+ struct aead_request *req = container_of(areq, struct aead_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ unsigned int cryptlen = req->cryptlen;
+ unsigned int assoclen = req->assoclen;
+ unsigned int authsize = ctx->authsize;
+ unsigned int unpadded_cryptlen;
+ struct scatterlist *src = NULL;
+ struct scatterlist *dst = NULL;
+ struct scatterlist *aad_sg = NULL;
+ u32 iv_in[AES_IV_WORDS];
+
+ int aad_nents = 0;
+ int src_nents = 0;
+ int dst_nents = 0;
+ int aad_mapped_nents = 0;
+ int src_mapped_nents = 0;
+ int dst_mapped_nents = 0;
+
+ u8 *src_assoc_padbuf = rctx->padding;
+ u8 *src_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE;
+ u8 *dst_crypt_padbuf = rctx->padding + AES_BLOCK_SIZE;
+
+ bool diff_dst;
+ enum dma_data_direction aad_dir, src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out, *desc_aad_out;
+
+ int ret;
+ int err;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
+ /* In decryption, the last authsize bytes are the TAG */
+ if (!rctx->enc)
+ cryptlen -= authsize;
+ unpadded_cryptlen = cryptlen;
+
+ memset(src_assoc_padbuf, 0, AES_BLOCK_SIZE);
+ memset(src_crypt_padbuf, 0, AES_BLOCK_SIZE);
+ memset(dst_crypt_padbuf, 0, AES_BLOCK_SIZE);
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ if (req->src == req->dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+ aad_dir = DMA_TO_DEVICE;
+
+ /* Prep AAD scatterlist (always from req->src) */
+ aad_sg = dthe_aead_prep_aad(req->src, req->assoclen, src_assoc_padbuf);
+ if (IS_ERR(aad_sg)) {
+ ret = PTR_ERR(aad_sg);
+ goto aead_prep_aad_err;
+ }
+
+ /* Prep ciphertext src scatterlist */
+ src = dthe_aead_prep_crypt(req->src, req->assoclen, cryptlen, src_crypt_padbuf);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto aead_prep_src_err;
+ }
+
+ /* Prep ciphertext dst scatterlist (only if separate dst) */
+ if (diff_dst) {
+ dst = dthe_aead_prep_crypt(req->dst, req->assoclen, unpadded_cryptlen,
+ dst_crypt_padbuf);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto aead_prep_dst_err;
+ }
+ } else {
+ dst = src;
+ }
+
+ /* Calculate padded lengths for nents calculations */
+ if (req->assoclen % AES_BLOCK_SIZE)
+ assoclen += AES_BLOCK_SIZE - (req->assoclen % AES_BLOCK_SIZE);
+ if (cryptlen % AES_BLOCK_SIZE)
+ cryptlen += AES_BLOCK_SIZE - (cryptlen % AES_BLOCK_SIZE);
+
+ if (assoclen != 0) {
+ /* Map AAD for TX only */
+ aad_nents = sg_nents_for_len(aad_sg, assoclen);
+ aad_mapped_nents = dma_map_sg(tx_dev, aad_sg, aad_nents, aad_dir);
+ if (aad_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map AAD for TX\n");
+ ret = -EINVAL;
+ goto aead_dma_map_aad_err;
+ }
+
+ /* Prepare DMA descriptors for AAD TX */
+ desc_aad_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, aad_sg,
+ aad_mapped_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_aad_out) {
+ dev_err(dev_data->dev, "AAD TX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_aad_err;
+ }
+ }
+
+ if (cryptlen != 0) {
+ /* Map ciphertext src for TX (BIDIRECTIONAL if in-place) */
+ src_nents = sg_nents_for_len(src, cryptlen);
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map ciphertext src for TX\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_aad_err;
+ }
+
+ /* Prepare DMA descriptors for ciphertext TX */
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src,
+ src_mapped_nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "Ciphertext TX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_src_err;
+ }
+
+ /* Map ciphertext dst for RX (only if separate dst) */
+ if (diff_dst) {
+ dst_nents = sg_nents_for_len(dst, cryptlen);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dev_err(dev_data->dev, "Failed to map ciphertext dst for RX\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_src_err;
+ }
+ } else {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ }
+
+ /* Prepare DMA descriptor for ciphertext RX */
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst,
+ dst_mapped_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "Ciphertext RX prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aead_dma_prep_dst_err;
+ }
+
+ desc_in->callback = dthe_aead_dma_in_callback;
+ desc_in->callback_param = req;
+ } else if (assoclen != 0) {
+ /* AAD-only operation */
+ desc_aad_out->callback = dthe_aead_dma_in_callback;
+ desc_aad_out->callback_param = req;
+ }
+
+ init_completion(&rctx->aes_compl);
+
+ /*
+ * HACK: There is an unknown hw issue where, if the previous operation had alen == 0
+ * and plen != 0, the tag calculation of a following operation with plen == 0 and
+ * alen != 0 is incorrect. As a workaround, reset the context by writing a 1 to the
+ * C_LENGTH_0 and AUTH_LENGTH registers; why this works is not fully understood.
+ */
+ if (cryptlen == 0) {
+ writel_relaxed(1, aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(1, aes_base_reg + DTHE_P_AES_AUTH_LENGTH);
+ }
+
+ if (ctx->aes_mode == DTHE_AES_GCM) {
+ if (req->iv) {
+ memcpy(iv_in, req->iv, GCM_AES_IV_SIZE);
+ } else {
+ iv_in[0] = 0;
+ iv_in[1] = 0;
+ iv_in[2] = 0;
+ }
+ iv_in[3] = 0x01000000;
+ } else {
+ memcpy(iv_in, req->iv, AES_IV_SIZE);
+ }
+
+ /* Clear key2 to reset previous GHASH intermediate data */
+ for (int i = 0; i < AES_KEYSIZE_256 / sizeof(u32); ++i)
+ writel_relaxed(0, aes_base_reg + DTHE_P_AES_KEY2_6 + DTHE_REG_SIZE * i);
+
+ dthe_aes_set_ctrl_key(ctx, rctx, iv_in);
+
+ writel_relaxed(lower_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(unpadded_cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+ writel_relaxed(req->assoclen, aes_base_reg + DTHE_P_AES_AUTH_LENGTH);
+
+ /* Submit DMA descriptors: AAD TX, ciphertext TX, ciphertext RX */
+ if (assoclen != 0)
+ dmaengine_submit(desc_aad_out);
+ if (cryptlen != 0) {
+ dmaengine_submit(desc_out);
+ dmaengine_submit(desc_in);
+ }
+
+ if (cryptlen != 0)
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+ /* Use a timeout to ensure the request is still finalised if the DMA callback never fires */
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ if (cryptlen != 0)
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + DTHE_REG_SIZE * i);
+ } else {
+ ret = 0;
+ }
+
+ if (cryptlen != 0)
+ dma_sync_sg_for_cpu(rx_dev, dst, dst_nents, dst_dir);
+
+ if (rctx->enc)
+ err = dthe_aead_enc_get_tag(req);
+ else
+ err = dthe_aead_dec_verify_tag(req);
+
+ ret = (ret) ? ret : err;
+
+aead_dma_prep_dst_err:
+ if (diff_dst && cryptlen != 0)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+aead_dma_prep_src_err:
+ if (cryptlen != 0)
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+aead_dma_prep_aad_err:
+ if (assoclen != 0)
+ dma_unmap_sg(tx_dev, aad_sg, aad_nents, aad_dir);
+
+aead_dma_map_aad_err:
+ if (diff_dst && cryptlen != 0)
+ kfree(dst);
+aead_prep_dst_err:
+ if (cryptlen != 0)
+ kfree(src);
+aead_prep_src_err:
+ if (assoclen != 0)
+ kfree(aad_sg);
+
+aead_prep_aad_err:
+ memzero_explicit(rctx->padding, 2 * AES_BLOCK_SIZE);
+
+ if (ret)
+ ret = dthe_aead_do_fallback(req);
+
+ local_bh_disable();
+ crypto_finalize_aead_request(engine, req, ret);
+ local_bh_enable();
+ return 0;
+}
+
+static int dthe_aead_crypt(struct aead_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+ unsigned int cryptlen = req->cryptlen;
+ bool is_zero_ctr = true;
+
+ /* In decryption, last authsize bytes are the TAG */
+ if (!rctx->enc)
+ cryptlen -= ctx->authsize;
+
+ if (ctx->aes_mode == DTHE_AES_CCM) {
+ /*
+ * For CCM Mode, the 128-bit IV contains the following:
+ * | 0 .. 2 | 3 .. 7 | 8 .. (127-8*L) | (128-8*L) .. 127 |
+ * | L-1 | Zero | Nonce | Counter |
+ * L must be between 2 and 8 (inclusive), i.e. 1 <= (L-1) <= 7,
+ * and the next 5 bits must be zero; otherwise return -EINVAL.
+ */
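+ /*
+ * Example (illustrative): iv[0] == 0x01 encodes L-1 == 1, i.e. L == 2,
+ * so the nonce occupies bytes 1..13 and the counter the last 2 bytes,
+ * which must be all-zero for the hardware path (checked below).
+ */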
+ u8 *iv = req->iv;
+ u8 L = iv[0];
+
+ /* variable L stores L-1 here */
+ if (L < 1 || L > 7)
+ return -EINVAL;
+ /*
+ * DTHEv2 HW can only work with zero initial counter in CCM mode.
+ * Check if the initial counter value is zero or not
+ */
+ for (int i = 0; i < L + 1; ++i) {
+ if (iv[AES_IV_SIZE - 1 - i] != 0) {
+ is_zero_ctr = false;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Need to fallback to software in the following cases due to HW restrictions:
+ * - Both AAD and plaintext/ciphertext are zero length
+ * - For AES-GCM, AAD length is more than 2^32 - 1 bytes
+ * - For AES-CCM, AAD length is more than 2^16 - 2^8 bytes
+ * - For AES-CCM, plaintext/ciphertext length is more than 2^61 - 1 bytes
+ * - For AES-CCM, AAD length is non-zero but plaintext/ciphertext length is zero
+ * - For AES-CCM, the initial counter (last L+1 bytes of IV) is not all zeroes
+ *
+ * PS: req->cryptlen is currently an unsigned int, which makes the second and fourth
+ * checks above tautologically false. If req->cryptlen is ever changed to a 64-bit
+ * type, checks for those cases would need to be added below.
+ */
+ if ((req->assoclen == 0 && cryptlen == 0) ||
+ (ctx->aes_mode == DTHE_AES_CCM && req->assoclen > DTHE_AES_CCM_AAD_MAXLEN) ||
+ (ctx->aes_mode == DTHE_AES_CCM && cryptlen == 0) ||
+ (ctx->aes_mode == DTHE_AES_CCM && !is_zero_ctr))
+ return dthe_aead_do_fallback(req);
+
+ engine = dev_data->engine;
+ return crypto_transfer_aead_request_to_engine(engine, req);
+}
+
+static int dthe_aead_encrypt(struct aead_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aead_crypt(req);
+}
+
+static int dthe_aead_decrypt(struct aead_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = aead_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aead_crypt(req);
+}
+
static struct skcipher_engine_alg cipher_algs[] = {
{
.base.init = dthe_cipher_init_tfm,
@@ -501,8 +1250,33 @@ static struct skcipher_engine_alg cipher_algs[] = {
.op.do_one_request = dthe_aes_run,
}, /* CBC AES */
{
- .base.init = dthe_cipher_xts_init_tfm,
- .base.exit = dthe_cipher_xts_exit_tfm,
+ .base.init = dthe_cipher_init_tfm_fallback,
+ .base.exit = dthe_cipher_exit_tfm,
+ .base.setkey = dthe_aes_ctr_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* CTR AES */
+ {
+ .base.init = dthe_cipher_init_tfm_fallback,
+ .base.exit = dthe_cipher_exit_tfm,
.base.setkey = dthe_aes_xts_setkey,
.base.encrypt = dthe_aes_encrypt,
.base.decrypt = dthe_aes_decrypt,
@@ -527,12 +1301,75 @@ static struct skcipher_engine_alg cipher_algs[] = {
}, /* XTS AES */
};
+static struct aead_engine_alg aead_algs[] = {
+ {
+ .base.init = dthe_aead_init_tfm,
+ .base.exit = dthe_aead_exit_tfm,
+ .base.setkey = dthe_gcm_aes_setkey,
+ .base.setauthsize = dthe_aead_setauthsize,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.encrypt = dthe_aead_encrypt,
+ .base.decrypt = dthe_aead_decrypt,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.ivsize = GCM_AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aead_run,
+ }, /* GCM AES */
+ {
+ .base.init = dthe_aead_init_tfm,
+ .base.exit = dthe_aead_exit_tfm,
+ .base.setkey = dthe_ccm_aes_setkey,
+ .base.setauthsize = dthe_aead_setauthsize,
+ .base.maxauthsize = AES_BLOCK_SIZE,
+ .base.encrypt = dthe_aead_encrypt,
+ .base.decrypt = dthe_aead_decrypt,
+ .base.chunksize = AES_BLOCK_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aead_run,
+ }, /* CCM AES */
+};
+
int dthe_register_aes_algs(void)
{
- return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ int ret = 0;
+
+ ret = crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ if (ret)
+ return ret;
+ ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+ if (ret)
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+
+ return ret;
}
void dthe_unregister_aes_algs(void)
{
crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+ crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
}
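To make the padded-scatterlist construction above concrete, here is a standalone sketch in the spirit of dthe_aead_prep_aad(), assuming an AAD buffer that is not already block-aligned; build_padded_aad_sg() and its arguments are illustrative, not part of the patch:

#include <crypto/aes.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Illustrative: build a two-entry sg table whose total length is block-aligned. */
static void build_padded_aad_sg(struct scatterlist sg[2], u8 *aad_buf,
				unsigned int assoclen, u8 pad[AES_BLOCK_SIZE])
{
	/* assumes assoclen % AES_BLOCK_SIZE != 0, as in the driver's padded path */
	unsigned int pad_len = AES_BLOCK_SIZE - (assoclen % AES_BLOCK_SIZE);

	memset(pad, 0, pad_len);
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], aad_buf, assoclen);	/* the caller's AAD */
	sg_set_buf(&sg[1], pad, pad_len);	/* zero padding to the block boundary */
}

For assoclen == 20 this yields pad_len == 12, i.e. the hardware sees exactly two full AES blocks of AAD.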
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
index c39d37933b9e..a2ad79bec105 100644
--- a/drivers/crypto/ti/dthev2-common.c
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -48,6 +48,25 @@ struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
return dev_data;
}
+struct scatterlist *dthe_copy_sg(struct scatterlist *dst,
+ struct scatterlist *src,
+ int buflen)
+{
+ struct scatterlist *from_sg, *to_sg;
+ int sglen;
+
+ for (to_sg = dst, from_sg = src; buflen && from_sg; buflen -= sglen) {
+ sglen = from_sg->length;
+ if (sglen > buflen)
+ sglen = buflen;
+ sg_set_buf(to_sg, sg_virt(from_sg), sglen);
+ from_sg = sg_next(from_sg);
+ to_sg = sg_next(to_sg);
+ }
+
+ return to_sg;
+}
+
static int dthe_dma_init(struct dthe_data *dev_data)
{
int ret;
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
index c7a06a4c353f..d4a3b9c18bbc 100644
--- a/drivers/crypto/ti/dthev2-common.h
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -36,7 +36,10 @@
enum dthe_aes_mode {
DTHE_AES_ECB = 0,
DTHE_AES_CBC,
+ DTHE_AES_CTR,
DTHE_AES_XTS,
+ DTHE_AES_GCM,
+ DTHE_AES_CCM,
};
/* Driver specific struct definitions */
@@ -77,25 +80,33 @@ struct dthe_list {
* struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
* @dev_data: Device data struct pointer
* @keylen: AES key length
+ * @authsize: Authentication tag size for AEAD modes
* @key: AES key
* @aes_mode: AES mode
+ * @aead_fb: Fallback crypto aead handle
* @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
*/
struct dthe_tfm_ctx {
struct dthe_data *dev_data;
unsigned int keylen;
+ unsigned int authsize;
u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
enum dthe_aes_mode aes_mode;
- struct crypto_sync_skcipher *skcipher_fb;
+ union {
+ struct crypto_sync_aead *aead_fb;
+ struct crypto_sync_skcipher *skcipher_fb;
+ };
};
/**
* struct dthe_aes_req_ctx - AES engine req ctx struct
* @enc: flag indicating encryption or decryption operation
+ * @padding: padding buffer for handling unaligned data
* @aes_compl: Completion variable for use in manual completion in case of DMA callback failure
*/
struct dthe_aes_req_ctx {
int enc;
+ u8 padding[2 * AES_BLOCK_SIZE];
struct completion aes_compl;
};
@@ -103,6 +114,20 @@ struct dthe_aes_req_ctx {
struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+/**
+ * dthe_copy_sg - Copy sg entries from src to dst
+ * @dst: Destination sg to be filled
+ * @src: Source sg to be copied from
+ * @buflen: Number of bytes to be copied
+ *
+ * Description:
+ * Populate dst with sg entries covering the first buflen bytes of src;
+ * the underlying data buffers are shared, not copied.
+ *
+ * Return:
+ * Pointer to the first unused sg entry in dst, so the caller can append
+ * further entries (e.g. a padding buffer) after the copied ones.
+ **/
+struct scatterlist *dthe_copy_sg(struct scatterlist *dst,
+ struct scatterlist *src,
+ int buflen);
+
int dthe_register_aes_algs(void);
void dthe_unregister_aes_algs(void);
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index e559bdadf4f9..0c2efdc83257 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -11,6 +11,7 @@
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
@@ -29,7 +30,7 @@ struct data_queue {
char name[32];
struct crypto_engine *engine;
- struct tasklet_struct done_task;
+ struct work_struct done_work;
};
struct virtio_crypto {
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 955bff8820da..ee83bf6568f0 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -70,9 +70,9 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl
return 0;
}
-static void virtcrypto_done_task(unsigned long data)
+static void virtcrypto_done_work(struct work_struct *work)
{
- struct data_queue *data_vq = (struct data_queue *)data;
+ struct data_queue *data_vq = from_work(data_vq, work, done_work);
struct virtqueue *vq = data_vq->vq;
struct virtio_crypto_request *vc_req;
unsigned long flags;
@@ -96,7 +96,7 @@ static void virtcrypto_dataq_callback(struct virtqueue *vq)
struct virtio_crypto *vcrypto = vq->vdev->priv;
struct data_queue *dq = &vcrypto->data_vq[vq->index];
- tasklet_schedule(&dq->done_task);
+ queue_work(system_bh_wq, &dq->done_work);
}
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
@@ -150,8 +150,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
ret = -ENOMEM;
goto err_engine;
}
- tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
- (unsigned long)&vi->data_vq[i]);
+ INIT_WORK(&vi->data_vq[i].done_work, virtcrypto_done_work);
}
kfree(vqs_info);
@@ -501,7 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
for (i = 0; i < vcrypto->max_data_queues; i++)
- tasklet_kill(&vcrypto->data_vq[i].done_task);
+ cancel_work_sync(&vcrypto->data_vq[i].done_work);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
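The virtio conversion above is the standard tasklet-to-BH-workqueue migration: work queued on system_bh_wq still executes in softirq context, so the completion path keeps its old execution semantics while moving to the maintained workqueue API. A generic sketch of the three touchpoints (my_queue is a hypothetical driver type; the APIs are real):

#include <linux/workqueue.h>

struct my_queue {
	struct work_struct done_work;
};

static void my_done_work(struct work_struct *work)
{
	struct my_queue *q = from_work(q, work, done_work);

	/* drain completed requests for q here */
}

/* setup:    INIT_WORK(&q->done_work, my_done_work);  */
/* IRQ path: queue_work(system_bh_wq, &q->done_work); */
/* teardown: cancel_work_sync(&q->done_work);         */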
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 9eacb9fa375d..5d5358dfab73 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -240,9 +240,10 @@ static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
}
/**
- * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ * acomp_request_alloc_extra() -- allocates asynchronous (de)compression request
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @extra: amount of extra memory
* @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
*
* Return: allocated handle in case of success or NULL in case of an error
@@ -318,7 +319,7 @@ static inline void acomp_request_free(struct acomp_req *req)
*
* @req: request that the callback will be set for
* @flgs: specify for instance if the operation may backlog
- * @cmlp: callback which will be called
+ * @cmpl: callback which will be called
* @data: private data used by the caller
*/
static inline void acomp_request_set_callback(struct acomp_req *req,
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 796d986e58e1..29c5878a3609 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -16,39 +16,6 @@
#include <linux/types.h>
#include <crypto/aead.h>
-#include <crypto/hash.h>
-#include <crypto/skcipher.h>
-
-struct cryptd_skcipher {
- struct crypto_skcipher base;
-};
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
-/* Must be called without moving CPUs. */
-bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm);
-void cryptd_free_skcipher(struct cryptd_skcipher *tfm);
-
-struct cryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct cryptd_ahash *__cryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct cryptd_ahash *)tfm;
-}
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
-struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
-/* Must be called without moving CPUs. */
-bool cryptd_ahash_queued(struct cryptd_ahash *tfm);
-void cryptd_free_ahash(struct cryptd_ahash *tfm);
struct cryptd_aead {
struct crypto_aead base;
diff --git a/include/crypto/des.h b/include/crypto/des.h
index 7812b4331ae4..73eec617f480 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -34,9 +34,9 @@ void des3_ede_decrypt(const struct des3_ede_ctx *dctx, u8 *dst, const u8 *src);
* des_expand_key - Expand a DES input key into a key schedule
* @ctx: the key schedule
* @key: buffer containing the input key
- * @len: size of the buffer contents
+ * @keylen: size of the buffer contents
*
- * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
+ * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
* the key is accepted but has been found to be weak.
*/
int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen);
@@ -45,9 +45,9 @@ int des_expand_key(struct des_ctx *ctx, const u8 *key, unsigned int keylen);
* des3_ede_expand_key - Expand a triple DES input key into a key schedule
* @ctx: the key schedule
* @key: buffer containing the input key
- * @len: size of the buffer contents
+ * @keylen: size of the buffer contents
*
- * Returns 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
+ * Returns: 0 on success, -EINVAL if the input key is rejected and -ENOKEY if
* the key is accepted but has been found to be weak. Note that weak keys will
* be rejected (and -EINVAL will be returned) when running in FIPS mode.
*/
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 9a3f28baa804..9cd37df32dc4 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -42,6 +42,7 @@
*
* @base: Common crypto API algorithm data structure
* @calg: Common algorithm data structure shared with scomp
+ * @COMP_ALG_COMMON: see struct comp_alg_common
*/
struct acomp_alg {
int (*compress)(struct acomp_req *req);
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
index 57cd75242141..a4b48d76f53a 100644
--- a/include/crypto/internal/ecc.h
+++ b/include/crypto/internal/ecc.h
@@ -72,8 +72,8 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
/**
* ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
* @in: Input byte array
- * @nbytes Size of input byte array
- * @out Output digits array
+ * @nbytes: Size of input byte array
+ * @out: Output digits array
* @ndigits: Number of digits to create from byte array
*
* The first byte in the input byte array is expected to hold the most
@@ -90,7 +90,7 @@ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
* @private_key: private key to be used for the given curve
* @private_key_len: private key length
*
- * Returns 0 if the key is acceptable, a negative value otherwise
+ * Returns: 0 if the key is acceptable, a negative value otherwise
*/
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, unsigned int private_key_len);
@@ -104,7 +104,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
* @ndigits: curve number of digits
* @private_key: buffer for storing the generated private key
*
- * Returns 0 if the private key was generated successfully, a negative value
+ * Returns: 0 if the private key was generated successfully, a negative value
* if an error occurred.
*/
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
@@ -118,7 +118,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
* @private_key: pregenerated private key for the given curve
* @public_key: buffer for storing the generated public key
*
- * Returns 0 if the public key was generated successfully, a negative value
+ * Returns: 0 if the public key was generated successfully, a negative value
* if an error occurred.
*/
int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
@@ -136,7 +136,7 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
* Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
* before using it for symmetric encryption or HMAC.
*
- * Returns 0 if the shared secret was generated successfully, a negative value
+ * Returns: 0 if the shared secret was generated successfully, a negative value
* if an error occurred.
*/
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
@@ -179,6 +179,8 @@ int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
*
* @vli: vli to check.
* @ndigits: length of the @vli
+ *
+ * Returns: %true if vli == 0, %false otherwise.
*/
bool vli_is_zero(const u64 *vli, unsigned int ndigits);
@@ -189,7 +191,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits);
* @right: vli
* @ndigits: length of both vlis
*
- * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * Returns: sign of @left - @right, i.e. -1 if @left < @right,
* 0 if @left == @right, 1 if @left > @right.
*/
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
@@ -199,7 +201,7 @@ int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
*
* @result: where to write result
* @left: vli
- * @right vli
+ * @right: vli
* @ndigits: length of all vlis
*
* Note: can modify in-place.
@@ -263,7 +265,7 @@ void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
/**
- * ecc_aloc_point() - Allocate ECC point.
+ * ecc_alloc_point() - Allocate ECC point.
*
* @ndigits: Length of vlis in u64 qwords.
*
@@ -281,7 +283,7 @@ void ecc_free_point(struct ecc_point *p);
/**
* ecc_point_is_zero() - Check if point is zero.
*
- * @p: Point to check for zero.
+ * @point: Point to check for zero.
*
* Return: true if point is the point at infinity, false otherwise.
*/
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 012f5fb22d43..e38d9f0487ec 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -9,11 +9,9 @@
#define _CRYPTO_INTERNAL_GENIV_H
#include <crypto/internal/aead.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
struct aead_geniv_ctx {
- spinlock_t lock;
struct crypto_aead *child;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 6a2c5f2e90f9..13a0851a995b 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -22,6 +22,7 @@ struct crypto_scomp {
* @decompress: Function performs a de-compress operation
* @streams: Per-cpu memory for algorithm
* @calg: Common algorithm data structure shared with acomp
+ * @COMP_ALG_COMMON: see struct comp_alg_common
*/
struct scomp_alg {
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index 9e338e7aafbd..f5e5d7b63951 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -10,25 +10,6 @@
#include <linux/percpu.h>
#include <linux/types.h>
-/* skcipher support */
-
-struct simd_skcipher_alg;
-struct skcipher_alg;
-
-struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
- const char *algname,
- const char *drvname,
- const char *basename);
-void simd_skcipher_free(struct simd_skcipher_alg *alg);
-
-int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs);
-
-void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
- struct simd_skcipher_alg **simd_algs);
-
-/* AEAD support */
-
struct simd_aead_alg;
struct aead_alg;
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index d451b54b322a..07f494b2c881 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -12,6 +12,8 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
+#include <linux/random.h>
struct crypto_rng;
@@ -57,10 +59,27 @@ struct crypto_rng {
struct crypto_tfm base;
};
-extern struct crypto_rng *crypto_default_rng;
+int __crypto_stdrng_get_bytes(void *buf, unsigned int len);
-int crypto_get_default_rng(void);
-void crypto_put_default_rng(void);
+/**
+ * crypto_stdrng_get_bytes() - get cryptographically secure random bytes
+ * @buf: output buffer holding the random numbers
+ * @len: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * normal Linux RNG if fips_enabled=0, or the highest-priority "stdrng"
+ * algorithm in the crypto_rng subsystem if fips_enabled=1.
+ *
+ * Context: May sleep
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_stdrng_get_bytes(void *buf, unsigned int len)
+{
+ might_sleep();
+ if (fips_enabled)
+ return __crypto_stdrng_get_bytes(buf, len);
+ return get_random_bytes_wait(buf, len);
+}
/**
* DOC: Random number generator API
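[ A minimal caller sketch of the new one-call helper, which replaces the old crypto_get_default_rng()/crypto_rng_get_bytes()/crypto_put_default_rng() sequence (compare the tipc_aead_key_generate() conversion at the end of this patch). The helper name comes from the hunk above; the surrounding function is illustrative: ]

#include <crypto/rng.h>

/* Generate a random 256-bit key; may sleep, returns 0 or a negative errno. */
static int generate_session_key(u8 key[32])
{
	return crypto_stdrng_get_bytes(key, sizeof(u8[32]));
}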
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 9e5853464345..4efe2ca8c4d1 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -145,6 +145,7 @@ struct skcipher_alg_common SKCIPHER_ALG_COMMON;
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
* @co: see struct skcipher_alg_common
+ * @SKCIPHER_ALG_COMMON: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 62cd7b35a29c..22ba327ec227 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -92,7 +92,6 @@ enum cpuhp_state {
CPUHP_NET_DEV_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
- CPUHP_PADATA_DEAD,
CPUHP_AP_DTPM_CPU_DEAD,
CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 51a6dc2b97e9..a6268dc4f7cb 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -102,6 +102,12 @@
#define QM_MIG_REGION_SEL 0x100198
#define QM_MIG_REGION_EN BIT(0)
+#define QM_MAX_CHANNEL_NUM 8
+#define QM_CHANNEL_USAGE_OFFSET 0x1100
+#define QM_MAX_DEV_USAGE 100
+#define QM_DEV_USAGE_RATE 100
+#define QM_CHANNEL_ADDR_INTRVL 0x4
+
/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
#define UACCE_MODE_SVA 1 /* use uacce sva mode */
@@ -359,6 +365,11 @@ struct qm_rsv_buf {
struct qm_dma qcdma;
};
+struct qm_channel {
+ int channel_num;
+ const char *channel_name[QM_MAX_CHANNEL_NUM];
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -433,6 +444,7 @@ struct hisi_qm {
struct qm_err_isolate isolate_data;
struct hisi_qm_cap_tables cap_tables;
+ struct qm_channel channel_data;
};
struct hisi_qp_status {
@@ -546,8 +558,6 @@ int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
-int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
-void hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
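[ A hypothetical reading of the new register constants: assuming each channel's usage register sits QM_CHANNEL_ADDR_INTRVL apart starting at QM_CHANNEL_USAGE_OFFSET relative to the QM MMIO base. That layout is an inference from the defines above, not taken from the driver: ]

#include <linux/io.h>
#include <linux/hisi_acc_qm.h>

/* Sketch: read the usage register of channel @ch (0 .. channel_num - 1). */
static u32 qm_read_channel_usage(struct hisi_qm *qm, int ch)
{
	return readl(qm->io_base + QM_CHANNEL_USAGE_OFFSET +
		     ch * QM_CHANNEL_ADDR_INTRVL);
}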
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index b77bc55a4cf3..1d3c1927986e 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -46,7 +46,7 @@ struct hwrng {
unsigned long priv;
unsigned short quality;
- /* internal. */
+ /* private: internal. */
struct list_head list;
struct kref ref;
struct work_struct cleanup_work;
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 765f2778e264..b6232bea6edf 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -149,23 +149,23 @@ struct padata_mt_job {
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_online_node: Linkage for CPU online callback.
- * @cpu_dead_node: Linkage for CPU offline callback.
+ * @cpuhp_node: Linkage for CPU hotplug callbacks.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
* @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
+ * @validate_cpumask: Internal cpumask used to validate @cpumask during hotplug.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
*/
struct padata_instance {
- struct hlist_node cpu_online_node;
- struct hlist_node cpu_dead_node;
+ struct hlist_node cpuhp_node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
struct list_head pslist;
struct padata_cpumask cpumask;
+ cpumask_var_t validate_cpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 63d516c873b4..54e3c621fec3 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -801,6 +801,19 @@ static inline void print_hex_dump_debug(const char *prefix_str, int prefix_type,
}
#endif
+#if defined(DEBUG)
+#define print_hex_dump_devel(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#else
+static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+#endif
+
/**
* print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
* @prefix_str: string to prefix each line with;
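[ As added here, print_hex_dump_devel() calls print_hex_dump() directly when DEBUG is defined and compiles to an empty inline otherwise, so it does not go through the dynamic-debug wrapper that print_hex_dump_debug() can use. A usage sketch with illustrative names: ]

#include <linux/printk.h>

/* Dumps @buf at KERN_DEBUG in DEBUG builds; compiles away otherwise. */
static void dump_request(const void *buf, size_t len)
{
	print_hex_dump_devel("req: ", DUMP_PREFIX_OFFSET, 16, 1,
			     buf, len, true);
}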
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 133ccb39137a..0480509a6339 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -129,10 +129,10 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
unsigned int hash;
/* params must be equal to ht->p if it isn't constant. */
- if (!__builtin_constant_p(params.key_len))
+ if (!__builtin_constant_p(params.key_len)) {
hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
- else if (params.key_len) {
- unsigned int key_len = params.key_len;
+ } else {
+ unsigned int key_len = params.key_len ? : ht->p.key_len;
if (params.hashfn)
hash = params.hashfn(key, key_len, hash_rnd);
@@ -140,13 +140,6 @@ static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
hash = jhash(key, key_len, hash_rnd);
else
hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
- } else {
- unsigned int key_len = ht->p.key_len;
-
- if (params.hashfn)
- hash = params.hashfn(key, key_len, hash_rnd);
- else
- hash = jhash(key, key_len, hash_rnd);
}
return hash;
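[ The rewrite above folds the old key_len == 0 fallback into the constant-params branch via "params.key_len ?: ht->p.key_len". In the common case the params are a compile-time constant, so the selection still folds down to a single hash call; a typical constant-key_len setup (hypothetical object and field names) looks like: ]

#include <linux/rhashtable.h>

struct my_obj {
	u32 key;
	struct rhash_head node;
};

/* key_len is a compile-time constant, so rht_key_get_hash() reduces to
 * one jhash/jhash2 call at compile time. */
static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, node),
};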
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 2b5b042eb73b..52dae70b058b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -277,7 +277,7 @@ struct sev_user_data_snp_wrapped_vlek_hashstick {
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
- * @opaque: pointer to the command structure
+ * @data: pointer to the command structure
* @error: SEV FW return code on failure
*/
struct sev_issue_cmd {
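[ For context on the renamed @data member, a hedged userspace sketch of the ioctl; the platform-status command is just one example and error handling is trimmed: ]

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

/* Query platform status; @data carries a pointer to the command buffer. */
static int sev_platform_status(struct sev_user_data_status *status)
{
	struct sev_issue_cmd arg = {
		.cmd  = SEV_PLATFORM_STATUS,
		.data = (uint64_t)(uintptr_t)status,
	};
	int fd = open("/dev/sev", O_RDWR);
	int rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, SEV_ISSUE_CMD, &arg);
	/* On failure, arg.error holds the SEV firmware return code. */
	close(fd);
	return rc;
}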
diff --git a/kernel/padata.c b/kernel/padata.c
index 8657e6e0c224..0d3ea1b68b1f 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -535,7 +535,8 @@ static void padata_init_reorder_list(struct parallel_data *pd)
}
/* Allocate and initialize the internal cpumask dependent resources. */
-static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
+static struct parallel_data *padata_alloc_pd(struct padata_shell *ps,
+ int offlining_cpu)
{
struct padata_instance *pinst = ps->pinst;
struct parallel_data *pd;
@@ -561,6 +562,10 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
+ if (offlining_cpu >= 0) {
+ __cpumask_clear_cpu(offlining_cpu, pd->cpumask.pcpu);
+ __cpumask_clear_cpu(offlining_cpu, pd->cpumask.cbcpu);
+ }
padata_init_reorder_list(pd);
padata_init_squeues(pd);
@@ -607,11 +612,11 @@ static void __padata_stop(struct padata_instance *pinst)
}
/* Replace the internal control structure with a new one. */
-static int padata_replace_one(struct padata_shell *ps)
+static int padata_replace_one(struct padata_shell *ps, int offlining_cpu)
{
struct parallel_data *pd_new;
- pd_new = padata_alloc_pd(ps);
+ pd_new = padata_alloc_pd(ps, offlining_cpu);
if (!pd_new)
return -ENOMEM;
@@ -621,7 +626,7 @@ static int padata_replace_one(struct padata_shell *ps)
return 0;
}
-static int padata_replace(struct padata_instance *pinst)
+static int padata_replace(struct padata_instance *pinst, int offlining_cpu)
{
struct padata_shell *ps;
int err = 0;
@@ -629,7 +634,7 @@ static int padata_replace(struct padata_instance *pinst)
pinst->flags |= PADATA_RESET;
list_for_each_entry(ps, &pinst->pslist, list) {
- err = padata_replace_one(ps);
+ err = padata_replace_one(ps, offlining_cpu);
if (err)
break;
}
@@ -646,9 +651,21 @@ static int padata_replace(struct padata_instance *pinst)
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
- const struct cpumask *cpumask)
+ const struct cpumask *cpumask,
+ int offlining_cpu)
{
- if (!cpumask_intersects(cpumask, cpu_online_mask)) {
+ cpumask_copy(pinst->validate_cpumask, cpu_online_mask);
+
+ /*
+ * @offlining_cpu is still in cpu_online_mask, so remove it here for
+ * validation. Using a sub-CPUHP_TEARDOWN_CPU hotplug state where
+ * @offlining_cpu wouldn't be in the online mask doesn't work because
+ * padata_cpu_offline() can fail, but callbacks at such a state must not.
+ */
+ if (offlining_cpu >= 0)
+ __cpumask_clear_cpu(offlining_cpu, pinst->validate_cpumask);
+
+ if (!cpumask_intersects(cpumask, pinst->validate_cpumask)) {
pinst->flags |= PADATA_INVALID;
return false;
}
@@ -664,13 +681,13 @@ static int __padata_set_cpumasks(struct padata_instance *pinst,
int valid;
int err;
- valid = padata_validate_cpumask(pinst, pcpumask);
+ valid = padata_validate_cpumask(pinst, pcpumask, -1);
if (!valid) {
__padata_stop(pinst);
goto out_replace;
}
- valid = padata_validate_cpumask(pinst, cbcpumask);
+ valid = padata_validate_cpumask(pinst, cbcpumask, -1);
if (!valid)
__padata_stop(pinst);
@@ -678,7 +695,7 @@ out_replace:
cpumask_copy(pinst->cpumask.pcpu, pcpumask);
cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
- err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
+ err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst, -1);
if (valid)
__padata_start(pinst);
@@ -730,36 +747,6 @@ EXPORT_SYMBOL(padata_set_cpumask);
#ifdef CONFIG_HOTPLUG_CPU
-static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
-{
- int err = 0;
-
- if (cpumask_test_cpu(cpu, cpu_online_mask)) {
- err = padata_replace(pinst);
-
- if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
- padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
- __padata_start(pinst);
- }
-
- return err;
-}
-
-static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
-{
- int err = 0;
-
- if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
- if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
- !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
- __padata_stop(pinst);
-
- err = padata_replace(pinst);
- }
-
- return err;
-}
-
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
@@ -771,27 +758,39 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
mutex_lock(&pinst->lock);
- ret = __padata_add_cpu(pinst, cpu);
+
+ ret = padata_replace(pinst, -1);
+
+ if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu, -1) &&
+ padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, -1))
+ __padata_start(pinst);
+
mutex_unlock(&pinst->lock);
return ret;
}
-static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
+static int padata_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
struct padata_instance *pinst;
int ret;
- pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
+ pinst = hlist_entry_safe(node, struct padata_instance, cpuhp_node);
if (!pinst_has_cpu(pinst, cpu))
return 0;
mutex_lock(&pinst->lock);
- ret = __padata_remove_cpu(pinst, cpu);
+
+ if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu, cpu) ||
+ !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu, cpu))
+ __padata_stop(pinst);
+
+ ret = padata_replace(pinst, cpu);
+
mutex_unlock(&pinst->lock);
return ret;
}
@@ -802,15 +801,14 @@ static enum cpuhp_state hp_online;
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
- &pinst->cpu_dead_node);
- cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
+ cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpuhp_node);
#endif
WARN_ON(!list_empty(&pinst->pslist));
free_cpumask_var(pinst->cpumask.pcpu);
free_cpumask_var(pinst->cpumask.cbcpu);
+ free_cpumask_var(pinst->validate_cpumask);
destroy_workqueue(pinst->serial_wq);
destroy_workqueue(pinst->parallel_wq);
kfree(pinst);
@@ -971,10 +969,10 @@ struct padata_instance *padata_alloc(const char *name)
if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
goto err_free_serial_wq;
- if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
- free_cpumask_var(pinst->cpumask.pcpu);
- goto err_free_serial_wq;
- }
+ if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL))
+ goto err_free_p_mask;
+ if (!alloc_cpumask_var(&pinst->validate_cpumask, GFP_KERNEL))
+ goto err_free_cb_mask;
INIT_LIST_HEAD(&pinst->pslist);
@@ -982,7 +980,7 @@ struct padata_instance *padata_alloc(const char *name)
cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);
if (padata_setup_cpumasks(pinst))
- goto err_free_masks;
+ goto err_free_v_mask;
__padata_start(pinst);
@@ -991,18 +989,19 @@ struct padata_instance *padata_alloc(const char *name)
#ifdef CONFIG_HOTPLUG_CPU
cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
- &pinst->cpu_online_node);
- cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
- &pinst->cpu_dead_node);
+ &pinst->cpuhp_node);
#endif
cpus_read_unlock();
return pinst;
-err_free_masks:
- free_cpumask_var(pinst->cpumask.pcpu);
+err_free_v_mask:
+ free_cpumask_var(pinst->validate_cpumask);
+err_free_cb_mask:
free_cpumask_var(pinst->cpumask.cbcpu);
+err_free_p_mask:
+ free_cpumask_var(pinst->cpumask.pcpu);
err_free_serial_wq:
destroy_workqueue(pinst->serial_wq);
err_put_cpus:
@@ -1045,7 +1044,7 @@ struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
ps->pinst = pinst;
cpus_read_lock();
- pd = padata_alloc_pd(ps);
+ pd = padata_alloc_pd(ps, -1);
cpus_read_unlock();
if (!pd)
@@ -1094,31 +1093,24 @@ void __init padata_init(void)
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
- padata_cpu_online, NULL);
+ padata_cpu_online, padata_cpu_offline);
if (ret < 0)
goto err;
hp_online = ret;
-
- ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
- NULL, padata_cpu_dead);
- if (ret < 0)
- goto remove_online_state;
#endif
possible_cpus = num_possible_cpus();
padata_works = kmalloc_objs(struct padata_work, possible_cpus);
if (!padata_works)
- goto remove_dead_state;
+ goto remove_online_state;
for (i = 0; i < possible_cpus; ++i)
list_add(&padata_works[i].pw_list, &padata_free_works);
return;
-remove_dead_state:
-#ifdef CONFIG_HOTPLUG_CPU
- cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
+#ifdef CONFIG_HOTPLUG_CPU
cpuhp_remove_multi_state(hp_online);
err:
#endif
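[ The hotplug rework above is invisible to padata consumers. For orientation, a minimal consumer sketch; all names are hypothetical, and cb_cpu selection and teardown are elided: ]

#include <linux/padata.h>

struct my_job {
	struct padata_priv padata;
	/* job payload ... */
};

/* Runs on a CPU from the parallel cpumask. */
static void my_parallel(struct padata_priv *padata)
{
	/* ... do the heavy lifting, then queue for ordered completion ... */
	padata_do_serial(padata);
}

/* Runs in submission order on the chosen callback CPU. */
static void my_serial(struct padata_priv *padata)
{
	/* ... consume the result ... */
}

static int submit(struct padata_shell *ps, struct my_job *job, int cb_cpu)
{
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(ps, &job->padata, &cb_cpu);
}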
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index d3046a39ff72..6d3b6b89b1d1 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -367,17 +367,8 @@ int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
*/
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
- int rc = 0;
-
- /* Fill the key's content with a random value via RNG cipher */
- rc = crypto_get_default_rng();
- if (likely(!rc)) {
- rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
- skey->keylen);
- crypto_put_default_rng();
- }
-
- return rc;
+ /* Fill the key's content with a random value via stdrng */
+ return crypto_stdrng_get_bytes(skey->key, skey->keylen);
}
static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)