Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 15
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts | 8
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-j274.dts | 4
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-j293.dts | 4
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-j313.dts | 4
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-j456.dts | 4
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-j457.dts | 4
-rw-r--r--  arch/arm64/boot/dts/apple/t8103-jxxx.dtsi | 8
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi | 24
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi | 25
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8dxl.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts | 3
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts | 3
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-evk.dts | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mp.dtsi | 15
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8mq-thor96.dts | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qxp-mek.dts | 2
-rw-r--r--  arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts | 6
-rw-r--r--  arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp11x.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7986a.dtsi | 65
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195-demo.dts | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts | 77
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8992.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts | 19
-rw-r--r--  arch/arm64/boot/dts/qcom/sc8280xp.dtsi | 79
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8250.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8350.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 33
-rw-r--r--  arch/arm64/crypto/Kconfig | 49
-rw-r--r--  arch/arm64/crypto/Makefile | 9
-rw-r--r--  arch/arm64/crypto/aes-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/aes-cipher-glue.c | 2
-rw-r--r--  arch/arm64/crypto/aes-modes.S | 34
-rw-r--r--  arch/arm64/crypto/aes-neonbs-core.S | 16
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-core.S | 5
-rw-r--r--  arch/arm64/crypto/ghash-ce-core.S | 8
-rw-r--r--  arch/arm64/crypto/ghash-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/nh-neon-core.S | 5
-rw-r--r--  arch/arm64/crypto/nhpoly1305-neon-glue.c | 11
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sm3-neon-core.S | 601
-rw-r--r--  arch/arm64/crypto/sm3-neon-glue.c | 103
-rw-r--r--  arch/arm64/crypto/sm4-ce-asm.h | 209
-rw-r--r--  arch/arm64/crypto/sm4-ce-ccm-core.S | 329
-rw-r--r--  arch/arm64/crypto/sm4-ce-ccm-glue.c | 303
-rw-r--r--  arch/arm64/crypto/sm4-ce-cipher-glue.c | 2
-rw-r--r--  arch/arm64/crypto/sm4-ce-core.S | 1205
-rw-r--r--  arch/arm64/crypto/sm4-ce-gcm-core.S | 742
-rw-r--r--  arch/arm64/crypto/sm4-ce-gcm-glue.c | 286
-rw-r--r--  arch/arm64/crypto/sm4-ce-glue.c | 575
-rw-r--r--  arch/arm64/crypto/sm4-ce.h | 16
-rw-r--r--  arch/arm64/crypto/sm4-neon-core.S | 630
-rw-r--r--  arch/arm64/crypto/sm4-neon-glue.c | 172
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h | 2
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 2
-rw-r--r--  arch/arm64/include/asm/cputype.h | 4
-rw-r--r--  arch/arm64/include/asm/efi.h | 36
-rw-r--r--  arch/arm64/include/asm/esr.h | 9
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 23
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 42
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 76
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 3
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h | 175
-rw-r--r--  arch/arm64/include/asm/kvm_pkvm.h | 38
-rw-r--r--  arch/arm64/include/asm/mte.h | 65
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 13
-rw-r--r--  arch/arm64/include/asm/ptdump.h | 1
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 15
-rw-r--r--  arch/arm64/include/asm/uprobes.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 9
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 4
-rw-r--r--  arch/arm64/kernel/efi-entry.S | 69
-rw-r--r--  arch/arm64/kernel/efi-rt-wrapper.S | 46
-rw-r--r--  arch/arm64/kernel/efi.c | 50
-rw-r--r--  arch/arm64/kernel/elfcore.c | 63
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 2
-rw-r--r--  arch/arm64/kernel/hibernate.c | 2
-rw-r--r--  arch/arm64/kernel/image-vars.h | 23
-rw-r--r--  arch/arm64/kernel/mte.c | 21
-rw-r--r--  arch/arm64/kernel/ptrace.c | 2
-rw-r--r--  arch/arm64/kernel/signal.c | 9
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 12
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 2
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile | 2
-rw-r--r--  arch/arm64/kvm/Kconfig | 2
-rw-r--r--  arch/arm64/kvm/arm.c | 95
-rw-r--r--  arch/arm64/kvm/guest.c | 18
-rw-r--r--  arch/arm64/kvm/hyp/hyp-constants.c | 3
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/fault.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 25
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/memory.h | 27
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/mm.h | 18
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/pkvm.h | 68
-rw-r--r--  arch/arm64/kvm/hyp/include/nvhe/spinlock.h | 10
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/cache.S | 11
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c | 110
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mem_protect.c | 521
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/mm.c | 167
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/page_alloc.c | 29
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/pkvm.c | 436
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/setup.c | 98
-rw-r--r--  arch/arm64/kvm/hyp/pgtable.c | 652
-rw-r--r--  arch/arm64/kvm/hyp/vhe/Makefile | 2
-rw-r--r--  arch/arm64/kvm/irq.h | 16
-rw-r--r--  arch/arm64/kvm/mmu.c | 214
-rw-r--r--  arch/arm64/kvm/pkvm.c | 138
-rw-r--r--  arch/arm64/kvm/pmu-emul.c | 482
-rw-r--r--  arch/arm64/kvm/reset.c | 29
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 157
-rw-r--r--  arch/arm64/kvm/vgic/vgic-its.c | 25
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c | 31
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c | 8
-rw-r--r--  arch/arm64/kvm/vgic/vgic.h | 15
-rw-r--r--  arch/arm64/mm/copypage.c | 7
-rw-r--r--  arch/arm64/mm/fault.c | 8
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 104
-rw-r--r--  arch/arm64/mm/mteswap.c | 16
-rw-r--r--  arch/arm64/mm/pageattr.c | 3
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 9
152 files changed, 7941 insertions, 2276 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7fc3457a8891..c5ccca26a408 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -184,8 +184,6 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
- if $(cc-option,-fpatchable-function-entry=2)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -1988,6 +1986,7 @@ config ARM64_MTE
depends on ARM64_PAN
select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
+ select ARCH_USES_PG_ARCH_X
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
architectural support for run-time, always-on detection of
@@ -2168,18 +2167,6 @@ config STACKPROTECTOR_PER_TASK
def_bool y
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
-# The GPIO number here must be sorted by descending number. In case of
-# a multiplatform kernel, we just want the highest value required by the
-# selected platforms.
-config ARCH_NR_GPIO
- int
- default 2048 if ARCH_APPLE
- default 0
- help
- Maximum number of GPIOs in the system.
-
- If unsure, leave the default value.
-
config UNWIND_PATCH_PAC_INTO_SCS
bool "Enable shadow call stack dynamically using code patching"
# needs Clang with https://reviews.llvm.org/D111780 incorporated
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
index e3486f60645a..a1f0c38ccadd 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
@@ -131,10 +131,6 @@
};
&usb {
- phys = <&usb2_phy1>;
- phy-names = "usb2-phy1";
-};
-
-&usb2_phy0 {
- status = "disabled";
+ phys = <&usb2_phy0>, <&usb2_phy1>;
+ phy-names = "usb2-phy0", "usb2-phy1";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index c1f3ba9c39f6..b52ddc409893 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -21,6 +21,10 @@
};
};
+&bluetooth0 {
+ brcm,board-type = "apple,atlantisb";
+};
+
&wifi0 {
brcm,board-type = "apple,atlantisb";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index ecb10d237a05..151074109a11 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -17,6 +17,10 @@
model = "Apple MacBook Pro (13-inch, M1, 2020)";
};
+&bluetooth0 {
+ brcm,board-type = "apple,honshu";
+};
+
&wifi0 {
brcm,board-type = "apple,honshu";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
index df741737b8e6..bc1f865aa790 100644
--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
@@ -17,6 +17,10 @@
model = "Apple MacBook Air (M1, 2020)";
};
+&bluetooth0 {
+ brcm,board-type = "apple,shikoku";
+};
+
&wifi0 {
brcm,board-type = "apple,shikoku";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
index 83bb4dc32af9..2db425ceb30f 100644
--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
@@ -21,6 +21,10 @@
};
};
+&bluetooth0 {
+ brcm,board-type = "apple,capri";
+};
+
&wifi0 {
brcm,board-type = "apple,capri";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
index 67e433d5f8ba..3821ff146c56 100644
--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
@@ -21,6 +21,10 @@
};
};
+&bluetooth0 {
+ brcm,board-type = "apple,santorini";
+};
+
&wifi0 {
brcm,board-type = "apple,santorini";
};
diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
index 9706d26f9c64..5988a4eb6efa 100644
--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
@@ -11,6 +11,7 @@
/ {
aliases {
+ bluetooth0 = &bluetooth0;
serial0 = &serial0;
serial2 = &serial2;
wifi0 = &wifi0;
@@ -77,6 +78,13 @@
local-mac-address = [00 00 00 00 00 00];
apple,antenna-sku = "XX";
};
+
+ bluetooth0: bluetooth@0,1 {
+ compatible = "pci14e4,5f69";
+ reg = <0x10100 0x0 0x0 0x0 0x0>;
+ /* To be filled by the loader */
+ local-bd-address = [00 00 00 00 00 00];
+ };
};
&nco_clkref {
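
Taken together with the per-board files earlier in this series, this dtsi declares the PCI Bluetooth function once and each board's .dts fills in its Broadcom board-type string. A minimal sketch of the combined result (the board-type value here is illustrative, not taken from this diff):

	/* shared t8103-jxxx.dtsi: node with BD address left for the loader */
	bluetooth0: bluetooth@0,1 {
		compatible = "pci14e4,5f69";
		reg = <0x10100 0x0 0x0 0x0 0x0>;
		local-bd-address = [00 00 00 00 00 00];
	};

	/* per-board .dts: select the board-specific calibration data */
	&bluetooth0 {
		brcm,board-type = "apple,example-board"; /* hypothetical value */
	};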
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
index 5a8d85a7d161..bbdf989058ff 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
@@ -110,7 +110,7 @@
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
index d237162a8744..5c4d7eef8b61 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
@@ -24,9 +24,12 @@
/* these aliases provide the FMan ports mapping */
enet0: ethernet@e0000 {
+ pcs-handle-names = "qsgmii";
};
enet1: ethernet@e2000 {
+ pcsphy-handle = <&pcsphy1>, <&qsgmiib_pcs1>;
+ pcs-handle-names = "sgmii", "qsgmii";
};
enet2: ethernet@e4000 {
@@ -36,11 +39,32 @@
};
enet4: ethernet@e8000 {
+ pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs2>;
+ pcs-handle-names = "sgmii", "qsgmii";
};
enet5: ethernet@ea000 {
+ pcsphy-handle = <&pcsphy5>, <&qsgmiib_pcs3>;
+ pcs-handle-names = "sgmii", "qsgmii";
};
enet6: ethernet@f0000 {
};
+
+ mdio@e1000 {
+ qsgmiib_pcs1: ethernet-pcs@1 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x1>;
+ };
+
+ qsgmiib_pcs2: ethernet-pcs@2 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x2>;
+ };
+
+ qsgmiib_pcs3: ethernet-pcs@3 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x3>;
+ };
+ };
};
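
The hunk above registers the QSGMII PCS instances on the FMan MDIO bus and points each MAC at one PCS per supported in-band mode, matched positionally by name. A condensed sketch of a single MAC/PCS pairing, using only nodes shown in this hunk:

	mdio@e1000 {
		qsgmiib_pcs1: ethernet-pcs@1 {
			compatible = "fsl,lynx-pcs";
			reg = <0x1>;
		};
	};

	enet1: ethernet@e2000 {
		/* one handle per supported mode, matched by pcs-handle-names */
		pcsphy-handle = <&pcsphy1>, <&qsgmiib_pcs1>;
		pcs-handle-names = "sgmii", "qsgmii";
	};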
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
index 9b726c2a4842..dda27ed7aaf2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
@@ -89,7 +89,7 @@
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
index d6caaea57d90..4e3345093943 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046-post.dtsi
@@ -23,6 +23,8 @@
&fman0 {
/* these aliases provide the FMan ports mapping */
enet0: ethernet@e0000 {
+ pcsphy-handle = <&qsgmiib_pcs3>;
+ pcs-handle-names = "qsgmii";
};
enet1: ethernet@e2000 {
@@ -35,14 +37,37 @@
};
enet4: ethernet@e8000 {
+ pcsphy-handle = <&pcsphy4>, <&qsgmiib_pcs1>;
+ pcs-handle-names = "sgmii", "qsgmii";
};
enet5: ethernet@ea000 {
+ pcsphy-handle = <&pcsphy5>, <&pcsphy5>;
+ pcs-handle-names = "sgmii", "qsgmii";
};
enet6: ethernet@f0000 {
};
enet7: ethernet@f2000 {
+ pcsphy-handle = <&pcsphy7>, <&qsgmiib_pcs2>, <&pcsphy7>;
+ pcs-handle-names = "sgmii", "qsgmii", "xfi";
+ };
+
+ mdio@eb000 {
+ qsgmiib_pcs1: ethernet-pcs@1 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x1>;
+ };
+
+ qsgmiib_pcs2: ethernet-pcs@2 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x2>;
+ };
+
+ qsgmiib_pcs3: ethernet-pcs@3 {
+ compatible = "fsl,lynx-pcs";
+ reg = <0x3>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
index b2fcbba60d3a..3b0ed9305f2b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
@@ -88,7 +88,7 @@
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
index 41d8b15f25a5..aa52ff73ff9e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
@@ -53,7 +53,7 @@
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
index 1bfbce69cc8b..ee8e932628d1 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
@@ -136,7 +136,7 @@
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
index ef6c8967533e..d4867d6cf47c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
@@ -245,7 +245,7 @@
&i2c3 {
status = "okay";
- i2c-switch@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9540";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
index f598669e742f..52c5a43b30a0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
@@ -103,7 +103,7 @@
&i2c0 {
status = "okay";
- pca9547@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
reg = <0x77>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
index 3d9647b3da14..537cecb13dd0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
@@ -44,7 +44,7 @@
&i2c0 {
status = "okay";
- pca9547@75 {
+ i2c-mux@75 {
compatible = "nxp,pca9547";
reg = <0x75>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
index afb455210bd0..d32a52ab00a4 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
@@ -54,7 +54,7 @@
&i2c0 {
status = "okay";
- i2c-switch@77 {
+ i2c-mux@77 {
compatible = "nxp,pca9547";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
index 0c64b9194621..214f21bd0cb4 100644
--- a/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8dxl.dtsi
@@ -164,7 +164,7 @@
sc_pwrkey: keys {
compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
- linux,keycode = <KEY_POWER>;
+ linux,keycodes = <KEY_POWER>;
wakeup-source;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
index 03266bd90a06..169f047fbca5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
@@ -120,7 +120,7 @@
&ecspi2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_espi2>;
- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
status = "okay";
eeprom@0 {
@@ -316,7 +316,7 @@
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82
MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82
MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82
- MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
index 24f61db33eba..9889319d4f04 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
@@ -88,6 +88,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_watchdog_gpio>;
compatible = "linux,wdt-gpio";
+ always-running;
gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
hw_algo = "level";
/* Reset triggers in 2..3 seconds */
@@ -275,7 +276,7 @@
compatible = "rohm,bd71847";
reg = <0x4b>;
#clock-cells = <0>;
- clocks = <&clk_xtal32k 0>;
+ clocks = <&clk_xtal32k>;
clock-output-names = "clk-32k-out";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pmic>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
index 74c09891600f..6357078185ed 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
@@ -214,7 +214,7 @@
pinctrl-0 = <&pinctrl_i2c3>;
status = "okay";
- i2cmux@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9540";
reg = <0x70>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 83c8f715cd90..b1f11098d248 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -602,7 +602,7 @@
#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
-#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
index 3ea73a6886ff..f6ad1a4b8b66 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx-0x-rs232-rts.dtso
@@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
index 2fa635e1c1a8..1f8ea20dfafc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx-0x-rs232-rts.dtso
@@ -33,7 +33,6 @@
pinctrl-0 = <&pinctrl_uart2>;
rts-gpios = <&gpio5 29 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio5 28 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 244ef8d6cc68..7761d5671cb1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
@@ -222,7 +222,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_bten>;
cts-gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 750a1f07ecb7..64b366e83fa1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -733,7 +733,6 @@
dtr-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -749,7 +748,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio4 9 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -758,7 +756,6 @@
pinctrl-0 = <&pinctrl_uart4>, <&pinctrl_uart4_gpio>;
cts-gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio5 12 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -771,6 +768,7 @@
&usbotg2 {
dr_mode = "host";
vbus-supply = <&reg_usb2_vbus>;
+ over-current-active-low;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
index 32872b0b1aaf..e8bc1fccc47b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
@@ -664,7 +664,6 @@
pinctrl-0 = <&pinctrl_uart1>, <&pinctrl_uart1_gpio>;
rts-gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio4 24 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
@@ -681,7 +680,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
@@ -699,7 +697,6 @@
dtr-gpios = <&gpio4 3 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio4 4 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
index 8ce562246a08..acc2ba8e00a8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7903.dts
@@ -581,7 +581,6 @@
dtr-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
dsr-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
dcd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
index c2a5c2f7b204..7c3f5c54f040 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
@@ -9,6 +9,7 @@
simple-audio-card,bitclock-master = <&dailink_master>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "imx8mm-wm8904";
simple-audio-card,routing =
"Headphone Jack", "HPOUTL",
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
index 73cc3fafa018..b2bcd2282170 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
@@ -11,6 +11,7 @@
simple-audio-card,bitclock-master = <&dailink_master>;
simple-audio-card,format = "i2s";
simple-audio-card,frame-master = <&dailink_master>;
+ simple-audio-card,mclk-fs = <256>;
simple-audio-card,name = "imx8mm-nau8822";
simple-audio-card,routing =
"Headphones", "LHP",
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 0d454e0e2f7c..702d87621bb4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -98,6 +98,7 @@
off-on-delay = <500000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_eth>;
+ regulator-always-on;
regulator-boot-on;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
index b9444e4a3d2d..7c12518dbc96 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
@@ -643,7 +643,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
rts-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
cts-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index d4c7ca16abd0..f2d93437084b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -36,8 +36,8 @@
pcie0_refclk: pcie0-refclk {
compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <100000000>;
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
};
reg_can1_stby: regulator-can1-stby {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index 79b290a002c1..ecc4bce6db97 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -99,7 +99,6 @@
regulators {
buck1: BUCK1 {
- regulator-compatible = "BUCK1";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <2187500>;
regulator-boot-on;
@@ -108,7 +107,6 @@
};
buck2: BUCK2 {
- regulator-compatible = "BUCK2";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <2187500>;
regulator-boot-on;
@@ -119,7 +117,6 @@
};
buck4: BUCK4 {
- regulator-compatible = "BUCK4";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
@@ -127,7 +124,6 @@
};
buck5: BUCK5 {
- regulator-compatible = "BUCK5";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
@@ -135,7 +131,6 @@
};
buck6: BUCK6 {
- regulator-compatible = "BUCK6";
regulator-min-microvolt = <600000>;
regulator-max-microvolt = <3400000>;
regulator-boot-on;
@@ -143,7 +138,6 @@
};
ldo1: LDO1 {
- regulator-compatible = "LDO1";
regulator-min-microvolt = <1600000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -151,7 +145,6 @@
};
ldo2: LDO2 {
- regulator-compatible = "LDO2";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1150000>;
regulator-boot-on;
@@ -159,7 +152,6 @@
};
ldo3: LDO3 {
- regulator-compatible = "LDO3";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -167,13 +159,11 @@
};
ldo4: LDO4 {
- regulator-compatible = "LDO4";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <3300000>;
};
ldo5: LDO5 {
- regulator-compatible = "LDO5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index ceeca4966fc5..8eb7d5ee38da 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -623,7 +623,6 @@
pinctrl-0 = <&pinctrl_uart3>, <&pinctrl_uart3_gpio>;
cts-gpios = <&gpio3 21 GPIO_ACTIVE_LOW>;
rts-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
- uart-has-rtscts;
status = "okay";
bluetooth {
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 7a6e6221f421..03034b439c1f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -524,6 +524,7 @@
compatible = "fsl,imx8mp-gpc";
reg = <0x303a0000 0x1000>;
interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <3>;
@@ -590,7 +591,7 @@
reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>;
};
- pgc_hsiomix: power-domains@17 {
+ pgc_hsiomix: power-domain@17 {
#power-domain-cells = <0>;
reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
@@ -1297,7 +1298,7 @@
reg = <0x32f10100 0x8>,
<0x381f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
@@ -1310,9 +1311,9 @@
usb_dwc3_0: usb@38100000 {
compatible = "snps,dwc3";
reg = <0x38100000 0x10000>;
- clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy0>, <&usb3_phy0>;
@@ -1339,7 +1340,7 @@
reg = <0x32f10108 0x8>,
<0x382f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
@@ -1352,9 +1353,9 @@
usb_dwc3_1: usb@38200000 {
compatible = "snps,dwc3";
reg = <0x38200000 0x10000>;
- clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
+ clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
- <&clk IMX8MP_CLK_USB_ROOT>;
+ <&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy1>, <&usb3_phy1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
index 9dda2a1554c3..8614c18b5998 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
@@ -133,7 +133,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
- i2cmux@70 {
+ i2c-mux@70 {
compatible = "nxp,pca9546";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1_pca9546>;
@@ -216,7 +216,7 @@
pinctrl-0 = <&pinctrl_i2c4>;
status = "okay";
- pca9546: i2cmux@70 {
+ pca9546: i2c-mux@70 {
compatible = "nxp,pca9546";
reg = <0x70>;
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
index 5d5aa6537225..6e6182709d22 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
@@ -339,7 +339,7 @@
bus-width = <4>;
non-removable;
no-sd;
- no-emmc;
+ no-mmc;
status = "okay";
brcmf: wifi@1 {
@@ -359,7 +359,7 @@
cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
bus-width = <4>;
no-sdio;
- no-emmc;
+ no-mmc;
disable-wp;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index 07d8dd8160f6..afa883389456 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -61,7 +61,7 @@
pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>;
status = "okay";
- i2c-switch@71 {
+ i2c-mux@71 {
compatible = "nxp,pca9646", "nxp,pca9546";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index 69786c326db0..27f9a9f33134 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -74,7 +74,7 @@
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
- MX93_PAD_SD1_CLK__USDHC1_CLK 0x17fe
+ MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe
MX93_PAD_SD1_CMD__USDHC1_CMD 0x13fe
MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe
MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe
@@ -84,7 +84,7 @@
MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe
MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe
MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe
- MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17fe
+ MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe
>;
};
@@ -102,7 +102,7 @@
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
- MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe
+ MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe
MX93_PAD_SD2_CMD__USDHC2_CMD 0x13fe
MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe
MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
diff --git a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
index 7308f7b6b22c..8bce64069138 100644
--- a/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
@@ -98,7 +98,7 @@
uart1: serial@12100 {
compatible = "snps,dw-apb-uart";
- reg = <0x11000 0x100>;
+ reg = <0x12100 0x100>;
reg-shift = <2>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
reg-io-width = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index d6c0990a267d..7d0043824f2a 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -58,6 +58,8 @@
ranges = <0x0 0x0 ADDRESSIFY(CP11X_BASE) 0x2000000>;
CP11X_LABEL(ethernet): ethernet@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "marvell,armada-7k-pp22";
reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>;
clocks = <&CP11X_LABEL(clk) 1 3>, <&CP11X_LABEL(clk) 1 9>,
@@ -69,7 +71,7 @@
status = "disabled";
dma-coherent;
- CP11X_LABEL(eth0): eth0 {
+ CP11X_LABEL(eth0): ethernet-port@0 {
interrupts = <39 IRQ_TYPE_LEVEL_HIGH>,
<43 IRQ_TYPE_LEVEL_HIGH>,
<47 IRQ_TYPE_LEVEL_HIGH>,
@@ -83,12 +85,13 @@
interrupt-names = "hif0", "hif1", "hif2",
"hif3", "hif4", "hif5", "hif6", "hif7",
"hif8", "link";
- port-id = <0>;
+ reg = <0>;
+ port-id = <0>; /* For backward compatibility. */
gop-port-id = <0>;
status = "disabled";
};
- CP11X_LABEL(eth1): eth1 {
+ CP11X_LABEL(eth1): ethernet-port@1 {
interrupts = <40 IRQ_TYPE_LEVEL_HIGH>,
<44 IRQ_TYPE_LEVEL_HIGH>,
<48 IRQ_TYPE_LEVEL_HIGH>,
@@ -102,12 +105,13 @@
interrupt-names = "hif0", "hif1", "hif2",
"hif3", "hif4", "hif5", "hif6", "hif7",
"hif8", "link";
- port-id = <1>;
+ reg = <1>;
+ port-id = <1>; /* For backward compatibility. */
gop-port-id = <2>;
status = "disabled";
};
- CP11X_LABEL(eth2): eth2 {
+ CP11X_LABEL(eth2): ethernet-port@2 {
interrupts = <41 IRQ_TYPE_LEVEL_HIGH>,
<45 IRQ_TYPE_LEVEL_HIGH>,
<49 IRQ_TYPE_LEVEL_HIGH>,
@@ -121,7 +125,8 @@
interrupt-names = "hif0", "hif1", "hif2",
"hif3", "hif4", "hif5", "hif6", "hif7",
"hif8", "link";
- port-id = <2>;
+ reg = <2>;
+ port-id = <2>; /* For backward compatibility. */
gop-port-id = <3>;
status = "disabled";
};
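
This conversion makes the ports proper addressable children: the controller gains #address-cells/#size-cells, each port gets a unit address plus reg, and the legacy port-id property is kept so existing consumers continue to work. Condensed from the hunks above:

	CP11X_LABEL(ethernet): ethernet@0 {
		#address-cells = <1>;
		#size-cells = <0>;

		CP11X_LABEL(eth0): ethernet-port@0 {
			reg = <0>;
			port-id = <0>; /* For backward compatibility. */
			gop-port-id = <0>;
		};
	};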
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index ed703025a7cc..0e9406fc63e2 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -77,6 +77,47 @@
no-map;
reg = <0 0x4fc00000 0 0x00100000>;
};
+
+ wo_emi0: wo-emi@4fd00000 {
+ reg = <0 0x4fd00000 0 0x40000>;
+ no-map;
+ };
+
+ wo_emi1: wo-emi@4fd40000 {
+ reg = <0 0x4fd40000 0 0x40000>;
+ no-map;
+ };
+
+ wo_ilm0: wo-ilm@151e0000 {
+ reg = <0 0x151e0000 0 0x8000>;
+ no-map;
+ };
+
+ wo_ilm1: wo-ilm@151f0000 {
+ reg = <0 0x151f0000 0 0x8000>;
+ no-map;
+ };
+
+ wo_data: wo-data@4fd80000 {
+ reg = <0 0x4fd80000 0 0x240000>;
+ no-map;
+ };
+
+ wo_dlm0: wo-dlm@151e8000 {
+ reg = <0 0x151e8000 0 0x2000>;
+ no-map;
+ };
+
+ wo_dlm1: wo-dlm@151f8000 {
+ reg = <0 0x151f8000 0 0x2000>;
+ no-map;
+ };
+
+ wo_boot: wo-boot@15194000 {
+ reg = <0 0x15194000 0 0x1000>;
+ no-map;
+ };
+
};
timer {
@@ -298,6 +339,11 @@
reg = <0 0x15010000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
+ memory-region = <&wo_emi0>, <&wo_ilm0>, <&wo_dlm0>,
+ <&wo_data>, <&wo_boot>;
+ memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
+ "wo-data", "wo-boot";
+ mediatek,wo-ccif = <&wo_ccif0>;
};
wed1: wed@15011000 {
@@ -306,6 +352,25 @@
reg = <0 0x15011000 0 0x1000>;
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>;
+ memory-region = <&wo_emi1>, <&wo_ilm1>, <&wo_dlm1>,
+ <&wo_data>, <&wo_boot>;
+ memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
+ "wo-data", "wo-boot";
+ mediatek,wo-ccif = <&wo_ccif1>;
+ };
+
+ wo_ccif0: syscon@151a5000 {
+ compatible = "mediatek,mt7986-wo-ccif", "syscon";
+ reg = <0 0x151a5000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ wo_ccif1: syscon@151ad000 {
+ compatible = "mediatek,mt7986-wo-ccif", "syscon";
+ reg = <0 0x151ad000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
};
eth: ethernet@15100000 {
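
The WED additions above tie together three pieces introduced by this patch: the carved-out no-map firmware regions, the WED block itself, and the new wo-ccif syscon acting as the interface toward the WiFi offload core. Condensed for one instance (the wed0 label is assumed from context; only wed1's label is visible in the hunk):

	wed0: wed@15010000 {
		memory-region = <&wo_emi0>, <&wo_ilm0>, <&wo_dlm0>,
				<&wo_data>, <&wo_boot>;
		memory-region-names = "wo-emi", "wo-ilm", "wo-dlm",
				      "wo-data", "wo-boot";
		mediatek,wo-ccif = <&wo_ccif0>;
	};

	wo_ccif0: syscon@151a5000 {
		compatible = "mediatek,mt7986-wo-ccif", "syscon";
		reg = <0 0x151a5000 0 0x1000>;
	};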
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index a70b669c49ba..402136bfd535 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1678,7 +1678,7 @@
<GIC_SPI 278 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "job", "mmu", "gpu";
- clocks = <&topckgen CLK_TOP_MFGPLL_CK>;
+ clocks = <&mfgcfg CLK_MFG_BG3D>;
power-domains =
<&spm MT8183_POWER_DOMAIN_MFG_CORE0>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index 4fbd99eb496a..dec85d254838 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -56,10 +56,10 @@
#size-cells = <2>;
ranges;
- /* 192 KiB reserved for ARM Trusted Firmware (BL31) */
+ /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
bl31_secmon_reserved: secmon@54600000 {
no-map;
- reg = <0 0x54600000 0x0 0x30000>;
+ reg = <0 0x54600000 0x0 0x200000>;
};
/* 12 MiB reserved for OP-TEE (BL32)
diff --git a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
index 87c90e93667f..79de9cc395c4 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
@@ -3,6 +3,7 @@
* Copyright (c) 2015, LGE Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
+ * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
*/
/dts-v1/;
@@ -51,6 +52,11 @@
reg = <0 0x03400000 0 0x1200000>;
no-map;
};
+
+ removed_region: reserved@5000000 {
+ reg = <0 0x05000000 0 0x2200000>;
+ no-map;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
index b242c272d2af..fcca1ba94da6 100644
--- a/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
+++ b/arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
@@ -11,6 +11,12 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/gpio-keys.h>
+/delete-node/ &adsp_mem;
+/delete-node/ &audio_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &peripheral_region;
+/delete-node/ &rmtfs_mem;
+
/ {
model = "Xiaomi Mi 4C";
compatible = "xiaomi,libra", "qcom,msm8992";
@@ -70,25 +76,67 @@
#size-cells = <2>;
ranges;
- /* This is for getting crash logs using Android downstream kernels */
- ramoops@dfc00000 {
- compatible = "ramoops";
- reg = <0x0 0xdfc00000 0x0 0x40000>;
- console-size = <0x10000>;
- record-size = <0x10000>;
- ftrace-size = <0x10000>;
- pmsg-size = <0x20000>;
+ memory_hole: hole@6400000 {
+ reg = <0 0x06400000 0 0x600000>;
+ no-map;
+ };
+
+ memory_hole2: hole2@6c00000 {
+ reg = <0 0x06c00000 0 0x2400000>;
+ no-map;
+ };
+
+ mpss_mem: mpss@9000000 {
+ reg = <0 0x09000000 0 0x5a00000>;
+ no-map;
+ };
+
+ tzapp: tzapp@ea00000 {
+ reg = <0 0x0ea00000 0 0x1900000>;
+ no-map;
+ };
+
+ mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
+ reg = <0 0xca0b0000 0 0x10000>;
+ no-map;
+ };
+
+ rmtfs_mem: rmtfs@ca100000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0xca100000 0 0x180000>;
+ no-map;
+
+ qcom,client-id = <1>;
};
- modem_region: modem_region@9000000 {
- reg = <0x0 0x9000000 0x0 0x5a00000>;
+ audio_mem: audio@cb000000 {
+ reg = <0 0xcb000000 0 0x400000>;
+ no-map;
+ };
+
+ qseecom_mem: qseecom@cb400000 {
+ reg = <0 0xcb400000 0 0x1c00000>;
+ no-map;
+ };
+
+ adsp_rfsa_mem: adsp-rfsa@cd000000 {
+ reg = <0 0xcd000000 0 0x10000>;
no-map;
};
- tzapp: modem_region@ea00000 {
- reg = <0x0 0xea00000 0x0 0x1900000>;
+ sensor_rfsa_mem: sensor-rfsa@cd010000 {
+ reg = <0 0xcd010000 0 0x10000>;
no-map;
};
+
+ ramoops@dfc00000 {
+ compatible = "ramoops";
+ reg = <0 0xdfc00000 0 0x40000>;
+ console-size = <0x10000>;
+ record-size = <0x10000>;
+ ftrace-size = <0x10000>;
+ pmsg-size = <0x20000>;
+ };
};
};
@@ -130,11 +178,6 @@
status = "okay";
};
-&peripheral_region {
- reg = <0x0 0x7400000 0x0 0x1c00000>;
- no-map;
-};
-
&pm8994_spmi_regulators {
VDD_APC0: s8 {
regulator-min-microvolt = <680000>;
diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi
index 10adb4986ef1..02fc3795dbfd 100644
--- a/arch/arm64/boot/dts/qcom/msm8992.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi
@@ -37,10 +37,6 @@
compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
};
-&tcsr_mutex {
- compatible = "qcom,sfpb-mutex";
-};
-
&timer {
interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
diff --git a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
index 85abff0e9b3f..7b0f62144c3e 100644
--- a/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
+++ b/arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
@@ -9,9 +9,6 @@
#include "msm8994.dtsi"
-/* Angler's firmware does not report where the memory is allocated */
-/delete-node/ &cont_splash_mem;
-
/ {
model = "Huawei Nexus 6P";
compatible = "huawei,angler", "qcom,msm8994";
@@ -28,6 +25,22 @@
chosen {
stdout-path = "serial0:115200n8";
};
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ tzapp_mem: tzapp@4800000 {
+ reg = <0 0x04800000 0 0x1900000>;
+ no-map;
+ };
+
+ removed_region: reserved@6300000 {
+ reg = <0 0x06300000 0 0xD00000>;
+ no-map;
+ };
+ };
};
&blsp1_uart2 {
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index 109c9d2b684d..71cf81a8eb4d 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -10,6 +10,7 @@
#include <dt-bindings/interconnect/qcom,sc8280xp.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/thermal/thermal.h>
@@ -762,7 +763,7 @@
<0>,
<0>,
<0>,
- <&usb_0_ssphy>,
+ <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
<0>,
<0>,
<0>,
@@ -770,7 +771,7 @@
<0>,
<0>,
<0>,
- <&usb_1_ssphy>,
+ <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
<0>,
<0>,
<0>,
@@ -1673,42 +1674,26 @@
};
};
- usb_0_qmpphy: phy-wrapper@88ec000 {
+ usb_0_qmpphy: phy@88eb000 {
compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
- reg = <0 0x088ec000 0 0x1e4>,
- <0 0x088eb000 0 0x40>,
- <0 0x088ed000 0 0x1c8>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x088eb000 0 0x4000>;
clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB4_EUD_CLKREF_CLK>,
- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
- clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
- <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+ <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- power-domains = <&gcc USB30_PRIM_GDSC>;
+ #clock-cells = <1>;
+ #phy-cells = <1>;
status = "disabled";
-
- usb_0_ssphy: usb3-phy@88eb400 {
- reg = <0 0x088eb400 0 0x100>,
- <0 0x088eb600 0 0x3ec>,
- <0 0x088ec400 0 0x364>,
- <0 0x088eba00 0 0x100>,
- <0 0x088ebc00 0 0x3ec>,
- <0 0x088ec200 0 0x18>;
- #phy-cells = <0>;
- #clock-cells = <0>;
- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb0_phy_pipe_clk_src";
- };
};
usb_1_hsphy: phy@8902000 {
@@ -1725,42 +1710,26 @@
status = "disabled";
};
- usb_1_qmpphy: phy-wrapper@8904000 {
+ usb_1_qmpphy: phy@8903000 {
compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
- reg = <0 0x08904000 0 0x1e4>,
- <0 0x08903000 0 0x40>,
- <0 0x08905000 0 0x1c8>;
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
+ reg = <0 0x08903000 0 0x4000>;
clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
- <&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB4_CLKREF_CLK>,
- <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
- clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
+ <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+ clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+ power-domains = <&gcc USB30_SEC_GDSC>;
resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
<&gcc GCC_USB4_1_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- power-domains = <&gcc USB30_SEC_GDSC>;
+ #clock-cells = <1>;
+ #phy-cells = <1>;
status = "disabled";
-
- usb_1_ssphy: usb3-phy@8903400 {
- reg = <0 0x08903400 0 0x100>,
- <0 0x08903600 0 0x3ec>,
- <0 0x08904400 0 0x364>,
- <0 0x08903a00 0 0x100>,
- <0 0x08903c00 0 0x3ec>,
- <0 0x08904200 0 0x18>;
- #phy-cells = <0>;
- #clock-cells = <0>;
- clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
- clock-names = "pipe0";
- clock-output-names = "usb1_phy_pipe_clk_src";
- };
};
pmu@9091000 {
@@ -1910,7 +1879,7 @@
reg = <0 0x0a600000 0 0xcd00>;
interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x820 0x0>;
- phys = <&usb_0_hsphy>, <&usb_0_ssphy>;
+ phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
};
};
@@ -1964,7 +1933,7 @@
reg = <0 0x0a800000 0 0xcd00>;
interrupts = <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>;
iommus = <&apps_smmu 0x860 0x0>;
- phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+ phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
};
};
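
With the QMP wrapper collapsed into a single phy@ node, consumers select the USB3 or DP function through indices from dt-bindings instead of referencing a now-deleted child node; a sketch of the consumer side, as used by the dwc3 hunks above:

	#include <dt-bindings/phy/phy-qcom-qmp.h>

	usb@a600000 {
		phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
		phy-names = "usb2-phy", "usb3-phy";
	};

The pipe-clock users move the same way, from &usb_0_ssphy to <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>, as in the GCC clock hunk near the top of this file.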
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index dab5579946f3..927032863e2f 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -334,7 +334,6 @@
exit-latency-us = <6562>;
min-residency-us = <9987>;
local-timer-stop;
- status = "disabled";
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 245dce24ec59..fb3cd20a82b5 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -2382,8 +2382,8 @@
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "core", "xo";
resets = <&gcc GCC_SDCC2_BCR>;
- interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
- <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
+ interconnects = <&aggre2_noc MASTER_SDCC_2 &mc_virt SLAVE_EBI1>,
+ <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_SDCC_2>;
interconnect-names = "sdhc-ddr","cpu-sdhc";
iommus = <&apps_smmu 0x4a0 0x0>;
power-domains = <&rpmhpd SM8350_CX>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 307c76cd8544..4325cb8526ed 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -100,6 +100,22 @@
};
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ rproc_0_fw_image: memory@3ed00000 {
+ no-map;
+ reg = <0x0 0x3ed00000 0x0 0x40000>;
+ };
+
+ rproc_1_fw_image: memory@3ef00000 {
+ no-map;
+ reg = <0x0 0x3ef00000 0x0 0x40000>;
+ };
+ };
+
zynqmp_ipi: zynqmp_ipi {
compatible = "xlnx,zynqmp-ipi-mailbox";
interrupt-parent = <&gic>;
@@ -203,6 +219,23 @@
ranges;
};
+ remoteproc {
+ compatible = "xlnx,zynqmp-r5fss";
+ xlnx,cluster-mode = <1>;
+
+ r5f-0 {
+ compatible = "xlnx,zynqmp-r5f";
+ power-domains = <&zynqmp_firmware PD_RPU_0>;
+ memory-region = <&rproc_0_fw_image>;
+ };
+
+ r5f-1 {
+ compatible = "xlnx,zynqmp-r5f";
+ power-domains = <&zynqmp_firmware PD_RPU_1>;
+ memory-region = <&rproc_1_fw_image>;
+ };
+ };
+
amba: axi {
compatible = "simple-bus";
#address-cells = <2>;
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 8bd80508a710..6d06b448a66e 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -6,8 +6,8 @@ config CRYPTO_GHASH_ARM64_CE
tristate "Hash functions: GHASH (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
- select CRYPTO_GF128MUL
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_GF128MUL
select CRYPTO_AEAD
help
GCM GHASH function (NIST SP800-38D)
@@ -96,6 +96,17 @@ config CRYPTO_SHA3_ARM64
Architecture: arm64 using:
- ARMv8.2 Crypto Extensions
+config CRYPTO_SM3_NEON
+ tristate "Hash functions: SM3 (NEON)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+ help
+ SM3 (ShangMi 3) secure hash function (OSCCA GM/T 0004-2012)
+
+ Architecture: arm64 using:
+ - NEON (Advanced SIMD) extensions
+
config CRYPTO_SM3_ARM64_CE
tristate "Hash functions: SM3 (ARMv8.2 Crypto Extensions)"
depends on KERNEL_MODE_NEON
@@ -220,7 +231,7 @@ config CRYPTO_SM4_ARM64_CE
- NEON (Advanced SIMD) extensions
config CRYPTO_SM4_ARM64_CE_BLK
- tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR (ARMv8 Crypto Extensions)"
+ tristate "Ciphers: SM4, modes: ECB/CBC/CFB/CTR/XTS (ARMv8 Crypto Extensions)"
depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER
select CRYPTO_SM4
@@ -231,6 +242,8 @@ config CRYPTO_SM4_ARM64_CE_BLK
- CBC (Cipher Block Chaining) mode (NIST SP800-38A)
- CFB (Cipher Feedback) mode (NIST SP800-38A)
- CTR (Counter) mode (NIST SP800-38A)
+ - XTS (XOR Encrypt XOR with ciphertext stealing) mode (NIST SP800-38E
+ and IEEE 1619)
Architecture: arm64 using:
- ARMv8 Crypto Extensions
@@ -268,6 +281,38 @@ config CRYPTO_AES_ARM64_CE_CCM
- ARMv8 Crypto Extensions
- NEON (Advanced SIMD) extensions
+config CRYPTO_SM4_ARM64_CE_CCM
+ tristate "AEAD cipher: SM4 in CCM mode (ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AEAD
+ select CRYPTO_SM4
+ select CRYPTO_SM4_ARM64_CE_BLK
+ help
+ AEAD cipher: SM4 cipher algorithms (OSCCA GB/T 32907-2016) with
+ CCM (Counter with Cipher Block Chaining-Message Authentication Code)
+ authenticated encryption mode (NIST SP800-38C)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+ - NEON (Advanced SIMD) extensions
+
+config CRYPTO_SM4_ARM64_CE_GCM
+ tristate "AEAD cipher: SM4 in GCM mode (ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_ALGAPI
+ select CRYPTO_AEAD
+ select CRYPTO_SM4
+ select CRYPTO_SM4_ARM64_CE_BLK
+ help
+ AEAD cipher: SM4 cipher algorithms (OSCCA GB/T 32907-2016) with
+ GCM (Galois/Counter Mode) authenticated encryption mode (NIST SP800-38D)
+
+ Architecture: arm64 using:
+ - ARMv8 Crypto Extensions
+ - PMULL (Polynomial Multiply Long) instructions
+ - NEON (Advanced SIMD) extensions
+
config CRYPTO_CRCT10DIF_ARM64_CE
tristate "CRCT10DIF (PMULL)"
depends on KERNEL_MODE_NEON && CRC_T10DIF
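
The new symbols above can be exercised by building them as modules; a minimal config fragment limited to the options this hunk introduces (their stated KERNEL_MODE_NEON dependency must already be satisfied):

	CONFIG_CRYPTO_SM3_NEON=m
	CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m
	CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m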
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 24bb0c4610de..4818e204c2ac 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -17,6 +17,9 @@ sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+obj-$(CONFIG_CRYPTO_SM3_NEON) += sm3-neon.o
+sm3-neon-y := sm3-neon-glue.o sm3-neon-core.o
+
obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
@@ -26,6 +29,12 @@ sm4-ce-cipher-y := sm4-ce-cipher-glue.o sm4-ce-cipher-core.o
obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_BLK) += sm4-ce.o
sm4-ce-y := sm4-ce-glue.o sm4-ce-core.o
+obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_CCM) += sm4-ce-ccm.o
+sm4-ce-ccm-y := sm4-ce-ccm-glue.o sm4-ce-ccm-core.o
+
+obj-$(CONFIG_CRYPTO_SM4_ARM64_CE_GCM) += sm4-ce-gcm.o
+sm4-ce-gcm-y := sm4-ce-gcm-glue.o sm4-ce-gcm-core.o
+
obj-$(CONFIG_CRYPTO_SM4_ARM64_NEON_BLK) += sm4-neon.o
sm4-neon-y := sm4-neon-glue.o sm4-neon-core.o
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index 56a5f6f0b0c1..e921823ca103 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -9,9 +9,9 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
#include <linux/module.h>
#include "aes-ce-setkey.h"
diff --git a/arch/arm64/crypto/aes-cipher-glue.c b/arch/arm64/crypto/aes-cipher-glue.c
index 8caf6dfefce8..4ec55e568941 100644
--- a/arch/arm64/crypto/aes-cipher-glue.c
+++ b/arch/arm64/crypto/aes-cipher-glue.c
@@ -6,7 +6,7 @@
*/
#include <crypto/aes.h>
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <linux/module.h>
asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 5abc834271f4..0e834a2c062c 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -52,8 +52,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
*/
AES_FUNC_START(aes_ecb_encrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 0
enc_prepare w3, x2, x5
@@ -77,14 +76,13 @@ ST5( st1 {v4.16b}, [x0], #16 )
subs w4, w4, #1
bne .Lecbencloop
.Lecbencout:
- ldp x29, x30, [sp], #16
+ frame_pop
ret
AES_FUNC_END(aes_ecb_encrypt)
AES_FUNC_START(aes_ecb_decrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 0
dec_prepare w3, x2, x5
@@ -108,7 +106,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
subs w4, w4, #1
bne .Lecbdecloop
.Lecbdecout:
- ldp x29, x30, [sp], #16
+ frame_pop
ret
AES_FUNC_END(aes_ecb_decrypt)
@@ -171,9 +169,6 @@ AES_FUNC_END(aes_cbc_encrypt)
AES_FUNC_END(aes_essiv_cbc_encrypt)
AES_FUNC_START(aes_essiv_cbc_decrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
-
ld1 {cbciv.16b}, [x5] /* get iv */
mov w8, #14 /* AES-256: 14 rounds */
@@ -182,11 +177,9 @@ AES_FUNC_START(aes_essiv_cbc_decrypt)
b .Lessivcbcdecstart
AES_FUNC_START(aes_cbc_decrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
-
ld1 {cbciv.16b}, [x5] /* get iv */
.Lessivcbcdecstart:
+ frame_push 0
dec_prepare w3, x2, x6
.LcbcdecloopNx:
@@ -236,7 +229,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
bne .Lcbcdecloop
.Lcbcdecout:
st1 {cbciv.16b}, [x5] /* return iv */
- ldp x29, x30, [sp], #16
+ frame_pop
ret
AES_FUNC_END(aes_cbc_decrypt)
AES_FUNC_END(aes_essiv_cbc_decrypt)
@@ -337,8 +330,7 @@ AES_FUNC_END(aes_cbc_cts_decrypt)
BLOCKS .req x13
BLOCKS_W .req w13
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 0
enc_prepare ROUNDS_W, KEY, IV_PART
ld1 {vctr.16b}, [IV]
@@ -481,7 +473,7 @@ ST5( st1 {v4.16b}, [OUT], #16 )
.if !\xctr
st1 {vctr.16b}, [IV] /* return next CTR value */
.endif
- ldp x29, x30, [sp], #16
+ frame_pop
ret
.Lctrtail\xctr:
@@ -645,8 +637,7 @@ AES_FUNC_END(aes_xctr_encrypt)
.endm
AES_FUNC_START(aes_xts_encrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 0
ld1 {v4.16b}, [x6]
xts_load_mask v8
@@ -704,7 +695,7 @@ AES_FUNC_START(aes_xts_encrypt)
st1 {v0.16b}, [x0]
.Lxtsencret:
st1 {v4.16b}, [x6]
- ldp x29, x30, [sp], #16
+ frame_pop
ret
.LxtsencctsNx:
@@ -732,8 +723,7 @@ AES_FUNC_START(aes_xts_encrypt)
AES_FUNC_END(aes_xts_encrypt)
AES_FUNC_START(aes_xts_decrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 0
/* subtract 16 bytes if we are doing CTS */
sub w8, w4, #0x10
@@ -794,7 +784,7 @@ AES_FUNC_START(aes_xts_decrypt)
b .Lxtsdecloop
.Lxtsdecout:
st1 {v4.16b}, [x6]
- ldp x29, x30, [sp], #16
+ frame_pop
ret
.Lxtsdeccts:
diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index d427f4556b6e..7278a37c2d5c 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -760,7 +760,7 @@ SYM_FUNC_START_LOCAL(__xts_crypt8)
eor v6.16b, v6.16b, v31.16b
eor v7.16b, v7.16b, v16.16b
- stp q16, q17, [sp, #16]
+ stp q16, q17, [x6]
mov bskey, x2
mov rounds, x3
@@ -768,8 +768,8 @@ SYM_FUNC_START_LOCAL(__xts_crypt8)
SYM_FUNC_END(__xts_crypt8)
.macro __xts_crypt, do8, o0, o1, o2, o3, o4, o5, o6, o7
- stp x29, x30, [sp, #-48]!
- mov x29, sp
+ frame_push 0, 32
+ add x6, sp, #.Lframe_local_offset
ld1 {v25.16b}, [x5]
@@ -781,7 +781,7 @@ SYM_FUNC_END(__xts_crypt8)
eor v18.16b, \o2\().16b, v27.16b
eor v19.16b, \o3\().16b, v28.16b
- ldp q24, q25, [sp, #16]
+ ldp q24, q25, [x6]
eor v20.16b, \o4\().16b, v29.16b
eor v21.16b, \o5\().16b, v30.16b
@@ -795,7 +795,7 @@ SYM_FUNC_END(__xts_crypt8)
b.gt 0b
st1 {v25.16b}, [x5]
- ldp x29, x30, [sp], #48
+ frame_pop
ret
.endm
@@ -820,9 +820,7 @@ SYM_FUNC_END(aesbs_xts_decrypt)
* int rounds, int blocks, u8 iv[])
*/
SYM_FUNC_START(aesbs_ctr_encrypt)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
-
+ frame_push 0
ldp x7, x8, [x5]
ld1 {v0.16b}, [x5]
CPU_LE( rev x7, x7 )
@@ -862,6 +860,6 @@ CPU_LE( rev x8, x8 )
b.gt 0b
st1 {v0.16b}, [x5]
- ldp x29, x30, [sp], #16
+ frame_pop
ret
SYM_FUNC_END(aesbs_ctr_encrypt)
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index dce6dcebfca1..5604de61d06d 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -429,7 +429,7 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
umov w0, v0.h[0]
.ifc \p, p8
- ldp x29, x30, [sp], #16
+ frame_pop
.endif
ret
@@ -466,8 +466,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
- stp x29, x30, [sp, #-16]!
- mov x29, sp
+ frame_push 1
crc_t10dif_pmull p8
SYM_FUNC_END(crc_t10dif_pmull_p8)
diff --git a/arch/arm64/crypto/ghash-ce-core.S b/arch/arm64/crypto/ghash-ce-core.S
index ebe5558929b7..23ee9a5eaf27 100644
--- a/arch/arm64/crypto/ghash-ce-core.S
+++ b/arch/arm64/crypto/ghash-ce-core.S
@@ -436,9 +436,7 @@ SYM_FUNC_END(pmull_ghash_update_p8)
.align 6
.macro pmull_gcm_do_crypt, enc
- stp x29, x30, [sp, #-32]!
- mov x29, sp
- str x19, [sp, #24]
+ frame_push 1
load_round_keys x7, x6, x8
@@ -529,7 +527,7 @@ CPU_LE( rev w8, w8 )
.endif
bne 0b
-3: ldp x19, x10, [sp, #24]
+3: ldr x10, [sp, #.Lframe_local_offset]
cbz x10, 5f // output tag?
ld1 {INP3.16b}, [x10] // load lengths[]
@@ -562,7 +560,7 @@ CPU_LE( rev w8, w8 )
smov w0, v0.b[0] // return b0
.endif
-4: ldp x29, x30, [sp], #32
+4: frame_pop
ret
5:
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 15794fe21a0b..e5e9adc1fcf4 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -508,7 +508,7 @@ static void __exit ghash_ce_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-static const struct cpu_feature ghash_cpu_feature[] = {
+static const struct cpu_feature __maybe_unused ghash_cpu_feature[] = {
{ cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, ghash_cpu_feature);
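The __maybe_unused annotation above matters for non-modular builds: without
CONFIG_MODULES, MODULE_DEVICE_TABLE() expands to nothing and the table is
otherwise unreferenced, which can trip -Wunused-const-variable. A minimal
sketch of the pattern (illustrative, not part of this patch):

	#include <linux/cpufeature.h>
	#include <linux/module.h>

	/* Only consumed by module autoloading; harmless when built in. */
	static const struct cpu_feature __maybe_unused example_cpu_feature[] = {
		{ cpu_feature(PMULL) },
		{ }
	};
	MODULE_DEVICE_TABLE(cpu, example_cpu_feature);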
diff --git a/arch/arm64/crypto/nh-neon-core.S b/arch/arm64/crypto/nh-neon-core.S
index 51c0a534ef87..13eda08fda1e 100644
--- a/arch/arm64/crypto/nh-neon-core.S
+++ b/arch/arm64/crypto/nh-neon-core.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
KEY .req x0
MESSAGE .req x1
@@ -58,11 +59,11 @@
/*
* void nh_neon(const u32 *key, const u8 *message, size_t message_len,
- * u8 hash[NH_HASH_BYTES])
+ * __le64 hash[NH_NUM_PASSES])
*
* It's guaranteed that message_len % 16 == 0.
*/
-SYM_FUNC_START(nh_neon)
+SYM_TYPED_FUNC_START(nh_neon)
ld1 {K0.4s,K1.4s}, [KEY], #32
movi PASS0_SUMS.2d, #0
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index c5405e6a6db7..cd882c35d925 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -14,14 +14,7 @@
#include <linux/module.h>
asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
- u8 hash[NH_HASH_BYTES]);
-
-/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
-static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
- __le64 hash[NH_NUM_PASSES])
-{
- nh_neon(key, message, message_len, (u8 *)hash);
-}
+ __le64 hash[NH_NUM_PASSES]);
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
@@ -33,7 +26,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
unsigned int n = min_t(unsigned int, srclen, SZ_4K);
kernel_neon_begin();
- crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
+ crypto_nhpoly1305_update_helper(desc, src, n, nh_neon);
kernel_neon_end();
src += n;
srclen -= n;
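The wrapper removal above leans on the SYM_TYPED_FUNC_START() change in
nh-neon-core.S: under kCFI an indirect call is only permitted when the
target function carries the type hash matching the pointer's prototype,
and the typed annotation attaches that hash to the assembly symbol. A
hypothetical sketch of the rule (names are illustrative):

	typedef void (*nh_fn_t)(const u32 *key, const u8 *message,
				size_t message_len, __le64 hash[NH_NUM_PASSES]);

	static void nh_run(nh_fn_t fn, const u32 *key, const u8 *msg,
			   size_t len, __le64 *out)
	{
		fn(key, msg, len, out);	/* CFI-checked indirect call */
	}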
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index ee98954ae8ca..54bf6ebcfffb 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -84,7 +84,7 @@ static struct shash_alg sm3_alg = {
.base.cra_driver_name = "sm3-ce",
.base.cra_blocksize = SM3_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
- .base.cra_priority = 200,
+ .base.cra_priority = 400,
};
static int __init sm3_ce_mod_init(void)
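The priority bump from 200 to 400 keeps the Crypto Extensions driver ahead
of the NEON implementation added below (registered at 200): the crypto API
resolves a cra_name to the highest-priority matching implementation. An
illustrative lookup, assuming both drivers are available:

	struct crypto_shash *tfm = crypto_alloc_shash("sm3", 0, 0);

	if (!IS_ERR(tfm))
		pr_info("sm3 backend: %s\n", crypto_shash_driver_name(tfm));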
diff --git a/arch/arm64/crypto/sm3-neon-core.S b/arch/arm64/crypto/sm3-neon-core.S
new file mode 100644
index 000000000000..4357e0e51be3
--- /dev/null
+++ b/arch/arm64/crypto/sm3-neon-core.S
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * sm3-neon-core.S - SM3 secure hash using NEON instructions
+ *
+ * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64
+ *
+ * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (c) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <asm/assembler.h>
+
+/* Context structure */
+
+#define state_h0 0
+#define state_h1 4
+#define state_h2 8
+#define state_h3 12
+#define state_h4 16
+#define state_h5 20
+#define state_h6 24
+#define state_h7 28
+
+/* Stack structure */
+
+#define STACK_W_SIZE (32 * 2 * 3)
+
+#define STACK_W (0)
+#define STACK_SIZE (STACK_W + STACK_W_SIZE)
+
+/* Register macros */
+
+#define RSTATE x0
+#define RDATA x1
+#define RNBLKS x2
+#define RKPTR x28
+#define RFRAME x29
+
+#define ra w3
+#define rb w4
+#define rc w5
+#define rd w6
+#define re w7
+#define rf w8
+#define rg w9
+#define rh w10
+
+#define t0 w11
+#define t1 w12
+#define t2 w13
+#define t3 w14
+#define t4 w15
+#define t5 w16
+#define t6 w17
+
+#define k_even w19
+#define k_odd w20
+
+#define addr0 x21
+#define addr1 x22
+
+#define s0 w23
+#define s1 w24
+#define s2 w25
+#define s3 w26
+
+#define W0 v0
+#define W1 v1
+#define W2 v2
+#define W3 v3
+#define W4 v4
+#define W5 v5
+
+#define XTMP0 v6
+#define XTMP1 v7
+#define XTMP2 v16
+#define XTMP3 v17
+#define XTMP4 v18
+#define XTMP5 v19
+#define XTMP6 v20
+
+/* Helper macros. */
+
+#define _(...) /*_*/
+
+#define clear_vec(x) \
+ movi x.8h, #0;
+
+#define rolw(o, a, n) \
+ ror o, a, #(32 - n);
+
+/* Round function macros. */
+
+#define GG1_1(x, y, z, o, t) \
+ eor o, x, y;
+#define GG1_2(x, y, z, o, t) \
+ eor o, o, z;
+#define GG1_3(x, y, z, o, t)
+
+#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t)
+#define FF1_2(x, y, z, o, t)
+#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t)
+
+#define GG2_1(x, y, z, o, t) \
+ bic o, z, x;
+#define GG2_2(x, y, z, o, t) \
+ and t, y, x;
+#define GG2_3(x, y, z, o, t) \
+ eor o, o, t;
+
+#define FF2_1(x, y, z, o, t) \
+ eor o, x, y;
+#define FF2_2(x, y, z, o, t) \
+ and t, x, y; \
+ and o, o, z;
+#define FF2_3(x, y, z, o, t) \
+ eor o, o, t;
+
+#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
+ K_LOAD(round); \
+ ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \
+ rolw(t0, a, 12); /* rol(a, 12) => t0 */ \
+ IOP(1, iop_param); \
+ FF##i##_1(a, b, c, t1, t2); \
+ ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \
+ add k, k, e; \
+ IOP(2, iop_param); \
+ GG##i##_1(e, f, g, t3, t4); \
+ FF##i##_2(a, b, c, t1, t2); \
+ IOP(3, iop_param); \
+ add k, k, t0; \
+ add h, h, t5; \
+ add d, d, t6; /* w1w2 + d => d */ \
+ IOP(4, iop_param); \
+ rolw(k, k, 7); /* rol(t0 + e + k, 7) => k */ \
+ GG##i##_2(e, f, g, t3, t4); \
+ add h, h, k; /* h + w1 + k => h */ \
+ IOP(5, iop_param); \
+ FF##i##_3(a, b, c, t1, t2); \
+ eor t0, t0, k; /* k ^ t0 => t0 */ \
+ GG##i##_3(e, f, g, t3, t4); \
+ add d, d, t1; /* FF(a,b,c) + d => d */ \
+ IOP(6, iop_param); \
+ add t3, t3, h; /* GG(e,f,g) + h => t3 */ \
+ rolw(b, b, 9); /* rol(b, 9) => b */ \
+ eor h, t3, t3, ror #(32-9); \
+ IOP(7, iop_param); \
+ add d, d, t0; /* t0 + d => d */ \
+ rolw(f, f, 19); /* rol(f, 19) => f */ \
+ IOP(8, iop_param); \
+ eor h, h, t3, ror #(32-17); /* P0(t3) => h */
+
+#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
+ R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
+
+#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
+ R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)
+
+#define KL(round) \
+ ldp k_even, k_odd, [RKPTR, #(4*(round))];
+
+/* Input expansion macros. */
+
+/* Byte-swapped input address. */
+#define IW_W_ADDR(round, widx, offs) \
+ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))
+
+/* Expanded input address. */
+#define XW_W_ADDR(round, widx, offs) \
+ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))
+
+/* Rounds 1-12, byte-swapped input block addresses. */
+#define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32)
+#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48)
+
+/* Rounds 1-12, expanded input block addresses. */
+#define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0)
+#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16)
+
+/* Input block loading.
+ * Interleaving within the round function is needed for in-order CPUs. */
+#define LOAD_W_VEC_1_1() \
+ add addr0, sp, #IW_W1_ADDR(0, 0);
+#define LOAD_W_VEC_1_2() \
+ add addr1, sp, #IW_W1_ADDR(4, 0);
+#define LOAD_W_VEC_1_3() \
+ ld1 {W0.16b}, [RDATA], #16;
+#define LOAD_W_VEC_1_4() \
+ ld1 {W1.16b}, [RDATA], #16;
+#define LOAD_W_VEC_1_5() \
+ ld1 {W2.16b}, [RDATA], #16;
+#define LOAD_W_VEC_1_6() \
+ ld1 {W3.16b}, [RDATA], #16;
+#define LOAD_W_VEC_1_7() \
+ rev32 XTMP0.16b, W0.16b;
+#define LOAD_W_VEC_1_8() \
+ rev32 XTMP1.16b, W1.16b;
+#define LOAD_W_VEC_2_1() \
+ rev32 XTMP2.16b, W2.16b;
+#define LOAD_W_VEC_2_2() \
+ rev32 XTMP3.16b, W3.16b;
+#define LOAD_W_VEC_2_3() \
+ eor XTMP4.16b, XTMP1.16b, XTMP0.16b;
+#define LOAD_W_VEC_2_4() \
+ eor XTMP5.16b, XTMP2.16b, XTMP1.16b;
+#define LOAD_W_VEC_2_5() \
+ st1 {XTMP0.16b}, [addr0], #16;
+#define LOAD_W_VEC_2_6() \
+ st1 {XTMP4.16b}, [addr0]; \
+ add addr0, sp, #IW_W1_ADDR(8, 0);
+#define LOAD_W_VEC_2_7() \
+ eor XTMP6.16b, XTMP3.16b, XTMP2.16b;
+#define LOAD_W_VEC_2_8() \
+ ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */
+#define LOAD_W_VEC_3_1() \
+ mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */
+#define LOAD_W_VEC_3_2() \
+ st1 {XTMP1.16b}, [addr1], #16;
+#define LOAD_W_VEC_3_3() \
+ st1 {XTMP5.16b}, [addr1]; \
+ ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */
+#define LOAD_W_VEC_3_4() \
+ ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */
+#define LOAD_W_VEC_3_5() \
+ ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */
+#define LOAD_W_VEC_3_6() \
+ st1 {XTMP2.16b}, [addr0], #16;
+#define LOAD_W_VEC_3_7() \
+ st1 {XTMP6.16b}, [addr0];
+#define LOAD_W_VEC_3_8() \
+ ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */
+
+#define LOAD_W_VEC_1(iop_num, ...) \
+ LOAD_W_VEC_1_##iop_num()
+#define LOAD_W_VEC_2(iop_num, ...) \
+ LOAD_W_VEC_2_##iop_num()
+#define LOAD_W_VEC_3(iop_num, ...) \
+ LOAD_W_VEC_3_##iop_num()
+
+/* Message scheduling. Note: 3 words per vector register.
+ * Interleaving within the round function is needed for in-order CPUs. */
+#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \
+ /* Load (w[i - 16]) => XTMP0 */ \
+ /* Load (w[i - 13]) => XTMP5 */ \
+ ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */
+#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \
+ ext XTMP5.16b, w1.16b, w1.16b, #12;
+#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \
+ ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */
+#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \
+ ext XTMP5.16b, XTMP5.16b, w2.16b, #12;
+#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \
+ /* w[i - 9] == w3 */ \
+ /* W3 ^ XTMP0 => XTMP0 */ \
+ eor XTMP0.16b, XTMP0.16b, w3.16b;
+#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \
+ /* w[i - 3] == w5 */ \
+ /* rol(w5, 15) ^ XTMP0 => XTMP0 */ \
+ /* rol(XTMP5, 7) => XTMP1 */ \
+ add addr0, sp, #XW_W1_ADDR((round), 0); \
+ shl XTMP2.4s, w5.4s, #15;
+#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \
+ shl XTMP1.4s, XTMP5.4s, #7;
+#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \
+ sri XTMP2.4s, w5.4s, #(32-15);
+#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \
+ sri XTMP1.4s, XTMP5.4s, #(32-7);
+#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \
+ eor XTMP0.16b, XTMP0.16b, XTMP2.16b;
+#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \
+ /* w[i - 6] == W4 */ \
+ /* W4 ^ XTMP1 => XTMP1 */ \
+ eor XTMP1.16b, XTMP1.16b, w4.16b;
+#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \
+ /* P1(XTMP0) ^ XTMP1 => W0 */ \
+ shl XTMP3.4s, XTMP0.4s, #15;
+#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \
+ shl XTMP4.4s, XTMP0.4s, #23;
+#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \
+ eor w0.16b, XTMP1.16b, XTMP0.16b;
+#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \
+ sri XTMP3.4s, XTMP0.4s, #(32-15);
+#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \
+ sri XTMP4.4s, XTMP0.4s, #(32-23);
+#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \
+ eor w0.16b, w0.16b, XTMP3.16b;
+#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \
+ /* Load (w[i - 3]) => XTMP2 */ \
+ ext XTMP2.16b, w4.16b, w4.16b, #12;
+#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \
+ eor w0.16b, w0.16b, XTMP4.16b;
+#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \
+ ext XTMP2.16b, XTMP2.16b, w5.16b, #12;
+#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \
+ /* W1 ^ W2 => XTMP3 */ \
+ eor XTMP3.16b, XTMP2.16b, w0.16b;
+#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5)
+#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \
+ st1 {XTMP2.16b-XTMP3.16b}, [addr0];
+#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5)
+
+#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5)
+#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5)
+#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5)
+
+#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0)
+#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0)
+#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0)
+
+#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1)
+#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1)
+#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1)
+
+#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2)
+#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2)
+#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2)
+
+#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3)
+#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3)
+#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3)
+
+#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \
+ SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4)
+#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \
+ SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4)
+#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \
+ SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4)
+
+
+ /*
+ * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'.
+ *
+ * void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
+ * int blocks)
+ */
+ .text
+.align 3
+SYM_TYPED_FUNC_START(sm3_neon_transform)
+ ldp ra, rb, [RSTATE, #0]
+ ldp rc, rd, [RSTATE, #8]
+ ldp re, rf, [RSTATE, #16]
+ ldp rg, rh, [RSTATE, #24]
+
+ stp x28, x29, [sp, #-16]!
+ stp x19, x20, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ mov RFRAME, sp
+
+ sub addr0, sp, #STACK_SIZE
+ adr_l RKPTR, .LKtable
+ and sp, addr0, #(~63)
+
+ /* Preload first block. */
+ LOAD_W_VEC_1(1, 0)
+ LOAD_W_VEC_1(2, 0)
+ LOAD_W_VEC_1(3, 0)
+ LOAD_W_VEC_1(4, 0)
+ LOAD_W_VEC_1(5, 0)
+ LOAD_W_VEC_1(6, 0)
+ LOAD_W_VEC_1(7, 0)
+ LOAD_W_VEC_1(8, 0)
+ LOAD_W_VEC_2(1, 0)
+ LOAD_W_VEC_2(2, 0)
+ LOAD_W_VEC_2(3, 0)
+ LOAD_W_VEC_2(4, 0)
+ LOAD_W_VEC_2(5, 0)
+ LOAD_W_VEC_2(6, 0)
+ LOAD_W_VEC_2(7, 0)
+ LOAD_W_VEC_2(8, 0)
+ LOAD_W_VEC_3(1, 0)
+ LOAD_W_VEC_3(2, 0)
+ LOAD_W_VEC_3(3, 0)
+ LOAD_W_VEC_3(4, 0)
+ LOAD_W_VEC_3(5, 0)
+ LOAD_W_VEC_3(6, 0)
+ LOAD_W_VEC_3(7, 0)
+ LOAD_W_VEC_3(8, 0)
+
+.balign 16
+.Loop:
+ /* Transform 0-3 */
+ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
+ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
+ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
+ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)
+
+ /* Transform 4-7 + Precalc 12-14 */
+ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
+ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
+ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
+ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)
+
+ /* Transform 8-11 + Precalc 12-17 */
+ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12)
+ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15)
+ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15)
+ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15)
+
+ /* Transform 12-14 + Precalc 18-20 */
+ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18)
+ R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18)
+ R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18)
+
+ /* Transform 15-17 + Precalc 21-23 */
+ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21)
+
+ /* Transform 18-20 + Precalc 24-26 */
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24)
+
+ /* Transform 21-23 + Precalc 27-29 */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27)
+
+ /* Transform 24-26 + Precalc 30-32 */
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30)
+
+ /* Transform 27-29 + Precalc 33-35 */
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33)
+
+ /* Transform 30-32 + Precalc 36-38 */
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36)
+
+ /* Transform 33-35 + Precalc 39-41 */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39)
+
+ /* Transform 36-38 + Precalc 42-44 */
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42)
+
+ /* Transform 39-41 + Precalc 45-47 */
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45)
+
+ /* Transform 42-44 + Precalc 48-50 */
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48)
+
+ /* Transform 45-47 + Precalc 51-53 */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51)
+
+ /* Transform 48-50 + Precalc 54-56 */
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54)
+
+ /* Transform 51-53 + Precalc 57-59 */
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57)
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57)
+
+ /* Transform 54-56 + Precalc 60-62 */
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60)
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60)
+
+ /* Transform 57-59 + Precalc 63 */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63)
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63)
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63)
+
+ /* Transform 60 */
+ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _)
+ subs RNBLKS, RNBLKS, #1
+ b.eq .Lend
+
+ /* Transform 61-63 + Preload next block */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _)
+ ldp s0, s1, [RSTATE, #0]
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _)
+ ldp s2, s3, [RSTATE, #8]
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _)
+
+ /* Update the chaining variables. */
+ eor ra, ra, s0
+ eor rb, rb, s1
+ ldp s0, s1, [RSTATE, #16]
+ eor rc, rc, s2
+ ldp k_even, k_odd, [RSTATE, #24]
+ eor rd, rd, s3
+ eor re, re, s0
+ stp ra, rb, [RSTATE, #0]
+ eor rf, rf, s1
+ stp rc, rd, [RSTATE, #8]
+ eor rg, rg, k_even
+ stp re, rf, [RSTATE, #16]
+ eor rh, rh, k_odd
+ stp rg, rh, [RSTATE, #24]
+ b .Loop
+
+.Lend:
+ /* Transform 61-63 */
+ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _)
+ ldp s0, s1, [RSTATE, #0]
+ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _)
+ ldp s2, s3, [RSTATE, #8]
+ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _)
+
+ /* Update the chaining variables. */
+ eor ra, ra, s0
+ clear_vec(W0)
+ eor rb, rb, s1
+ clear_vec(W1)
+ ldp s0, s1, [RSTATE, #16]
+ clear_vec(W2)
+ eor rc, rc, s2
+ clear_vec(W3)
+ ldp k_even, k_odd, [RSTATE, #24]
+ clear_vec(W4)
+ eor rd, rd, s3
+ clear_vec(W5)
+ eor re, re, s0
+ clear_vec(XTMP0)
+ stp ra, rb, [RSTATE, #0]
+ clear_vec(XTMP1)
+ eor rf, rf, s1
+ clear_vec(XTMP2)
+ stp rc, rd, [RSTATE, #8]
+ clear_vec(XTMP3)
+ eor rg, rg, k_even
+ clear_vec(XTMP4)
+ stp re, rf, [RSTATE, #16]
+ clear_vec(XTMP5)
+ eor rh, rh, k_odd
+ clear_vec(XTMP6)
+ stp rg, rh, [RSTATE, #24]
+
+ /* Clear message expansion area */
+ add addr0, sp, #STACK_W
+ st1 {W0.16b-W3.16b}, [addr0], #64
+ st1 {W0.16b-W3.16b}, [addr0], #64
+ st1 {W0.16b-W3.16b}, [addr0]
+
+ mov sp, RFRAME
+
+ ldp x25, x26, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x19, x20, [sp], #16
+ ldp x28, x29, [sp], #16
+
+ ret
+SYM_FUNC_END(sm3_neon_transform)
+
+
+ .section ".rodata", "a"
+
+ .align 4
+.LKtable:
+ .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb
+ .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc
+ .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce
+ .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6
+ .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
+ .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
+ .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
+ .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
+ .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53
+ .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d
+ .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4
+ .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43
+ .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
+ .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
+ .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
+ .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
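For reference, the eor/stp sequences at .Lend (and in the block-preload
path) implement SM3's feed-forward: the new chaining value is the XOR of
the compressed working variables with the previous state. A plain-C
restatement of just that step:

	/* V(i+1) = ABCDEFGH ^ V(i), applied once per 64-byte block. */
	static void sm3_feed_forward(u32 state[8], const u32 work[8])
	{
		int i;

		for (i = 0; i < 8; i++)
			state[i] ^= work[i];
	}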
diff --git a/arch/arm64/crypto/sm3-neon-glue.c b/arch/arm64/crypto/sm3-neon-glue.c
new file mode 100644
index 000000000000..7182ee683f14
--- /dev/null
+++ b/arch/arm64/crypto/sm3-neon-glue.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * sm3-neon-glue.c - SM3 secure hash using NEON instructions
+ *
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+
+asmlinkage void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
+ int blocks);
+
+static int sm3_neon_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ if (!crypto_simd_usable()) {
+ sm3_update(shash_desc_ctx(desc), data, len);
+ return 0;
+ }
+
+ kernel_neon_begin();
+ sm3_base_do_update(desc, data, len, sm3_neon_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sm3_neon_final(struct shash_desc *desc, u8 *out)
+{
+ if (!crypto_simd_usable()) {
+ sm3_final(shash_desc_ctx(desc), out);
+ return 0;
+ }
+
+ kernel_neon_begin();
+ sm3_base_do_finalize(desc, sm3_neon_transform);
+ kernel_neon_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static int sm3_neon_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!crypto_simd_usable()) {
+ struct sm3_state *sctx = shash_desc_ctx(desc);
+
+ if (len)
+ sm3_update(sctx, data, len);
+ sm3_final(sctx, out);
+ return 0;
+ }
+
+ kernel_neon_begin();
+ if (len)
+ sm3_base_do_update(desc, data, len, sm3_neon_transform);
+ sm3_base_do_finalize(desc, sm3_neon_transform);
+ kernel_neon_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static struct shash_alg sm3_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = sm3_neon_update,
+ .final = sm3_neon_final,
+ .finup = sm3_neon_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base.cra_name = "sm3",
+ .base.cra_driver_name = "sm3-neon",
+ .base.cra_blocksize = SM3_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+};
+
+static int __init sm3_neon_init(void)
+{
+ return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_neon_fini(void)
+{
+ crypto_unregister_shash(&sm3_alg);
+}
+
+module_init(sm3_neon_init);
+module_exit(sm3_neon_fini);
+
+MODULE_DESCRIPTION("SM3 secure hash using NEON instructions");
+MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
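A hypothetical in-kernel consumer of the "sm3" shash registered above,
with error handling trimmed for brevity:

	u8 digest[SM3_DIGEST_SIZE];
	struct crypto_shash *tfm = crypto_alloc_shash("sm3", 0, 0);
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	crypto_shash_digest(desc, data, len, digest);
	crypto_free_shash(tfm);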
diff --git a/arch/arm64/crypto/sm4-ce-asm.h b/arch/arm64/crypto/sm4-ce-asm.h
new file mode 100644
index 000000000000..7ea98e42e779
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-asm.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 helper macros for Crypto Extensions
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#define SM4_PREPARE(ptr) \
+ ld1 {v24.16b-v27.16b}, [ptr], #64; \
+ ld1 {v28.16b-v31.16b}, [ptr];
+
+#define SM4_CRYPT_BLK_BE(b0) \
+ sm4e b0.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ rev32 b0.16b, b0.16b;
+
+#define SM4_CRYPT_BLK(b0) \
+ rev32 b0.16b, b0.16b; \
+ SM4_CRYPT_BLK_BE(b0);
+
+#define SM4_CRYPT_BLK2_BE(b0, b1) \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b;
+
+#define SM4_CRYPT_BLK2(b0, b1) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ SM4_CRYPT_BLK2_BE(b0, b1);
+
+#define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b2.4s, v24.4s; \
+ sm4e b3.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b2.4s, v25.4s; \
+ sm4e b3.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b2.4s, v26.4s; \
+ sm4e b3.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b2.4s, v27.4s; \
+ sm4e b3.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b2.4s, v28.4s; \
+ sm4e b3.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b2.4s, v29.4s; \
+ sm4e b3.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b2.4s, v30.4s; \
+ sm4e b3.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ sm4e b2.4s, v31.4s; \
+ sm4e b3.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ rev64 b2.4s, b2.4s; \
+ rev64 b3.4s, b3.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ ext b2.16b, b2.16b, b2.16b, #8; \
+ ext b3.16b, b3.16b, b3.16b, #8; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b;
+
+#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
+
+#define SM4_CRYPT_BLK8_BE(b0, b1, b2, b3, b4, b5, b6, b7) \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b2.4s, v24.4s; \
+ sm4e b3.4s, v24.4s; \
+ sm4e b4.4s, v24.4s; \
+ sm4e b5.4s, v24.4s; \
+ sm4e b6.4s, v24.4s; \
+ sm4e b7.4s, v24.4s; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b2.4s, v25.4s; \
+ sm4e b3.4s, v25.4s; \
+ sm4e b4.4s, v25.4s; \
+ sm4e b5.4s, v25.4s; \
+ sm4e b6.4s, v25.4s; \
+ sm4e b7.4s, v25.4s; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b2.4s, v26.4s; \
+ sm4e b3.4s, v26.4s; \
+ sm4e b4.4s, v26.4s; \
+ sm4e b5.4s, v26.4s; \
+ sm4e b6.4s, v26.4s; \
+ sm4e b7.4s, v26.4s; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b2.4s, v27.4s; \
+ sm4e b3.4s, v27.4s; \
+ sm4e b4.4s, v27.4s; \
+ sm4e b5.4s, v27.4s; \
+ sm4e b6.4s, v27.4s; \
+ sm4e b7.4s, v27.4s; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b2.4s, v28.4s; \
+ sm4e b3.4s, v28.4s; \
+ sm4e b4.4s, v28.4s; \
+ sm4e b5.4s, v28.4s; \
+ sm4e b6.4s, v28.4s; \
+ sm4e b7.4s, v28.4s; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b2.4s, v29.4s; \
+ sm4e b3.4s, v29.4s; \
+ sm4e b4.4s, v29.4s; \
+ sm4e b5.4s, v29.4s; \
+ sm4e b6.4s, v29.4s; \
+ sm4e b7.4s, v29.4s; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b2.4s, v30.4s; \
+ sm4e b3.4s, v30.4s; \
+ sm4e b4.4s, v30.4s; \
+ sm4e b5.4s, v30.4s; \
+ sm4e b6.4s, v30.4s; \
+ sm4e b7.4s, v30.4s; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ sm4e b2.4s, v31.4s; \
+ sm4e b3.4s, v31.4s; \
+ sm4e b4.4s, v31.4s; \
+ sm4e b5.4s, v31.4s; \
+ sm4e b6.4s, v31.4s; \
+ sm4e b7.4s, v31.4s; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ rev64 b2.4s, b2.4s; \
+ rev64 b3.4s, b3.4s; \
+ rev64 b4.4s, b4.4s; \
+ rev64 b5.4s, b5.4s; \
+ rev64 b6.4s, b6.4s; \
+ rev64 b7.4s, b7.4s; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ ext b2.16b, b2.16b, b2.16b, #8; \
+ ext b3.16b, b3.16b, b3.16b, #8; \
+ ext b4.16b, b4.16b, b4.16b, #8; \
+ ext b5.16b, b5.16b, b5.16b, #8; \
+ ext b6.16b, b6.16b, b6.16b, #8; \
+ ext b7.16b, b7.16b, b7.16b, #8; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b;
+
+#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ rev32 b4.16b, b4.16b; \
+ rev32 b5.16b, b5.16b; \
+ rev32 b6.16b, b6.16b; \
+ rev32 b7.16b, b7.16b; \
+ SM4_CRYPT_BLK8_BE(b0, b1, b2, b3, b4, b5, b6, b7);
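The 2-/4-/8-block macros above interleave the same sm4e step across
independent blocks so that in-order cores can overlap instruction
latencies instead of serialising on one block's dependency chain. A loose
C analogy (sm4e_round() is an assumed stand-in for one sm4e instruction
consuming four round keys):

	static void sm4_rounds_interleaved(u8 state[][16], const u8 rk4[8][16],
					   int nblocks)
	{
		int r, b;

		for (r = 0; r < 8; r++)		/* 8 sm4e steps = 32 rounds */
			for (b = 0; b < nblocks; b++)
				sm4e_round(state[b], rk4[r]);
	}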
diff --git a/arch/arm64/crypto/sm4-ce-ccm-core.S b/arch/arm64/crypto/sm4-ce-ccm-core.S
new file mode 100644
index 000000000000..fa85856f33ce
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-ccm-core.S
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions
+ * as specified in RFC 8998
+ * https://datatracker.ietf.org/doc/html/rfc8998
+ *
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <asm/assembler.h>
+#include "sm4-ce-asm.h"
+
+.arch armv8-a+crypto
+
+.irp b, 0, 1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24, 25, 26, 27, 28, 29, 30, 31
+ .set .Lv\b\().4s, \b
+.endr
+
+.macro sm4e, vd, vn
+ .inst 0xcec08400 | (.L\vn << 5) | .L\vd
+.endm
+
+/* Register macros */
+
+#define RMAC v16
+
+/* Helper macros. */
+
+#define inc_le128(vctr) \
+ mov vctr.d[1], x8; \
+ mov vctr.d[0], x7; \
+ adds x8, x8, #1; \
+ rev64 vctr.16b, vctr.16b; \
+ adc x7, x7, xzr;
+
+
+.align 3
+SYM_FUNC_START(sm4_ce_cbcmac_update)
+ /* input:
+ * x0: round key array, CTX
+ * x1: mac
+ * x2: src
+ * w3: nblocks
+ */
+ SM4_PREPARE(x0)
+
+ ld1 {RMAC.16b}, [x1]
+
+.Lcbcmac_loop_4x:
+ cmp w3, #4
+ blt .Lcbcmac_loop_1x
+
+ sub w3, w3, #4
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v0.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v1.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v2.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v3.16b
+
+ cbz w3, .Lcbcmac_end
+ b .Lcbcmac_loop_4x
+
+.Lcbcmac_loop_1x:
+ sub w3, w3, #1
+
+ ld1 {v0.16b}, [x2], #16
+
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v0.16b
+
+ cbnz w3, .Lcbcmac_loop_1x
+
+.Lcbcmac_end:
+ st1 {RMAC.16b}, [x1]
+ ret
+SYM_FUNC_END(sm4_ce_cbcmac_update)
+
+.align 3
+SYM_FUNC_START(sm4_ce_ccm_final)
+ /* input:
+ * x0: round key array, CTX
+ * x1: ctr0 (big endian, 128 bit)
+ * x2: mac
+ */
+ SM4_PREPARE(x0)
+
+ ld1 {RMAC.16b}, [x2]
+ ld1 {v0.16b}, [x1]
+
+ SM4_CRYPT_BLK2(RMAC, v0)
+
+ /* encrypt/decrypt the MAC with ctr0 */
+ eor RMAC.16b, RMAC.16b, v0.16b
+ st1 {RMAC.16b}, [x2]
+
+ ret
+SYM_FUNC_END(sm4_ce_ccm_final)
+
+.align 3
+SYM_TYPED_FUNC_START(sm4_ce_ccm_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nbytes
+ * x5: mac
+ */
+ SM4_PREPARE(x0)
+
+ ldp x7, x8, [x3]
+ rev x7, x7
+ rev x8, x8
+
+ ld1 {RMAC.16b}, [x5]
+
+.Lccm_enc_loop_4x:
+ cmp w4, #(4 * 16)
+ blt .Lccm_enc_loop_1x
+
+ sub w4, w4, #(4 * 16)
+
+ /* construct CTRs */
+ inc_le128(v8) /* +0 */
+ inc_le128(v9) /* +1 */
+ inc_le128(v10) /* +2 */
+ inc_le128(v11) /* +3 */
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ SM4_CRYPT_BLK2(v8, RMAC)
+ eor v8.16b, v8.16b, v0.16b
+ eor RMAC.16b, RMAC.16b, v0.16b
+ SM4_CRYPT_BLK2(v9, RMAC)
+ eor v9.16b, v9.16b, v1.16b
+ eor RMAC.16b, RMAC.16b, v1.16b
+ SM4_CRYPT_BLK2(v10, RMAC)
+ eor v10.16b, v10.16b, v2.16b
+ eor RMAC.16b, RMAC.16b, v2.16b
+ SM4_CRYPT_BLK2(v11, RMAC)
+ eor v11.16b, v11.16b, v3.16b
+ eor RMAC.16b, RMAC.16b, v3.16b
+
+ st1 {v8.16b-v11.16b}, [x1], #64
+
+ cbz w4, .Lccm_enc_end
+ b .Lccm_enc_loop_4x
+
+.Lccm_enc_loop_1x:
+ cmp w4, #16
+ blt .Lccm_enc_tail
+
+ sub w4, w4, #16
+
+ /* construct CTRs */
+ inc_le128(v8)
+
+ ld1 {v0.16b}, [x2], #16
+
+ SM4_CRYPT_BLK2(v8, RMAC)
+ eor v8.16b, v8.16b, v0.16b
+ eor RMAC.16b, RMAC.16b, v0.16b
+
+ st1 {v8.16b}, [x1], #16
+
+ cbz w4, .Lccm_enc_end
+ b .Lccm_enc_loop_1x
+
+.Lccm_enc_tail:
+ /* construct CTRs */
+ inc_le128(v8)
+
+ SM4_CRYPT_BLK2(RMAC, v8)
+
+ /* store new MAC */
+ st1 {RMAC.16b}, [x5]
+
+.Lccm_enc_tail_loop:
+ ldrb w0, [x2], #1 /* get 1 byte from input */
+ umov w9, v8.b[0] /* get top encrypted CTR byte */
+ umov w6, RMAC.b[0] /* get top MAC byte */
+
+ eor w9, w9, w0 /* w9 = CTR ^ input */
+ eor w6, w6, w0 /* w6 = MAC ^ input */
+
+ strb w9, [x1], #1 /* store out byte */
+ strb w6, [x5], #1 /* store MAC byte */
+
+ subs w4, w4, #1
+ beq .Lccm_enc_ret
+
+ /* shift out one byte */
+ ext RMAC.16b, RMAC.16b, RMAC.16b, #1
+ ext v8.16b, v8.16b, v8.16b, #1
+
+ b .Lccm_enc_tail_loop
+
+.Lccm_enc_end:
+ /* store new MAC */
+ st1 {RMAC.16b}, [x5]
+
+ /* store new CTR */
+ rev x7, x7
+ rev x8, x8
+ stp x7, x8, [x3]
+
+.Lccm_enc_ret:
+ ret
+SYM_FUNC_END(sm4_ce_ccm_enc)
+
+.align 3
+SYM_TYPED_FUNC_START(sm4_ce_ccm_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nbytes
+ * x5: mac
+ */
+ SM4_PREPARE(x0)
+
+ ldp x7, x8, [x3]
+ rev x7, x7
+ rev x8, x8
+
+ ld1 {RMAC.16b}, [x5]
+
+.Lccm_dec_loop_4x:
+ cmp w4, #(4 * 16)
+ blt .Lccm_dec_loop_1x
+
+ sub w4, w4, #(4 * 16)
+
+ /* construct CTRs */
+ inc_le128(v8) /* +0 */
+ inc_le128(v9) /* +1 */
+ inc_le128(v10) /* +2 */
+ inc_le128(v11) /* +3 */
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ SM4_CRYPT_BLK2(v8, RMAC)
+ eor v8.16b, v8.16b, v0.16b
+ eor RMAC.16b, RMAC.16b, v8.16b
+ SM4_CRYPT_BLK2(v9, RMAC)
+ eor v9.16b, v9.16b, v1.16b
+ eor RMAC.16b, RMAC.16b, v9.16b
+ SM4_CRYPT_BLK2(v10, RMAC)
+ eor v10.16b, v10.16b, v2.16b
+ eor RMAC.16b, RMAC.16b, v10.16b
+ SM4_CRYPT_BLK2(v11, RMAC)
+ eor v11.16b, v11.16b, v3.16b
+ eor RMAC.16b, RMAC.16b, v11.16b
+
+ st1 {v8.16b-v11.16b}, [x1], #64
+
+ cbz w4, .Lccm_dec_end
+ b .Lccm_dec_loop_4x
+
+.Lccm_dec_loop_1x:
+ cmp w4, #16
+ blt .Lccm_dec_tail
+
+ sub w4, w4, #16
+
+ /* construct CTRs */
+ inc_le128(v8)
+
+ ld1 {v0.16b}, [x2], #16
+
+ SM4_CRYPT_BLK2(v8, RMAC)
+ eor v8.16b, v8.16b, v0.16b
+ eor RMAC.16b, RMAC.16b, v8.16b
+
+ st1 {v8.16b}, [x1], #16
+
+ cbz w4, .Lccm_dec_end
+ b .Lccm_dec_loop_1x
+
+.Lccm_dec_tail:
+ /* construct CTRs */
+ inc_le128(v8)
+
+ SM4_CRYPT_BLK2(RMAC, v8)
+
+ /* store new MAC */
+ st1 {RMAC.16b}, [x5]
+
+.Lccm_dec_tail_loop:
+ ldrb w0, [x2], #1 /* get 1 byte from input */
+ umov w9, v8.b[0] /* get top encrypted CTR byte */
+ umov w6, RMAC.b[0] /* get top MAC byte */
+
+ eor w9, w9, w0 /* w9 = CTR ^ input */
+ eor w6, w6, w9 /* w6 = MAC ^ output */
+
+ strb w9, [x1], #1 /* store out byte */
+ strb w6, [x5], #1 /* store MAC byte */
+
+ subs w4, w4, #1
+ beq .Lccm_dec_ret
+
+ /* shift out one byte */
+ ext RMAC.16b, RMAC.16b, RMAC.16b, #1
+ ext v8.16b, v8.16b, v8.16b, #1
+
+ b .Lccm_dec_tail_loop
+
+.Lccm_dec_end:
+ /* store new MAC */
+ st1 {RMAC.16b}, [x5]
+
+ /* store new CTR */
+ rev x7, x7
+ rev x8, x8
+ stp x7, x8, [x3]
+
+.Lccm_dec_ret:
+ ret
+SYM_FUNC_END(sm4_ce_ccm_dec)
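Note the order sm4_ce_cbcmac_update() uses: each step encrypts the running
MAC and then XORs in the next plaintext block, so the state leaves the
function with the last block folded in but not yet encrypted; the final
encryption happens on a later call or in sm4_ce_ccm_final(). A reference
model in C (sm4_encrypt_block() is an assumed one-block helper):

	static void cbcmac_update_model(const u32 *rkey, u8 mac[16],
					const u8 *src, unsigned int nblocks)
	{
		while (nblocks--) {
			sm4_encrypt_block(rkey, mac, mac); /* mac = E_K(mac) */
			crypto_xor(mac, src, 16);	   /* mac ^= m[i] */
			src += 16;
		}
	}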
diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c
new file mode 100644
index 000000000000..f2cec7b52efc
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4-CCM AEAD Algorithm using ARMv8 Crypto Extensions
+ * as specified in RFC 8998
+ * https://datatracker.ietf.org/doc/html/rfc8998
+ *
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <asm/neon.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-ce.h"
+
+asmlinkage void sm4_ce_cbcmac_update(const u32 *rkey_enc, u8 *mac,
+ const u8 *src, unsigned int nblocks);
+asmlinkage void sm4_ce_ccm_enc(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nbytes, u8 *mac);
+asmlinkage void sm4_ce_ccm_dec(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nbytes, u8 *mac);
+asmlinkage void sm4_ce_ccm_final(const u32 *rkey_enc, u8 *iv, u8 *mac);
+
+
+static int ccm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ kernel_neon_begin();
+ sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ if ((authsize & 1) || authsize < 4)
+ return -EINVAL;
+ return 0;
+}
+
+static int ccm_format_input(u8 info[], struct aead_request *req,
+ unsigned int msglen)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int l = req->iv[0] + 1;
+ unsigned int m;
+ __be32 len;
+
+ /* verify that the CCM dimension 'L' satisfies 2 <= L <= 8 */
+ if (l < 2 || l > 8)
+ return -EINVAL;
+ if (l < 4 && msglen >> (8 * l))
+ return -EOVERFLOW;
+
+ memset(&req->iv[SM4_BLOCK_SIZE - l], 0, l);
+
+ memcpy(info, req->iv, SM4_BLOCK_SIZE);
+
+ m = crypto_aead_authsize(aead);
+
+ /* format flags field per RFC 3610/NIST 800-38C */
+ *info |= ((m - 2) / 2) << 3;
+ if (req->assoclen)
+ *info |= (1 << 6);
+
+ /*
+ * format the message length field;
+ * Linux uses a u32 type to represent msglen
+ */
+ if (l >= 4)
+ l = 4;
+
+ len = cpu_to_be32(msglen);
+ memcpy(&info[SM4_BLOCK_SIZE - l], (u8 *)&len + 4 - l, l);
+
+ return 0;
+}
+
+static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_aead_ctx(aead);
+ struct __packed { __be16 l; __be32 h; } aadlen;
+ u32 assoclen = req->assoclen;
+ struct scatter_walk walk;
+ unsigned int len;
+
+ if (assoclen < 0xff00) {
+ aadlen.l = cpu_to_be16(assoclen);
+ len = 2;
+ } else {
+ aadlen.l = cpu_to_be16(0xfffe);
+ put_unaligned_be32(assoclen, &aadlen.h);
+ len = 6;
+ }
+
+ sm4_ce_crypt_block(ctx->rkey_enc, mac, mac);
+ crypto_xor(mac, (const u8 *)&aadlen, len);
+
+ scatterwalk_start(&walk, req->src);
+
+ do {
+ u32 n = scatterwalk_clamp(&walk, assoclen);
+ u8 *p, *ptr;
+
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, assoclen);
+ }
+
+ p = ptr = scatterwalk_map(&walk);
+ assoclen -= n;
+ scatterwalk_advance(&walk, n);
+
+ while (n > 0) {
+ unsigned int l, nblocks;
+
+ if (len == SM4_BLOCK_SIZE) {
+ if (n < SM4_BLOCK_SIZE) {
+ sm4_ce_crypt_block(ctx->rkey_enc,
+ mac, mac);
+
+ len = 0;
+ } else {
+ nblocks = n / SM4_BLOCK_SIZE;
+ sm4_ce_cbcmac_update(ctx->rkey_enc,
+ mac, ptr, nblocks);
+
+ ptr += nblocks * SM4_BLOCK_SIZE;
+ n %= SM4_BLOCK_SIZE;
+
+ continue;
+ }
+ }
+
+ l = min(n, SM4_BLOCK_SIZE - len);
+ if (l) {
+ crypto_xor(mac + len, ptr, l);
+ len += l;
+ ptr += l;
+ n -= l;
+ }
+ }
+
+ scatterwalk_unmap(p);
+ scatterwalk_done(&walk, 0, assoclen);
+ } while (assoclen);
+}
+
+static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+ u32 *rkey_enc, u8 mac[],
+ void (*sm4_ce_ccm_crypt)(const u32 *rkey_enc, u8 *dst,
+ const u8 *src, u8 *iv,
+ unsigned int nbytes, u8 *mac))
+{
+ u8 __aligned(8) ctr0[SM4_BLOCK_SIZE];
+ int err;
+
+ /* preserve the initial ctr0 for the TAG */
+ memcpy(ctr0, walk->iv, SM4_BLOCK_SIZE);
+ crypto_inc(walk->iv, SM4_BLOCK_SIZE);
+
+ kernel_neon_begin();
+
+ if (req->assoclen)
+ ccm_calculate_auth_mac(req, mac);
+
+ do {
+ unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
+ const u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+
+ if (walk->nbytes == walk->total)
+ tail = 0;
+
+ if (walk->nbytes - tail)
+ sm4_ce_ccm_crypt(rkey_enc, dst, src, walk->iv,
+ walk->nbytes - tail, mac);
+
+ if (walk->nbytes == walk->total)
+ sm4_ce_ccm_final(rkey_enc, ctr0, mac);
+
+ kernel_neon_end();
+
+ if (walk->nbytes) {
+ err = skcipher_walk_done(walk, tail);
+ if (err)
+ return err;
+ if (walk->nbytes)
+ kernel_neon_begin();
+ }
+ } while (walk->nbytes > 0);
+
+ return 0;
+}
+
+static int ccm_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_aead_ctx(aead);
+ u8 __aligned(8) mac[SM4_BLOCK_SIZE];
+ struct skcipher_walk walk;
+ int err;
+
+ err = ccm_format_input(mac, req, req->cryptlen);
+ if (err)
+ return err;
+
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
+ if (err)
+ return err;
+
+ err = ccm_crypt(req, &walk, ctx->rkey_enc, mac, sm4_ce_ccm_enc);
+ if (err)
+ return err;
+
+ /* copy authtag to end of dst */
+ scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int ccm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct sm4_ctx *ctx = crypto_aead_ctx(aead);
+ u8 __aligned(8) mac[SM4_BLOCK_SIZE];
+ u8 authtag[SM4_BLOCK_SIZE];
+ struct skcipher_walk walk;
+ int err;
+
+ err = ccm_format_input(mac, req, req->cryptlen - authsize);
+ if (err)
+ return err;
+
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (err)
+ return err;
+
+ err = ccm_crypt(req, &walk, ctx->rkey_enc, mac, sm4_ce_ccm_dec);
+ if (err)
+ return err;
+
+ /* compare calculated auth tag with the stored one */
+ scatterwalk_map_and_copy(authtag, req->src,
+ req->assoclen + req->cryptlen - authsize,
+ authsize, 0);
+
+ if (crypto_memneq(authtag, mac, authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static struct aead_alg sm4_ccm_alg = {
+ .base = {
+ .cra_name = "ccm(sm4)",
+ .cra_driver_name = "ccm-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = SM4_BLOCK_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .maxauthsize = SM4_BLOCK_SIZE,
+ .setkey = ccm_setkey,
+ .setauthsize = ccm_setauthsize,
+ .encrypt = ccm_encrypt,
+ .decrypt = ccm_decrypt,
+};
+
+static int __init sm4_ce_ccm_init(void)
+{
+ return crypto_register_aead(&sm4_ccm_alg);
+}
+
+static void __exit sm4_ce_ccm_exit(void)
+{
+ crypto_unregister_aead(&sm4_ccm_alg);
+}
+
+module_cpu_feature_match(SM4, sm4_ce_ccm_init);
+module_exit(sm4_ce_ccm_exit);
+
+MODULE_DESCRIPTION("Synchronous SM4 in CCM mode using ARMv8 Crypto Extensions");
+MODULE_ALIAS_CRYPTO("ccm(sm4)");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
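A hypothetical caller of the "ccm(sm4)" AEAD registered above; key/IV
setup, scatterlist construction and error handling are omitted:

	struct crypto_aead *tfm = crypto_alloc_aead("ccm(sm4)", 0, 0);
	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);

	crypto_aead_setkey(tfm, key, SM4_KEY_SIZE);
	crypto_aead_setauthsize(tfm, 16);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg_src, sg_dst, cryptlen, iv);
	crypto_aead_encrypt(req);	/* appends the 16-byte tag to dst */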
diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c
index 76a34ef4abbb..c31d76fb5a17 100644
--- a/arch/arm64/crypto/sm4-ce-cipher-glue.c
+++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c
@@ -2,11 +2,11 @@
#include <asm/neon.h>
#include <asm/simd.h>
+#include <crypto/algapi.h>
#include <crypto/sm4.h>
#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
-#include <linux/crypto.h>
#include <linux/types.h>
MODULE_ALIAS_CRYPTO("sm4");
diff --git a/arch/arm64/crypto/sm4-ce-core.S b/arch/arm64/crypto/sm4-ce-core.S
index 934e0f093279..877b80c54a0d 100644
--- a/arch/arm64/crypto/sm4-ce-core.S
+++ b/arch/arm64/crypto/sm4-ce-core.S
@@ -10,10 +10,12 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include "sm4-ce-asm.h"
.arch armv8-a+crypto
-.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 16, 20, 24, 25, 26, 27, 28, 29, 30, 31
+.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
+ 20, 24, 25, 26, 27, 28, 29, 30, 31
.set .Lv\b\().4s, \b
.endr
@@ -33,174 +35,8 @@
#define RTMP3 v19
#define RIV v20
-
-/* Helper macros. */
-
-#define PREPARE \
- ld1 {v24.16b-v27.16b}, [x0], #64; \
- ld1 {v28.16b-v31.16b}, [x0];
-
-#define SM4_CRYPT_BLK(b0) \
- rev32 b0.16b, b0.16b; \
- sm4e b0.4s, v24.4s; \
- sm4e b0.4s, v25.4s; \
- sm4e b0.4s, v26.4s; \
- sm4e b0.4s, v27.4s; \
- sm4e b0.4s, v28.4s; \
- sm4e b0.4s, v29.4s; \
- sm4e b0.4s, v30.4s; \
- sm4e b0.4s, v31.4s; \
- rev64 b0.4s, b0.4s; \
- ext b0.16b, b0.16b, b0.16b, #8; \
- rev32 b0.16b, b0.16b;
-
-#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
- rev32 b0.16b, b0.16b; \
- rev32 b1.16b, b1.16b; \
- rev32 b2.16b, b2.16b; \
- rev32 b3.16b, b3.16b; \
- sm4e b0.4s, v24.4s; \
- sm4e b1.4s, v24.4s; \
- sm4e b2.4s, v24.4s; \
- sm4e b3.4s, v24.4s; \
- sm4e b0.4s, v25.4s; \
- sm4e b1.4s, v25.4s; \
- sm4e b2.4s, v25.4s; \
- sm4e b3.4s, v25.4s; \
- sm4e b0.4s, v26.4s; \
- sm4e b1.4s, v26.4s; \
- sm4e b2.4s, v26.4s; \
- sm4e b3.4s, v26.4s; \
- sm4e b0.4s, v27.4s; \
- sm4e b1.4s, v27.4s; \
- sm4e b2.4s, v27.4s; \
- sm4e b3.4s, v27.4s; \
- sm4e b0.4s, v28.4s; \
- sm4e b1.4s, v28.4s; \
- sm4e b2.4s, v28.4s; \
- sm4e b3.4s, v28.4s; \
- sm4e b0.4s, v29.4s; \
- sm4e b1.4s, v29.4s; \
- sm4e b2.4s, v29.4s; \
- sm4e b3.4s, v29.4s; \
- sm4e b0.4s, v30.4s; \
- sm4e b1.4s, v30.4s; \
- sm4e b2.4s, v30.4s; \
- sm4e b3.4s, v30.4s; \
- sm4e b0.4s, v31.4s; \
- sm4e b1.4s, v31.4s; \
- sm4e b2.4s, v31.4s; \
- sm4e b3.4s, v31.4s; \
- rev64 b0.4s, b0.4s; \
- rev64 b1.4s, b1.4s; \
- rev64 b2.4s, b2.4s; \
- rev64 b3.4s, b3.4s; \
- ext b0.16b, b0.16b, b0.16b, #8; \
- ext b1.16b, b1.16b, b1.16b, #8; \
- ext b2.16b, b2.16b, b2.16b, #8; \
- ext b3.16b, b3.16b, b3.16b, #8; \
- rev32 b0.16b, b0.16b; \
- rev32 b1.16b, b1.16b; \
- rev32 b2.16b, b2.16b; \
- rev32 b3.16b, b3.16b;
-
-#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
- rev32 b0.16b, b0.16b; \
- rev32 b1.16b, b1.16b; \
- rev32 b2.16b, b2.16b; \
- rev32 b3.16b, b3.16b; \
- rev32 b4.16b, b4.16b; \
- rev32 b5.16b, b5.16b; \
- rev32 b6.16b, b6.16b; \
- rev32 b7.16b, b7.16b; \
- sm4e b0.4s, v24.4s; \
- sm4e b1.4s, v24.4s; \
- sm4e b2.4s, v24.4s; \
- sm4e b3.4s, v24.4s; \
- sm4e b4.4s, v24.4s; \
- sm4e b5.4s, v24.4s; \
- sm4e b6.4s, v24.4s; \
- sm4e b7.4s, v24.4s; \
- sm4e b0.4s, v25.4s; \
- sm4e b1.4s, v25.4s; \
- sm4e b2.4s, v25.4s; \
- sm4e b3.4s, v25.4s; \
- sm4e b4.4s, v25.4s; \
- sm4e b5.4s, v25.4s; \
- sm4e b6.4s, v25.4s; \
- sm4e b7.4s, v25.4s; \
- sm4e b0.4s, v26.4s; \
- sm4e b1.4s, v26.4s; \
- sm4e b2.4s, v26.4s; \
- sm4e b3.4s, v26.4s; \
- sm4e b4.4s, v26.4s; \
- sm4e b5.4s, v26.4s; \
- sm4e b6.4s, v26.4s; \
- sm4e b7.4s, v26.4s; \
- sm4e b0.4s, v27.4s; \
- sm4e b1.4s, v27.4s; \
- sm4e b2.4s, v27.4s; \
- sm4e b3.4s, v27.4s; \
- sm4e b4.4s, v27.4s; \
- sm4e b5.4s, v27.4s; \
- sm4e b6.4s, v27.4s; \
- sm4e b7.4s, v27.4s; \
- sm4e b0.4s, v28.4s; \
- sm4e b1.4s, v28.4s; \
- sm4e b2.4s, v28.4s; \
- sm4e b3.4s, v28.4s; \
- sm4e b4.4s, v28.4s; \
- sm4e b5.4s, v28.4s; \
- sm4e b6.4s, v28.4s; \
- sm4e b7.4s, v28.4s; \
- sm4e b0.4s, v29.4s; \
- sm4e b1.4s, v29.4s; \
- sm4e b2.4s, v29.4s; \
- sm4e b3.4s, v29.4s; \
- sm4e b4.4s, v29.4s; \
- sm4e b5.4s, v29.4s; \
- sm4e b6.4s, v29.4s; \
- sm4e b7.4s, v29.4s; \
- sm4e b0.4s, v30.4s; \
- sm4e b1.4s, v30.4s; \
- sm4e b2.4s, v30.4s; \
- sm4e b3.4s, v30.4s; \
- sm4e b4.4s, v30.4s; \
- sm4e b5.4s, v30.4s; \
- sm4e b6.4s, v30.4s; \
- sm4e b7.4s, v30.4s; \
- sm4e b0.4s, v31.4s; \
- sm4e b1.4s, v31.4s; \
- sm4e b2.4s, v31.4s; \
- sm4e b3.4s, v31.4s; \
- sm4e b4.4s, v31.4s; \
- sm4e b5.4s, v31.4s; \
- sm4e b6.4s, v31.4s; \
- sm4e b7.4s, v31.4s; \
- rev64 b0.4s, b0.4s; \
- rev64 b1.4s, b1.4s; \
- rev64 b2.4s, b2.4s; \
- rev64 b3.4s, b3.4s; \
- rev64 b4.4s, b4.4s; \
- rev64 b5.4s, b5.4s; \
- rev64 b6.4s, b6.4s; \
- rev64 b7.4s, b7.4s; \
- ext b0.16b, b0.16b, b0.16b, #8; \
- ext b1.16b, b1.16b, b1.16b, #8; \
- ext b2.16b, b2.16b, b2.16b, #8; \
- ext b3.16b, b3.16b, b3.16b, #8; \
- ext b4.16b, b4.16b, b4.16b, #8; \
- ext b5.16b, b5.16b, b5.16b, #8; \
- ext b6.16b, b6.16b, b6.16b, #8; \
- ext b7.16b, b7.16b, b7.16b, #8; \
- rev32 b0.16b, b0.16b; \
- rev32 b1.16b, b1.16b; \
- rev32 b2.16b, b2.16b; \
- rev32 b3.16b, b3.16b; \
- rev32 b4.16b, b4.16b; \
- rev32 b5.16b, b5.16b; \
- rev32 b6.16b, b6.16b; \
- rev32 b7.16b, b7.16b;
+#define RMAC v20
+#define RMASK v21
.align 3
@@ -231,32 +67,23 @@ SYM_FUNC_START(sm4_ce_expand_key)
sm4ekey v6.4s, v5.4s, v30.4s;
sm4ekey v7.4s, v6.4s, v31.4s;
+ adr_l x5, .Lbswap128_mask
+ ld1 {v24.16b}, [x5]
+
st1 {v0.16b-v3.16b}, [x1], #64;
st1 {v4.16b-v7.16b}, [x1];
- rev64 v7.4s, v7.4s;
- rev64 v6.4s, v6.4s;
- rev64 v5.4s, v5.4s;
- rev64 v4.4s, v4.4s;
- rev64 v3.4s, v3.4s;
- rev64 v2.4s, v2.4s;
- rev64 v1.4s, v1.4s;
- rev64 v0.4s, v0.4s;
- ext v7.16b, v7.16b, v7.16b, #8;
- ext v6.16b, v6.16b, v6.16b, #8;
- ext v5.16b, v5.16b, v5.16b, #8;
- ext v4.16b, v4.16b, v4.16b, #8;
- ext v3.16b, v3.16b, v3.16b, #8;
- ext v2.16b, v2.16b, v2.16b, #8;
- ext v1.16b, v1.16b, v1.16b, #8;
- ext v0.16b, v0.16b, v0.16b, #8;
- st1 {v7.16b}, [x2], #16;
- st1 {v6.16b}, [x2], #16;
- st1 {v5.16b}, [x2], #16;
- st1 {v4.16b}, [x2], #16;
- st1 {v3.16b}, [x2], #16;
- st1 {v2.16b}, [x2], #16;
- st1 {v1.16b}, [x2], #16;
- st1 {v0.16b}, [x2];
+
+ tbl v16.16b, {v7.16b}, v24.16b
+ tbl v17.16b, {v6.16b}, v24.16b
+ tbl v18.16b, {v5.16b}, v24.16b
+ tbl v19.16b, {v4.16b}, v24.16b
+ tbl v20.16b, {v3.16b}, v24.16b
+ tbl v21.16b, {v2.16b}, v24.16b
+ tbl v22.16b, {v1.16b}, v24.16b
+ tbl v23.16b, {v0.16b}, v24.16b
+
+ st1 {v16.16b-v19.16b}, [x2], #64
+ st1 {v20.16b-v23.16b}, [x2]
ret;
SYM_FUNC_END(sm4_ce_expand_key)
@@ -268,7 +95,7 @@ SYM_FUNC_START(sm4_ce_crypt_block)
* x1: dst
* x2: src
*/
- PREPARE;
+ SM4_PREPARE(x0)
ld1 {v0.16b}, [x2];
SM4_CRYPT_BLK(v0);
@@ -285,7 +112,7 @@ SYM_FUNC_START(sm4_ce_crypt)
* x2: src
* w3: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
.Lcrypt_loop_blk:
sub w3, w3, #8;
@@ -337,26 +164,50 @@ SYM_FUNC_START(sm4_ce_cbc_enc)
* x3: iv (big endian, 128 bit)
* w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
+
+ ld1 {RIV.16b}, [x3]
+
+.Lcbc_enc_loop_4x:
+ cmp w4, #4
+ blt .Lcbc_enc_loop_1x
+
+ sub w4, w4, #4
- ld1 {RIV.16b}, [x3];
+ ld1 {v0.16b-v3.16b}, [x2], #64
-.Lcbc_enc_loop:
- sub w4, w4, #1;
+ eor v0.16b, v0.16b, RIV.16b
+ SM4_CRYPT_BLK(v0)
+ eor v1.16b, v1.16b, v0.16b
+ SM4_CRYPT_BLK(v1)
+ eor v2.16b, v2.16b, v1.16b
+ SM4_CRYPT_BLK(v2)
+ eor v3.16b, v3.16b, v2.16b
+ SM4_CRYPT_BLK(v3)
- ld1 {RTMP0.16b}, [x2], #16;
- eor RIV.16b, RIV.16b, RTMP0.16b;
+ st1 {v0.16b-v3.16b}, [x1], #64
+ mov RIV.16b, v3.16b
- SM4_CRYPT_BLK(RIV);
+ cbz w4, .Lcbc_enc_end
+ b .Lcbc_enc_loop_4x
- st1 {RIV.16b}, [x1], #16;
+.Lcbc_enc_loop_1x:
+ sub w4, w4, #1
- cbnz w4, .Lcbc_enc_loop;
+ ld1 {v0.16b}, [x2], #16
+ eor RIV.16b, RIV.16b, v0.16b
+ SM4_CRYPT_BLK(RIV)
+
+ st1 {RIV.16b}, [x1], #16
+
+ cbnz w4, .Lcbc_enc_loop_1x
+
+.Lcbc_enc_end:
/* store new IV */
- st1 {RIV.16b}, [x3];
+ st1 {RIV.16b}, [x3]
- ret;
+ ret
SYM_FUNC_END(sm4_ce_cbc_enc)
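Note that .Lcbc_enc_loop_4x above batches only the loads and stores: CBC encryption is inherently serial, each plaintext block being XORed with the previous ciphertext before encryption, so the four SM4_CRYPT_BLK calls cannot be fused. The decrypt path below, by contrast, really does push eight blocks through SM4_CRYPT_BLK8_BE at once and re-chains afterwards.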
.align 3
@@ -368,82 +219,190 @@ SYM_FUNC_START(sm4_ce_cbc_dec)
* x3: iv (big endian, 128 bit)
* w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
- ld1 {RIV.16b}, [x3];
+ ld1 {RIV.16b}, [x3]
-.Lcbc_loop_blk:
- sub w4, w4, #8;
- tbnz w4, #31, .Lcbc_tail8;
+.Lcbc_dec_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lcbc_dec_4x
- ld1 {v0.16b-v3.16b}, [x2], #64;
- ld1 {v4.16b-v7.16b}, [x2];
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ ld1 {v4.16b-v7.16b}, [x2], #64
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ rev32 v8.16b, v0.16b
+ rev32 v9.16b, v1.16b
+ rev32 v10.16b, v2.16b
+ rev32 v11.16b, v3.16b
+ rev32 v12.16b, v4.16b
+ rev32 v13.16b, v5.16b
+ rev32 v14.16b, v6.16b
+ rev32 v15.16b, v7.16b
- sub x2, x2, #64;
- eor v0.16b, v0.16b, RIV.16b;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v1.16b, v1.16b, RTMP0.16b;
- eor v2.16b, v2.16b, RTMP1.16b;
- eor v3.16b, v3.16b, RTMP2.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15)
- eor v4.16b, v4.16b, RTMP3.16b;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v5.16b, v5.16b, RTMP0.16b;
- eor v6.16b, v6.16b, RTMP1.16b;
- eor v7.16b, v7.16b, RTMP2.16b;
+ eor v8.16b, v8.16b, RIV.16b
+ eor v9.16b, v9.16b, v0.16b
+ eor v10.16b, v10.16b, v1.16b
+ eor v11.16b, v11.16b, v2.16b
+ eor v12.16b, v12.16b, v3.16b
+ eor v13.16b, v13.16b, v4.16b
+ eor v14.16b, v14.16b, v5.16b
+ eor v15.16b, v15.16b, v6.16b
- mov RIV.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
+ st1 {v8.16b-v11.16b}, [x1], #64
+ st1 {v12.16b-v15.16b}, [x1], #64
- cbz w4, .Lcbc_end;
- b .Lcbc_loop_blk;
+ mov RIV.16b, v7.16b
-.Lcbc_tail8:
- add w4, w4, #8;
- cmp w4, #4;
- blt .Lcbc_tail4;
+ cbz w4, .Lcbc_dec_end
+ b .Lcbc_dec_loop_8x
- sub w4, w4, #4;
+.Lcbc_dec_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lcbc_dec_loop_1x
- ld1 {v0.16b-v3.16b}, [x2];
+ sub w4, w4, #4
- SM4_CRYPT_BLK4(v0, v1, v2, v3);
+ ld1 {v0.16b-v3.16b}, [x2], #64
- eor v0.16b, v0.16b, RIV.16b;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v1.16b, v1.16b, RTMP0.16b;
- eor v2.16b, v2.16b, RTMP1.16b;
- eor v3.16b, v3.16b, RTMP2.16b;
+ rev32 v8.16b, v0.16b
+ rev32 v9.16b, v1.16b
+ rev32 v10.16b, v2.16b
+ rev32 v11.16b, v3.16b
- mov RIV.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ SM4_CRYPT_BLK4_BE(v8, v9, v10, v11)
- cbz w4, .Lcbc_end;
+ eor v8.16b, v8.16b, RIV.16b
+ eor v9.16b, v9.16b, v0.16b
+ eor v10.16b, v10.16b, v1.16b
+ eor v11.16b, v11.16b, v2.16b
-.Lcbc_tail4:
- sub w4, w4, #1;
+ st1 {v8.16b-v11.16b}, [x1], #64
- ld1 {v0.16b}, [x2];
+ mov RIV.16b, v3.16b
- SM4_CRYPT_BLK(v0);
+ cbz w4, .Lcbc_dec_end
- eor v0.16b, v0.16b, RIV.16b;
- ld1 {RIV.16b}, [x2], #16;
- st1 {v0.16b}, [x1], #16;
+.Lcbc_dec_loop_1x:
+ sub w4, w4, #1
+
+ ld1 {v0.16b}, [x2], #16
- cbnz w4, .Lcbc_tail4;
+ rev32 v8.16b, v0.16b
-.Lcbc_end:
+ SM4_CRYPT_BLK_BE(v8)
+
+ eor v8.16b, v8.16b, RIV.16b
+ st1 {v8.16b}, [x1], #16
+
+ mov RIV.16b, v0.16b
+
+ cbnz w4, .Lcbc_dec_loop_1x
+
+.Lcbc_dec_end:
/* store new IV */
- st1 {RIV.16b}, [x3];
+ st1 {RIV.16b}, [x3]
- ret;
+ ret
SYM_FUNC_END(sm4_ce_cbc_dec)
.align 3
+SYM_FUNC_START(sm4_ce_cbc_cts_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nbytes
+ */
+ SM4_PREPARE(x0)
+
+ sub w5, w4, #16
+ uxtw x5, w5
+
+ ld1 {RIV.16b}, [x3]
+
+ ld1 {v0.16b}, [x2]
+ eor RIV.16b, RIV.16b, v0.16b
+ SM4_CRYPT_BLK(RIV)
+
+ /* load permute table */
+ adr_l x6, .Lcts_permute_table
+ add x7, x6, #32
+ add x6, x6, x5
+ sub x7, x7, x5
+ ld1 {v3.16b}, [x6]
+ ld1 {v4.16b}, [x7]
+
+ /* overlapping loads */
+ add x2, x2, x5
+ ld1 {v1.16b}, [x2]
+
+ /* create Cn from En-1 */
+ tbl v0.16b, {RIV.16b}, v3.16b
+ /* padding Pn with zeros */
+ tbl v1.16b, {v1.16b}, v4.16b
+
+ eor v1.16b, v1.16b, RIV.16b
+ SM4_CRYPT_BLK(v1)
+
+ /* overlapping stores */
+ add x5, x1, x5
+ st1 {v0.16b}, [x5]
+ st1 {v1.16b}, [x1]
+
+ ret
+SYM_FUNC_END(sm4_ce_cbc_cts_enc)
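For reference, the ciphertext-stealing tail that sm4_ce_cbc_cts_enc implements with overlapping loads/stores and the permute table can be written portably along these lines (a sketch with an abstract one-block encrypt callback; all names are illustrative; tail is 1..16 bytes and the final two ciphertext blocks come out swapped, per the CS3 convention):

#include <stdint.h>
#include <string.h>

typedef void (*blk_enc_t)(uint8_t out[16], const uint8_t in[16]);

static void cbc_cts_enc_tail(blk_enc_t enc, uint8_t *dst, const uint8_t *src,
			     const uint8_t iv[16], size_t tail)
{
	uint8_t e[16], last[16] = { 0 };
	size_t i;

	for (i = 0; i < 16; i++)	/* En-1 = E(Pn-1 ^ IV) */
		e[i] = src[i] ^ iv[i];
	enc(e, e);

	memcpy(last, src + 16, tail);	/* Pn, zero padded */
	for (i = 0; i < 16; i++)
		last[i] ^= e[i];
	enc(last, last);		/* Cn-1 = E(Pn ^ En-1) */

	memcpy(dst, last, 16);		/* swapped order: Cn-1 first... */
	memcpy(dst + 16, e, tail);	/* ...then Cn = head of En-1 */
}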
+
+.align 3
+SYM_FUNC_START(sm4_ce_cbc_cts_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: iv (big endian, 128 bit)
+ * w4: nbytes
+ */
+ SM4_PREPARE(x0)
+
+ sub w5, w4, #16
+ uxtw x5, w5
+
+ ld1 {RIV.16b}, [x3]
+
+ /* load permute table */
+ adr_l x6, .Lcts_permute_table
+ add x7, x6, #32
+ add x6, x6, x5
+ sub x7, x7, x5
+ ld1 {v3.16b}, [x6]
+ ld1 {v4.16b}, [x7]
+
+ /* overlapping loads */
+ ld1 {v0.16b}, [x2], x5
+ ld1 {v1.16b}, [x2]
+
+ SM4_CRYPT_BLK(v0)
+ /* select the first Ln bytes of Xn to create Pn */
+ tbl v2.16b, {v0.16b}, v3.16b
+ eor v2.16b, v2.16b, v1.16b
+
+ /* overwrite the first Ln bytes with Cn to create En-1 */
+ tbx v0.16b, {v1.16b}, v4.16b
+ SM4_CRYPT_BLK(v0)
+ eor v0.16b, v0.16b, RIV.16b
+
+ /* overlapping stores */
+ add x5, x1, x5
+ st1 {v2.16b}, [x5]
+ st1 {v0.16b}, [x1]
+
+ ret
+SYM_FUNC_END(sm4_ce_cbc_cts_dec)
+
+.align 3
SYM_FUNC_START(sm4_ce_cfb_enc)
/* input:
* x0: round key array, CTX
@@ -452,25 +411,57 @@ SYM_FUNC_START(sm4_ce_cfb_enc)
* x3: iv (big endian, 128 bit)
* w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
+
+ ld1 {RIV.16b}, [x3]
+
+.Lcfb_enc_loop_4x:
+ cmp w4, #4
+ blt .Lcfb_enc_loop_1x
+
+ sub w4, w4, #4
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ rev32 v8.16b, RIV.16b
+ SM4_CRYPT_BLK_BE(v8)
+ eor v0.16b, v0.16b, v8.16b
+
+ rev32 v8.16b, v0.16b
+ SM4_CRYPT_BLK_BE(v8)
+ eor v1.16b, v1.16b, v8.16b
+
+ rev32 v8.16b, v1.16b
+ SM4_CRYPT_BLK_BE(v8)
+ eor v2.16b, v2.16b, v8.16b
+
+ rev32 v8.16b, v2.16b
+ SM4_CRYPT_BLK_BE(v8)
+ eor v3.16b, v3.16b, v8.16b
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+ mov RIV.16b, v3.16b
+
+ cbz w4, .Lcfb_enc_end
+ b .Lcfb_enc_loop_4x
- ld1 {RIV.16b}, [x3];
+.Lcfb_enc_loop_1x:
+ sub w4, w4, #1
-.Lcfb_enc_loop:
- sub w4, w4, #1;
+ ld1 {v0.16b}, [x2], #16
- SM4_CRYPT_BLK(RIV);
+ SM4_CRYPT_BLK(RIV)
+ eor RIV.16b, RIV.16b, v0.16b
- ld1 {RTMP0.16b}, [x2], #16;
- eor RIV.16b, RIV.16b, RTMP0.16b;
- st1 {RIV.16b}, [x1], #16;
+ st1 {RIV.16b}, [x1], #16
- cbnz w4, .Lcfb_enc_loop;
+ cbnz w4, .Lcfb_enc_loop_1x
+.Lcfb_enc_end:
/* store new IV */
- st1 {RIV.16b}, [x3];
+ st1 {RIV.16b}, [x3]
- ret;
+ ret
SYM_FUNC_END(sm4_ce_cfb_enc)
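As with CBC, the encrypt-side 4x loop above is purely a load/store unroll: each keystream block is the encryption of the previous ciphertext block, so the chain is serial (decryption, where all ciphertext blocks are already known, gets a genuine 8-way path below). A serial reference in C (callback type and names are illustrative):

#include <stdint.h>
#include <string.h>

typedef void (*blk_enc_t)(uint8_t out[16], const uint8_t in[16]);

/* CFB encryption: C[i] = P[i] ^ E(C[i-1]), with C[-1] = IV. */
static void cfb_enc(blk_enc_t enc, uint8_t *dst, const uint8_t *src,
		    unsigned int nblocks, uint8_t iv[16])
{
	uint8_t ks[16];

	while (nblocks--) {
		enc(ks, iv);
		for (int i = 0; i < 16; i++)
			dst[i] = src[i] ^ ks[i];
		memcpy(iv, dst, 16);	/* chain on the new ciphertext */
		src += 16;
		dst += 16;
	}
}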
.align 3
@@ -482,79 +473,91 @@ SYM_FUNC_START(sm4_ce_cfb_dec)
* x3: iv (big endian, 128 bit)
* w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
- ld1 {v0.16b}, [x3];
+ ld1 {RIV.16b}, [x3]
-.Lcfb_loop_blk:
- sub w4, w4, #8;
- tbnz w4, #31, .Lcfb_tail8;
+.Lcfb_dec_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lcfb_dec_4x
- ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48;
- ld1 {v4.16b-v7.16b}, [x2];
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ ld1 {v4.16b-v7.16b}, [x2], #64
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ rev32 v8.16b, RIV.16b
+ rev32 v9.16b, v0.16b
+ rev32 v10.16b, v1.16b
+ rev32 v11.16b, v2.16b
+ rev32 v12.16b, v3.16b
+ rev32 v13.16b, v4.16b
+ rev32 v14.16b, v5.16b
+ rev32 v15.16b, v6.16b
- sub x2, x2, #48;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15)
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v4.16b, v4.16b, RTMP0.16b;
- eor v5.16b, v5.16b, RTMP1.16b;
- eor v6.16b, v6.16b, RTMP2.16b;
- eor v7.16b, v7.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
+ mov RIV.16b, v7.16b
- mov v0.16b, RTMP3.16b;
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
- cbz w4, .Lcfb_end;
- b .Lcfb_loop_blk;
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
-.Lcfb_tail8:
- add w4, w4, #8;
- cmp w4, #4;
- blt .Lcfb_tail4;
+ cbz w4, .Lcfb_dec_end
+ b .Lcfb_dec_loop_8x
- sub w4, w4, #4;
+.Lcfb_dec_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lcfb_dec_loop_1x
- ld1 {v1.16b, v2.16b, v3.16b}, [x2];
+ sub w4, w4, #4
- SM4_CRYPT_BLK4(v0, v1, v2, v3);
+ ld1 {v0.16b-v3.16b}, [x2], #64
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ rev32 v8.16b, RIV.16b
+ rev32 v9.16b, v0.16b
+ rev32 v10.16b, v1.16b
+ rev32 v11.16b, v2.16b
- mov v0.16b, RTMP3.16b;
+ SM4_CRYPT_BLK4_BE(v8, v9, v10, v11)
- cbz w4, .Lcfb_end;
+ mov RIV.16b, v3.16b
-.Lcfb_tail4:
- sub w4, w4, #1;
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
- SM4_CRYPT_BLK(v0);
+ st1 {v0.16b-v3.16b}, [x1], #64
- ld1 {RTMP0.16b}, [x2], #16;
- eor v0.16b, v0.16b, RTMP0.16b;
- st1 {v0.16b}, [x1], #16;
+ cbz w4, .Lcfb_dec_end
+
+.Lcfb_dec_loop_1x:
+ sub w4, w4, #1
+
+ ld1 {v0.16b}, [x2], #16
+
+ SM4_CRYPT_BLK(RIV)
- mov v0.16b, RTMP0.16b;
+ eor RIV.16b, RIV.16b, v0.16b
+ st1 {RIV.16b}, [x1], #16
- cbnz w4, .Lcfb_tail4;
+ mov RIV.16b, v0.16b
-.Lcfb_end:
+ cbnz w4, .Lcfb_dec_loop_1x
+
+.Lcfb_dec_end:
/* store new IV */
- st1 {v0.16b}, [x3];
+ st1 {RIV.16b}, [x3]
- ret;
+ ret
SYM_FUNC_END(sm4_ce_cfb_dec)
.align 3
@@ -566,95 +569,525 @@ SYM_FUNC_START(sm4_ce_ctr_enc)
* x3: ctr (big endian, 128 bit)
* w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE(x0)
- ldp x7, x8, [x3];
- rev x7, x7;
- rev x8, x8;
+ ldp x7, x8, [x3]
+ rev x7, x7
+ rev x8, x8
-.Lctr_loop_blk:
- sub w4, w4, #8;
- tbnz w4, #31, .Lctr_tail8;
+.Lctr_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lctr_4x
-#define inc_le128(vctr) \
- mov vctr.d[1], x8; \
- mov vctr.d[0], x7; \
- adds x8, x8, #1; \
- adc x7, x7, xzr; \
- rev64 vctr.16b, vctr.16b;
+#define inc_le128(vctr) \
+ mov vctr.d[1], x8; \
+ mov vctr.d[0], x7; \
+ adds x8, x8, #1; \
+ rev64 vctr.16b, vctr.16b; \
+ adc x7, x7, xzr;
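The rewritten macro only reorders the rev64 into the adds/adc carry chain, apparently to interleave vector and scalar work; the counter itself still lives host-order in x7:x8 and is bumped as an ordinary 128-bit integer, roughly (scalar sketch, helper name illustrative):

#include <stdint.h>

/* 128-bit increment over a high/low pair of 64-bit words. */
static void ctr128_inc(uint64_t *hi, uint64_t *lo)
{
	if (++*lo == 0)
		(*hi)++;
}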
/* construct CTRs */
- inc_le128(v0); /* +0 */
- inc_le128(v1); /* +1 */
- inc_le128(v2); /* +2 */
- inc_le128(v3); /* +3 */
- inc_le128(v4); /* +4 */
- inc_le128(v5); /* +5 */
- inc_le128(v6); /* +6 */
- inc_le128(v7); /* +7 */
+ inc_le128(v0) /* +0 */
+ inc_le128(v1) /* +1 */
+ inc_le128(v2) /* +2 */
+ inc_le128(v3) /* +3 */
+ inc_le128(v4) /* +4 */
+ inc_le128(v5) /* +5 */
+ inc_le128(v6) /* +6 */
+ inc_le128(v7) /* +7 */
+
+ ld1 {v8.16b-v11.16b}, [x2], #64
+ ld1 {v12.16b-v15.16b}, [x2], #64
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ cbz w4, .Lctr_end
+ b .Lctr_loop_8x
+
+.Lctr_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lctr_loop_1x
+
+ sub w4, w4, #4
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ /* construct CTRs */
+ inc_le128(v0) /* +0 */
+ inc_le128(v1) /* +1 */
+ inc_le128(v2) /* +2 */
+ inc_le128(v3) /* +3 */
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ ld1 {v8.16b-v11.16b}, [x2], #64
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v4.16b, v4.16b, RTMP0.16b;
- eor v5.16b, v5.16b, RTMP1.16b;
- eor v6.16b, v6.16b, RTMP2.16b;
- eor v7.16b, v7.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
- cbz w4, .Lctr_end;
- b .Lctr_loop_blk;
+ st1 {v0.16b-v3.16b}, [x1], #64
-.Lctr_tail8:
- add w4, w4, #8;
- cmp w4, #4;
- blt .Lctr_tail4;
+ cbz w4, .Lctr_end
- sub w4, w4, #4;
+.Lctr_loop_1x:
+ sub w4, w4, #1
/* construct CTRs */
- inc_le128(v0); /* +0 */
- inc_le128(v1); /* +1 */
- inc_le128(v2); /* +2 */
- inc_le128(v3); /* +3 */
+ inc_le128(v0)
- SM4_CRYPT_BLK4(v0, v1, v2, v3);
+ ld1 {v8.16b}, [x2], #16
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ SM4_CRYPT_BLK(v0)
- cbz w4, .Lctr_end;
+ eor v0.16b, v0.16b, v8.16b
+ st1 {v0.16b}, [x1], #16
-.Lctr_tail4:
- sub w4, w4, #1;
+ cbnz w4, .Lctr_loop_1x
- /* construct CTRs */
- inc_le128(v0);
+.Lctr_end:
+ /* store new CTR */
+ rev x7, x7
+ rev x8, x8
+ stp x7, x8, [x3]
- SM4_CRYPT_BLK(v0);
+ ret
+SYM_FUNC_END(sm4_ce_ctr_enc)
- ld1 {RTMP0.16b}, [x2], #16;
- eor v0.16b, v0.16b, RTMP0.16b;
- st1 {v0.16b}, [x1], #16;
- cbnz w4, .Lctr_tail4;
+#define tweak_next(vt, vin, RTMP) \
+ sshr RTMP.2d, vin.2d, #63; \
+ and RTMP.16b, RTMP.16b, RMASK.16b; \
+ add vt.2d, vin.2d, vin.2d; \
+ ext RTMP.16b, RTMP.16b, RTMP.16b, #8; \
+ eor vt.16b, vt.16b, RTMP.16b;
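tweak_next multiplies the running XTS tweak by x in GF(2^128) under the little-endian block convention: RMASK holds { 1, 0x87 } in its two 64-bit lanes, sshr broadcasts each lane's top bit, and the ext swaps the two carry lanes before they are folded in. A scalar equivalent, as a hedged sketch (helper name illustrative):

#include <stdint.h>

/* Multiply a 128-bit tweak by x in GF(2^128); t[0] is the low half. */
static void xts_tweak_next(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
}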
-.Lctr_end:
- /* store new CTR */
- rev x7, x7;
- rev x8, x8;
- stp x7, x8, [x3];
+.align 3
+SYM_FUNC_START(sm4_ce_xts_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: tweak (big endian, 128 bit)
+ * w4: nbytes
+ * x5: round key array for IV
+ */
+ ld1 {v8.16b}, [x3]
- ret;
-SYM_FUNC_END(sm4_ce_ctr_enc)
+ cbz x5, .Lxts_enc_nofirst
+
+ SM4_PREPARE(x5)
+
+ /* Generate first tweak */
+ SM4_CRYPT_BLK(v8)
+
+.Lxts_enc_nofirst:
+ SM4_PREPARE(x0)
+
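+	/* split nbytes into full blocks (w4) and a partial tail (w5);
+	 * when the tail is non-zero, hold back the last full block so
+	 * it can be paired with the tail for ciphertext stealing */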
+ ands w5, w4, #15
+ lsr w4, w4, #4
+ sub w6, w4, #1
+ csel w4, w4, w6, eq
+ uxtw x5, w5
+
+ movi RMASK.2s, #0x1
+ movi RTMP0.2s, #0x87
+ uzp1 RMASK.4s, RMASK.4s, RTMP0.4s
+
+ cbz w4, .Lxts_enc_cts
+
+.Lxts_enc_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lxts_enc_4x
+
+ tweak_next( v9, v8, RTMP0)
+ tweak_next(v10, v9, RTMP1)
+ tweak_next(v11, v10, RTMP2)
+ tweak_next(v12, v11, RTMP3)
+ tweak_next(v13, v12, RTMP0)
+ tweak_next(v14, v13, RTMP1)
+ tweak_next(v15, v14, RTMP2)
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ ld1 {v4.16b-v7.16b}, [x2], #64
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ tweak_next(v8, v15, RTMP3)
+
+ cbz w4, .Lxts_enc_cts
+ b .Lxts_enc_loop_8x
+
+.Lxts_enc_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lxts_enc_loop_1x
+
+ sub w4, w4, #4
+
+ tweak_next( v9, v8, RTMP0)
+ tweak_next(v10, v9, RTMP1)
+ tweak_next(v11, v10, RTMP2)
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ st1 {v0.16b-v3.16b}, [x1], #64
+
+ tweak_next(v8, v11, RTMP3)
+
+ cbz w4, .Lxts_enc_cts
+
+.Lxts_enc_loop_1x:
+ sub w4, w4, #1
+
+ ld1 {v0.16b}, [x2], #16
+ eor v0.16b, v0.16b, v8.16b
+
+ SM4_CRYPT_BLK(v0)
+
+ eor v0.16b, v0.16b, v8.16b
+ st1 {v0.16b}, [x1], #16
+
+ tweak_next(v8, v8, RTMP0)
+
+ cbnz w4, .Lxts_enc_loop_1x
+
+.Lxts_enc_cts:
+ cbz x5, .Lxts_enc_end
+
+ /* cipher text stealing */
+
+ tweak_next(v9, v8, RTMP0)
+ ld1 {v0.16b}, [x2]
+ eor v0.16b, v0.16b, v8.16b
+ SM4_CRYPT_BLK(v0)
+ eor v0.16b, v0.16b, v8.16b
+
+ /* load permute table */
+ adr_l x6, .Lcts_permute_table
+ add x7, x6, #32
+ add x6, x6, x5
+ sub x7, x7, x5
+ ld1 {v3.16b}, [x6]
+ ld1 {v4.16b}, [x7]
+
+ /* overlapping loads */
+ add x2, x2, x5
+ ld1 {v1.16b}, [x2]
+
+ /* create Cn from En-1 */
+ tbl v2.16b, {v0.16b}, v3.16b
+	/* pad Pn with the tail bytes of En-1 */
+ tbx v0.16b, {v1.16b}, v4.16b
+
+ eor v0.16b, v0.16b, v9.16b
+ SM4_CRYPT_BLK(v0)
+ eor v0.16b, v0.16b, v9.16b
+
+ /* overlapping stores */
+ add x5, x1, x5
+ st1 {v2.16b}, [x5]
+ st1 {v0.16b}, [x1]
+
+ b .Lxts_enc_ret
+
+.Lxts_enc_end:
+ /* store new tweak */
+ st1 {v8.16b}, [x3]
+
+.Lxts_enc_ret:
+ ret
+SYM_FUNC_END(sm4_ce_xts_enc)
+
+.align 3
+SYM_FUNC_START(sm4_ce_xts_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: tweak (big endian, 128 bit)
+ * w4: nbytes
+ * x5: round key array for IV
+ */
+ ld1 {v8.16b}, [x3]
+
+ cbz x5, .Lxts_dec_nofirst
+
+ SM4_PREPARE(x5)
+
+ /* Generate first tweak */
+ SM4_CRYPT_BLK(v8)
+
+.Lxts_dec_nofirst:
+ SM4_PREPARE(x0)
+
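+	/* same block/tail split as the encrypt path: reserve the last
+	 * full block for ciphertext stealing when w5 is non-zero */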
+ ands w5, w4, #15
+ lsr w4, w4, #4
+ sub w6, w4, #1
+ csel w4, w4, w6, eq
+ uxtw x5, w5
+
+ movi RMASK.2s, #0x1
+ movi RTMP0.2s, #0x87
+ uzp1 RMASK.4s, RMASK.4s, RTMP0.4s
+
+ cbz w4, .Lxts_dec_cts
+
+.Lxts_dec_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lxts_dec_4x
+
+ tweak_next( v9, v8, RTMP0)
+ tweak_next(v10, v9, RTMP1)
+ tweak_next(v11, v10, RTMP2)
+ tweak_next(v12, v11, RTMP3)
+ tweak_next(v13, v12, RTMP0)
+ tweak_next(v14, v13, RTMP1)
+ tweak_next(v15, v14, RTMP2)
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ ld1 {v4.16b-v7.16b}, [x2], #64
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ eor v4.16b, v4.16b, v12.16b
+ eor v5.16b, v5.16b, v13.16b
+ eor v6.16b, v6.16b, v14.16b
+ eor v7.16b, v7.16b, v15.16b
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ tweak_next(v8, v15, RTMP3)
+
+ cbz w4, .Lxts_dec_cts
+ b .Lxts_dec_loop_8x
+
+.Lxts_dec_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lxts_dec_loop_1x
+
+ sub w4, w4, #4
+
+ tweak_next( v9, v8, RTMP0)
+ tweak_next(v10, v9, RTMP1)
+ tweak_next(v11, v10, RTMP2)
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ eor v0.16b, v0.16b, v8.16b
+ eor v1.16b, v1.16b, v9.16b
+ eor v2.16b, v2.16b, v10.16b
+ eor v3.16b, v3.16b, v11.16b
+ st1 {v0.16b-v3.16b}, [x1], #64
+
+ tweak_next(v8, v11, RTMP3)
+
+ cbz w4, .Lxts_dec_cts
+
+.Lxts_dec_loop_1x:
+ sub w4, w4, #1
+
+ ld1 {v0.16b}, [x2], #16
+ eor v0.16b, v0.16b, v8.16b
+
+ SM4_CRYPT_BLK(v0)
+
+ eor v0.16b, v0.16b, v8.16b
+ st1 {v0.16b}, [x1], #16
+
+ tweak_next(v8, v8, RTMP0)
+
+ cbnz w4, .Lxts_dec_loop_1x
+
+.Lxts_dec_cts:
+ cbz x5, .Lxts_dec_end
+
+ /* cipher text stealing */
+
+ tweak_next(v9, v8, RTMP0)
+ ld1 {v0.16b}, [x2]
+ eor v0.16b, v0.16b, v9.16b
+ SM4_CRYPT_BLK(v0)
+ eor v0.16b, v0.16b, v9.16b
+
+ /* load permute table */
+ adr_l x6, .Lcts_permute_table
+ add x7, x6, #32
+ add x6, x6, x5
+ sub x7, x7, x5
+ ld1 {v3.16b}, [x6]
+ ld1 {v4.16b}, [x7]
+
+ /* overlapping loads */
+ add x2, x2, x5
+ ld1 {v1.16b}, [x2]
+
+ /* create Cn from En-1 */
+ tbl v2.16b, {v0.16b}, v3.16b
+	/* pad Pn with the tail bytes of En-1 */
+ tbx v0.16b, {v1.16b}, v4.16b
+
+ eor v0.16b, v0.16b, v8.16b
+ SM4_CRYPT_BLK(v0)
+ eor v0.16b, v0.16b, v8.16b
+
+ /* overlapping stores */
+ add x5, x1, x5
+ st1 {v2.16b}, [x5]
+ st1 {v0.16b}, [x1]
+
+ b .Lxts_dec_ret
+
+.Lxts_dec_end:
+ /* store new tweak */
+ st1 {v8.16b}, [x3]
+
+.Lxts_dec_ret:
+ ret
+SYM_FUNC_END(sm4_ce_xts_dec)
+
+.align 3
+SYM_FUNC_START(sm4_ce_mac_update)
+ /* input:
+ * x0: round key array, CTX
+ * x1: digest
+ * x2: src
+ * w3: nblocks
+ * w4: enc_before
+ * w5: enc_after
+ */
+ SM4_PREPARE(x0)
+
+ ld1 {RMAC.16b}, [x1]
+
+ cbz w4, .Lmac_update
+
+ SM4_CRYPT_BLK(RMAC)
+
+.Lmac_update:
+ cbz w3, .Lmac_ret
+
+ sub w6, w3, #1
+ cmp w5, wzr
+ csel w3, w3, w6, ne
+
+ cbz w3, .Lmac_end
+
+.Lmac_loop_4x:
+ cmp w3, #4
+ blt .Lmac_loop_1x
+
+ sub w3, w3, #4
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ eor RMAC.16b, RMAC.16b, v0.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v1.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v2.16b
+ SM4_CRYPT_BLK(RMAC)
+ eor RMAC.16b, RMAC.16b, v3.16b
+ SM4_CRYPT_BLK(RMAC)
+
+ cbz w3, .Lmac_end
+ b .Lmac_loop_4x
+
+.Lmac_loop_1x:
+ sub w3, w3, #1
+
+ ld1 {v0.16b}, [x2], #16
+
+ eor RMAC.16b, RMAC.16b, v0.16b
+ SM4_CRYPT_BLK(RMAC)
+
+ cbnz w3, .Lmac_loop_1x
+
+.Lmac_end:
+ cbnz w5, .Lmac_ret
+
+ ld1 {v0.16b}, [x2], #16
+ eor RMAC.16b, RMAC.16b, v0.16b
+
+.Lmac_ret:
+ st1 {RMAC.16b}, [x1]
+ ret
+SYM_FUNC_END(sm4_ce_mac_update)
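sm4_ce_mac_update is a plain CBC-MAC engine: each block is XORed into the running digest and encrypted, enc_before forces one encryption of the buffered digest up front, and enc_after decides whether the final block is encrypted or merely absorbed. A behavioural sketch (callback type and names are illustrative, not kernel API):

#include <stdint.h>

typedef void (*blk_enc_t)(uint8_t out[16], const uint8_t in[16]);

static void mac_update(blk_enc_t enc, uint8_t digest[16], const uint8_t *src,
		       unsigned int nblocks, int enc_before, int enc_after)
{
	if (enc_before)
		enc(digest, digest);

	while (nblocks--) {
		for (int i = 0; i < 16; i++)
			digest[i] ^= src[i];
		src += 16;
		if (nblocks || enc_after)	/* final block only if enc_after */
			enc(digest, digest);
	}
}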
+
+ .section ".rodata", "a"
+ .align 4
+.Lbswap128_mask:
+ .byte 0x0c, 0x0d, 0x0e, 0x0f, 0x08, 0x09, 0x0a, 0x0b
+ .byte 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03
+
+.Lcts_permute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
diff --git a/arch/arm64/crypto/sm4-ce-gcm-core.S b/arch/arm64/crypto/sm4-ce-gcm-core.S
new file mode 100644
index 000000000000..347f25d75727
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-gcm-core.S
@@ -0,0 +1,742 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions
+ * as specified in rfc8998
+ * https://datatracker.ietf.org/doc/html/rfc8998
+ *
+ * Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <asm/assembler.h>
+#include "sm4-ce-asm.h"
+
+.arch armv8-a+crypto
+
+.irp b, 0, 1, 2, 3, 24, 25, 26, 27, 28, 29, 30, 31
+ .set .Lv\b\().4s, \b
+.endr
+
+.macro sm4e, vd, vn
+ .inst 0xcec08400 | (.L\vn << 5) | .L\vd
+.endm
+
+/* Register macros */
+
+/* Used for both encryption and decryption */
+#define RHASH v21
+#define RRCONST v22
+#define RZERO v23
+
+/* Helper macros. */
+
+/*
+ * input: m0, m1
+ * output: r0:r1 (low 128-bits in r0, high in r1)
+ */
+#define PMUL_128x128(r0, r1, m0, m1, T0, T1) \
+ ext T0.16b, m1.16b, m1.16b, #8; \
+ pmull r0.1q, m0.1d, m1.1d; \
+ pmull T1.1q, m0.1d, T0.1d; \
+ pmull2 T0.1q, m0.2d, T0.2d; \
+ pmull2 r1.1q, m0.2d, m1.2d; \
+ eor T0.16b, T0.16b, T1.16b; \
+ ext T1.16b, RZERO.16b, T0.16b, #8; \
+ ext T0.16b, T0.16b, RZERO.16b, #8; \
+ eor r0.16b, r0.16b, T1.16b; \
+ eor r1.16b, r1.16b, T0.16b;
+
+#define PMUL_128x128_4x(r0, r1, m0, m1, T0, T1, \
+ r2, r3, m2, m3, T2, T3, \
+ r4, r5, m4, m5, T4, T5, \
+ r6, r7, m6, m7, T6, T7) \
+ ext T0.16b, m1.16b, m1.16b, #8; \
+ ext T2.16b, m3.16b, m3.16b, #8; \
+ ext T4.16b, m5.16b, m5.16b, #8; \
+ ext T6.16b, m7.16b, m7.16b, #8; \
+ pmull r0.1q, m0.1d, m1.1d; \
+ pmull r2.1q, m2.1d, m3.1d; \
+ pmull r4.1q, m4.1d, m5.1d; \
+ pmull r6.1q, m6.1d, m7.1d; \
+ pmull T1.1q, m0.1d, T0.1d; \
+ pmull T3.1q, m2.1d, T2.1d; \
+ pmull T5.1q, m4.1d, T4.1d; \
+ pmull T7.1q, m6.1d, T6.1d; \
+ pmull2 T0.1q, m0.2d, T0.2d; \
+ pmull2 T2.1q, m2.2d, T2.2d; \
+ pmull2 T4.1q, m4.2d, T4.2d; \
+ pmull2 T6.1q, m6.2d, T6.2d; \
+ pmull2 r1.1q, m0.2d, m1.2d; \
+ pmull2 r3.1q, m2.2d, m3.2d; \
+ pmull2 r5.1q, m4.2d, m5.2d; \
+ pmull2 r7.1q, m6.2d, m7.2d; \
+ eor T0.16b, T0.16b, T1.16b; \
+ eor T2.16b, T2.16b, T3.16b; \
+ eor T4.16b, T4.16b, T5.16b; \
+ eor T6.16b, T6.16b, T7.16b; \
+ ext T1.16b, RZERO.16b, T0.16b, #8; \
+ ext T3.16b, RZERO.16b, T2.16b, #8; \
+ ext T5.16b, RZERO.16b, T4.16b, #8; \
+ ext T7.16b, RZERO.16b, T6.16b, #8; \
+ ext T0.16b, T0.16b, RZERO.16b, #8; \
+ ext T2.16b, T2.16b, RZERO.16b, #8; \
+ ext T4.16b, T4.16b, RZERO.16b, #8; \
+ ext T6.16b, T6.16b, RZERO.16b, #8; \
+ eor r0.16b, r0.16b, T1.16b; \
+ eor r2.16b, r2.16b, T3.16b; \
+ eor r4.16b, r4.16b, T5.16b; \
+ eor r6.16b, r6.16b, T7.16b; \
+ eor r1.16b, r1.16b, T0.16b; \
+ eor r3.16b, r3.16b, T2.16b; \
+ eor r5.16b, r5.16b, T4.16b; \
+ eor r7.16b, r7.16b, T6.16b;
+
+/*
+ * input: r0:r1 (low 128-bits in r0, high in r1)
+ * output: a
+ */
+#define REDUCTION(a, r0, r1, rconst, T0, T1) \
+ pmull2 T0.1q, r1.2d, rconst.2d; \
+ ext T1.16b, T0.16b, RZERO.16b, #8; \
+ ext T0.16b, RZERO.16b, T0.16b, #8; \
+ eor r1.16b, r1.16b, T1.16b; \
+ eor r0.16b, r0.16b, T0.16b; \
+ pmull T0.1q, r1.1d, rconst.1d; \
+ eor a.16b, r0.16b, T0.16b;
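PMUL_128x128 and REDUCTION implement GHASH on bit-reflected operands (hence the rbit applied to every block elsewhere in this file): the pmull/pmull2 pairs build the 256-bit carry-less product and REDUCTION folds it back modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1 via the constant at .Lghash_rconst. As a correctness reference only (vastly slower than the pmull path), one GHASH multiply in the right-shift form of NIST SP 800-38D:

#include <stdint.h>

/* x and h are 128-bit big-endian values as high/low 64-bit halves. */
static void ghash_mul(uint64_t x[2], const uint64_t h[2])
{
	uint64_t zh = 0, zl = 0, vh = h[0], vl = h[1];

	for (int i = 0; i < 128; i++) {
		uint64_t bit = i < 64 ? (x[0] >> (63 - i)) & 1
				      : (x[1] >> (127 - i)) & 1;
		uint64_t lsb = vl & 1;

		if (bit) {
			zh ^= vh;
			zl ^= vl;
		}
		vl = (vl >> 1) | (vh << 63);
		vh >>= 1;
		if (lsb)
			vh ^= 0xe100000000000000ULL;	/* R = 0xe1 << 120 */
	}
	x[0] = zh;
	x[1] = zl;
}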
+
+#define SM4_CRYPT_PMUL_128x128_BLK(b0, r0, r1, m0, m1, T0, T1) \
+ rev32 b0.16b, b0.16b; \
+ ext T0.16b, m1.16b, m1.16b, #8; \
+ sm4e b0.4s, v24.4s; \
+ pmull r0.1q, m0.1d, m1.1d; \
+ sm4e b0.4s, v25.4s; \
+ pmull T1.1q, m0.1d, T0.1d; \
+ sm4e b0.4s, v26.4s; \
+ pmull2 T0.1q, m0.2d, T0.2d; \
+ sm4e b0.4s, v27.4s; \
+ pmull2 r1.1q, m0.2d, m1.2d; \
+ sm4e b0.4s, v28.4s; \
+ eor T0.16b, T0.16b, T1.16b; \
+ sm4e b0.4s, v29.4s; \
+ ext T1.16b, RZERO.16b, T0.16b, #8; \
+ sm4e b0.4s, v30.4s; \
+ ext T0.16b, T0.16b, RZERO.16b, #8; \
+ sm4e b0.4s, v31.4s; \
+ eor r0.16b, r0.16b, T1.16b; \
+ rev64 b0.4s, b0.4s; \
+ eor r1.16b, r1.16b, T0.16b; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ rev32 b0.16b, b0.16b;
+
+#define SM4_CRYPT_PMUL_128x128_BLK3(b0, b1, b2, \
+ r0, r1, m0, m1, T0, T1, \
+ r2, r3, m2, m3, T2, T3, \
+ r4, r5, m4, m5, T4, T5) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ ext T0.16b, m1.16b, m1.16b, #8; \
+ ext T2.16b, m3.16b, m3.16b, #8; \
+ ext T4.16b, m5.16b, m5.16b, #8; \
+ sm4e b0.4s, v24.4s; \
+ sm4e b1.4s, v24.4s; \
+ sm4e b2.4s, v24.4s; \
+ pmull r0.1q, m0.1d, m1.1d; \
+ pmull r2.1q, m2.1d, m3.1d; \
+ pmull r4.1q, m4.1d, m5.1d; \
+ sm4e b0.4s, v25.4s; \
+ sm4e b1.4s, v25.4s; \
+ sm4e b2.4s, v25.4s; \
+ pmull T1.1q, m0.1d, T0.1d; \
+ pmull T3.1q, m2.1d, T2.1d; \
+ pmull T5.1q, m4.1d, T4.1d; \
+ sm4e b0.4s, v26.4s; \
+ sm4e b1.4s, v26.4s; \
+ sm4e b2.4s, v26.4s; \
+ pmull2 T0.1q, m0.2d, T0.2d; \
+ pmull2 T2.1q, m2.2d, T2.2d; \
+ pmull2 T4.1q, m4.2d, T4.2d; \
+ sm4e b0.4s, v27.4s; \
+ sm4e b1.4s, v27.4s; \
+ sm4e b2.4s, v27.4s; \
+ pmull2 r1.1q, m0.2d, m1.2d; \
+ pmull2 r3.1q, m2.2d, m3.2d; \
+ pmull2 r5.1q, m4.2d, m5.2d; \
+ sm4e b0.4s, v28.4s; \
+ sm4e b1.4s, v28.4s; \
+ sm4e b2.4s, v28.4s; \
+ eor T0.16b, T0.16b, T1.16b; \
+ eor T2.16b, T2.16b, T3.16b; \
+ eor T4.16b, T4.16b, T5.16b; \
+ sm4e b0.4s, v29.4s; \
+ sm4e b1.4s, v29.4s; \
+ sm4e b2.4s, v29.4s; \
+ ext T1.16b, RZERO.16b, T0.16b, #8; \
+ ext T3.16b, RZERO.16b, T2.16b, #8; \
+ ext T5.16b, RZERO.16b, T4.16b, #8; \
+ sm4e b0.4s, v30.4s; \
+ sm4e b1.4s, v30.4s; \
+ sm4e b2.4s, v30.4s; \
+ ext T0.16b, T0.16b, RZERO.16b, #8; \
+ ext T2.16b, T2.16b, RZERO.16b, #8; \
+ ext T4.16b, T4.16b, RZERO.16b, #8; \
+ sm4e b0.4s, v31.4s; \
+ sm4e b1.4s, v31.4s; \
+ sm4e b2.4s, v31.4s; \
+ eor r0.16b, r0.16b, T1.16b; \
+ eor r2.16b, r2.16b, T3.16b; \
+ eor r4.16b, r4.16b, T5.16b; \
+ rev64 b0.4s, b0.4s; \
+ rev64 b1.4s, b1.4s; \
+ rev64 b2.4s, b2.4s; \
+ eor r1.16b, r1.16b, T0.16b; \
+ eor r3.16b, r3.16b, T2.16b; \
+ eor r5.16b, r5.16b, T4.16b; \
+ ext b0.16b, b0.16b, b0.16b, #8; \
+ ext b1.16b, b1.16b, b1.16b, #8; \
+ ext b2.16b, b2.16b, b2.16b, #8; \
+ eor r0.16b, r0.16b, r2.16b; \
+ eor r1.16b, r1.16b, r3.16b; \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ eor r0.16b, r0.16b, r4.16b; \
+ eor r1.16b, r1.16b, r5.16b;
+
+#define inc32_le128(vctr) \
+ mov vctr.d[1], x9; \
+ add w6, w9, #1; \
+ mov vctr.d[0], x8; \
+ bfi x9, x6, #0, #32; \
+ rev64 vctr.16b, vctr.16b;
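Unlike the full 128-bit increment in sm4-ce-core.S, GCM only ever increments the low 32 bits of the counter block (inc32); inc32_le128 keeps the counter halves host-order in x8/x9 and bumps just the bottom word. A byte-level equivalent:

#include <stdint.h>

/* GCM inc32: increment only the last four (big-endian) counter bytes. */
static void gcm_inc32(uint8_t ctr[16])
{
	for (int i = 15; i >= 12; i--)
		if (++ctr[i] != 0)
			break;
}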
+
+#define GTAG_HASH_LENGTHS(vctr0, vlen) \
+ ld1 {vlen.16b}, [x7]; \
+ /* construct CTR0 */ \
+	/* the lower 32 bits of the initial IV are always be32(1) */ \
+ mov x6, #0x1; \
+ bfi x9, x6, #0, #32; \
+ mov vctr0.d[0], x8; \
+ mov vctr0.d[1], x9; \
+ rbit vlen.16b, vlen.16b; \
+ rev64 vctr0.16b, vctr0.16b; \
+ /* authtag = GCTR(CTR0, GHASH) */ \
+ eor RHASH.16b, RHASH.16b, vlen.16b; \
+ SM4_CRYPT_PMUL_128x128_BLK(vctr0, RR0, RR1, RHASH, RH1, \
+ RTMP0, RTMP1); \
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3); \
+ rbit RHASH.16b, RHASH.16b; \
+ eor RHASH.16b, RHASH.16b, vctr0.16b;
+
+/* Register macros for encrypt and ghash */
+
+/* can be the same as input v0-v3 */
+#define RR1 v0
+#define RR3 v1
+#define RR5 v2
+#define RR7 v3
+
+#define RR0 v4
+#define RR2 v5
+#define RR4 v6
+#define RR6 v7
+
+#define RTMP0 v8
+#define RTMP1 v9
+#define RTMP2 v10
+#define RTMP3 v11
+#define RTMP4 v12
+#define RTMP5 v13
+#define RTMP6 v14
+#define RTMP7 v15
+
+#define RH1 v16
+#define RH2 v17
+#define RH3 v18
+#define RH4 v19
+
+.align 3
+SYM_FUNC_START(sm4_ce_pmull_ghash_setup)
+ /* input:
+ * x0: round key array, CTX
+ * x1: ghash table
+ */
+ SM4_PREPARE(x0)
+
+ adr_l x2, .Lghash_rconst
+ ld1r {RRCONST.2d}, [x2]
+
+ eor RZERO.16b, RZERO.16b, RZERO.16b
+
+ /* H = E(K, 0^128) */
+ rev32 v0.16b, RZERO.16b
+ SM4_CRYPT_BLK_BE(v0)
+
+ /* H ^ 1 */
+ rbit RH1.16b, v0.16b
+
+ /* H ^ 2 */
+ PMUL_128x128(RR0, RR1, RH1, RH1, RTMP0, RTMP1)
+ REDUCTION(RH2, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ /* H ^ 3 */
+ PMUL_128x128(RR0, RR1, RH2, RH1, RTMP0, RTMP1)
+ REDUCTION(RH3, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ /* H ^ 4 */
+ PMUL_128x128(RR0, RR1, RH2, RH2, RTMP0, RTMP1)
+ REDUCTION(RH4, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ st1 {RH1.16b-RH4.16b}, [x1]
+
+ ret
+SYM_FUNC_END(sm4_ce_pmull_ghash_setup)
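Precomputing H through H^4 lets the 4-way loops below fold four blocks into a single REDUCTION via the usual GHASH aggregation identity (Y is the running digest, X1..X4 the next four blocks, all arithmetic in GF(2^128)):

	Y' = (Y ^ X1)*H^4 ^ X2*H^3 ^ X3*H^2 ^ X4*H

which is algebraically identical to absorbing the four blocks one at a time but pays the reduction cost only once.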
+
+.align 3
+SYM_FUNC_START(pmull_ghash_update)
+ /* input:
+ * x0: ghash table
+ * x1: ghash result
+ * x2: src
+ * w3: nblocks
+ */
+ ld1 {RH1.16b-RH4.16b}, [x0]
+
+ ld1 {RHASH.16b}, [x1]
+ rbit RHASH.16b, RHASH.16b
+
+ adr_l x4, .Lghash_rconst
+ ld1r {RRCONST.2d}, [x4]
+
+ eor RZERO.16b, RZERO.16b, RZERO.16b
+
+.Lghash_loop_4x:
+ cmp w3, #4
+ blt .Lghash_loop_1x
+
+ sub w3, w3, #4
+
+ ld1 {v0.16b-v3.16b}, [x2], #64
+
+ rbit v0.16b, v0.16b
+ rbit v1.16b, v1.16b
+ rbit v2.16b, v2.16b
+ rbit v3.16b, v3.16b
+
+ /*
+ * (in0 ^ HASH) * H^4 => rr0:rr1
+ * (in1) * H^3 => rr2:rr3
+ * (in2) * H^2 => rr4:rr5
+ * (in3) * H^1 => rr6:rr7
+ */
+ eor RHASH.16b, RHASH.16b, v0.16b
+
+ PMUL_128x128_4x(RR0, RR1, RHASH, RH4, RTMP0, RTMP1,
+ RR2, RR3, v1, RH3, RTMP2, RTMP3,
+ RR4, RR5, v2, RH2, RTMP4, RTMP5,
+ RR6, RR7, v3, RH1, RTMP6, RTMP7)
+
+ eor RR0.16b, RR0.16b, RR2.16b
+ eor RR1.16b, RR1.16b, RR3.16b
+ eor RR0.16b, RR0.16b, RR4.16b
+ eor RR1.16b, RR1.16b, RR5.16b
+ eor RR0.16b, RR0.16b, RR6.16b
+ eor RR1.16b, RR1.16b, RR7.16b
+
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1)
+
+ cbz w3, .Lghash_end
+ b .Lghash_loop_4x
+
+.Lghash_loop_1x:
+ sub w3, w3, #1
+
+ ld1 {v0.16b}, [x2], #16
+ rbit v0.16b, v0.16b
+ eor RHASH.16b, RHASH.16b, v0.16b
+
+ PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1)
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ cbnz w3, .Lghash_loop_1x
+
+.Lghash_end:
+ rbit RHASH.16b, RHASH.16b
+ st1 {RHASH.2d}, [x1]
+
+ ret
+SYM_FUNC_END(pmull_ghash_update)
+
+.align 3
+SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_enc)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nbytes
+ * x5: ghash result
+ * x6: ghash table
+ * x7: lengths (only for last block)
+ */
+ SM4_PREPARE(x0)
+
+ ldp x8, x9, [x3]
+ rev x8, x8
+ rev x9, x9
+
+ ld1 {RH1.16b-RH4.16b}, [x6]
+
+ ld1 {RHASH.16b}, [x5]
+ rbit RHASH.16b, RHASH.16b
+
+ adr_l x6, .Lghash_rconst
+ ld1r {RRCONST.2d}, [x6]
+
+ eor RZERO.16b, RZERO.16b, RZERO.16b
+
+ cbz w4, .Lgcm_enc_hash_len
+
+.Lgcm_enc_loop_4x:
+ cmp w4, #(4 * 16)
+ blt .Lgcm_enc_loop_1x
+
+ sub w4, w4, #(4 * 16)
+
+ /* construct CTRs */
+ inc32_le128(v0) /* +0 */
+ inc32_le128(v1) /* +1 */
+ inc32_le128(v2) /* +2 */
+ inc32_le128(v3) /* +3 */
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ eor v0.16b, v0.16b, RTMP0.16b
+ eor v1.16b, v1.16b, RTMP1.16b
+ eor v2.16b, v2.16b, RTMP2.16b
+ eor v3.16b, v3.16b, RTMP3.16b
+ st1 {v0.16b-v3.16b}, [x1], #64
+
+ /* ghash update */
+
+ rbit v0.16b, v0.16b
+ rbit v1.16b, v1.16b
+ rbit v2.16b, v2.16b
+ rbit v3.16b, v3.16b
+
+ /*
+ * (in0 ^ HASH) * H^4 => rr0:rr1
+ * (in1) * H^3 => rr2:rr3
+ * (in2) * H^2 => rr4:rr5
+ * (in3) * H^1 => rr6:rr7
+ */
+ eor RHASH.16b, RHASH.16b, v0.16b
+
+ PMUL_128x128_4x(RR0, RR1, RHASH, RH4, RTMP0, RTMP1,
+ RR2, RR3, v1, RH3, RTMP2, RTMP3,
+ RR4, RR5, v2, RH2, RTMP4, RTMP5,
+ RR6, RR7, v3, RH1, RTMP6, RTMP7)
+
+ eor RR0.16b, RR0.16b, RR2.16b
+ eor RR1.16b, RR1.16b, RR3.16b
+ eor RR0.16b, RR0.16b, RR4.16b
+ eor RR1.16b, RR1.16b, RR5.16b
+ eor RR0.16b, RR0.16b, RR6.16b
+ eor RR1.16b, RR1.16b, RR7.16b
+
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1)
+
+ cbz w4, .Lgcm_enc_hash_len
+ b .Lgcm_enc_loop_4x
+
+.Lgcm_enc_loop_1x:
+ cmp w4, #16
+ blt .Lgcm_enc_tail
+
+ sub w4, w4, #16
+
+ /* construct CTRs */
+ inc32_le128(v0)
+
+ ld1 {RTMP0.16b}, [x2], #16
+
+ SM4_CRYPT_BLK(v0)
+
+ eor v0.16b, v0.16b, RTMP0.16b
+ st1 {v0.16b}, [x1], #16
+
+ /* ghash update */
+ rbit v0.16b, v0.16b
+ eor RHASH.16b, RHASH.16b, v0.16b
+ PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1)
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ cbz w4, .Lgcm_enc_hash_len
+ b .Lgcm_enc_loop_1x
+
+.Lgcm_enc_tail:
+ /* construct CTRs */
+ inc32_le128(v0)
+ SM4_CRYPT_BLK(v0)
+
+ /* load permute table */
+ adr_l x0, .Lcts_permute_table
+ add x0, x0, #32
+ sub x0, x0, w4, uxtw
+ ld1 {v3.16b}, [x0]
+
+.Lgcm_enc_tail_loop:
+ /* do encrypt */
+ ldrb w0, [x2], #1 /* get 1 byte from input */
+	umov w6, v0.b[0] /* get the next keystream byte */
+ eor w6, w6, w0 /* w6 = CTR ^ input */
+ strb w6, [x1], #1 /* store out byte */
+
+ /* shift right out one byte */
+ ext v0.16b, v0.16b, v0.16b, #1
+	/* ciphertext bytes accumulate in the high bytes of v0 */
+ ins v0.b[15], w6
+
+ subs w4, w4, #1
+ bne .Lgcm_enc_tail_loop
+
+ /* padding last block with zeros */
+ tbl v0.16b, {v0.16b}, v3.16b
+
+ /* ghash update */
+ rbit v0.16b, v0.16b
+ eor RHASH.16b, RHASH.16b, v0.16b
+ PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1)
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+.Lgcm_enc_hash_len:
+ cbz x7, .Lgcm_enc_end
+
+ GTAG_HASH_LENGTHS(v1, v3)
+
+ b .Lgcm_enc_ret
+
+.Lgcm_enc_end:
+ /* store new CTR */
+ rev x8, x8
+ rev x9, x9
+ stp x8, x9, [x3]
+
+ rbit RHASH.16b, RHASH.16b
+
+.Lgcm_enc_ret:
+ /* store new MAC */
+ st1 {RHASH.2d}, [x5]
+
+ ret
+SYM_FUNC_END(sm4_ce_pmull_gcm_enc)
+
+#undef RR1
+#undef RR3
+#undef RR5
+#undef RR7
+#undef RR0
+#undef RR2
+#undef RR4
+#undef RR6
+#undef RTMP0
+#undef RTMP1
+#undef RTMP2
+#undef RTMP3
+#undef RTMP4
+#undef RTMP5
+#undef RTMP6
+#undef RTMP7
+#undef RH1
+#undef RH2
+#undef RH3
+#undef RH4
+
+/* Register macros for decrypt */
+
+/* v0-v2 for building CTRs, v3-v5 for saving inputs */
+
+#define RR1 v6
+#define RR3 v7
+#define RR5 v8
+
+#define RR0 v9
+#define RR2 v10
+#define RR4 v11
+
+#define RTMP0 v12
+#define RTMP1 v13
+#define RTMP2 v14
+#define RTMP3 v15
+#define RTMP4 v16
+#define RTMP5 v17
+
+#define RH1 v18
+#define RH2 v19
+#define RH3 v20
+
+.align 3
+SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_dec)
+ /* input:
+ * x0: round key array, CTX
+ * x1: dst
+ * x2: src
+ * x3: ctr (big endian, 128 bit)
+ * w4: nbytes
+ * x5: ghash result
+ * x6: ghash table
+ * x7: lengths (only for last block)
+ */
+ SM4_PREPARE(x0)
+
+ ldp x8, x9, [x3]
+ rev x8, x8
+ rev x9, x9
+
+ ld1 {RH1.16b-RH3.16b}, [x6]
+
+ ld1 {RHASH.16b}, [x5]
+ rbit RHASH.16b, RHASH.16b
+
+ adr_l x6, .Lghash_rconst
+ ld1r {RRCONST.2d}, [x6]
+
+ eor RZERO.16b, RZERO.16b, RZERO.16b
+
+ cbz w4, .Lgcm_dec_hash_len
+
+.Lgcm_dec_loop_3x:
+ cmp w4, #(3 * 16)
+ blt .Lgcm_dec_loop_1x
+
+ sub w4, w4, #(3 * 16)
+
+ ld1 {v3.16b-v5.16b}, [x2], #(3 * 16)
+
+ /* construct CTRs */
+ inc32_le128(v0) /* +0 */
+ rbit v6.16b, v3.16b
+ inc32_le128(v1) /* +1 */
+ rbit v7.16b, v4.16b
+ inc32_le128(v2) /* +2 */
+ rbit v8.16b, v5.16b
+
+ eor RHASH.16b, RHASH.16b, v6.16b
+
+ /* decrypt & ghash update */
+ SM4_CRYPT_PMUL_128x128_BLK3(v0, v1, v2,
+ RR0, RR1, RHASH, RH3, RTMP0, RTMP1,
+ RR2, RR3, v7, RH2, RTMP2, RTMP3,
+ RR4, RR5, v8, RH1, RTMP4, RTMP5)
+
+ eor v0.16b, v0.16b, v3.16b
+ eor v1.16b, v1.16b, v4.16b
+ eor v2.16b, v2.16b, v5.16b
+
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP0, RTMP1)
+
+ st1 {v0.16b-v2.16b}, [x1], #(3 * 16)
+
+ cbz w4, .Lgcm_dec_hash_len
+ b .Lgcm_dec_loop_3x
+
+.Lgcm_dec_loop_1x:
+ cmp w4, #16
+ blt .Lgcm_dec_tail
+
+ sub w4, w4, #16
+
+ ld1 {v3.16b}, [x2], #16
+
+ /* construct CTRs */
+ inc32_le128(v0)
+ rbit v6.16b, v3.16b
+
+ eor RHASH.16b, RHASH.16b, v6.16b
+
+ SM4_CRYPT_PMUL_128x128_BLK(v0, RR0, RR1, RHASH, RH1, RTMP0, RTMP1)
+
+ eor v0.16b, v0.16b, v3.16b
+
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+ st1 {v0.16b}, [x1], #16
+
+ cbz w4, .Lgcm_dec_hash_len
+ b .Lgcm_dec_loop_1x
+
+.Lgcm_dec_tail:
+ /* construct CTRs */
+ inc32_le128(v0)
+ SM4_CRYPT_BLK(v0)
+
+ /* load permute table */
+ adr_l x0, .Lcts_permute_table
+ add x0, x0, #32
+ sub x0, x0, w4, uxtw
+ ld1 {v3.16b}, [x0]
+
+.Lgcm_dec_tail_loop:
+ /* do decrypt */
+ ldrb w0, [x2], #1 /* get 1 byte from input */
+	umov w6, v0.b[0] /* get the next keystream byte */
+ eor w6, w6, w0 /* w6 = CTR ^ input */
+ strb w6, [x1], #1 /* store out byte */
+
+ /* shift right out one byte */
+ ext v0.16b, v0.16b, v0.16b, #1
+	/* ciphertext bytes accumulate in the high bytes of v0 */
+ ins v0.b[15], w0
+
+ subs w4, w4, #1
+ bne .Lgcm_dec_tail_loop
+
+ /* padding last block with zeros */
+ tbl v0.16b, {v0.16b}, v3.16b
+
+ /* ghash update */
+ rbit v0.16b, v0.16b
+ eor RHASH.16b, RHASH.16b, v0.16b
+ PMUL_128x128(RR0, RR1, RHASH, RH1, RTMP0, RTMP1)
+ REDUCTION(RHASH, RR0, RR1, RRCONST, RTMP2, RTMP3)
+
+.Lgcm_dec_hash_len:
+ cbz x7, .Lgcm_dec_end
+
+ GTAG_HASH_LENGTHS(v1, v3)
+
+ b .Lgcm_dec_ret
+
+.Lgcm_dec_end:
+ /* store new CTR */
+ rev x8, x8
+ rev x9, x9
+ stp x8, x9, [x3]
+
+ rbit RHASH.16b, RHASH.16b
+
+.Lgcm_dec_ret:
+ /* store new MAC */
+ st1 {RHASH.2d}, [x5]
+
+ ret
+SYM_FUNC_END(sm4_ce_pmull_gcm_dec)
+
+ .section ".rodata", "a"
+ .align 4
+.Lcts_permute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+
+.Lghash_rconst:
+ .quad 0x87
diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
new file mode 100644
index 000000000000..c450a2025ca9
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM4-GCM AEAD Algorithm using ARMv8 Crypto Extensions
+ * as specified in rfc8998
+ * https://datatracker.ietf.org/doc/html/rfc8998
+ *
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/cpufeature.h>
+#include <asm/neon.h>
+#include <crypto/b128ops.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/sm4.h>
+#include "sm4-ce.h"
+
+asmlinkage void sm4_ce_pmull_ghash_setup(const u32 *rkey_enc, u8 *ghash_table);
+asmlinkage void pmull_ghash_update(const u8 *ghash_table, u8 *ghash,
+ const u8 *src, unsigned int nblocks);
+asmlinkage void sm4_ce_pmull_gcm_enc(const u32 *rkey_enc, u8 *dst,
+ const u8 *src, u8 *iv,
+ unsigned int nbytes, u8 *ghash,
+ const u8 *ghash_table, const u8 *lengths);
+asmlinkage void sm4_ce_pmull_gcm_dec(const u32 *rkey_enc, u8 *dst,
+ const u8 *src, u8 *iv,
+ unsigned int nbytes, u8 *ghash,
+ const u8 *ghash_table, const u8 *lengths);
+
+#define GHASH_BLOCK_SIZE 16
+#define GCM_IV_SIZE 12
+
+struct sm4_gcm_ctx {
+ struct sm4_ctx key;
+ u8 ghash_table[16 * 4];
+};
+
+static int gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_gcm_ctx *ctx = crypto_aead_ctx(tfm);
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ kernel_neon_begin();
+
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+ sm4_ce_pmull_ghash_setup(ctx->key.rkey_enc, ctx->ghash_table);
+
+ kernel_neon_end();
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ switch (authsize) {
+ case 4:
+ case 8:
+ case 12 ... 16:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ u8 __aligned(8) buffer[GHASH_BLOCK_SIZE];
+ u32 assoclen = req->assoclen;
+ struct scatter_walk walk;
+ unsigned int buflen = 0;
+
+ scatterwalk_start(&walk, req->src);
+
+ do {
+ u32 n = scatterwalk_clamp(&walk, assoclen);
+ u8 *p, *ptr;
+
+ if (!n) {
+ scatterwalk_start(&walk, sg_next(walk.sg));
+ n = scatterwalk_clamp(&walk, assoclen);
+ }
+
+ p = ptr = scatterwalk_map(&walk);
+ assoclen -= n;
+ scatterwalk_advance(&walk, n);
+
+ if (n + buflen < GHASH_BLOCK_SIZE) {
+ memcpy(&buffer[buflen], ptr, n);
+ buflen += n;
+ } else {
+ unsigned int nblocks;
+
+ if (buflen) {
+ unsigned int l = GHASH_BLOCK_SIZE - buflen;
+
+ memcpy(&buffer[buflen], ptr, l);
+ ptr += l;
+ n -= l;
+
+ pmull_ghash_update(ctx->ghash_table, ghash,
+ buffer, 1);
+ }
+
+ nblocks = n / GHASH_BLOCK_SIZE;
+ if (nblocks) {
+ pmull_ghash_update(ctx->ghash_table, ghash,
+ ptr, nblocks);
+ ptr += nblocks * GHASH_BLOCK_SIZE;
+ }
+
+ buflen = n % GHASH_BLOCK_SIZE;
+ if (buflen)
+ memcpy(&buffer[0], ptr, buflen);
+ }
+
+ scatterwalk_unmap(p);
+ scatterwalk_done(&walk, 0, assoclen);
+ } while (assoclen);
+
+ /* padding with '0' */
+ if (buflen) {
+ memset(&buffer[buflen], 0, GHASH_BLOCK_SIZE - buflen);
+ pmull_ghash_update(ctx->ghash_table, ghash, buffer, 1);
+ }
+}
+
+static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
+ struct sm4_gcm_ctx *ctx, u8 ghash[],
+ void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
+ u8 *dst, const u8 *src, u8 *iv,
+ unsigned int nbytes, u8 *ghash,
+ const u8 *ghash_table, const u8 *lengths))
+{
+ u8 __aligned(8) iv[SM4_BLOCK_SIZE];
+ be128 __aligned(8) lengths;
+ int err;
+
+ memset(ghash, 0, SM4_BLOCK_SIZE);
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(walk->total * 8);
+
+ memcpy(iv, walk->iv, GCM_IV_SIZE);
+ put_unaligned_be32(2, iv + GCM_IV_SIZE);
+
+ kernel_neon_begin();
+
+ if (req->assoclen)
+ gcm_calculate_auth_mac(req, ghash);
+
+ do {
+ unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
+ const u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+
+ if (walk->nbytes == walk->total) {
+ tail = 0;
+
+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+ walk->nbytes, ghash,
+ ctx->ghash_table,
+ (const u8 *)&lengths);
+ } else if (walk->nbytes - tail) {
+ sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+ walk->nbytes - tail, ghash,
+ ctx->ghash_table, NULL);
+ }
+
+ kernel_neon_end();
+
+ err = skcipher_walk_done(walk, tail);
+ if (err)
+ return err;
+ if (walk->nbytes)
+ kernel_neon_begin();
+ } while (walk->nbytes > 0);
+
+ return 0;
+}
+
+static int gcm_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ struct skcipher_walk walk;
+ int err;
+
+ err = skcipher_walk_aead_encrypt(&walk, req, false);
+ if (err)
+ return err;
+
+ err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
+ if (err)
+ return err;
+
+ /* copy authtag to end of dst */
+ scatterwalk_map_and_copy(ghash, req->dst, req->assoclen + req->cryptlen,
+ crypto_aead_authsize(aead), 1);
+
+ return 0;
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int authsize = crypto_aead_authsize(aead);
+ struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
+ u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
+ u8 authtag[SM4_BLOCK_SIZE];
+ struct skcipher_walk walk;
+ int err;
+
+ err = skcipher_walk_aead_decrypt(&walk, req, false);
+ if (err)
+ return err;
+
+ err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
+ if (err)
+ return err;
+
+ /* compare calculated auth tag with the stored one */
+ scatterwalk_map_and_copy(authtag, req->src,
+ req->assoclen + req->cryptlen - authsize,
+ authsize, 0);
+
+ if (crypto_memneq(authtag, ghash, authsize))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static struct aead_alg sm4_gcm_alg = {
+ .base = {
+ .cra_name = "gcm(sm4)",
+ .cra_driver_name = "gcm-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_gcm_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = GCM_IV_SIZE,
+ .chunksize = SM4_BLOCK_SIZE,
+ .maxauthsize = SM4_BLOCK_SIZE,
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = gcm_encrypt,
+ .decrypt = gcm_decrypt,
+};
+
+static int __init sm4_ce_gcm_init(void)
+{
+ if (!cpu_have_named_feature(PMULL))
+ return -ENODEV;
+
+ return crypto_register_aead(&sm4_gcm_alg);
+}
+
+static void __exit sm4_ce_gcm_exit(void)
+{
+ crypto_unregister_aead(&sm4_gcm_alg);
+}
+
+static const struct cpu_feature __maybe_unused sm4_ce_gcm_cpu_feature[] = {
+ { cpu_feature(PMULL) },
+ {}
+};
+MODULE_DEVICE_TABLE(cpu, sm4_ce_gcm_cpu_feature);
+
+module_cpu_feature_match(SM4, sm4_ce_gcm_init);
+module_exit(sm4_ce_gcm_exit);
+
+MODULE_DESCRIPTION("Synchronous SM4 in GCM mode using ARMv8 Crypto Extensions");
+MODULE_ALIAS_CRYPTO("gcm(sm4)");
+MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 496d55c0d01a..0a2d32ed3bde 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -14,8 +14,12 @@
#include <linux/cpufeature.h>
#include <asm/neon.h>
#include <asm/simd.h>
+#include <crypto/b128ops.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
#include <crypto/sm4.h>
#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
@@ -26,15 +30,48 @@ asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
unsigned int nblks);
asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
+ u8 *iv, unsigned int nblocks);
asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
+ u8 *iv, unsigned int nblocks);
+asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nbytes);
+asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nbytes);
asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
u8 *iv, unsigned int nblks);
+asmlinkage void sm4_ce_xts_enc(const u32 *rkey1, u8 *dst, const u8 *src,
+ u8 *tweak, unsigned int nbytes,
+ const u32 *rkey2_enc);
+asmlinkage void sm4_ce_xts_dec(const u32 *rkey1, u8 *dst, const u8 *src,
+ u8 *tweak, unsigned int nbytes,
+ const u32 *rkey2_enc);
+asmlinkage void sm4_ce_mac_update(const u32 *rkey_enc, u8 *digest,
+ const u8 *src, unsigned int nblocks,
+ bool enc_before, bool enc_after);
+
+EXPORT_SYMBOL(sm4_ce_expand_key);
+EXPORT_SYMBOL(sm4_ce_crypt_block);
+EXPORT_SYMBOL(sm4_ce_cbc_enc);
+EXPORT_SYMBOL(sm4_ce_cfb_enc);
+
+struct sm4_xts_ctx {
+ struct sm4_ctx key1;
+ struct sm4_ctx key2;
+};
+
+struct sm4_mac_tfm_ctx {
+ struct sm4_ctx key;
+ u8 __aligned(8) consts[];
+};
+
+struct sm4_mac_desc_ctx {
+ unsigned int len;
+ u8 digest[SM4_BLOCK_SIZE];
+};
static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -44,8 +81,33 @@ static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
+ kernel_neon_begin();
sm4_ce_expand_key(key, ctx->rkey_enc, ctx->rkey_dec,
crypto_sm4_fk, crypto_sm4_ck);
+ kernel_neon_end();
+ return 0;
+}
+
+static int sm4_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
+
+ if (key_len != SM4_KEY_SIZE * 2)
+ return -EINVAL;
+
+ ret = xts_verify_key(tfm, key, key_len);
+ if (ret)
+ return ret;
+
+ kernel_neon_begin();
+ sm4_ce_expand_key(key, ctx->key1.rkey_enc,
+ ctx->key1.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
+ sm4_ce_expand_key(&key[SM4_KEY_SIZE], ctx->key2.rkey_enc,
+ ctx->key2.rkey_dec, crypto_sm4_fk, crypto_sm4_ck);
+ kernel_neon_end();
+
return 0;
}
@@ -94,66 +156,128 @@ static int sm4_ecb_decrypt(struct skcipher_request *req)
return sm4_ecb_do_crypt(req, ctx->rkey_dec);
}
-static int sm4_cbc_encrypt(struct skcipher_request *req)
+static int sm4_cbc_crypt(struct skcipher_request *req,
+ struct sm4_ctx *ctx, bool encrypt)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ unsigned int nblocks;
- kernel_neon_begin();
+ nblocks = nbytes / SM4_BLOCK_SIZE;
+ if (nblocks) {
+ kernel_neon_begin();
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_cbc_enc(ctx->rkey_enc, dst, src, walk.iv, nblks);
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ if (encrypt)
+ sm4_ce_cbc_enc(ctx->rkey_enc, dst, src,
+ walk.iv, nblocks);
+ else
+ sm4_ce_cbc_dec(ctx->rkey_dec, dst, src,
+ walk.iv, nblocks);
- kernel_neon_end();
+ kernel_neon_end();
+ }
- err = skcipher_walk_done(&walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
}
return err;
}
+static int sm4_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_cbc_crypt(req, ctx, true);
+}
+
static int sm4_cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return sm4_cbc_crypt(req, ctx, false);
+}
+
+static int sm4_cbc_cts_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
struct skcipher_walk walk;
- unsigned int nbytes;
+ int cbc_blocks;
int err;
- err = skcipher_walk_virt(&walk, req, false);
+ if (req->cryptlen < SM4_BLOCK_SIZE)
+ return -EINVAL;
- while ((nbytes = walk.nbytes) > 0) {
- const u8 *src = walk.src.virt.addr;
- u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ if (req->cryptlen == SM4_BLOCK_SIZE)
+ return sm4_cbc_crypt(req, ctx, encrypt);
- kernel_neon_begin();
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq, skcipher_request_flags(req),
+ NULL, NULL);
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_ce_cbc_dec(ctx->rkey_dec, dst, src, walk.iv, nblks);
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ /* handle the CBC cryption part */
+ cbc_blocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;
+ if (cbc_blocks) {
+ skcipher_request_set_crypt(&subreq, src, dst,
+ cbc_blocks * SM4_BLOCK_SIZE,
+ req->iv);
- kernel_neon_end();
+ err = sm4_cbc_crypt(&subreq, ctx, encrypt);
+ if (err)
+ return err;
- err = skcipher_walk_done(&walk, nbytes);
+ dst = src = scatterwalk_ffwd(sg_src, src, subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst,
+ subreq.cryptlen);
}
- return err;
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&subreq, src, dst,
+ req->cryptlen - cbc_blocks * SM4_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+
+ if (encrypt)
+ sm4_ce_cbc_cts_enc(ctx->rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes);
+ else
+ sm4_ce_cbc_cts_dec(ctx->rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes);
+
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int sm4_cbc_cts_encrypt(struct skcipher_request *req)
+{
+ return sm4_cbc_cts_crypt(req, true);
+}
+
+static int sm4_cbc_cts_decrypt(struct skcipher_request *req)
+{
+ return sm4_cbc_cts_crypt(req, false);
}
static int sm4_cfb_encrypt(struct skcipher_request *req)
@@ -283,6 +407,111 @@ static int sm4_ctr_crypt(struct skcipher_request *req)
return err;
}
+static int sm4_xts_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sm4_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int tail = req->cryptlen % SM4_BLOCK_SIZE;
+ const u32 *rkey2_enc = ctx->key2.rkey_enc;
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct skcipher_request subreq;
+ struct scatterlist *src, *dst;
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
+
+ if (req->cryptlen < SM4_BLOCK_SIZE)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
+
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ int nblocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2;
+
+ skcipher_walk_abort(&walk);
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ nblocks * SM4_BLOCK_SIZE, req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+ } else {
+ tail = 0;
+ }
+
+ while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) {
+ if (nbytes < walk.total)
+ nbytes &= ~(SM4_BLOCK_SIZE - 1);
+
+ kernel_neon_begin();
+
+ if (encrypt)
+ sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, nbytes,
+ rkey2_enc);
+ else
+ sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, nbytes,
+ rkey2_enc);
+
+ kernel_neon_end();
+
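+		/* only the first call passes rkey2_enc, which encrypts the
+		 * IV into the initial tweak; later calls must keep chaining
+		 * the tweak stored back in walk.iv */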
+ rkey2_enc = NULL;
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ if (err)
+ return err;
+ }
+
+ if (likely(tail == 0))
+ return 0;
+
+ /* handle ciphertext stealing */
+
+ dst = src = scatterwalk_ffwd(sg_src, req->src, subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, subreq.cryptlen);
+
+ skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+
+ if (encrypt)
+ sm4_ce_xts_enc(ctx->key1.rkey_enc, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes,
+ rkey2_enc);
+ else
+ sm4_ce_xts_dec(ctx->key1.rkey_dec, walk.dst.virt.addr,
+ walk.src.virt.addr, walk.iv, walk.nbytes,
+ rkey2_enc);
+
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int sm4_xts_encrypt(struct skcipher_request *req)
+{
+ return sm4_xts_crypt(req, true);
+}
+
+static int sm4_xts_decrypt(struct skcipher_request *req)
+{
+ return sm4_xts_crypt(req, false);
+}
+
static struct skcipher_alg sm4_algs[] = {
{
.base = {
@@ -345,28 +574,312 @@ static struct skcipher_alg sm4_algs[] = {
.setkey = sm4_setkey,
.encrypt = sm4_ctr_crypt,
.decrypt = sm4_ctr_crypt,
+ }, {
+ .base = {
+ .cra_name = "cts(cbc(sm4))",
+ .cra_driver_name = "cts-cbc-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .walksize = SM4_BLOCK_SIZE * 2,
+ .setkey = sm4_setkey,
+ .encrypt = sm4_cbc_cts_encrypt,
+ .decrypt = sm4_cbc_cts_decrypt,
+ }, {
+ .base = {
+ .cra_name = "xts(sm4)",
+ .cra_driver_name = "xts-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_xts_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = SM4_KEY_SIZE * 2,
+ .max_keysize = SM4_KEY_SIZE * 2,
+ .ivsize = SM4_BLOCK_SIZE,
+ .walksize = SM4_BLOCK_SIZE * 2,
+ .setkey = sm4_xts_setkey,
+ .encrypt = sm4_xts_encrypt,
+ .decrypt = sm4_xts_decrypt,
+ }
+};
+
+static int sm4_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ kernel_neon_begin();
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sm4_cmac_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+ be128 *consts = (be128 *)ctx->consts;
+ u64 a, b;
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ memset(consts, 0, SM4_BLOCK_SIZE);
+
+ kernel_neon_begin();
+
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+
+ /* encrypt the zero block */
+ sm4_ce_crypt_block(ctx->key.rkey_enc, (u8 *)consts, (const u8 *)consts);
+
+ kernel_neon_end();
+
+ /* multiply the zero-block ciphertext by u and u^2 in GF(2^128) */
+ a = be64_to_cpu(consts[0].a);
+ b = be64_to_cpu(consts[0].b);
+ consts[0].a = cpu_to_be64((a << 1) | (b >> 63));
+ consts[0].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
+
+ a = be64_to_cpu(consts[0].a);
+ b = be64_to_cpu(consts[0].b);
+ consts[1].a = cpu_to_be64((a << 1) | (b >> 63));
+ consts[1].b = cpu_to_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0));
+
+ return 0;
+}
+
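The two blocks of ctx->consts computed here are the usual CMAC subkeys K1 = L·u and K2 = L·u², with L the encryption of the all-zero block and 0x87 the reduction constant of x^128 + x^7 + x^2 + x + 1. A standalone model of one doubling step, mirroring the consts[0]/consts[1] arithmetic above:

	#include <stdint.h>

	/*
	 * Multiply a 128-bit value by u in GF(2^128): shift left one bit
	 * and, if a bit fell off the top, XOR in the reduction constant
	 * 0x87.  The value is held as two big-endian 64-bit halves
	 * (a = high, b = low), exactly as in sm4_cmac_setkey().
	 */
	static void gf128_mul_by_u(uint64_t *a, uint64_t *b)
	{
		uint64_t carry = *a >> 63;

		*a = (*a << 1) | (*b >> 63);
		*b = (*b << 1) ^ (carry ? 0x87 : 0);
	}
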
+static int sm4_xcbc_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct sm4_mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
+ u8 __aligned(8) key2[SM4_BLOCK_SIZE];
+ static u8 const ks[3][SM4_BLOCK_SIZE] = {
+ { [0 ... SM4_BLOCK_SIZE - 1] = 0x1},
+ { [0 ... SM4_BLOCK_SIZE - 1] = 0x2},
+ { [0 ... SM4_BLOCK_SIZE - 1] = 0x3},
+ };
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ kernel_neon_begin();
+
+ sm4_ce_expand_key(key, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+
+ sm4_ce_crypt_block(ctx->key.rkey_enc, key2, ks[0]);
+ sm4_ce_crypt(ctx->key.rkey_enc, ctx->consts, ks[1], 2);
+
+ sm4_ce_expand_key(key2, ctx->key.rkey_enc, ctx->key.rkey_dec,
+ crypto_sm4_fk, crypto_sm4_ck);
+
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sm4_mac_init(struct shash_desc *desc)
+{
+ struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ memset(ctx->digest, 0, SM4_BLOCK_SIZE);
+ ctx->len = 0;
+
+ return 0;
+}
+
+static int sm4_mac_update(struct shash_desc *desc, const u8 *p,
+ unsigned int len)
+{
+ struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int l, nblocks;
+
+ if (len == 0)
+ return 0;
+
+ if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) {
+ l = min(len, SM4_BLOCK_SIZE - ctx->len);
+
+ crypto_xor(ctx->digest + ctx->len, p, l);
+ ctx->len += l;
+ len -= l;
+ p += l;
+ }
+
+ if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) {
+ kernel_neon_begin();
+
+ if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) {
+ sm4_ce_crypt_block(tctx->key.rkey_enc,
+ ctx->digest, ctx->digest);
+ ctx->len = 0;
+ } else {
+ nblocks = len / SM4_BLOCK_SIZE;
+ len %= SM4_BLOCK_SIZE;
+
+ sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, p,
+ nblocks, (ctx->len == SM4_BLOCK_SIZE),
+ (len != 0));
+
+ p += nblocks * SM4_BLOCK_SIZE;
+
+ if (len == 0)
+ ctx->len = SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_end();
+
+ if (len) {
+ crypto_xor(ctx->digest, p, len);
+ ctx->len = len;
+ }
+ }
+
+ return 0;
+}
+
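One subtlety in sm4_mac_update() above: after a block-aligned update it sets ctx->len to SM4_BLOCK_SIZE rather than 0, marking a full block as pending so that sm4_cmac_final() (which checks ctx->len != SM4_BLOCK_SIZE) can still fold the CMAC/XCBC subkey into the last block. A hedged standalone model of how many whole blocks may be consumed eagerly (names hypothetical):

	#define SM4_BLOCK_SIZE	16

	/*
	 * Given the bytes already buffered and the length of new input,
	 * return how many whole blocks can be processed immediately.  The
	 * final block is always held back for final(), which is why the
	 * update path above tracks a "full but pending" state via
	 * ctx->len == SM4_BLOCK_SIZE.
	 */
	static unsigned int blocks_to_process(unsigned int buffered,
					      unsigned int new_len)
	{
		unsigned int total = buffered + new_len;

		if (total <= SM4_BLOCK_SIZE)
			return 0;		/* keep buffering */

		/* everything except a (possibly full) final block */
		return (total - 1) / SM4_BLOCK_SIZE;
	}
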
+static int sm4_cmac_final(struct shash_desc *desc, u8 *out)
+{
+ struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
+ const u8 *consts = tctx->consts;
+
+ if (ctx->len != SM4_BLOCK_SIZE) {
+ ctx->digest[ctx->len] ^= 0x80;
+ consts += SM4_BLOCK_SIZE;
+ }
+
+ kernel_neon_begin();
+ sm4_ce_mac_update(tctx->key.rkey_enc, ctx->digest, consts, 1,
+ false, true);
+ kernel_neon_end();
+
+ memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
+
+ return 0;
+}
+
+static int sm4_cbcmac_final(struct shash_desc *desc, u8 *out)
+{
+ struct sm4_mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct sm4_mac_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ if (ctx->len) {
+ kernel_neon_begin();
+ sm4_ce_crypt_block(tctx->key.rkey_enc, ctx->digest,
+ ctx->digest);
+ kernel_neon_end();
+ }
+
+ memcpy(out, ctx->digest, SM4_BLOCK_SIZE);
+
+ return 0;
+}
+
+static struct shash_alg sm4_mac_algs[] = {
+ {
+ .base = {
+ .cra_name = "cmac(sm4)",
+ .cra_driver_name = "cmac-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ + SM4_BLOCK_SIZE * 2,
+ .cra_module = THIS_MODULE,
+ },
+ .digestsize = SM4_BLOCK_SIZE,
+ .init = sm4_mac_init,
+ .update = sm4_mac_update,
+ .final = sm4_cmac_final,
+ .setkey = sm4_cmac_setkey,
+ .descsize = sizeof(struct sm4_mac_desc_ctx),
+ }, {
+ .base = {
+ .cra_name = "xcbc(sm4)",
+ .cra_driver_name = "xcbc-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = SM4_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx)
+ + SM4_BLOCK_SIZE * 2,
+ .cra_module = THIS_MODULE,
+ },
+ .digestsize = SM4_BLOCK_SIZE,
+ .init = sm4_mac_init,
+ .update = sm4_mac_update,
+ .final = sm4_cmac_final,
+ .setkey = sm4_xcbc_setkey,
+ .descsize = sizeof(struct sm4_mac_desc_ctx),
+ }, {
+ .base = {
+ .cra_name = "cbcmac(sm4)",
+ .cra_driver_name = "cbcmac-sm4-ce",
+ .cra_priority = 400,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct sm4_mac_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .digestsize = SM4_BLOCK_SIZE,
+ .init = sm4_mac_init,
+ .update = sm4_mac_update,
+ .final = sm4_cbcmac_final,
+ .setkey = sm4_cbcmac_setkey,
+ .descsize = sizeof(struct sm4_mac_desc_ctx),
}
};
static int __init sm4_init(void)
{
- return crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
+ int err;
+
+ err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
+ if (err)
+ return err;
+
+ err = crypto_register_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
+ return err;
}
static void __exit sm4_exit(void)
{
+ crypto_unregister_shashes(sm4_mac_algs, ARRAY_SIZE(sm4_mac_algs));
crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs));
}
module_cpu_feature_match(SM4, sm4_init);
module_exit(sm4_exit);
-MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR using ARMv8 Crypto Extensions");
+MODULE_DESCRIPTION("SM4 ECB/CBC/CFB/CTR/XTS using ARMv8 Crypto Extensions");
MODULE_ALIAS_CRYPTO("sm4-ce");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("ecb(sm4)");
MODULE_ALIAS_CRYPTO("cbc(sm4)");
MODULE_ALIAS_CRYPTO("cfb(sm4)");
MODULE_ALIAS_CRYPTO("ctr(sm4)");
+MODULE_ALIAS_CRYPTO("cts(cbc(sm4))");
+MODULE_ALIAS_CRYPTO("xts(sm4)");
+MODULE_ALIAS_CRYPTO("cmac(sm4)");
+MODULE_ALIAS_CRYPTO("xcbc(sm4)");
+MODULE_ALIAS_CRYPTO("cbcmac(sm4)");
MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/sm4-ce.h b/arch/arm64/crypto/sm4-ce.h
new file mode 100644
index 000000000000..109c21b37590
--- /dev/null
+++ b/arch/arm64/crypto/sm4-ce.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4 common functions for Crypto Extensions
+ * Copyright (C) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+void sm4_ce_expand_key(const u8 *key, u32 *rkey_enc, u32 *rkey_dec,
+ const u32 *fk, const u32 *ck);
+
+void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
+
+void sm4_ce_cbc_enc(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblocks);
+
+void sm4_ce_cfb_enc(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblocks);
diff --git a/arch/arm64/crypto/sm4-neon-core.S b/arch/arm64/crypto/sm4-neon-core.S
index 3d5256b354d2..f295b4b7d70a 100644
--- a/arch/arm64/crypto/sm4-neon-core.S
+++ b/arch/arm64/crypto/sm4-neon-core.S
@@ -18,6 +18,11 @@
#define RTMP2 v10
#define RTMP3 v11
+#define RTMP4 v12
+#define RTMP5 v13
+#define RTMP6 v14
+#define RTMP7 v15
+
#define RX0 v12
#define RX1 v13
#define RKEY v14
@@ -25,7 +30,7 @@
/* Helper macros. */
-#define PREPARE \
+#define SM4_PREPARE() \
adr_l x5, crypto_sm4_sbox; \
ld1 {v16.16b-v19.16b}, [x5], #64; \
ld1 {v20.16b-v23.16b}, [x5], #64; \
@@ -42,7 +47,25 @@
zip1 s2.2d, RTMP2.2d, RTMP3.2d; \
zip2 s3.2d, RTMP2.2d, RTMP3.2d;
-#define rotate_clockwise_90(s0, s1, s2, s3) \
+#define transpose_4x4_2x(s0, s1, s2, s3, s4, s5, s6, s7) \
+ zip1 RTMP0.4s, s0.4s, s1.4s; \
+ zip1 RTMP1.4s, s2.4s, s3.4s; \
+ zip2 RTMP2.4s, s0.4s, s1.4s; \
+ zip2 RTMP3.4s, s2.4s, s3.4s; \
+ zip1 RTMP4.4s, s4.4s, s5.4s; \
+ zip1 RTMP5.4s, s6.4s, s7.4s; \
+ zip2 RTMP6.4s, s4.4s, s5.4s; \
+ zip2 RTMP7.4s, s6.4s, s7.4s; \
+ zip1 s0.2d, RTMP0.2d, RTMP1.2d; \
+ zip2 s1.2d, RTMP0.2d, RTMP1.2d; \
+ zip1 s2.2d, RTMP2.2d, RTMP3.2d; \
+ zip2 s3.2d, RTMP2.2d, RTMP3.2d; \
+ zip1 s4.2d, RTMP4.2d, RTMP5.2d; \
+ zip2 s5.2d, RTMP4.2d, RTMP5.2d; \
+ zip1 s6.2d, RTMP6.2d, RTMP7.2d; \
+ zip2 s7.2d, RTMP6.2d, RTMP7.2d;
+
+#define rotate_clockwise_4x4(s0, s1, s2, s3) \
zip1 RTMP0.4s, s1.4s, s0.4s; \
zip2 RTMP1.4s, s1.4s, s0.4s; \
zip1 RTMP2.4s, s3.4s, s2.4s; \
@@ -52,6 +75,24 @@
zip1 s2.2d, RTMP3.2d, RTMP1.2d; \
zip2 s3.2d, RTMP3.2d, RTMP1.2d;
+#define rotate_clockwise_4x4_2x(s0, s1, s2, s3, s4, s5, s6, s7) \
+ zip1 RTMP0.4s, s1.4s, s0.4s; \
+ zip1 RTMP2.4s, s3.4s, s2.4s; \
+ zip2 RTMP1.4s, s1.4s, s0.4s; \
+ zip2 RTMP3.4s, s3.4s, s2.4s; \
+ zip1 RTMP4.4s, s5.4s, s4.4s; \
+ zip1 RTMP6.4s, s7.4s, s6.4s; \
+ zip2 RTMP5.4s, s5.4s, s4.4s; \
+ zip2 RTMP7.4s, s7.4s, s6.4s; \
+ zip1 s0.2d, RTMP2.2d, RTMP0.2d; \
+ zip2 s1.2d, RTMP2.2d, RTMP0.2d; \
+ zip1 s2.2d, RTMP3.2d, RTMP1.2d; \
+ zip2 s3.2d, RTMP3.2d, RTMP1.2d; \
+ zip1 s4.2d, RTMP6.2d, RTMP4.2d; \
+ zip2 s5.2d, RTMP6.2d, RTMP4.2d; \
+ zip1 s6.2d, RTMP7.2d, RTMP5.2d; \
+ zip2 s7.2d, RTMP7.2d, RTMP5.2d;
+
#define ROUND4(round, s0, s1, s2, s3) \
dup RX0.4s, RKEY.s[round]; \
/* rk ^ s1 ^ s2 ^ s3 */ \
@@ -87,14 +128,7 @@
/* s0 ^= RTMP3 */ \
eor s0.16b, s0.16b, RTMP3.16b;
-#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
- rev32 b0.16b, b0.16b; \
- rev32 b1.16b, b1.16b; \
- rev32 b2.16b, b2.16b; \
- rev32 b3.16b, b3.16b; \
- \
- transpose_4x4(b0, b1, b2, b3); \
- \
+#define SM4_CRYPT_BLK4_BE(b0, b1, b2, b3) \
mov x6, 8; \
4: \
ld1 {RKEY.4s}, [x0], #16; \
@@ -107,15 +141,23 @@
\
bne 4b; \
\
- rotate_clockwise_90(b0, b1, b2, b3); \
rev32 b0.16b, b0.16b; \
rev32 b1.16b, b1.16b; \
rev32 b2.16b, b2.16b; \
rev32 b3.16b, b3.16b; \
\
+ rotate_clockwise_4x4(b0, b1, b2, b3); \
+ \
/* repoint to rkey */ \
sub x0, x0, #128;
+#define SM4_CRYPT_BLK4(b0, b1, b2, b3) \
+ rev32 b0.16b, b0.16b; \
+ rev32 b1.16b, b1.16b; \
+ rev32 b2.16b, b2.16b; \
+ rev32 b3.16b, b3.16b; \
+ SM4_CRYPT_BLK4_BE(b0, b1, b2, b3);
+
#define ROUND8(round, s0, s1, s2, s3, t0, t1, t2, t3) \
/* rk ^ s1 ^ s2 ^ s3 */ \
dup RX0.4s, RKEY.s[round]; \
@@ -175,7 +217,7 @@
eor s0.16b, s0.16b, RTMP0.16b; \
eor t0.16b, t0.16b, RTMP1.16b;
-#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
+#define SM4_CRYPT_BLK8_norotate(b0, b1, b2, b3, b4, b5, b6, b7) \
rev32 b0.16b, b0.16b; \
rev32 b1.16b, b1.16b; \
rev32 b2.16b, b2.16b; \
@@ -185,9 +227,6 @@
rev32 b6.16b, b6.16b; \
rev32 b7.16b, b7.16b; \
\
- transpose_4x4(b0, b1, b2, b3); \
- transpose_4x4(b4, b5, b6, b7); \
- \
mov x6, 8; \
8: \
ld1 {RKEY.4s}, [x0], #16; \
@@ -200,8 +239,6 @@
\
bne 8b; \
\
- rotate_clockwise_90(b0, b1, b2, b3); \
- rotate_clockwise_90(b4, b5, b6, b7); \
rev32 b0.16b, b0.16b; \
rev32 b1.16b, b1.16b; \
rev32 b2.16b, b2.16b; \
@@ -214,274 +251,429 @@
/* repoint to rkey */ \
sub x0, x0, #128;
+#define SM4_CRYPT_BLK8(b0, b1, b2, b3, b4, b5, b6, b7) \
+ SM4_CRYPT_BLK8_norotate(b0, b1, b2, b3, b4, b5, b6, b7); \
+ rotate_clockwise_4x4_2x(b0, b1, b2, b3, b4, b5, b6, b7); \
-.align 3
-SYM_FUNC_START_LOCAL(__sm4_neon_crypt_blk1_4)
- /* input:
- * x0: round key array, CTX
- * x1: dst
- * x2: src
- * w3: num blocks (1..4)
- */
- PREPARE;
-
- ld1 {v0.16b}, [x2], #16;
- mov v1.16b, v0.16b;
- mov v2.16b, v0.16b;
- mov v3.16b, v0.16b;
- cmp w3, #2;
- blt .Lblk4_load_input_done;
- ld1 {v1.16b}, [x2], #16;
- beq .Lblk4_load_input_done;
- ld1 {v2.16b}, [x2], #16;
- cmp w3, #3;
- beq .Lblk4_load_input_done;
- ld1 {v3.16b}, [x2];
-
-.Lblk4_load_input_done:
- SM4_CRYPT_BLK4(v0, v1, v2, v3);
-
- st1 {v0.16b}, [x1], #16;
- cmp w3, #2;
- blt .Lblk4_store_output_done;
- st1 {v1.16b}, [x1], #16;
- beq .Lblk4_store_output_done;
- st1 {v2.16b}, [x1], #16;
- cmp w3, #3;
- beq .Lblk4_store_output_done;
- st1 {v3.16b}, [x1];
-
-.Lblk4_store_output_done:
- ret;
-SYM_FUNC_END(__sm4_neon_crypt_blk1_4)
.align 3
-SYM_FUNC_START(sm4_neon_crypt_blk1_8)
+SYM_FUNC_START(sm4_neon_crypt)
/* input:
* x0: round key array, CTX
* x1: dst
* x2: src
- * w3: num blocks (1..8)
+ * w3: nblocks
*/
- cmp w3, #5;
- blt __sm4_neon_crypt_blk1_4;
-
- PREPARE;
-
- ld1 {v0.16b-v3.16b}, [x2], #64;
- ld1 {v4.16b}, [x2], #16;
- mov v5.16b, v4.16b;
- mov v6.16b, v4.16b;
- mov v7.16b, v4.16b;
- beq .Lblk8_load_input_done;
- ld1 {v5.16b}, [x2], #16;
- cmp w3, #7;
- blt .Lblk8_load_input_done;
- ld1 {v6.16b}, [x2], #16;
- beq .Lblk8_load_input_done;
- ld1 {v7.16b}, [x2];
-
-.Lblk8_load_input_done:
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
-
- cmp w3, #6;
- st1 {v0.16b-v3.16b}, [x1], #64;
- st1 {v4.16b}, [x1], #16;
- blt .Lblk8_store_output_done;
- st1 {v5.16b}, [x1], #16;
- beq .Lblk8_store_output_done;
- st1 {v6.16b}, [x1], #16;
- cmp w3, #7;
- beq .Lblk8_store_output_done;
- st1 {v7.16b}, [x1];
-
-.Lblk8_store_output_done:
- ret;
-SYM_FUNC_END(sm4_neon_crypt_blk1_8)
+ SM4_PREPARE()
-.align 3
-SYM_FUNC_START(sm4_neon_crypt_blk8)
- /* input:
- * x0: round key array, CTX
- * x1: dst
- * x2: src
- * w3: nblocks (multiples of 8)
- */
- PREPARE;
+.Lcrypt_loop_8x:
+ sub w3, w3, #8
+ tbnz w3, #31, .Lcrypt_4x
+
+ ld4 {v0.4s-v3.4s}, [x2], #64
+ ld4 {v4.4s-v7.4s}, [x2], #64
-.Lcrypt_loop_blk:
- subs w3, w3, #8;
- bmi .Lcrypt_end;
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
- ld1 {v0.16b-v3.16b}, [x2], #64;
- ld1 {v4.16b-v7.16b}, [x2], #64;
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ cbz w3, .Lcrypt_end
+ b .Lcrypt_loop_8x
- st1 {v0.16b-v3.16b}, [x1], #64;
- st1 {v4.16b-v7.16b}, [x1], #64;
+.Lcrypt_4x:
+ add w3, w3, #8
+ cmp w3, #4
+ blt .Lcrypt_tail
- b .Lcrypt_loop_blk;
+ sub w3, w3, #4
+
+ ld4 {v0.4s-v3.4s}, [x2], #64
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+
+ cbz w3, .Lcrypt_end
+
+.Lcrypt_tail:
+ cmp w3, #2
+ ld1 {v0.16b}, [x2], #16
+ blt .Lcrypt_tail_load_done
+ ld1 {v1.16b}, [x2], #16
+ beq .Lcrypt_tail_load_done
+ ld1 {v2.16b}, [x2], #16
+
+.Lcrypt_tail_load_done:
+ transpose_4x4(v0, v1, v2, v3)
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ cmp w3, #2
+ st1 {v0.16b}, [x1], #16
+ blt .Lcrypt_end
+ st1 {v1.16b}, [x1], #16
+ beq .Lcrypt_end
+ st1 {v2.16b}, [x1], #16
.Lcrypt_end:
- ret;
-SYM_FUNC_END(sm4_neon_crypt_blk8)
+ ret
+SYM_FUNC_END(sm4_neon_crypt)
.align 3
-SYM_FUNC_START(sm4_neon_cbc_dec_blk8)
+SYM_FUNC_START(sm4_neon_cbc_dec)
/* input:
* x0: round key array, CTX
* x1: dst
* x2: src
* x3: iv (big endian, 128 bit)
- * w4: nblocks (multiples of 8)
+ * w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE()
+
+ ld1 {RIV.16b}, [x3]
+
+.Lcbc_dec_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lcbc_dec_4x
+
+ ld4 {v0.4s-v3.4s}, [x2], #64
+ ld4 {v4.4s-v7.4s}, [x2]
+
+ SM4_CRYPT_BLK8_norotate(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ /* Avoid overwriting the RIV register */
+ rotate_clockwise_4x4(v0, v1, v2, v3)
+ rotate_clockwise_4x4(v4, v5, v6, v7)
+
+ sub x2, x2, #64
+
+ eor v0.16b, v0.16b, RIV.16b
- ld1 {RIV.16b}, [x3];
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64
+ ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64
-.Lcbc_loop_blk:
- subs w4, w4, #8;
- bmi .Lcbc_end;
+ eor v1.16b, v1.16b, RTMP0.16b
+ eor v2.16b, v2.16b, RTMP1.16b
+ eor v3.16b, v3.16b, RTMP2.16b
+ eor v4.16b, v4.16b, RTMP3.16b
+ eor v5.16b, v5.16b, RTMP4.16b
+ eor v6.16b, v6.16b, RTMP5.16b
+ eor v7.16b, v7.16b, RTMP6.16b
- ld1 {v0.16b-v3.16b}, [x2], #64;
- ld1 {v4.16b-v7.16b}, [x2];
+ mov RIV.16b, RTMP7.16b
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
- sub x2, x2, #64;
- eor v0.16b, v0.16b, RIV.16b;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v1.16b, v1.16b, RTMP0.16b;
- eor v2.16b, v2.16b, RTMP1.16b;
- eor v3.16b, v3.16b, RTMP2.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+ cbz w4, .Lcbc_dec_end
+ b .Lcbc_dec_loop_8x
- eor v4.16b, v4.16b, RTMP3.16b;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v5.16b, v5.16b, RTMP0.16b;
- eor v6.16b, v6.16b, RTMP1.16b;
- eor v7.16b, v7.16b, RTMP2.16b;
+.Lcbc_dec_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lcbc_dec_tail
- mov RIV.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
+ sub w4, w4, #4
- b .Lcbc_loop_blk;
+ ld1 {v0.16b-v3.16b}, [x2], #64
-.Lcbc_end:
+ rev32 v4.16b, v0.16b
+ rev32 v5.16b, v1.16b
+ rev32 v6.16b, v2.16b
+ rev32 v7.16b, v3.16b
+
+ transpose_4x4(v4, v5, v6, v7)
+
+ SM4_CRYPT_BLK4_BE(v4, v5, v6, v7)
+
+ eor v4.16b, v4.16b, RIV.16b
+ eor v5.16b, v5.16b, v0.16b
+ eor v6.16b, v6.16b, v1.16b
+ eor v7.16b, v7.16b, v2.16b
+
+ mov RIV.16b, v3.16b
+
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ cbz w4, .Lcbc_dec_end
+
+.Lcbc_dec_tail:
+ cmp w4, #2
+ ld1 {v0.16b}, [x2], #16
+ blt .Lcbc_dec_tail_load_done
+ ld1 {v1.16b}, [x2], #16
+ beq .Lcbc_dec_tail_load_done
+ ld1 {v2.16b}, [x2], #16
+
+.Lcbc_dec_tail_load_done:
+ rev32 v4.16b, v0.16b
+ rev32 v5.16b, v1.16b
+ rev32 v6.16b, v2.16b
+
+ transpose_4x4(v4, v5, v6, v7)
+
+ SM4_CRYPT_BLK4_BE(v4, v5, v6, v7)
+
+ cmp w4, #2
+ eor v4.16b, v4.16b, RIV.16b
+ mov RIV.16b, v0.16b
+ st1 {v4.16b}, [x1], #16
+ blt .Lcbc_dec_end
+
+ eor v5.16b, v5.16b, v0.16b
+ mov RIV.16b, v1.16b
+ st1 {v5.16b}, [x1], #16
+ beq .Lcbc_dec_end
+
+ eor v6.16b, v6.16b, v1.16b
+ mov RIV.16b, v2.16b
+ st1 {v6.16b}, [x1], #16
+
+.Lcbc_dec_end:
/* store new IV */
- st1 {RIV.16b}, [x3];
+ st1 {RIV.16b}, [x3]
- ret;
-SYM_FUNC_END(sm4_neon_cbc_dec_blk8)
+ ret
+SYM_FUNC_END(sm4_neon_cbc_dec)
.align 3
-SYM_FUNC_START(sm4_neon_cfb_dec_blk8)
+SYM_FUNC_START(sm4_neon_cfb_dec)
/* input:
* x0: round key array, CTX
* x1: dst
* x2: src
* x3: iv (big endian, 128 bit)
- * w4: nblocks (multiples of 8)
+ * w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE()
+
+ ld1 {v0.16b}, [x3]
+
+.Lcfb_dec_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lcfb_dec_4x
+
+ ld1 {v1.16b-v3.16b}, [x2], #48
+ ld4 {v4.4s-v7.4s}, [x2]
+
+ transpose_4x4(v0, v1, v2, v3)
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ sub x2, x2, #48
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64
+ ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64
+
+ eor v0.16b, v0.16b, RTMP0.16b
+ eor v1.16b, v1.16b, RTMP1.16b
+ eor v2.16b, v2.16b, RTMP2.16b
+ eor v3.16b, v3.16b, RTMP3.16b
+ eor v4.16b, v4.16b, RTMP4.16b
+ eor v5.16b, v5.16b, RTMP5.16b
+ eor v6.16b, v6.16b, RTMP6.16b
+ eor v7.16b, v7.16b, RTMP7.16b
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ mov v0.16b, RTMP7.16b
+
+ cbz w4, .Lcfb_dec_end
+ b .Lcfb_dec_loop_8x
+
+.Lcfb_dec_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lcfb_dec_tail
+
+ sub w4, w4, #4
+
+ ld1 {v4.16b-v7.16b}, [x2], #64
+
+ rev32 v0.16b, v0.16b /* v0 is IV register */
+ rev32 v1.16b, v4.16b
+ rev32 v2.16b, v5.16b
+ rev32 v3.16b, v6.16b
+
+ transpose_4x4(v0, v1, v2, v3)
+
+ SM4_CRYPT_BLK4_BE(v0, v1, v2, v3)
- ld1 {v0.16b}, [x3];
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ eor v3.16b, v3.16b, v7.16b
-.Lcfb_loop_blk:
- subs w4, w4, #8;
- bmi .Lcfb_end;
+ st1 {v0.16b-v3.16b}, [x1], #64
- ld1 {v1.16b, v2.16b, v3.16b}, [x2], #48;
- ld1 {v4.16b-v7.16b}, [x2];
+ mov v0.16b, v7.16b
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
+ cbz w4, .Lcfb_dec_end
- sub x2, x2, #48;
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
+.Lcfb_dec_tail:
+ cmp w4, #2
+ ld1 {v4.16b}, [x2], #16
+ blt .Lcfb_dec_tail_load_done
+ ld1 {v5.16b}, [x2], #16
+ beq .Lcfb_dec_tail_load_done
+ ld1 {v6.16b}, [x2], #16
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v4.16b, v4.16b, RTMP0.16b;
- eor v5.16b, v5.16b, RTMP1.16b;
- eor v6.16b, v6.16b, RTMP2.16b;
- eor v7.16b, v7.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
+.Lcfb_dec_tail_load_done:
+ rev32 v0.16b, v0.16b /* v0 is IV register */
+ rev32 v1.16b, v4.16b
+ rev32 v2.16b, v5.16b
- mov v0.16b, RTMP3.16b;
+ transpose_4x4(v0, v1, v2, v3)
- b .Lcfb_loop_blk;
+ SM4_CRYPT_BLK4_BE(v0, v1, v2, v3)
-.Lcfb_end:
+ cmp w4, #2
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x1], #16
+ mov v0.16b, v4.16b
+ blt .Lcfb_dec_end
+
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v1.16b}, [x1], #16
+ mov v0.16b, v5.16b
+ beq .Lcfb_dec_end
+
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v2.16b}, [x1], #16
+ mov v0.16b, v6.16b
+
+.Lcfb_dec_end:
/* store new IV */
- st1 {v0.16b}, [x3];
+ st1 {v0.16b}, [x3]
- ret;
-SYM_FUNC_END(sm4_neon_cfb_dec_blk8)
+ ret
+SYM_FUNC_END(sm4_neon_cfb_dec)
.align 3
-SYM_FUNC_START(sm4_neon_ctr_enc_blk8)
+SYM_FUNC_START(sm4_neon_ctr_crypt)
/* input:
* x0: round key array, CTX
* x1: dst
* x2: src
* x3: ctr (big endian, 128 bit)
- * w4: nblocks (multiples of 8)
+ * w4: nblocks
*/
- PREPARE;
+ SM4_PREPARE()
- ldp x7, x8, [x3];
- rev x7, x7;
- rev x8, x8;
+ ldp x7, x8, [x3]
+ rev x7, x7
+ rev x8, x8
-.Lctr_loop_blk:
- subs w4, w4, #8;
- bmi .Lctr_end;
+.Lctr_crypt_loop_8x:
+ sub w4, w4, #8
+ tbnz w4, #31, .Lctr_crypt_4x
-#define inc_le128(vctr) \
- mov vctr.d[1], x8; \
- mov vctr.d[0], x7; \
- adds x8, x8, #1; \
- adc x7, x7, xzr; \
- rev64 vctr.16b, vctr.16b;
+#define inc_le128(vctr) \
+ mov vctr.d[1], x8; \
+ mov vctr.d[0], x7; \
+ adds x8, x8, #1; \
+ rev64 vctr.16b, vctr.16b; \
+ adc x7, x7, xzr;
/* construct CTRs */
- inc_le128(v0); /* +0 */
- inc_le128(v1); /* +1 */
- inc_le128(v2); /* +2 */
- inc_le128(v3); /* +3 */
- inc_le128(v4); /* +4 */
- inc_le128(v5); /* +5 */
- inc_le128(v6); /* +6 */
- inc_le128(v7); /* +7 */
-
- SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7);
-
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v0.16b, v0.16b, RTMP0.16b;
- eor v1.16b, v1.16b, RTMP1.16b;
- eor v2.16b, v2.16b, RTMP2.16b;
- eor v3.16b, v3.16b, RTMP3.16b;
- st1 {v0.16b-v3.16b}, [x1], #64;
-
- ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64;
- eor v4.16b, v4.16b, RTMP0.16b;
- eor v5.16b, v5.16b, RTMP1.16b;
- eor v6.16b, v6.16b, RTMP2.16b;
- eor v7.16b, v7.16b, RTMP3.16b;
- st1 {v4.16b-v7.16b}, [x1], #64;
-
- b .Lctr_loop_blk;
-
-.Lctr_end:
+ inc_le128(v0) /* +0 */
+ inc_le128(v1) /* +1 */
+ inc_le128(v2) /* +2 */
+ inc_le128(v3) /* +3 */
+ inc_le128(v4) /* +4 */
+ inc_le128(v5) /* +5 */
+ inc_le128(v6) /* +6 */
+ inc_le128(v7) /* +7 */
+
+ transpose_4x4_2x(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ SM4_CRYPT_BLK8(v0, v1, v2, v3, v4, v5, v6, v7)
+
+ ld1 {RTMP0.16b-RTMP3.16b}, [x2], #64
+ ld1 {RTMP4.16b-RTMP7.16b}, [x2], #64
+
+ eor v0.16b, v0.16b, RTMP0.16b
+ eor v1.16b, v1.16b, RTMP1.16b
+ eor v2.16b, v2.16b, RTMP2.16b
+ eor v3.16b, v3.16b, RTMP3.16b
+ eor v4.16b, v4.16b, RTMP4.16b
+ eor v5.16b, v5.16b, RTMP5.16b
+ eor v6.16b, v6.16b, RTMP6.16b
+ eor v7.16b, v7.16b, RTMP7.16b
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+ st1 {v4.16b-v7.16b}, [x1], #64
+
+ cbz w4, .Lctr_crypt_end
+ b .Lctr_crypt_loop_8x
+
+.Lctr_crypt_4x:
+ add w4, w4, #8
+ cmp w4, #4
+ blt .Lctr_crypt_tail
+
+ sub w4, w4, #4
+
+ /* construct CTRs */
+ inc_le128(v0) /* +0 */
+ inc_le128(v1) /* +1 */
+ inc_le128(v2) /* +2 */
+ inc_le128(v3) /* +3 */
+
+ ld1 {v4.16b-v7.16b}, [x2], #64
+
+ transpose_4x4(v0, v1, v2, v3)
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ eor v0.16b, v0.16b, v4.16b
+ eor v1.16b, v1.16b, v5.16b
+ eor v2.16b, v2.16b, v6.16b
+ eor v3.16b, v3.16b, v7.16b
+
+ st1 {v0.16b-v3.16b}, [x1], #64
+
+ cbz w4, .Lctr_crypt_end
+
+.Lctr_crypt_tail:
+ /* inc_le128 will change the sign bit */
+ ld1 {v4.16b}, [x2], #16
+ inc_le128(v0)
+ cmp w4, #2
+ blt .Lctr_crypt_tail_load_done
+
+ ld1 {v5.16b}, [x2], #16
+ inc_le128(v1)
+ cmp w4, #2
+ beq .Lctr_crypt_tail_load_done
+
+ ld1 {v6.16b}, [x2], #16
+ inc_le128(v2)
+
+.Lctr_crypt_tail_load_done:
+ transpose_4x4(v0, v1, v2, v3)
+
+ SM4_CRYPT_BLK4(v0, v1, v2, v3)
+
+ cmp w4, #2
+
+ eor v0.16b, v0.16b, v4.16b
+ st1 {v0.16b}, [x1], #16
+ blt .Lctr_crypt_end
+
+ eor v1.16b, v1.16b, v5.16b
+ st1 {v1.16b}, [x1], #16
+ beq .Lctr_crypt_end
+
+ eor v2.16b, v2.16b, v6.16b
+ st1 {v2.16b}, [x1], #16
+
+.Lctr_crypt_end:
/* store new CTR */
- rev x7, x7;
- rev x8, x8;
- stp x7, x8, [x3];
+ rev x7, x7
+ rev x8, x8
+ stp x7, x8, [x3]
- ret;
-SYM_FUNC_END(sm4_neon_ctr_enc_blk8)
+ ret
+SYM_FUNC_END(sm4_neon_ctr_crypt)
diff --git a/arch/arm64/crypto/sm4-neon-glue.c b/arch/arm64/crypto/sm4-neon-glue.c
index 03a6a6866a31..7b19accf5c03 100644
--- a/arch/arm64/crypto/sm4-neon-glue.c
+++ b/arch/arm64/crypto/sm4-neon-glue.c
@@ -18,19 +18,14 @@
#include <crypto/internal/skcipher.h>
#include <crypto/sm4.h>
-#define BYTES2BLKS(nbytes) ((nbytes) >> 4)
-#define BYTES2BLK8(nbytes) (((nbytes) >> 4) & ~(8 - 1))
-
-asmlinkage void sm4_neon_crypt_blk1_8(const u32 *rkey, u8 *dst, const u8 *src,
- unsigned int nblks);
-asmlinkage void sm4_neon_crypt_blk8(const u32 *rkey, u8 *dst, const u8 *src,
- unsigned int nblks);
-asmlinkage void sm4_neon_cbc_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
-asmlinkage void sm4_neon_cfb_dec_blk8(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
-asmlinkage void sm4_neon_ctr_enc_blk8(const u32 *rkey, u8 *dst, const u8 *src,
- u8 *iv, unsigned int nblks);
+asmlinkage void sm4_neon_crypt(const u32 *rkey, u8 *dst, const u8 *src,
+ unsigned int nblocks);
+asmlinkage void sm4_neon_cbc_dec(const u32 *rkey_dec, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblocks);
+asmlinkage void sm4_neon_cfb_dec(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblocks);
+asmlinkage void sm4_neon_ctr_crypt(const u32 *rkey_enc, u8 *dst, const u8 *src,
+ u8 *iv, unsigned int nblocks);
static int sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
@@ -51,27 +46,18 @@ static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ unsigned int nblocks;
- kernel_neon_begin();
+ nblocks = nbytes / SM4_BLOCK_SIZE;
+ if (nblocks) {
+ kernel_neon_begin();
- nblks = BYTES2BLK8(nbytes);
- if (nblks) {
- sm4_neon_crypt_blk8(rkey, dst, src, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ sm4_neon_crypt(rkey, dst, src, nblocks);
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- sm4_neon_crypt_blk1_8(rkey, dst, src, nblks);
- nbytes -= nblks * SM4_BLOCK_SIZE;
+ kernel_neon_end();
}
- kernel_neon_end();
-
- err = skcipher_walk_done(&walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
}
return err;
@@ -138,48 +124,19 @@ static int sm4_cbc_decrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ unsigned int nblocks;
- kernel_neon_begin();
+ nblocks = nbytes / SM4_BLOCK_SIZE;
+ if (nblocks) {
+ kernel_neon_begin();
- nblks = BYTES2BLK8(nbytes);
- if (nblks) {
- sm4_neon_cbc_dec_blk8(ctx->rkey_dec, dst, src,
- walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ sm4_neon_cbc_dec(ctx->rkey_dec, dst, src,
+ walk.iv, nblocks);
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- u8 keystream[SM4_BLOCK_SIZE * 8];
- u8 iv[SM4_BLOCK_SIZE];
- int i;
-
- sm4_neon_crypt_blk1_8(ctx->rkey_dec, keystream,
- src, nblks);
-
- src += ((int)nblks - 2) * SM4_BLOCK_SIZE;
- dst += (nblks - 1) * SM4_BLOCK_SIZE;
- memcpy(iv, src + SM4_BLOCK_SIZE, SM4_BLOCK_SIZE);
-
- for (i = nblks - 1; i > 0; i--) {
- crypto_xor_cpy(dst, src,
- &keystream[i * SM4_BLOCK_SIZE],
- SM4_BLOCK_SIZE);
- src -= SM4_BLOCK_SIZE;
- dst -= SM4_BLOCK_SIZE;
- }
- crypto_xor_cpy(dst, walk.iv,
- keystream, SM4_BLOCK_SIZE);
- memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
- nbytes -= nblks * SM4_BLOCK_SIZE;
+ kernel_neon_end();
}
- kernel_neon_end();
-
- err = skcipher_walk_done(&walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
}
return err;
@@ -238,41 +195,21 @@ static int sm4_cfb_decrypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ unsigned int nblocks;
- kernel_neon_begin();
+ nblocks = nbytes / SM4_BLOCK_SIZE;
+ if (nblocks) {
+ kernel_neon_begin();
- nblks = BYTES2BLK8(nbytes);
- if (nblks) {
- sm4_neon_cfb_dec_blk8(ctx->rkey_enc, dst, src,
- walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ sm4_neon_cfb_dec(ctx->rkey_enc, dst, src,
+ walk.iv, nblocks);
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- u8 keystream[SM4_BLOCK_SIZE * 8];
-
- memcpy(keystream, walk.iv, SM4_BLOCK_SIZE);
- if (nblks > 1)
- memcpy(&keystream[SM4_BLOCK_SIZE], src,
- (nblks - 1) * SM4_BLOCK_SIZE);
- memcpy(walk.iv, src + (nblks - 1) * SM4_BLOCK_SIZE,
- SM4_BLOCK_SIZE);
-
- sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream,
- keystream, nblks);
-
- crypto_xor_cpy(dst, src, keystream,
- nblks * SM4_BLOCK_SIZE);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ kernel_neon_end();
- kernel_neon_end();
+ dst += nblocks * SM4_BLOCK_SIZE;
+ src += nblocks * SM4_BLOCK_SIZE;
+ nbytes -= nblocks * SM4_BLOCK_SIZE;
+ }
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
@@ -302,40 +239,21 @@ static int sm4_ctr_crypt(struct skcipher_request *req)
while ((nbytes = walk.nbytes) > 0) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- unsigned int nblks;
+ unsigned int nblocks;
- kernel_neon_begin();
+ nblocks = nbytes / SM4_BLOCK_SIZE;
+ if (nblocks) {
+ kernel_neon_begin();
- nblks = BYTES2BLK8(nbytes);
- if (nblks) {
- sm4_neon_ctr_enc_blk8(ctx->rkey_enc, dst, src,
- walk.iv, nblks);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ sm4_neon_ctr_crypt(ctx->rkey_enc, dst, src,
+ walk.iv, nblocks);
- nblks = BYTES2BLKS(nbytes);
- if (nblks) {
- u8 keystream[SM4_BLOCK_SIZE * 8];
- int i;
-
- for (i = 0; i < nblks; i++) {
- memcpy(&keystream[i * SM4_BLOCK_SIZE],
- walk.iv, SM4_BLOCK_SIZE);
- crypto_inc(walk.iv, SM4_BLOCK_SIZE);
- }
- sm4_neon_crypt_blk1_8(ctx->rkey_enc, keystream,
- keystream, nblks);
-
- crypto_xor_cpy(dst, src, keystream,
- nblks * SM4_BLOCK_SIZE);
- dst += nblks * SM4_BLOCK_SIZE;
- src += nblks * SM4_BLOCK_SIZE;
- nbytes -= nblks * SM4_BLOCK_SIZE;
- }
+ kernel_neon_end();
- kernel_neon_end();
+ dst += nblocks * SM4_BLOCK_SIZE;
+ src += nblocks * SM4_BLOCK_SIZE;
+ nbytes -= nblocks * SM4_BLOCK_SIZE;
+ }
/* tail */
if (walk.nbytes == walk.total && nbytes > 0) {
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 0890e4f568fb..cbb3d961123b 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1, \
" cbnz %w0, 1b\n" \
" " #mb "\n" \
"2:" \
- : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr) \
: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
: cl); \
\
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 52075e93de6c..a94d6dacc029 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1, \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]" \
: [old1] "+&r" (x0), [old2] "+&r" (x1), \
- [v] "+Q" (*(unsigned long *)ptr) \
+ [v] "+Q" (*(__uint128_t *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
: cl); \
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 4e8b66c74ea2..683ca3af4084 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -124,6 +124,8 @@
#define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025
#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
+#define APPLE_CPU_PART_M2_BLIZZARD 0x032
+#define APPLE_CPU_PART_M2_AVALANCHE 0x033
#define AMPERE_CPU_PART_AMPERE1 0xAC3
@@ -177,6 +179,8 @@
#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 439e2bc5d5d8..de4ff90785b2 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,8 +14,16 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
+
+bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg);
#else
#define efi_init()
+
+static inline
+bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
+{
+ return false;
+}
#endif
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
@@ -25,6 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
({ \
efi_virtmap_load(); \
__efi_fpsimd_begin(); \
+ spin_lock(&efi_rt_lock); \
})
#undef arch_efi_call_virt
@@ -33,12 +42,23 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_teardown() \
({ \
+ spin_unlock(&efi_rt_lock); \
__efi_fpsimd_end(); \
efi_virtmap_unload(); \
})
+extern spinlock_t efi_rt_lock;
+extern u64 *efi_rt_stack_top;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
+/*
+ * efi_rt_stack_top[-1] contains the value the stack pointer had before
+ * switching to the EFI runtime stack.
+ */
+#define current_in_efi() \
+ (!preemptible() && efi_rt_stack_top != NULL && \
+ on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
+
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/*
@@ -76,13 +96,23 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
}
-#define alloc_screen_info(x...) &screen_info
-
-static inline void free_screen_info(struct screen_info *si)
+static inline unsigned long efi_get_kimg_min_align(void)
{
+ extern bool efi_nokaslr;
+
+ /*
+ * Although relocatable kernels can fix up the misalignment with
+ * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are
+ * subtly out of sync with those recorded in the vmlinux when kaslr is
+ * disabled but the image required relocation anyway. Therefore retain
+ * 2M alignment if KASLR was explicitly disabled, even if it was not
+ * going to be activated to begin with.
+ */
+ return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
}
#define EFI_ALLOC_ALIGN SZ_64K
+#define EFI_ALLOC_LIMIT ((1UL << 48) - 1)
/*
* On ARM systems, virtually remapped UEFI runtime services are set up in two
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 15b34fbfca66..206de10524e3 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -114,6 +114,15 @@
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
+#define ESR_ELx_FSC_SEA_TTW0 (0x14)
+#define ESR_ELx_FSC_SEA_TTW1 (0x15)
+#define ESR_ELx_FSC_SEA_TTW2 (0x16)
+#define ESR_ELx_FSC_SEA_TTW3 (0x17)
+#define ESR_ELx_FSC_SECC (0x18)
+#define ESR_ELx_FSC_SECC_TTW0 (0x1c)
+#define ESR_ELx_FSC_SECC_TTW1 (0x1d)
+#define ESR_ELx_FSC_SECC_TTW2 (0x1e)
+#define ESR_ELx_FSC_SECC_TTW3 (0x1f)
/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8aa8492dafc0..26b0c97df986 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -135,7 +135,7 @@
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
* not known to exist and will break with this configuration.
*
- * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
+ * The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
*
* Note that when using 4K pages, we concatenate two first level page tables
* together. With 16K pages, we concatenate 16 first level page tables.
@@ -319,30 +319,19 @@
BIT(18) | \
GENMASK(16, 15))
-/* For compatibility with fault code shared with 32-bit */
-#define FSC_FAULT ESR_ELx_FSC_FAULT
-#define FSC_ACCESS ESR_ELx_FSC_ACCESS
-#define FSC_PERM ESR_ELx_FSC_PERM
-#define FSC_SEA ESR_ELx_FSC_EXTABT
-#define FSC_SEA_TTW0 (0x14)
-#define FSC_SEA_TTW1 (0x15)
-#define FSC_SEA_TTW2 (0x16)
-#define FSC_SEA_TTW3 (0x17)
-#define FSC_SECC (0x18)
-#define FSC_SECC_TTW0 (0x1c)
-#define FSC_SECC_TTW1 (0x1d)
-#define FSC_SECC_TTW2 (0x1e)
-#define FSC_SECC_TTW3 (0x1f)
-
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
/*
* We have
* PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12]
* HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12]
+ *
+ * Always assume a 52-bit PA since, at this point, we don't know how many PA bits
+ * the page table has been set up for. This should be safe since unused address
+ * bits in PAR are res0.
*/
#define PAR_TO_HPFAR(par) \
- (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
+ (((par) & GENMASK_ULL(52 - 1, 12)) >> 8)
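A quick worked example of the field shuffle (values hypothetical): for a faulting IPA of 0x80012000, PAR[51:12] = 0x80012, and the >> 8 lands it in the HPFAR FIPA field at bits [43:4]:

	#include <stdint.h>
	#include <assert.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

	/* PAR_TO_HPFAR as above, assuming a 52-bit PA */
	static uint64_t par_to_hpfar(uint64_t par)
	{
		return (par & GENMASK_ULL(51, 12)) >> 8;
	}

	int main(void)
	{
		uint64_t par = 0x80012000ULL;	/* IPA 0x80012000 */
		uint64_t hpfar = par_to_hpfar(par);

		/* recover the IPA: HPFAR holds FIPA[51:12] at bits [43:4] */
		assert(((hpfar & ~0xfULL) << 8) == 0x80012000ULL);
		return 0;
	}
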
#define ECN(x) { ESR_ELx_EC_##x, #x }
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 53035763e48e..43c3bc0f9544 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -76,6 +76,9 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
+ __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -106,7 +109,7 @@ enum __kvm_host_smccc_func {
#define per_cpu_ptr_nvhe_sym(sym, cpu) \
({ \
unsigned long base, off; \
- base = kvm_arm_hyp_percpu_base[cpu]; \
+ base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; \
off = (unsigned long)&CHOOSE_NVHE_SYM(sym) - \
(unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start); \
base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL; \
@@ -211,7 +214,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector CHOOSE_HYP_SYM(__kvm_hyp_vector)
-extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 9bdba47f7e14..193583df2d9c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
- case FSC_SEA:
- case FSC_SEA_TTW0:
- case FSC_SEA_TTW1:
- case FSC_SEA_TTW2:
- case FSC_SEA_TTW3:
- case FSC_SECC:
- case FSC_SECC_TTW0:
- case FSC_SECC_TTW1:
- case FSC_SECC_TTW2:
- case FSC_SECC_TTW3:
+ case ESR_ELx_FSC_EXTABT:
+ case ESR_ELx_FSC_SEA_TTW0:
+ case ESR_ELx_FSC_SEA_TTW1:
+ case ESR_ELx_FSC_SEA_TTW2:
+ case ESR_ELx_FSC_SEA_TTW3:
+ case ESR_ELx_FSC_SECC:
+ case ESR_ELx_FSC_SECC_TTW0:
+ case ESR_ELx_FSC_SECC_TTW1:
+ case ESR_ELx_FSC_SECC_TTW2:
+ case ESR_ELx_FSC_SECC_TTW3:
return true;
default:
return false;
@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
- if (kvm_vcpu_abt_iss1tw(vcpu))
- return true;
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
+ /*
+ * Only a permission fault on a S1PTW should be
+ * considered as a write. Otherwise, page tables baked
+ * in a read-only memslot will result in an exception
+ * being delivered in the guest.
+ *
+ * The drawback is that we end up faulting twice if the
+ * guest is using any of HW AF/DB: a translation fault
+ * to map the page containing the PT (read only at
+ * first), then a permission fault to allow the flags
+ * to be set.
+ */
+ switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+ case ESR_ELx_FSC_PERM:
+ return true;
+ default:
+ return false;
+ }
+ }
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fd34ab155d0b..35a159d131b5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -73,6 +73,63 @@ u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
+struct kvm_hyp_memcache {
+ phys_addr_t head;
+ unsigned long nr_pages;
+};
+
+static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
+ phys_addr_t *p,
+ phys_addr_t (*to_pa)(void *virt))
+{
+ *p = mc->head;
+ mc->head = to_pa(p);
+ mc->nr_pages++;
+}
+
+static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
+ void *(*to_va)(phys_addr_t phys))
+{
+ phys_addr_t *p = to_va(mc->head);
+
+ if (!mc->nr_pages)
+ return NULL;
+
+ mc->head = *p;
+ mc->nr_pages--;
+
+ return p;
+}
+
+static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
+ unsigned long min_pages,
+ void *(*alloc_fn)(void *arg),
+ phys_addr_t (*to_pa)(void *virt),
+ void *arg)
+{
+ while (mc->nr_pages < min_pages) {
+ phys_addr_t *p = alloc_fn(arg);
+
+ if (!p)
+ return -ENOMEM;
+ push_hyp_memcache(mc, p, to_pa);
+ }
+
+ return 0;
+}
+
+static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
+ void (*free_fn)(void *virt, void *arg),
+ void *(*to_va)(phys_addr_t phys),
+ void *arg)
+{
+ while (mc->nr_pages)
+ free_fn(pop_hyp_memcache(mc, to_va), arg);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc);
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
+
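The memcache is an intrusive LIFO: the first word of each free page stores the physical address of the next entry, so no metadata allocation is ever needed. A hedged host-side usage sketch; the host_* helpers below are hypothetical stand-ins (at EL2 the hyp's own translation helpers would be passed instead):

	static void *host_alloc_page(void *arg)
	{
		return (void *)__get_free_page(GFP_KERNEL);
	}

	static phys_addr_t host_va_to_pa(void *va)
	{
		return __pa(va);
	}

	static void *host_pa_to_va(phys_addr_t pa)
	{
		return __va(pa);
	}

	static int memcache_demo(struct kvm_hyp_memcache *mc)
	{
		void *page;
		int err;

		/* stash at least four pages on the intrusive stack */
		err = __topup_hyp_memcache(mc, 4, host_alloc_page,
					   host_va_to_pa, NULL);
		if (err)
			return err;

		/* pop one for immediate use; its first word held the old head */
		page = pop_hyp_memcache(mc, host_pa_to_va);
		return page ? 0 : -ENOMEM;
	}
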
struct kvm_vmid {
atomic64_t id;
};
@@ -115,6 +172,13 @@ struct kvm_smccc_features {
unsigned long vendor_hyp_bmap;
};
+typedef unsigned int pkvm_handle_t;
+
+struct kvm_protected_vm {
+ pkvm_handle_t handle;
+ struct kvm_hyp_memcache teardown_mc;
+};
+
struct kvm_arch {
struct kvm_s2_mmu mmu;
@@ -163,9 +227,19 @@ struct kvm_arch {
u8 pfr0_csv2;
u8 pfr0_csv3;
+ struct {
+ u8 imp:4;
+ u8 unimp:4;
+ } dfr0_pmuver;
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
+
+ /*
+ * For an untrusted host VM, 'pkvm.handle' is used to look up
+ * the associated pKVM instance in the hypervisor.
+ */
+ struct kvm_protected_vm pkvm;
};
struct kvm_vcpu_fault_info {
@@ -925,8 +999,6 @@ int kvm_set_ipa_limit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
-int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
-
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
return false;
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index aa7fa2a08f06..6797eafe7890 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -123,4 +123,7 @@ extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
+extern unsigned long kvm_nvhe_sym(__icache_flags);
+extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
+
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7784081088e7..e4a7e6369499 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -166,7 +166,7 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
-int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 3252eb50ecfe..63f81b27a4e3 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -42,6 +42,8 @@ typedef u64 kvm_pte_t;
#define KVM_PTE_ADDR_MASK GENMASK(47, PAGE_SHIFT)
#define KVM_PTE_ADDR_51_48 GENMASK(15, 12)
+#define KVM_PHYS_INVALID (-1ULL)
+
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
return pte & KVM_PTE_VALID;
@@ -57,6 +59,18 @@ static inline u64 kvm_pte_to_phys(kvm_pte_t pte)
return pa;
}
+static inline kvm_pte_t kvm_phys_to_pte(u64 pa)
+{
+ kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
+
+ if (PAGE_SHIFT == 16) {
+ pa &= GENMASK(51, 48);
+ pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
+ }
+
+ return pte;
+}
+
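With 64K pages the physical address cannot live entirely in PTE bits [47:16], so bits [51:48] are folded into PTE[15:12]; kvm_pte_to_phys() in this file undoes the fold. A worked round-trip with a hypothetical address:

	#include <stdint.h>
	#include <assert.h>

	#define GENMASK_ULL(h, l) \
		(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
	#define PAGE_SHIFT	16

	int main(void)
	{
		uint64_t pa = 0x000F000000010000ULL;
		uint64_t pte = pa & GENMASK_ULL(47, PAGE_SHIFT);

		/* fold PA[51:48] into PTE[15:12], as kvm_phys_to_pte() does */
		pte |= ((pa & GENMASK_ULL(51, 48)) >> 48) << 12;
		assert(pte == 0x1F000ULL);

		/* reverse: kvm_pte_to_phys() */
		pa = (pte & GENMASK_ULL(47, PAGE_SHIFT)) |
		     (((pte & GENMASK_ULL(15, 12)) >> 12) << 48);
		assert(pa == 0x000F000000010000ULL);
		return 0;
	}
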
static inline u64 kvm_granule_shift(u32 level)
{
/* Assumes KVM_PGTABLE_MAX_LEVELS is 4 */
@@ -85,6 +99,8 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
* allocation is physically contiguous.
* @free_pages_exact: Free an exact number of memory pages previously
* allocated by zalloc_pages_exact.
+ * @free_removed_table: Free a removed paging structure by unlinking and
+ * dropping references.
* @get_page: Increment the refcount on a page.
* @put_page: Decrement the refcount on a page. When the
* refcount reaches 0 the page is automatically
@@ -103,6 +119,7 @@ struct kvm_pgtable_mm_ops {
void* (*zalloc_page)(void *arg);
void* (*zalloc_pages_exact)(size_t size);
void (*free_pages_exact)(void *addr, size_t size);
+ void (*free_removed_table)(void *addr, u32 level);
void (*get_page)(void *addr);
void (*put_page)(void *addr);
int (*page_count)(void *addr);
@@ -162,29 +179,6 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
enum kvm_pgtable_prot prot);
/**
- * struct kvm_pgtable - KVM page-table.
- * @ia_bits: Maximum input address size, in bits.
- * @start_level: Level at which the page-table walk starts.
- * @pgd: Pointer to the first top-level entry of the page-table.
- * @mm_ops: Memory management callbacks.
- * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
- * @flags: Stage-2 page-table flags.
- * @force_pte_cb: Function that returns true if page level mappings must
- * be used instead of block mappings.
- */
-struct kvm_pgtable {
- u32 ia_bits;
- u32 start_level;
- kvm_pte_t *pgd;
- struct kvm_pgtable_mm_ops *mm_ops;
-
- /* Stage-2 only */
- struct kvm_s2_mmu *mmu;
- enum kvm_pgtable_stage2_flags flags;
- kvm_pgtable_force_pte_cb_t force_pte_cb;
-};
-
-/**
* enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
* @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid
* entries.
@@ -192,17 +186,34 @@ struct kvm_pgtable {
* children.
* @KVM_PGTABLE_WALK_TABLE_POST: Visit table entries after their
* children.
+ * @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared
+ * with other software walkers.
*/
enum kvm_pgtable_walk_flags {
KVM_PGTABLE_WALK_LEAF = BIT(0),
KVM_PGTABLE_WALK_TABLE_PRE = BIT(1),
KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
+ KVM_PGTABLE_WALK_SHARED = BIT(3),
+};
+
+struct kvm_pgtable_visit_ctx {
+ kvm_pte_t *ptep;
+ kvm_pte_t old;
+ void *arg;
+ struct kvm_pgtable_mm_ops *mm_ops;
+ u64 addr;
+ u64 end;
+ u32 level;
+ enum kvm_pgtable_walk_flags flags;
};
-typedef int (*kvm_pgtable_visitor_fn_t)(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg);
+typedef int (*kvm_pgtable_visitor_fn_t)(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit);
+
+static inline bool kvm_pgtable_walk_shared(const struct kvm_pgtable_visit_ctx *ctx)
+{
+ return ctx->flags & KVM_PGTABLE_WALK_SHARED;
+}
/**
* struct kvm_pgtable_walker - Hook into a page-table walk.
@@ -217,6 +228,94 @@ struct kvm_pgtable_walker {
const enum kvm_pgtable_walk_flags flags;
};
+/*
+ * RCU cannot be used in a non-kernel context such as the hyp. As such, page
+ * table walkers used in hyp do not call into RCU and instead use other
+ * synchronization mechanisms (such as a spinlock).
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+
+typedef kvm_pte_t *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+ kvm_pteref_t pteref)
+{
+ return pteref;
+}
+
+static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
+{
+ /*
+ * Due to the lack of RCU (or a similar protection scheme), only
+ * non-shared table walkers are allowed in the hypervisor.
+ */
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ return -EPERM;
+
+ return 0;
+}
+
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker) {}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+ return true;
+}
+
+#else
+
+typedef kvm_pte_t __rcu *kvm_pteref_t;
+
+static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walker,
+ kvm_pteref_t pteref)
+{
+ return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
+}
+
+static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
+{
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ rcu_read_lock();
+
+ return 0;
+}
+
+static inline void kvm_pgtable_walk_end(struct kvm_pgtable_walker *walker)
+{
+ if (walker->flags & KVM_PGTABLE_WALK_SHARED)
+ rcu_read_unlock();
+}
+
+static inline bool kvm_pgtable_walk_lock_held(void)
+{
+ return rcu_read_lock_held();
+}
+
+#endif
+
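The begin/end hooks let callers bracket a walk identically whether or not RCU is the protection scheme; only the shared case takes the read lock, and shared walks are rejected outright at hyp. A hedged sketch of the expected calling pattern (do_the_actual_walk() is a placeholder):

	static int do_the_actual_walk(struct kvm_pgtable_walker *walker)
	{
		return 0;	/* placeholder for the real traversal */
	}

	static int walk_under_protection(struct kvm_pgtable_walker *walker)
	{
		int r;

		r = kvm_pgtable_walk_begin(walker);	/* rcu_read_lock() if shared */
		if (r)
			return r;	/* -EPERM for shared walks at hyp */

		r = do_the_actual_walk(walker);

		kvm_pgtable_walk_end(walker);	/* rcu_read_unlock() if shared */
		return r;
	}
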
+/**
+ * struct kvm_pgtable - KVM page-table.
+ * @ia_bits: Maximum input address size, in bits.
+ * @start_level: Level at which the page-table walk starts.
+ * @pgd: Pointer to the first top-level entry of the page-table.
+ * @mm_ops: Memory management callbacks.
+ * @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
+ * @flags: Stage-2 page-table flags.
+ * @force_pte_cb: Function that returns true if page level mappings must
+ * be used instead of block mappings.
+ */
+struct kvm_pgtable {
+ u32 ia_bits;
+ u32 start_level;
+ kvm_pteref_t pgd;
+ struct kvm_pgtable_mm_ops *mm_ops;
+
+ /* Stage-2 only */
+ struct kvm_s2_mmu *mmu;
+ enum kvm_pgtable_stage2_flags flags;
+ kvm_pgtable_force_pte_cb_t force_pte_cb;
+};
+
/**
* kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
* @pgt: Uninitialised page-table structure to initialise.
@@ -297,6 +396,14 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/**
+ * kvm_pgtable_stage2_pgd_size() - Helper to compute size of a stage-2 PGD
+ * @vtcr: Content of the VTCR register.
+ *
+ * Return: the size (in bytes) of the stage-2 PGD
+ */
+size_t kvm_pgtable_stage2_pgd_size(u64 vtcr);
+
+/**
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise.
* @mmu: S2 MMU context for this S2 translation
@@ -325,6 +432,17 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
+ * kvm_pgtable_stage2_free_removed() - Free a removed stage-2 paging structure.
+ * @mm_ops: Memory management callbacks.
+ * @pgtable: Unlinked stage-2 paging structure to be freed.
+ * @level: Level of the stage-2 paging structure to be freed.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior to
+ * freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level);
+
+/**
* kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address at which to place the mapping.
@@ -333,6 +451,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
* @prot: Permissions and attributes for the mapping.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
+ * @flags: Flags to control the page-table walk (e.g. a shared walk)
*
* The offset of @addr within a page is ignored, @size is rounded-up to
* the next page boundary and @phys is rounded-down to the previous page
@@ -354,7 +473,7 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
*/
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc);
+ void *mc, enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index 9f4ad2a8df59..01129b0d4c68 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -9,11 +9,49 @@
#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>
+/* Maximum number of VMs that can co-exist under pKVM. */
+#define KVM_MAX_PVMS 255
+
#define HYP_MEMBLOCK_REGIONS 128
+int pkvm_init_host_vm(struct kvm *kvm);
+int pkvm_create_hyp_vm(struct kvm *kvm);
+void pkvm_destroy_hyp_vm(struct kvm *kvm);
+
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
+static inline unsigned long
+hyp_vmemmap_memblock_size(struct memblock_region *reg, size_t vmemmap_entry_size)
+{
+ unsigned long nr_pages = reg->size >> PAGE_SHIFT;
+ unsigned long start, end;
+
+ start = (reg->base >> PAGE_SHIFT) * vmemmap_entry_size;
+ end = start + nr_pages * vmemmap_entry_size;
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+ end = ALIGN(end, PAGE_SIZE);
+
+ return end - start;
+}
+
+static inline unsigned long hyp_vmemmap_pages(size_t vmemmap_entry_size)
+{
+ unsigned long res = 0, i;
+
+ for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+ res += hyp_vmemmap_memblock_size(&kvm_nvhe_sym(hyp_memory)[i],
+ vmemmap_entry_size);
+ }
+
+ return res >> PAGE_SHIFT;
+}
+
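The per-memblock arithmetic above is easiest to sanity-check with numbers. A standalone worked example, assuming 4K pages and a hypothetical 4-byte vmemmap entry:

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
	#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))

	/* Worked example of hyp_vmemmap_memblock_size() for one region. */
	int main(void)
	{
		unsigned long base = 0x80000000UL, size = 1UL << 30; /* 1 GiB */
		unsigned long entry = 4;	/* hypothetical entry size */
		unsigned long nr_pages = size >> PAGE_SHIFT;
		unsigned long start = (base >> PAGE_SHIFT) * entry;
		unsigned long end = ALIGN(start + nr_pages * entry, PAGE_SIZE);

		start = ALIGN_DOWN(start, PAGE_SIZE);
		printf("%lu vmemmap bytes (%lu pages)\n", end - start,
		       (end - start) >> PAGE_SHIFT); /* 1 MiB -> 256 pages */
		return 0;
	}
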
+static inline unsigned long hyp_vm_table_pages(void)
+{
+ return PAGE_ALIGN(KVM_MAX_PVMS * sizeof(void *)) >> PAGE_SHIFT;
+}
+
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
unsigned long total = 0, i;
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 760c62f8e22f..20dd06d70af5 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from,
unsigned long n);
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
-bool mte_restore_tags(swp_entry_t entry, struct page *page);
+void mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
@@ -36,6 +36,58 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
+/* simple lock to avoid multiple threads tagging the same page */
+#define PG_mte_lock PG_arch_3
+
+static inline void set_page_mte_tagged(struct page *page)
+{
+ /*
+ * Ensure that the tags written prior to this function are visible
+ * before the page flags update.
+ */
+ smp_wmb();
+ set_bit(PG_mte_tagged, &page->flags);
+}
+
+static inline bool page_mte_tagged(struct page *page)
+{
+ bool ret = test_bit(PG_mte_tagged, &page->flags);
+
+ /*
+ * If the page is tagged, ensure ordering with a likely subsequent
+ * read of the tags.
+ */
+ if (ret)
+ smp_rmb();
+ return ret;
+}
+
+/*
+ * Lock the page for tagging and return 'true' if the page can be tagged,
+ * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the
+ * locking only happens once for page initialisation.
+ *
+ * The page MTE lock state:
+ *
+ * Locked: PG_mte_lock && !PG_mte_tagged
+ * Unlocked: !PG_mte_lock || PG_mte_tagged
+ *
+ * Acquire semantics only if the page is tagged (returning 'false').
+ */
+static inline bool try_page_mte_tagging(struct page *page)
+{
+ if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ return true;
+
+ /*
+ * The tags are either being initialised or may have been initialised
+ * already. Check if the PG_mte_tagged flag has been set or wait
+ * otherwise.
+ */
+ smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+
+ return false;
+}
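Taken together, the three helpers implement a one-shot initialisation lock. A hedged usage sketch (mte_clear_page_tags() stands in for whatever initialises or restores the tags):

	static void init_tags_once(struct page *page)
	{
		/* false (with acquire semantics) means another thread won */
		if (try_page_mte_tagging(page)) {
			mte_clear_page_tags(page_address(page));
			set_page_mte_tagged(page); /* release: publish tags */
		}
	}
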
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t old_pte, pte_t pte);
@@ -56,6 +108,17 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size);
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
+static inline void set_page_mte_tagged(struct page *page)
+{
+}
+static inline bool page_mte_tagged(struct page *page)
+{
+ return false;
+}
+static inline bool try_page_mte_tagging(struct page *page)
+{
+ return false;
+}
static inline void mte_zero_clear_page_tags(void *addr)
{
}
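Taken together, try_page_mte_tagging() and set_page_mte_tagged() form a one-shot lock/publish pair; the cpufeature.c, mte.c and guest.c hunks later in this diff all follow the same shape. A minimal sketch of the calling pattern (the helper name is illustrative):

/* Illustrative only: the one-shot tag-initialisation idiom */
static void init_page_tags_once(struct page *page, void *vaddr)
{
	if (try_page_mte_tagging(page)) {	/* won the lock */
		mte_clear_page_tags(vaddr);	/* initialise the tags */
		set_page_mte_tagged(page);	/* publish, implies smp_wmb() */
	}
	/* losers have already waited for PG_mte_tagged inside the helper */
}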
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index c36d56dbf940..65e78999c75d 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud))
#define pud_user(pud) pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud) pte_user_exec(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
{
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
#else
#define pud_page_paddr(pud) ({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud) pud_user(pud) /* Always 0 with folding */
/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr) NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
static inline bool pmd_user_accessible_page(pmd_t pmd)
{
- return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+ return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}
static inline bool pud_user_accessible_page(pud_t pud)
{
- return pud_leaf(pud) && pud_user(pud);
+ return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
}
#endif
@@ -1020,8 +1021,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-extern int kern_addr_valid(unsigned long addr);
-
#ifdef CONFIG_ARM64_MTE
#define __HAVE_ARCH_PREPARE_TO_SWAP
@@ -1048,8 +1047,8 @@ static inline void arch_swap_invalidate_area(int type)
#define __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
- if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
- set_bit(PG_mte_tagged, &folio->flags);
+ if (system_supports_mte())
+ mte_restore_tags(entry, &folio->page);
}
#endif /* CONFIG_ARM64_MTE */
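The mteswap.c side is not part of this hunk, but the new void signature implies that the PG_mte_tagged handling moved into mte_restore_tags() itself. Roughly, assuming the saved tags remain stored in the mte_pages xarray:

/* Sketch of the implied mteswap.c counterpart, not a hunk of this patch */
void mte_restore_tags(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);	/* assumed store */

	if (!tags)
		return;

	if (try_page_mte_tagging(page)) {
		mte_restore_page_tags(page_address(page), tags);
		set_page_mte_tagged(page);
	}
}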
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index b1dd7ecff7ef..581caac525b0 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -23,6 +23,7 @@ struct ptdump_info {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_PTDUMP_DEBUGFS
+#define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 4e5354beafb0..66ec8caa6ac0 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void)
#define stackinfo_get_sdei_critical() stackinfo_get_unknown()
#endif
+#ifdef CONFIG_EFI
+extern u64 *efi_rt_stack_top;
+
+static inline struct stack_info stackinfo_get_efi(void)
+{
+ unsigned long high = (u64)efi_rt_stack_top;
+ unsigned long low = high - THREAD_SIZE;
+
+ return (struct stack_info) {
+ .low = low,
+ .high = high,
+ };
+}
+#endif
+
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
index ba4bff5ca674..2b09495499c6 100644
--- a/arch/arm64/include/asm/uprobes.h
+++ b/arch/arm64/include/asm/uprobes.h
@@ -16,7 +16,7 @@
#define UPROBE_SWBP_INSN_SIZE AARCH64_INSN_SIZE
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
struct arch_uprobe_task {
};
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 316917b98707..a7a857f1784d 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -43,6 +43,7 @@
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 8dd925f4a4c6..ceba6792f5b3 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -36,12 +36,6 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
syscall.o proton-pack.o idreg-override.o idle.o \
patching.o
-targets += efi-entry.o
-
-OBJCOPYFLAGS := --prefix-symbols=__efistub_
-$(obj)/%.stub.o: $(obj)/%.o FORCE
- $(call if_changed,objcopy)
-
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
obj-$(CONFIG_COMPAT) += sigreturn32.o
@@ -57,8 +51,7 @@ obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \
- efi-rt-wrapper.o
+obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
obj-$(CONFIG_ACPI) += acpi.o
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 7e76e1fda2a1..a77315b338e6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2076,8 +2076,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
- if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
+ if (try_page_mte_tagging(ZERO_PAGE(0))) {
mte_clear_page_tags(lm_alias(empty_zero_page));
+ set_page_mte_tagged(ZERO_PAGE(0));
+ }
kasan_init_hw_tags_cpu();
}
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
deleted file mode 100644
index 61a87fa1c305..000000000000
--- a/arch/arm64/kernel/efi-entry.S
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * EFI entry point.
- *
- * Copyright (C) 2013, 2014 Red Hat, Inc.
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/assembler.h>
-
- __INIT
-
-SYM_CODE_START(efi_enter_kernel)
- /*
- * efi_pe_entry() will have copied the kernel image if necessary and we
- * end up here with device tree address in x1 and the kernel entry
- * point stored in x0. Save those values in registers which are
- * callee preserved.
- */
- ldr w2, =primary_entry_offset
- add x19, x0, x2 // relocated Image entrypoint
- mov x20, x1 // DTB address
-
- /*
- * Clean the copied Image to the PoC, and ensure it is not shadowed by
- * stale icache entries from before relocation.
- */
- ldr w1, =kernel_size
- add x1, x0, x1
- bl dcache_clean_poc
- ic ialluis
-
- /*
- * Clean the remainder of this routine to the PoC
- * so that we can safely disable the MMU and caches.
- */
- adr x0, 0f
- adr x1, 3f
- bl dcache_clean_poc
-0:
- /* Turn off Dcache and MMU */
- mrs x0, CurrentEL
- cmp x0, #CurrentEL_EL2
- b.ne 1f
- mrs x0, sctlr_el2
- bic x0, x0, #1 << 0 // clear SCTLR.M
- bic x0, x0, #1 << 2 // clear SCTLR.C
- pre_disable_mmu_workaround
- msr sctlr_el2, x0
- isb
- b 2f
-1:
- mrs x0, sctlr_el1
- bic x0, x0, #1 << 0 // clear SCTLR.M
- bic x0, x0, #1 << 2 // clear SCTLR.C
- pre_disable_mmu_workaround
- msr sctlr_el1, x0
- isb
-2:
- /* Jump to kernel entry point */
- mov x0, x20
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
- br x19
-3:
-SYM_CODE_END(efi_enter_kernel)
diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S
index 75691a2641c1..e8ae803662cf 100644
--- a/arch/arm64/kernel/efi-rt-wrapper.S
+++ b/arch/arm64/kernel/efi-rt-wrapper.S
@@ -4,9 +4,10 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
SYM_FUNC_START(__efi_rt_asm_wrapper)
- stp x29, x30, [sp, #-32]!
+ stp x29, x30, [sp, #-112]!
mov x29, sp
/*
@@ -17,6 +18,22 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x1, x18, [sp, #16]
/*
+ * Preserve all callee saved registers and preserve the stack pointer
+ * value at the base of the EFI runtime stack so we can recover from
+ * synchronous exceptions occurring while executing the firmware
+ * routines.
+ */
+ stp x19, x20, [sp, #32]
+ stp x21, x22, [sp, #48]
+ stp x23, x24, [sp, #64]
+ stp x25, x26, [sp, #80]
+ stp x27, x28, [sp, #96]
+
+ ldr_l x16, efi_rt_stack_top
+ mov sp, x16
+ stp x18, x29, [sp, #-16]!
+
+ /*
* We are lucky enough that no EFI runtime services take more than
* 5 arguments, so all are passed in registers rather than via the
* stack.
@@ -29,9 +46,13 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x4, x6
blr x8
+ mov x16, sp
+ mov sp, x29
+ str xzr, [x16, #8] // clear recorded task SP value
+
ldp x1, x2, [sp, #16]
cmp x2, x18
- ldp x29, x30, [sp], #32
+ ldp x29, x30, [sp], #112
b.ne 0f
ret
0:
@@ -42,6 +63,25 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
* called with preemption disabled and a separate shadow stack is used
* for interrupts.
*/
- mov x18, x2
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldr_l x18, efi_rt_stack_top
+ ldr x18, [x18, #-16]
+#endif
+
b efi_handle_corrupted_x18 // tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)
+
+SYM_CODE_START(__efi_rt_asm_recover)
+ mov sp, x30
+
+ ldr_l x16, efi_rt_stack_top // clear recorded task SP value
+ str xzr, [x16, #-8]
+
+ ldp x19, x20, [sp, #32]
+ ldp x21, x22, [sp, #48]
+ ldp x23, x24, [sp, #64]
+ ldp x25, x26, [sp, #80]
+ ldp x27, x28, [sp, #96]
+ ldp x29, x30, [sp], #112
+ ret
+SYM_CODE_END(__efi_rt_asm_recover)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index a908a37f0367..b273900f4566 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <asm/efi.h>
+#include <asm/stacktrace.h>
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
@@ -144,3 +145,52 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
return s;
}
+
+DEFINE_SPINLOCK(efi_rt_lock);
+
+asmlinkage u64 *efi_rt_stack_top __ro_after_init;
+
+asmlinkage efi_status_t __efi_rt_asm_recover(void);
+
+bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
+{
+ /* Check whether the exception occurred while running the firmware */
+ if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
+ return false;
+
+ pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ regs->regs[0] = EFI_ABORTED;
+ regs->regs[30] = efi_rt_stack_top[-1];
+ regs->pc = (u64)__efi_rt_asm_recover;
+
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
+ regs->regs[18] = efi_rt_stack_top[-2];
+
+ return true;
+}
+
+/* EFI requires 8 KiB of stack space for runtime services */
+static_assert(THREAD_SIZE >= SZ_8K);
+
+static int __init arm64_efi_rt_init(void)
+{
+ void *p;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
+ p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
+ NUMA_NO_NODE, &&l);
+l: if (!p) {
+ pr_warn("Failed to allocate EFI runtime stack\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return -ENOMEM;
+ }
+
+ efi_rt_stack_top = p + THREAD_SIZE;
+ return 0;
+}
+core_initcall(arm64_efi_rt_init);
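efi_runtime_fixup_exception() and the unwinder changes later in this diff both depend on a current_in_efi() predicate defined in <asm/efi.h> rather than in this hunk; a hedged sketch of the check it has to perform:

/* Sketch: "is this CPU inside an EFI runtime service right now?" */
static inline bool current_in_efi(void)
{
	return !preemptible() && efi_rt_stack_top &&
	       spin_is_locked(&efi_rt_lock);
}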
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 27ef7ad3ffd2..2e94d20c4ac7 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,28 +8,27 @@
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#define for_each_mte_vma(vmi, vma) \
+#define for_each_mte_vma(cprm, i, m) \
if (system_supports_mte()) \
- for_each_vma(vmi, vma) \
- if (vma->vm_flags & VM_MTE)
+ for (i = 0, m = cprm->vma_meta; \
+ i < cprm->vma_count; \
+ i++, m = cprm->vma_meta + i) \
+ if (m->flags & VM_MTE)
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
{
- if (vma->vm_flags & VM_DONTDUMP)
- return 0;
-
- return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+ return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
}
/* Derived from dump_user_range(); start/len must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long len)
{
int ret = 1;
unsigned long addr;
void *tags = NULL;
- for (addr = start; addr < end; addr += PAGE_SIZE) {
+ for (addr = start; addr < start + len; addr += PAGE_SIZE) {
struct page *page = get_dump_page(addr);
/*
@@ -47,7 +46,7 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
* Pages mapped in user space as !pte_access_permitted() (e.g.
* PROT_EXEC only) may not have the PG_mte_tagged flag set.
*/
- if (!test_bit(PG_mte_tagged, &page->flags)) {
+ if (!page_mte_tagged(page)) {
put_page(page);
dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
continue;
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
mte_save_page_tags(page_address(page), tags);
put_page(page);
if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
- mte_free_tag_storage(tags);
ret = 0;
break;
}
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
return ret;
}
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
- struct vm_area_struct *vma;
+ int i;
+ struct core_vma_metadata *m;
int vma_count = 0;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma)
+ for_each_mte_vma(cprm, i, m)
vma_count++;
return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, current->mm, 0);
+ int i;
+ struct core_vma_metadata *m;
- for_each_mte_vma(vmi, vma) {
+ for_each_mte_vma(cprm, i, m) {
struct elf_phdr phdr;
phdr.p_type = PT_AARCH64_MEMTAG_MTE;
phdr.p_offset = offset;
- phdr.p_vaddr = vma->vm_start;
+ phdr.p_vaddr = m->start;
phdr.p_paddr = 0;
- phdr.p_filesz = mte_vma_tag_dump_size(vma);
- phdr.p_memsz = vma->vm_end - vma->vm_start;
+ phdr.p_filesz = mte_vma_tag_dump_size(m);
+ phdr.p_memsz = m->end - m->start;
offset += phdr.p_filesz;
phdr.p_flags = 0;
phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
return 1;
}
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
- struct vm_area_struct *vma;
+ int i;
+ struct core_vma_metadata *m;
size_t data_size = 0;
- VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(vmi, vma)
- data_size += mte_vma_tag_dump_size(vma);
+ for_each_mte_vma(cprm, i, m)
+ data_size += mte_vma_tag_dump_size(m);
return data_size;
}
int elf_core_write_extra_data(struct coredump_params *cprm)
{
- struct vm_area_struct *vma;
- VMA_ITERATOR(vmi, current->mm, 0);
-
- for_each_mte_vma(vmi, vma) {
- if (vma->vm_flags & VM_DONTDUMP)
- continue;
+ int i;
+ struct core_vma_metadata *m;
- if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+ for_each_mte_vma(cprm, i, m) {
+ if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
return 0;
}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index dcc81e7200d4..b6ef1af0122e 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
- if (system_supports_sve()) {
+ if (system_supports_sve() || system_supports_sme()) {
switch (current->thread.fp_type) {
case FP_STATE_FPSIMD:
/* Stop tracking SVE for this task until next use. */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index af5df48ba915..788597a6b6a2 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -271,7 +271,7 @@ static int swsusp_mte_save_tags(void)
if (!page)
continue;
- if (!test_bit(PG_mte_tagged, &page->flags))
+ if (!page_mte_tagged(page))
continue;
ret = save_tags(page, pfn);
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 8151412653de..d0e9bb5c91fc 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -10,7 +10,6 @@
#error This file should only be included in vmlinux.lds.S
#endif
-PROVIDE(__efistub_kernel_size = _edata - _text);
PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
/*
@@ -22,13 +21,6 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-PROVIDE(__efistub_memcmp = __pi_memcmp);
-PROVIDE(__efistub_memchr = __pi_memchr);
-PROVIDE(__efistub_strlen = __pi_strlen);
-PROVIDE(__efistub_strnlen = __pi_strnlen);
-PROVIDE(__efistub_strcmp = __pi_strcmp);
-PROVIDE(__efistub_strncmp = __pi_strncmp);
-PROVIDE(__efistub_strrchr = __pi_strrchr);
PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc);
PROVIDE(__efistub__text = _text);
@@ -71,12 +63,6 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler);
/* Vectors installed by hyp-init on reset HVC. */
KVM_NVHE_ALIAS(__hyp_stub_vectors);
-/* Kernel symbol used by icache_is_vpipt(). */
-KVM_NVHE_ALIAS(__icache_flags);
-
-/* VMID bits set by the KVM VMID allocator */
-KVM_NVHE_ALIAS(kvm_arm_vmid_bits);
-
/* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
@@ -92,9 +78,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
KVM_NVHE_ALIAS(__start___kvm_ex_table);
KVM_NVHE_ALIAS(__stop___kvm_ex_table);
-/* Array containing bases of nVHE per-CPU memory regions. */
-KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
-
/* PMU available static key */
#ifdef CONFIG_HW_PERF_EVENTS
KVM_NVHE_ALIAS(kvm_arm_pmu_available);
@@ -111,12 +94,6 @@ KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy);
KVM_NVHE_ALIAS_HYP(__memset, __pi_memset);
#endif
-/* Kernel memory sections */
-KVM_NVHE_ALIAS(__start_rodata);
-KVM_NVHE_ALIAS(__end_rodata);
-KVM_NVHE_ALIAS(__bss_start);
-KVM_NVHE_ALIAS(__bss_stop);
-
/* Hyp memory sections */
KVM_NVHE_ALIAS(__hyp_idmap_text_start);
KVM_NVHE_ALIAS(__hyp_idmap_text_end);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 7467217c1eaf..f5bcb0dc6267 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -41,19 +41,17 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
- if (!non_swap_entry(entry) && mte_restore_tags(entry, page))
- return;
+ if (!non_swap_entry(entry))
+ mte_restore_tags(entry, page);
}
if (!pte_is_tagged)
return;
- /*
- * Test PG_mte_tagged again in case it was racing with another
- * set_pte_at().
- */
- if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ if (try_page_mte_tagging(page)) {
mte_clear_page_tags(page_address(page));
+ set_page_mte_tagged(page);
+ }
}
void mte_sync_tags(pte_t old_pte, pte_t pte)
@@ -69,9 +67,11 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
- if (!test_bit(PG_mte_tagged, &page->flags))
+ if (!page_mte_tagged(page)) {
mte_sync_page_tags(page, old_pte, check_swap,
pte_is_tagged);
+ set_page_mte_tagged(page);
+ }
}
/* ensure the tags are visible before the PTE is set */
@@ -96,8 +96,7 @@ int memcmp_pages(struct page *page1, struct page *page2)
* pages is tagged, set_pte_at() may zero or change the tags of the
* other page via mte_sync_tags().
*/
- if (test_bit(PG_mte_tagged, &page1->flags) ||
- test_bit(PG_mte_tagged, &page2->flags))
+ if (page_mte_tagged(page1) || page_mte_tagged(page2))
return addr1 != addr2;
return ret;
@@ -454,7 +453,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
put_page(page);
break;
}
- WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
+ WARN_ON_ONCE(!page_mte_tagged(page));
/* limit access to the end of the page */
offset = offset_in_page(addr);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2686ab157601..0c321ad23cd3 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
REGSET_SSVE,
REGSET_ZA,
#endif
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index e0d09bf5b01b..be279fd48248 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
vl = task_get_sme_vl(current);
} else {
- if (!system_supports_sve())
+ /*
+		 * An SME-only system uses SVE for streaming mode, so it
+		 * can have an SVE-formatted context with a zero VL and
+		 * no payload data.
+ */
+ if (!system_supports_sve() && !system_supports_sme())
return -EINVAL;
vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
- if (system_supports_sve()) {
+ if (system_supports_sve() || system_supports_sme()) {
unsigned int vq = 0;
if (add_all || test_thread_flag(TIF_SVE) ||
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 117e2c180f3c..83154303e682 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012 ARM Ltd.
*/
#include <linux/kernel.h>
+#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
@@ -12,6 +13,7 @@
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
+#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
: stackinfo_get_unknown(); \
})
+#define STACKINFO_EFI \
+ ({ \
+ ((task == current) && current_in_efi()) \
+ ? stackinfo_get_efi() \
+ : stackinfo_get_unknown(); \
+ })
+
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
@@ -200,6 +209,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
STACKINFO_SDEI(normal),
STACKINFO_SDEI(critical),
#endif
+#ifdef CONFIG_EFI
+ STACKINFO_EFI,
+#endif
};
struct unwind_state state = {
.stacks = stacks,
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 619e2dc7ee14..beaf9586338f 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -27,7 +27,7 @@ ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
-Bsymbolic --build-id=sha1 -n $(btildflags-y)
ifdef CONFIG_LD_ORPHAN_WARN
- ldflags-y += --orphan-handling=warn
+ ldflags-y += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
endif
ldflags-y += -T
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 36c8f66cad25..f59bd1a4ead6 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -104,7 +104,7 @@ VDSO_AFLAGS += -D__ASSEMBLY__
VDSO_LDFLAGS += -Bsymbolic --no-undefined -soname=linux-vdso.so.1
VDSO_LDFLAGS += -z max-page-size=4096 -z common-page-size=4096
VDSO_LDFLAGS += -shared --hash-style=sysv --build-id=sha1
-VDSO_LDFLAGS += --orphan-handling=warn
+VDSO_LDFLAGS += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
# Borrow vdsomunge.c from the arm vDSO
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 815cc118c675..05da3c8f7e88 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -32,6 +32,8 @@ menuconfig KVM
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
+ select HAVE_KVM_DIRTY_RING_ACQ_REL
+ select NEED_KVM_DIRTY_RING_WITH_BITMAP
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 94d33e296e10..9c5573bc4614 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -37,6 +37,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
@@ -50,7 +51,6 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
static bool vgic_present;
@@ -138,24 +138,24 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret;
- ret = kvm_arm_setup_stage2(kvm, type);
- if (ret)
- return ret;
-
- ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
+ ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
- ret = kvm_share_hyp(kvm, kvm + 1);
+ ret = pkvm_init_host_vm(kvm);
if (ret)
- goto out_free_stage2_pgd;
+ goto err_unshare_kvm;
if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) {
ret = -ENOMEM;
- goto out_free_stage2_pgd;
+ goto err_unshare_kvm;
}
cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
+ ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
+ if (ret)
+ goto err_free_cpumask;
+
kvm_vgic_early_init(kvm);
/* The maximum number of VCPUs is limited by the host's GIC model */
@@ -164,9 +164,18 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
set_default_spectre(kvm);
kvm_arm_init_hypercalls(kvm);
- return ret;
-out_free_stage2_pgd:
- kvm_free_stage2_pgd(&kvm->arch.mmu);
+ /*
+ * Initialise the default PMUver before there is a chance to
+ * create an actual PMU.
+ */
+ kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
+
+ return 0;
+
+err_free_cpumask:
+ free_cpumask_var(kvm->arch.supported_cpus);
+err_unshare_kvm:
+ kvm_unshare_hyp(kvm, kvm + 1);
return ret;
}
@@ -187,6 +196,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_vgic_destroy(kvm);
+ if (is_protected_kvm_enabled())
+ pkvm_destroy_hyp_vm(kvm);
+
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
@@ -569,6 +581,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (ret)
return ret;
+ if (is_protected_kvm_enabled()) {
+ ret = pkvm_create_hyp_vm(kvm);
+ if (ret)
+ return ret;
+ }
+
if (!irqchip_in_kernel(kvm)) {
/*
* Tell the rest of the code that there are userspace irqchip
@@ -746,6 +764,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
return kvm_vcpu_suspend(vcpu);
+
+ if (kvm_dirty_ring_check_request(vcpu))
+ return 0;
}
return 1;
@@ -1518,7 +1539,7 @@ static int kvm_init_vector_slots(void)
return 0;
}
-static void cpu_prepare_hyp_mode(int cpu)
+static void cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
{
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr;
@@ -1534,23 +1555,9 @@ static void cpu_prepare_hyp_mode(int cpu)
params->mair_el2 = read_sysreg(mair_el1);
- /*
- * The ID map may be configured to use an extended virtual address
- * range. This is only the case if system RAM is out of range for the
- * currently configured page size and VA_BITS, in which case we will
- * also need the extended virtual range for the HYP ID map, or we won't
- * be able to enable the EL2 MMU.
- *
- * However, at EL2, there is only one TTBR register, and we can't switch
- * between translation tables *and* update TCR_EL2.T0SZ at the same
- * time. Bottom line: we need to use the extended range with *both* our
- * translation tables.
- *
- * So use the same T0SZ value we use for the ID map.
- */
tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
tcr &= ~TCR_T0SZ_MASK;
- tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
+ tcr |= TCR_T0SZ(hyp_va_bits);
params->tcr_el2 = tcr;
params->pgd_pa = kvm_mmu_get_httbr();
@@ -1844,13 +1851,13 @@ static void teardown_hyp_mode(void)
free_hyp_pgds();
for_each_possible_cpu(cpu) {
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
- free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+ free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
}
}
static int do_pkvm_init(u32 hyp_va_bits)
{
- void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+ void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
int ret;
preempt_disable();
@@ -1870,11 +1877,8 @@ static int do_pkvm_init(u32 hyp_va_bits)
return ret;
}
-static int kvm_hyp_init_protection(u32 hyp_va_bits)
+static void kvm_hyp_init_symbols(void)
{
- void *addr = phys_to_virt(hyp_mem_base);
- int ret;
-
kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
@@ -1883,6 +1887,14 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
+ kvm_nvhe_sym(__icache_flags) = __icache_flags;
+ kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
+}
+
+static int kvm_hyp_init_protection(u32 hyp_va_bits)
+{
+ void *addr = phys_to_virt(hyp_mem_base);
+ int ret;
ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
if (ret)
@@ -1950,7 +1962,7 @@ static int init_hyp_mode(void)
page_addr = page_address(page);
memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
- kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+ kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
}
/*
@@ -2043,7 +2055,7 @@ static int init_hyp_mode(void)
}
for_each_possible_cpu(cpu) {
- char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+ char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
char *percpu_end = percpu_begin + nvhe_percpu_size();
/* Map Hyp percpu pages */
@@ -2054,9 +2066,11 @@ static int init_hyp_mode(void)
}
/* Prepare the CPU initialization parameters */
- cpu_prepare_hyp_mode(cpu);
+ cpu_prepare_hyp_mode(cpu, hyp_va_bits);
}
+ kvm_hyp_init_symbols();
+
if (is_protected_kvm_enabled()) {
init_cpu_logical_map();
@@ -2064,9 +2078,7 @@ static int init_hyp_mode(void)
err = -ENODEV;
goto out_err;
}
- }
- if (is_protected_kvm_enabled()) {
err = kvm_hyp_init_protection(hyp_va_bits);
if (err) {
kvm_err("Failed to init hyp memory protection\n");
@@ -2130,6 +2142,11 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
return NULL;
}
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm);
+}
+
bool kvm_arch_has_irq_bypass(void)
{
return true;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 2ff13a3f8479..cf4c495a4321 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -1059,7 +1059,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
maddr = page_address(page);
if (!write) {
- if (test_bit(PG_mte_tagged, &page->flags))
+ if (page_mte_tagged(page))
num_tags = mte_copy_tags_to_user(tags, maddr,
MTE_GRANULES_PER_PAGE);
else
@@ -1068,15 +1068,19 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
clear_user(tags, MTE_GRANULES_PER_PAGE);
kvm_release_pfn_clean(pfn);
} else {
+ /*
+			 * Lock only to serialise with a concurrent
+			 * set_pte_at() in the VMM; the tags are
+			 * overridden either way, so the return value
+			 * is ignored.
+ */
+ try_page_mte_tagging(page);
num_tags = mte_copy_tags_from_user(maddr, tags,
MTE_GRANULES_PER_PAGE);
- /*
- * Set the flag after checking the write
- * completed fully
- */
- if (num_tags == MTE_GRANULES_PER_PAGE)
- set_bit(PG_mte_tagged, &page->flags);
+ /* uaccess failed, don't leave stale tags */
+ if (num_tags != MTE_GRANULES_PER_PAGE)
+ mte_clear_page_tags(maddr);
+ set_page_mte_tagged(page);
kvm_release_pfn_dirty(pfn);
}
diff --git a/arch/arm64/kvm/hyp/hyp-constants.c b/arch/arm64/kvm/hyp/hyp-constants.c
index b3742a6691e8..b257a3b4bfc5 100644
--- a/arch/arm64/kvm/hyp/hyp-constants.c
+++ b/arch/arm64/kvm/hyp/hyp-constants.c
@@ -2,9 +2,12 @@
#include <linux/kbuild.h>
#include <nvhe/memory.h>
+#include <nvhe/pkvm.h>
int main(void)
{
DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
+ DEFINE(PKVM_HYP_VM_SIZE, sizeof(struct pkvm_hyp_vm));
+ DEFINE(PKVM_HYP_VCPU_SIZE, sizeof(struct pkvm_hyp_vcpu));
return 0;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
index 1b8a2dcd712f..9ddcfe2c3e57 100644
--- a/arch/arm64/kvm/hyp/include/hyp/fault.h
+++ b/arch/arm64/kvm/hyp/include/hyp/fault.h
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
*/
if (!(esr & ESR_ELx_S1PTW) &&
(cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
- (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+ (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
} else {
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 3330d1b76bdd..07d37ff88a3f 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
bool valid;
- valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
!kvm_vcpu_abt_iss1tw(vcpu);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 80e99836eac7..b7bdbe63deed 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -8,8 +8,10 @@
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
+#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>
/*
@@ -43,30 +45,45 @@ static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
return prot & PKVM_PAGE_STATE_PROT_MASK;
}
-struct host_kvm {
+struct host_mmu {
struct kvm_arch arch;
struct kvm_pgtable pgt;
struct kvm_pgtable_mm_ops mm_ops;
hyp_spinlock_t lock;
};
-extern struct host_kvm host_kvm;
+extern struct host_mmu host_mmu;
-extern const u8 pkvm_hyp_id;
+/* This corresponds to page-table locking order */
+enum pkvm_component_id {
+ PKVM_ID_HOST,
+ PKVM_ID_HYP,
+};
+
+extern unsigned long hyp_nr_cpus;
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
+int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
+int hyp_pin_shared_mem(void *from, void *to);
+void hyp_unpin_shared_mem(void *from, void *to);
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+ struct kvm_hyp_memcache *host_mc);
+
static __always_inline void __load_host_stage2(void)
{
if (static_branch_likely(&kvm_protected_mode_initialized))
- __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+ __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
else
write_sysreg(0, vttbr_el2);
}
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
index 592b7edb3edb..ab205c4d6774 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/memory.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -38,6 +38,10 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
#define hyp_page_to_pool(page) (((struct hyp_page *)page)->pool)
+/*
+ * Refcounting for 'struct hyp_page'.
+ * hyp_pool::lock must be held if atomic access to the refcount is required.
+ */
static inline int hyp_page_count(void *addr)
{
struct hyp_page *p = hyp_virt_to_page(addr);
@@ -45,4 +49,27 @@ static inline int hyp_page_count(void *addr)
return p->refcount;
}
+static inline void hyp_page_ref_inc(struct hyp_page *p)
+{
+ BUG_ON(p->refcount == USHRT_MAX);
+ p->refcount++;
+}
+
+static inline void hyp_page_ref_dec(struct hyp_page *p)
+{
+ BUG_ON(!p->refcount);
+ p->refcount--;
+}
+
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+ hyp_page_ref_dec(p);
+ return (p->refcount == 0);
+}
+
+static inline void hyp_set_page_refcounted(struct hyp_page *p)
+{
+ BUG_ON(p->refcount);
+ p->refcount = 1;
+}
#endif /* __KVM_HYP_MEMORY_H */
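As the comment above hyp_page_count() notes, these refcount helpers are deliberately non-atomic and rely on hyp_pool::lock. A sketch of the expected pairing in the nvhe page allocator (__hyp_attach_page() stands in for the allocator-internal free-list insertion and is assumed here):

/* Sketch: how the buddy allocator is expected to wrap the helpers */
void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(hyp_virt_to_page(addr));
	hyp_spin_unlock(&pool->lock);
}

void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);	/* back onto the free lists */
	hyp_spin_unlock(&pool->lock);
}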
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index 42d8eb9bfe72..d5ec972b5c1e 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -13,9 +13,13 @@
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
+int hyp_create_pcpu_fixmap(void);
+void *hyp_fixmap_map(phys_addr_t phys);
+void hyp_fixmap_unmap(void);
+
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
+int hyp_back_vmemmap(phys_addr_t back);
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot);
@@ -24,16 +28,4 @@ int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
unsigned long *haddr);
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
-static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
- unsigned long *start, unsigned long *end)
-{
- unsigned long nr_pages = size >> PAGE_SHIFT;
- struct hyp_page *p = hyp_phys_to_page(phys);
-
- *start = (unsigned long)p;
- *end = *start + nr_pages * sizeof(struct hyp_page);
- *start = ALIGN_DOWN(*start, PAGE_SIZE);
- *end = ALIGN(*end, PAGE_SIZE);
-}
-
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/pkvm.h b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
new file mode 100644
index 000000000000..82b3d62538a6
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Fuad Tabba <tabba@google.com>
+ */
+
+#ifndef __ARM64_KVM_NVHE_PKVM_H__
+#define __ARM64_KVM_NVHE_PKVM_H__
+
+#include <asm/kvm_pkvm.h>
+
+#include <nvhe/gfp.h>
+#include <nvhe/spinlock.h>
+
+/*
+ * Holds the relevant data for maintaining the vcpu state completely at hyp.
+ */
+struct pkvm_hyp_vcpu {
+ struct kvm_vcpu vcpu;
+
+ /* Backpointer to the host's (untrusted) vCPU instance. */
+ struct kvm_vcpu *host_vcpu;
+};
+
+/*
+ * Holds the relevant data for running a protected vm.
+ */
+struct pkvm_hyp_vm {
+ struct kvm kvm;
+
+ /* Backpointer to the host's (untrusted) KVM instance. */
+ struct kvm *host_kvm;
+
+ /* The guest's stage-2 page-table managed by the hypervisor. */
+ struct kvm_pgtable pgt;
+ struct kvm_pgtable_mm_ops mm_ops;
+ struct hyp_pool pool;
+ hyp_spinlock_t lock;
+
+ /*
+ * The number of vcpus initialized and ready to run.
+ * Modifying this is protected by 'vm_table_lock'.
+ */
+ unsigned int nr_vcpus;
+
+ /* Array of the hyp vCPU structures for this VM. */
+ struct pkvm_hyp_vcpu *vcpus[];
+};
+
+static inline struct pkvm_hyp_vm *
+pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
+}
+
+void pkvm_hyp_vm_table_init(void *tbl);
+
+int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
+ unsigned long pgd_hva);
+int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+ unsigned long vcpu_hva);
+int __pkvm_teardown_vm(pkvm_handle_t handle);
+
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+ unsigned int vcpu_idx);
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+
+#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
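On the host side, pkvm_create_hyp_vm() (declared earlier in kvm_pkvm.h) reaches these EL2 entry points via hypercalls, donating pages sized by PKVM_HYP_VM_SIZE along with the stage-2 PGD. A heavily simplified sketch of that handshake (error unwinding and vCPU setup elided; the helper name is illustrative):

/* Sketch of the host -> EL2 VM creation handshake */
static int create_hyp_vm_sketch(struct kvm *kvm)
{
	size_t pgd_sz = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr);
	void *pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
	void *hyp_vm = alloc_pages_exact(PAGE_ALIGN(PKVM_HYP_VM_SIZE),
					 GFP_KERNEL_ACCOUNT);
	int ret;

	if (!pgd || !hyp_vm)
		return -ENOMEM;		/* the real code frees what it got */

	/* __pkvm_init_vm returns a pkvm_handle_t on success */
	ret = kvm_call_hyp_nvhe(__pkvm_init_vm, kvm, (unsigned long)hyp_vm,
				(unsigned long)pgd);
	if (ret < 0)
		return ret;

	kvm->arch.pkvm.handle = ret;
	return 0;
}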
diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
index 4652fd04bdbe..7c7ea8c55405 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
@@ -28,9 +28,17 @@ typedef union hyp_spinlock {
};
} hyp_spinlock_t;
+#define __HYP_SPIN_LOCK_INITIALIZER \
+ { .__val = 0 }
+
+#define __HYP_SPIN_LOCK_UNLOCKED \
+ ((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER)
+
+#define DEFINE_HYP_SPINLOCK(x) hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED
+
#define hyp_spin_lock_init(l) \
do { \
- *(l) = (hyp_spinlock_t){ .__val = 0 }; \
+ *(l) = __HYP_SPIN_LOCK_UNLOCKED; \
} while (0)
static inline void hyp_spin_lock(hyp_spinlock_t *lock)
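DEFINE_HYP_SPINLOCK() lets statically allocated locks skip the runtime hyp_spin_lock_init() call. For instance, the vm_table_lock referenced in the pkvm.h comment above can be defined as:

/* Static lock definition, no init call needed at boot */
static DEFINE_HYP_SPINLOCK(vm_table_lock);

static void with_vm_table_locked(void)
{
	hyp_spin_lock(&vm_table_lock);
	/* ... walk or modify the VM handle table ... */
	hyp_spin_unlock(&vm_table_lock);
}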
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
index 0c367eb5f4e2..85936c17ae40 100644
--- a/arch/arm64/kvm/hyp/nvhe/cache.S
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -12,3 +12,14 @@ SYM_FUNC_START(__pi_dcache_clean_inval_poc)
ret
SYM_FUNC_END(__pi_dcache_clean_inval_poc)
SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc)
+
+SYM_FUNC_START(__pi_icache_inval_pou)
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ ret
+alternative_else_nop_endif
+
+ invalidate_icache_by_line x0, x1, x2, x3
+ ret
+SYM_FUNC_END(__pi_icache_inval_pou)
+SYM_FUNC_ALIAS(icache_inval_pou, __pi_icache_inval_pou)
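icache_inval_pou() gains an EL2 copy because guest-page invalidation (see the mem_protect.c hunks below) now runs at hyp. The C-side caller in asm/kvm_mmu.h looks roughly like this; a sketch only, the cache-topology special cases may differ:

/* Sketch of the expected caller, per asm/kvm_mmu.h */
static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache: flush the lot */
		icache_inval_all_pou();
	} else {
		/* PIPT: invalidate just the written range */
		icache_inval_pou((unsigned long)va,
				 (unsigned long)va + size);
	}
}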
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 3cea4b6ac23e..728e01d4536b 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -15,17 +15,93 @@
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
+
+ hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
+ hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+
+ hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
+
+ hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
+ hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
+ hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
+
+ hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
+ hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
+
+ hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
+ hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+
+ hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
+
+ hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
+}
+
+static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
+ struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
+ unsigned int i;
+
+ host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
+
+ host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
+ host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
+
+ host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
+
+ host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
+ host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
+
+ host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+ for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
+ host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
+}
+
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
- DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+ int ret;
- cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+ host_vcpu = kern_hyp_va(host_vcpu);
+
+ if (unlikely(is_protected_kvm_enabled())) {
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ struct kvm *host_kvm;
+
+ host_kvm = kern_hyp_va(host_vcpu->kvm);
+ hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
+ host_vcpu->vcpu_idx);
+ if (!hyp_vcpu) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ flush_hyp_vcpu(hyp_vcpu);
+
+ ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
+
+ sync_hyp_vcpu(hyp_vcpu);
+ pkvm_put_hyp_vcpu(hyp_vcpu);
+ } else {
+ /* The host is fully trusted, run its vCPU directly. */
+ ret = __kvm_vcpu_run(host_vcpu);
+ }
+
+out:
+ cpu_reg(host_ctxt, 1) = ret;
}
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
@@ -191,6 +267,33 @@ static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}
+static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
+ DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);
+
+ host_kvm = kern_hyp_va(host_kvm);
+ cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
+}
+
+static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
+ DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);
+
+ host_vcpu = kern_hyp_va(host_vcpu);
+ cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
+}
+
+static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -220,6 +323,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
+ HANDLE_FUNC(__pkvm_init_vm),
+ HANDLE_FUNC(__pkvm_init_vcpu),
+ HANDLE_FUNC(__pkvm_teardown_vm),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
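The teardown handler has a matching host-side caller; a simplified sketch of the core of pkvm_destroy_hyp_vm() (memcache draining elided, shape assumed):

/* Sketch: host-side mirror of __pkvm_teardown_vm */
static void destroy_hyp_vm_sketch(struct kvm *kvm)
{
	if (kvm->arch.pkvm.handle) {
		WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
					  kvm->arch.pkvm.handle));
		kvm->arch.pkvm.handle = 0;
	}
	/* donated pages are returned via the teardown memcache */
}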
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 9f54833af400..04d194583f1e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu)
return hyp_cpu_logical_map[cpu];
}
+unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS];
+
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
unsigned long *cpu_base_array;
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 07f9dc9848ef..552653fa18be 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -21,21 +21,33 @@
#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
-extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;
static struct hyp_pool host_s2_pool;
-const u8 pkvm_hyp_id = 1;
+static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
+#define current_vm (*this_cpu_ptr(&__current_vm))
+
+static void guest_lock_component(struct pkvm_hyp_vm *vm)
+{
+ hyp_spin_lock(&vm->lock);
+ current_vm = vm;
+}
+
+static void guest_unlock_component(struct pkvm_hyp_vm *vm)
+{
+ current_vm = NULL;
+ hyp_spin_unlock(&vm->lock);
+}
static void host_lock_component(void)
{
- hyp_spin_lock(&host_kvm.lock);
+ hyp_spin_lock(&host_mmu.lock);
}
static void host_unlock_component(void)
{
- hyp_spin_unlock(&host_kvm.lock);
+ hyp_spin_unlock(&host_mmu.lock);
}
static void hyp_lock_component(void)
@@ -79,6 +91,11 @@ static void host_s2_put_page(void *addr)
hyp_put_page(&host_s2_pool, addr);
}
+static void host_s2_free_removed_table(void *addr, u32 level)
+{
+ kvm_pgtable_stage2_free_removed(&host_mmu.mm_ops, addr, level);
+}
+
static int prepare_s2_pool(void *pgt_pool_base)
{
unsigned long nr_pages, pfn;
@@ -90,9 +107,10 @@ static int prepare_s2_pool(void *pgt_pool_base)
if (ret)
return ret;
- host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+ host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_pages_exact = host_s2_zalloc_pages_exact,
.zalloc_page = host_s2_zalloc_page,
+ .free_removed_table = host_s2_free_removed_table,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
@@ -111,7 +129,7 @@ static void prepare_host_vtcr(void)
parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
- host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+ host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
id_aa64mmfr1_el1_sys_val, phys_shift);
}
@@ -119,45 +137,170 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
- struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
int ret;
prepare_host_vtcr();
- hyp_spin_lock_init(&host_kvm.lock);
- mmu->arch = &host_kvm.arch;
+ hyp_spin_lock_init(&host_mmu.lock);
+ mmu->arch = &host_mmu.arch;
ret = prepare_s2_pool(pgt_pool_base);
if (ret)
return ret;
- ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
- &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+ ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+ &host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb);
if (ret)
return ret;
- mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
- mmu->pgt = &host_kvm.pgt;
+ mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+ mmu->pgt = &host_mmu.pgt;
atomic64_set(&mmu->vmid.id, 0);
return 0;
}
+static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
+ enum kvm_pgtable_prot prot)
+{
+ return true;
+}
+
+static void *guest_s2_zalloc_pages_exact(size_t size)
+{
+ void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
+
+ WARN_ON(size != (PAGE_SIZE << get_order(size)));
+ hyp_split_page(hyp_virt_to_page(addr));
+
+ return addr;
+}
+
+static void guest_s2_free_pages_exact(void *addr, unsigned long size)
+{
+ u8 order = get_order(size);
+ unsigned int i;
+
+ for (i = 0; i < (1 << order); i++)
+ hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
+}
+
+static void *guest_s2_zalloc_page(void *mc)
+{
+ struct hyp_page *p;
+ void *addr;
+
+ addr = hyp_alloc_pages(&current_vm->pool, 0);
+ if (addr)
+ return addr;
+
+ addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
+ if (!addr)
+ return addr;
+
+ memset(addr, 0, PAGE_SIZE);
+ p = hyp_virt_to_page(addr);
+ memset(p, 0, sizeof(*p));
+ p->refcount = 1;
+
+ return addr;
+}
+
+static void guest_s2_get_page(void *addr)
+{
+ hyp_get_page(&current_vm->pool, addr);
+}
+
+static void guest_s2_put_page(void *addr)
+{
+ hyp_put_page(&current_vm->pool, addr);
+}
+
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+ __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+ hyp_fixmap_unmap();
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+ __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+ hyp_fixmap_unmap();
+}
+
+int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
+{
+ struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
+ unsigned long nr_pages;
+ int ret;
+
+ nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT;
+ ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
+ if (ret)
+ return ret;
+
+ hyp_spin_lock_init(&vm->lock);
+ vm->mm_ops = (struct kvm_pgtable_mm_ops) {
+ .zalloc_pages_exact = guest_s2_zalloc_pages_exact,
+ .free_pages_exact = guest_s2_free_pages_exact,
+ .zalloc_page = guest_s2_zalloc_page,
+ .phys_to_virt = hyp_phys_to_virt,
+ .virt_to_phys = hyp_virt_to_phys,
+ .page_count = hyp_page_count,
+ .get_page = guest_s2_get_page,
+ .put_page = guest_s2_put_page,
+ .dcache_clean_inval_poc = clean_dcache_guest_page,
+ .icache_inval_pou = invalidate_icache_guest_page,
+ };
+
+ guest_lock_component(vm);
+ ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
+ guest_stage2_force_pte_cb);
+ guest_unlock_component(vm);
+ if (ret)
+ return ret;
+
+ vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);
+
+ return 0;
+}
+
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
+{
+ void *addr;
+
+ /* Dump all pgtable pages in the hyp_pool */
+ guest_lock_component(vm);
+ kvm_pgtable_stage2_destroy(&vm->pgt);
+ vm->kvm.arch.mmu.pgd_phys = 0ULL;
+ guest_unlock_component(vm);
+
+ /* Drain the hyp_pool into the memcache */
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ while (addr) {
+ memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ }
+}
+
int __pkvm_prot_finalize(void)
{
- struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
if (params->hcr_el2 & HCR_VM)
return -EPERM;
params->vttbr = kvm_get_vttbr(mmu);
- params->vtcr = host_kvm.arch.vtcr;
+ params->vtcr = host_mmu.arch.vtcr;
params->hcr_el2 |= HCR_VM;
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg(params->hcr_el2, hcr_el2);
- __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+ __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
/*
* Make sure to have an ISB before the TLB maintenance below but only
@@ -175,7 +318,7 @@ int __pkvm_prot_finalize(void)
static int host_stage2_unmap_dev_all(void)
{
- struct kvm_pgtable *pgt = &host_kvm.pgt;
+ struct kvm_pgtable *pgt = &host_mmu.pgt;
struct memblock_region *reg;
u64 addr = 0;
int i, ret;
@@ -195,7 +338,7 @@ struct kvm_mem_range {
u64 end;
};
-static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
+static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
int cur, left = 0, right = hyp_memblock_nr;
struct memblock_region *reg;
@@ -218,18 +361,28 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
} else {
range->start = reg->base;
range->end = end;
- return true;
+ return reg;
}
}
- return false;
+ return NULL;
}
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
- return find_mem_range(phys, &range);
+ return !!find_mem_range(phys, &range);
+}
+
+static bool addr_is_allowed_memory(phys_addr_t phys)
+{
+ struct memblock_region *reg;
+ struct kvm_mem_range range;
+
+ reg = find_mem_range(phys, &range);
+
+ return reg && !(reg->flags & MEMBLOCK_NOMAP);
}
static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
@@ -250,8 +403,8 @@ static bool range_is_memory(u64 start, u64 end)
static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
{
- return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, &host_s2_pool);
+ return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
+ prot, &host_s2_pool, 0);
}
/*
@@ -263,7 +416,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
#define host_stage2_try(fn, ...) \
({ \
int __ret; \
- hyp_assert_lock_held(&host_kvm.lock); \
+ hyp_assert_lock_held(&host_mmu.lock); \
__ret = fn(__VA_ARGS__); \
if (__ret == -ENOMEM) { \
__ret = host_stage2_unmap_dev_all(); \
@@ -286,8 +439,8 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
u32 level;
int ret;
- hyp_assert_lock_held(&host_kvm.lock);
- ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+ hyp_assert_lock_held(&host_mmu.lock);
+ ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
if (ret)
return ret;
@@ -319,7 +472,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
- return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+ return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
addr, size, &host_s2_pool, owner_id);
}
@@ -348,7 +501,7 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr
static int host_stage2_idmap(u64 addr)
{
struct kvm_mem_range range;
- bool is_memory = find_mem_range(addr, &range);
+ bool is_memory = !!find_mem_range(addr, &range);
enum kvm_pgtable_prot prot;
int ret;
@@ -380,12 +533,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
BUG_ON(ret && ret != -EAGAIN);
}
-/* This corresponds to locking order */
-enum pkvm_component_id {
- PKVM_ID_HOST,
- PKVM_ID_HYP,
-};
-
struct pkvm_mem_transition {
u64 nr_pages;
@@ -399,6 +546,9 @@ struct pkvm_mem_transition {
/* Address in the completer's address space */
u64 completer_addr;
} host;
+ struct {
+ u64 completer_addr;
+ } hyp;
};
} initiator;
@@ -412,23 +562,24 @@ struct pkvm_mem_share {
const enum kvm_pgtable_prot completer_prot;
};
+struct pkvm_mem_donation {
+ const struct pkvm_mem_transition tx;
+};
+
struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};
-static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct check_walk_data *d = arg;
- kvm_pte_t pte = *ptep;
+ struct check_walk_data *d = ctx->arg;
- if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
+ if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
return -EINVAL;
- return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
+ return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
}
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -459,8 +610,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
.get_page_state = host_get_page_state,
};
- hyp_assert_lock_held(&host_kvm.lock);
- return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+ hyp_assert_lock_held(&host_mmu.lock);
+ return check_page_state_range(&host_mmu.pgt, addr, size, &d);
}
static int __host_set_page_state_range(u64 addr, u64 size,
@@ -511,6 +662,46 @@ static int host_initiate_unshare(u64 *completer_addr,
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
+static int host_initiate_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u8 owner_id = tx->completer.id;
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
+}
+
+static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
+{
+ return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
+ tx->initiator.id != PKVM_ID_HYP);
+}
+
+static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
+ enum pkvm_page_state state)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (__host_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __host_check_page_state_range(addr, size, state);
+}
+
+static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ return __host_ack_transition(addr, tx, PKVM_NOPAGE);
+}
+
+static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u8 host_id = tx->completer.id;
+
+ return host_stage2_set_owner_locked(addr, size, host_id);
+}
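
/*
 * The skip predicate in __host_ack_skip_pgtable_check() above reads more
 * easily after De Morgan: the host-side page-table check is elided only
 * when debug checking is compiled out AND the initiator is the (trusted)
 * hypervisor. A standalone restatement of just that boolean:
 */
#include <assert.h>
#include <stdbool.h>

enum id { ID_HOST, ID_HYP };

static bool skip_check(bool debug, enum id initiator)
{
	/* identical to: !(debug || initiator != ID_HYP) */
	return !debug && initiator == ID_HYP;
}

int main(void)
{
	assert(skip_check(false, ID_HYP));	/* trusted, no debug: skip */
	assert(!skip_check(true, ID_HYP));	/* debug build: always check */
	assert(!skip_check(false, ID_HOST));	/* untrusted host: check */
	return 0;
}
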
+
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
if (!kvm_pte_valid(pte))
@@ -531,6 +722,27 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
+static int hyp_request_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.hyp.completer_addr;
+ return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
+}
+
+static int hyp_initiate_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ int ret;
+
+ *completer_addr = tx->initiator.hyp.completer_addr;
+ ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
+ return (ret != size) ? -EFAULT : 0;
+}
+
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
@@ -555,6 +767,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
+ if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+ return -EBUSY;
+
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
@@ -562,6 +777,16 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
PKVM_PAGE_SHARED_BORROWED);
}
+static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (__hyp_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+}
+
static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
@@ -580,6 +805,15 @@ static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
return (ret != size) ? -EFAULT : 0;
}
+static int hyp_complete_donation(u64 addr,
+ const struct pkvm_mem_transition *tx)
+{
+ void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+
+ return pkvm_create_mappings_locked(start, end, prot);
+}
+
static int check_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
@@ -732,6 +966,94 @@ static int do_unshare(struct pkvm_mem_share *share)
return WARN_ON(__do_unshare(share));
}
+static int check_donation(struct pkvm_mem_donation *donation)
+{
+ const struct pkvm_mem_transition *tx = &donation->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_request_owned_transition(&completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_request_donation(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HOST:
+ ret = host_ack_donation(completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_ack_donation(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int __do_donate(struct pkvm_mem_donation *donation)
+{
+ const struct pkvm_mem_transition *tx = &donation->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_initiate_donation(&completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_initiate_donation(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HOST:
+ ret = host_complete_donation(completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_complete_donation(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * do_donate():
+ *
+ * The page owner transfers ownership to another component, losing access
+ * as a consequence.
+ *
+ * Initiator: OWNED => NOPAGE
+ * Completer: NOPAGE => OWNED
+ */
+static int do_donate(struct pkvm_mem_donation *donation)
+{
+ int ret;
+
+ ret = check_donation(donation);
+ if (ret)
+ return ret;
+
+ return WARN_ON(__do_donate(donation));
+}
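
/*
 * A compact hosted-C model (invented names, single page) of the two-phase
 * transition do_donate() drives above: first check that the initiator owns
 * the page and the completer has none, then commit both state changes.
 */
#include <stdio.h>

enum state { NOPAGE, OWNED };

struct component { enum state pg; };

static int donate(struct component *from, struct component *to)
{
	/* check phase: refuse unless OWNED -> NOPAGE is possible */
	if (from->pg != OWNED || to->pg != NOPAGE)
		return -1;
	/* commit phase: initiator loses the page, completer gains it */
	from->pg = NOPAGE;
	to->pg = OWNED;
	return 0;
}

int main(void)
{
	struct component host = { OWNED }, hyp = { NOPAGE };

	printf("first:  %d\n", donate(&host, &hyp));	/*  0: succeeds */
	printf("second: %d\n", donate(&host, &hyp));	/* -1: host no longer owns */
	return 0;
}
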
+
int __pkvm_host_share_hyp(u64 pfn)
{
int ret;
@@ -797,3 +1119,112 @@ int __pkvm_host_unshare_hyp(u64 pfn)
return ret;
}
+
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = host_addr,
+ .host = {
+ .completer_addr = hyp_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HYP,
+ },
+ },
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_donate(&donation);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HYP,
+ .addr = hyp_addr,
+ .hyp = {
+ .completer_addr = host_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HOST,
+ },
+ },
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_donate(&donation);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+int hyp_pin_shared_mem(void *from, void *to)
+{
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
+ u64 size = end - start;
+ int ret;
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = __host_check_page_state_range(__hyp_pa(start), size,
+ PKVM_PAGE_SHARED_OWNED);
+ if (ret)
+ goto unlock;
+
+ ret = __hyp_check_page_state_range(start, size,
+ PKVM_PAGE_SHARED_BORROWED);
+ if (ret)
+ goto unlock;
+
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
+
+ host_lock_component();
+ hyp_lock_component();
+
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+ hyp_unlock_component();
+ host_unlock_component();
+}
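
/*
 * hyp_pin_shared_mem() above rounds an arbitrary [from, to) object span
 * out to whole pages before bumping refcounts, and hyp_unpin_shared_mem()
 * repeats the same rounding to drop them. A standalone sketch of just that
 * rounding (PAGE_SIZE and the macros are local stand-ins):
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t from = 0x1ff8, to = 0x2010;	/* object straddles a page boundary */
	uint64_t start = ALIGN_DOWN(from, PAGE_SIZE);
	uint64_t end = PAGE_ALIGN(to);

	/* two pages pinned: [0x1000, 0x2000) and [0x2000, 0x3000) */
	for (uint64_t cur = start; cur < end; cur += PAGE_SIZE)
		printf("pin page at 0x%llx\n", (unsigned long long)cur);
	return 0;
}
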
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 96193cb31a39..318298eb3d6b 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -14,6 +14,7 @@
#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>
@@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr;
static u64 __io_map_base;
+struct hyp_fixmap_slot {
+ u64 addr;
+ kvm_pte_t *ptep;
+};
+static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);
+
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
@@ -129,13 +136,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return ret;
}
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+int hyp_back_vmemmap(phys_addr_t back)
{
- unsigned long start, end;
+ unsigned long i, start, size, end = 0;
+ int ret;
- hyp_vmemmap_range(phys, size, &start, &end);
+ for (i = 0; i < hyp_memblock_nr; i++) {
+ start = hyp_memory[i].base;
+ start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
+ /*
+ * The beginning of the hyp_vmemmap region for the current
+ * memblock may already be backed by the page backing the end
+ * of the previous region, so avoid mapping it twice.
+ */
+ start = max(start, end);
+
+ end = hyp_memory[i].base + hyp_memory[i].size;
+ end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
+ if (start >= end)
+ continue;
+
+ size = end - start;
+ ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ memset(hyp_phys_to_virt(back), 0, size);
+ back += size;
+ }
- return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+ return 0;
}
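
/*
 * The rewritten hyp_back_vmemmap() above only backs the struct-page arrays
 * that correspond to real memblocks, clamping each range's start to the end
 * of the previous one so a shared boundary page is mapped exactly once. A
 * hosted-C model of just that interval arithmetic (all names invented, a
 * 64-byte per-page metadata record assumed):
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VMEMMAP(pa)	((pa) / PAGE_SIZE * 64)	/* address of the page's metadata */

struct block { uint64_t base, size; };

int main(void)
{
	const struct block mem[] = {
		{ 0x40000000, 0x00010000 },
		{ 0x40010000, 0x00200000 },	/* adjacent: shares a vmemmap page */
	};
	uint64_t end = 0;

	for (int i = 0; i < 2; i++) {
		uint64_t start = ALIGN_DOWN(VMEMMAP(mem[i].base), PAGE_SIZE);

		if (start < end)	/* don't map the boundary page twice */
			start = end;
		end = PAGE_ALIGN(VMEMMAP(mem[i].base + mem[i].size));
		if (start >= end)
			continue;
		printf("back vmemmap [0x%llx, 0x%llx)\n",
		       (unsigned long long)start, (unsigned long long)end);
	}
	return 0;
}
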
static void *__hyp_bp_vect_base;
@@ -189,6 +219,102 @@ int hyp_map_vectors(void)
return 0;
}
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+ struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
+ kvm_pte_t pte, *ptep = slot->ptep;
+
+ pte = *ptep;
+ pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
+ pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
+ WRITE_ONCE(*ptep, pte);
+ dsb(ishst);
+
+ return (void *)slot->addr;
+}
+
+static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
+{
+ kvm_pte_t *ptep = slot->ptep;
+ u64 addr = slot->addr;
+
+ WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
+
+ /*
+ * Irritatingly, the architecture requires that we use inner-shareable
+ * broadcast TLB invalidation here in case another CPU speculates
+ * through our fixmap and decides to create an "amalgamation of the
+ * values held in the TLB" due to the apparent lack of a
+ * break-before-make sequence.
+ *
+ * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
+ */
+ dsb(ishst);
+ __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
+ dsb(ish);
+ isb();
+}
+
+void hyp_fixmap_unmap(void)
+{
+ fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
+}
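
/*
 * A userspace toy of the per-CPU fixmap contract above: one reserved slot
 * whose single PTE is rewritten to point at a caller-chosen physical page,
 * then broken again straight after use (all names and constants local; the
 * real code additionally issues the DSB/TLBI sequence shown above).
 */
#include <assert.h>
#include <stdint.h>

#define VALID		1ULL
#define ADDR_MASK	(~0xfffULL)

static uint64_t slot_pte;			/* the one PTE backing the slot */

static uint64_t fixmap_map(uint64_t phys)
{
	slot_pte = (phys & ADDR_MASK) | VALID;
	return 0xffff000000400000ULL;		/* fixed VA of the slot */
}

static void fixmap_unmap(void)
{
	slot_pte &= ~VALID;			/* break; real code also TLBIs */
}

int main(void)
{
	uint64_t va = fixmap_map(0x80214000ULL);

	(void)va;				/* short critical section here */
	assert(slot_pte == (0x80214000ULL | VALID));
	fixmap_unmap();
	assert(!(slot_pte & VALID));
	return 0;
}
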
+
+static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
+
+ if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
+ return -EINVAL;
+
+ slot->addr = ctx->addr;
+ slot->ptep = ctx->ptep;
+
+ /*
+ * Clear the PTE, but keep the page-table page refcount elevated to
+ * prevent it from ever being freed. This lets us manipulate the PTEs
+ * by hand safely without ever needing to allocate memory.
+ */
+ fixmap_clear_slot(slot);
+
+ return 0;
+}
+
+static int create_fixmap_slot(u64 addr, u64 cpu)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = __create_fixmap_slot_cb,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = (void *)cpu,
+ };
+
+ return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
+}
+
+int hyp_create_pcpu_fixmap(void)
+{
+ unsigned long addr, i;
+ int ret;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
+ if (ret)
+ return ret;
+
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
+ __hyp_pa(__hyp_bss_start), PAGE_HYP);
+ if (ret)
+ return ret;
+
+ ret = create_fixmap_slot(addr, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int hyp_create_idmap(u32 hyp_va_bits)
{
unsigned long start, end;
@@ -213,3 +339,36 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
+
+static void *admit_host_page(void *arg)
+{
+ struct kvm_hyp_memcache *host_mc = arg;
+
+ if (!host_mc->nr_pages)
+ return NULL;
+
+ /*
+ * The host still owns the pages in its memcache, so we need to go
+ * through a full host-to-hyp donation cycle to change it. Fortunately,
+ * __pkvm_host_donate_hyp() takes care of races for us, so if it
+ * succeeds we're good to go.
+ */
+ if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+ return NULL;
+
+ return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
+}
+
+/* Refill our local memcache by popping pages from the one provided by the host. */
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+ struct kvm_hyp_memcache *host_mc)
+{
+ struct kvm_hyp_memcache tmp = *host_mc;
+ int ret;
+
+ ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
+ hyp_virt_to_phys, &tmp);
+ *host_mc = tmp;
+
+ return ret;
+}
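
/*
 * kvm_hyp_memcache is a singly-linked stack of free pages threaded through
 * the pages themselves. A hosted-C toy of the push/pop/top-up pattern used
 * by refill_memcache() above (invented names, malloc'd "pages"):
 */
#include <stdio.h>
#include <stdlib.h>

struct cache { void *head; unsigned long nr; };

static void push(struct cache *mc, void *page)
{
	*(void **)page = mc->head;	/* the link lives in the page itself */
	mc->head = page;
	mc->nr++;
}

static void *pop(struct cache *mc)
{
	void *page = mc->head;

	if (!page)
		return NULL;
	mc->head = *(void **)page;
	mc->nr--;
	return page;
}

static int topup(struct cache *mc, unsigned long min, struct cache *donor)
{
	while (mc->nr < min) {
		void *page = pop(donor);	/* "admit" a donor page */

		if (!page)
			return -1;		/* donor exhausted */
		push(mc, page);
	}
	return 0;
}

int main(void)
{
	struct cache mc = { 0 }, donor = { 0 };

	for (int i = 0; i < 4; i++)
		push(&donor, malloc(64));
	printf("topup(3): %d, mc.nr=%lu donor.nr=%lu\n",
	       topup(&mc, 3, &donor), mc.nr, donor.nr);
	while (mc.head)
		free(pop(&mc));
	while (donor.head)
		free(pop(&donor));
	return 0;
}
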
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index d40f0b30b534..803ba3222e75 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -93,11 +93,16 @@ static inline struct hyp_page *node_to_page(struct list_head *node)
static void __hyp_attach_page(struct hyp_pool *pool,
struct hyp_page *p)
{
+ phys_addr_t phys = hyp_page_to_phys(p);
unsigned short order = p->order;
struct hyp_page *buddy;
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
+ /* Skip coalescing for 'external' pages being freed into the pool. */
+ if (phys < pool->range_start || phys >= pool->range_end)
+ goto insert;
+
/*
* Only the first struct hyp_page of a high-order page (otherwise known
* as the 'head') should have p->order set. The non-head pages should
@@ -116,6 +121,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
p = min(p, buddy);
}
+insert:
/* Mark the new head, and insert it */
p->order = order;
page_add_to_list(p, &pool->free_area[order]);
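
/*
 * A sketch of the guard just added above: a page whose physical address
 * falls outside the pool's original range is inserted straight onto the
 * free list and never considered for buddy merging (standalone names):
 */
#include <stdbool.h>
#include <stdio.h>

struct pool { unsigned long range_start, range_end; };

static bool may_coalesce(const struct pool *pool, unsigned long phys)
{
	return phys >= pool->range_start && phys < pool->range_end;
}

int main(void)
{
	struct pool p = { 0x80000000UL, 0x80400000UL };

	printf("%d %d\n",
	       may_coalesce(&p, 0x80001000UL),	/* 1: original pool page */
	       may_coalesce(&p, 0x90000000UL));	/* 0: 'external' page */
	return 0;
}
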
@@ -144,25 +150,6 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
return p;
}
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
- BUG_ON(p->refcount == USHRT_MAX);
- p->refcount++;
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
- BUG_ON(!p->refcount);
- p->refcount--;
- return (p->refcount == 0);
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
- BUG_ON(p->refcount);
- p->refcount = 1;
-}
-
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
if (hyp_page_ref_dec_and_test(p))
@@ -249,10 +236,8 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
/* Init the vmemmap portion */
p = hyp_phys_to_page(phys);
- for (i = 0; i < nr_pages; i++) {
- p[i].order = 0;
+ for (i = 0; i < nr_pages; i++)
hyp_set_page_refcounted(&p[i]);
- }
/* Attach the unused pages to the buddy tree */
for (i = reserved_pages; i < nr_pages; i++)
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 85d3b7ae720f..a06ece14a6d8 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -7,8 +7,17 @@
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
+/* Used by icache_is_vpipt(). */
+unsigned long __icache_flags;
+
+/* Used by kvm_get_vttbr(). */
+unsigned int kvm_arm_vmid_bits;
+
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@@ -183,3 +192,430 @@ void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
pvm_init_traps_aa64mmfr0(vcpu);
pvm_init_traps_aa64mmfr1(vcpu);
}
+
+/*
+ * Start VM table handles at a fixed offset instead of at 0, mainly
+ * as an aid to sanity checking and debugging.
+ */
+#define HANDLE_OFFSET 0x1000
+
+static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
+{
+ return handle - HANDLE_OFFSET;
+}
+
+static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
+{
+ return idx + HANDLE_OFFSET;
+}
+
+/*
+ * Spinlock for protecting state related to the VM table. Protects writes
+ * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
+ * 'last_hyp_vcpu_lookup'.
+ */
+static DEFINE_HYP_SPINLOCK(vm_table_lock);
+
+/*
+ * The table of VM entries for protected VMs in hyp.
+ * Allocated at hyp initialization and setup.
+ */
+static struct pkvm_hyp_vm **vm_table;
+
+void pkvm_hyp_vm_table_init(void *tbl)
+{
+ WARN_ON(vm_table);
+ vm_table = tbl;
+}
+
+/*
+ * Return the hyp vm structure corresponding to the handle.
+ */
+static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
+{
+ unsigned int idx = vm_handle_to_idx(handle);
+
+ if (unlikely(idx >= KVM_MAX_PVMS))
+ return NULL;
+
+ return vm_table[idx];
+}
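
/*
 * Handles are just vm_table indices displaced by HANDLE_OFFSET, so lookup
 * is O(1) with one bounds check; unsigned wraparound makes the same check
 * reject handles below the offset. A standalone restatement:
 */
#include <assert.h>
#include <stddef.h>

#define HANDLE_OFF	0x1000
#define MAX_VMS		255

static void *table[MAX_VMS];

static void *lookup(unsigned int handle)
{
	unsigned int idx = handle - HANDLE_OFF;

	if (idx >= MAX_VMS)	/* also catches handle < HANDLE_OFF */
		return NULL;
	return table[idx];
}

int main(void)
{
	int vm;

	table[3] = &vm;
	assert(lookup(HANDLE_OFF + 3) == &vm);
	assert(lookup(0) == NULL);			/* underflow, caught */
	assert(lookup(HANDLE_OFF + MAX_VMS) == NULL);	/* one past the end */
	return 0;
}
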
+
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+ unsigned int vcpu_idx)
+{
+ struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
+ struct pkvm_hyp_vm *hyp_vm;
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+ goto unlock;
+
+ hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+ hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
+unlock:
+ hyp_spin_unlock(&vm_table_lock);
+ return hyp_vcpu;
+}
+
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
+ hyp_spin_unlock(&vm_table_lock);
+}
+
+static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
+{
+ if (host_vcpu)
+ hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
+}
+
+static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
+ unsigned int nr_vcpus)
+{
+ int i;
+
+ for (i = 0; i < nr_vcpus; i++)
+ unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
+}
+
+static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
+ unsigned int nr_vcpus)
+{
+ hyp_vm->host_kvm = host_kvm;
+ hyp_vm->kvm.created_vcpus = nr_vcpus;
+ hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr;
+}
+
+static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ struct pkvm_hyp_vm *hyp_vm,
+ struct kvm_vcpu *host_vcpu,
+ unsigned int vcpu_idx)
+{
+ int ret = 0;
+
+ if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
+ return -EBUSY;
+
+ if (host_vcpu->vcpu_idx != vcpu_idx) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ hyp_vcpu->host_vcpu = host_vcpu;
+
+ hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
+ hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
+ hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
+
+ hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
+ hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+done:
+ if (ret)
+ unpin_host_vcpu(host_vcpu);
+ return ret;
+}
+
+static int find_free_vm_table_entry(struct kvm *host_kvm)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_PVMS; ++i) {
+ if (!vm_table[i])
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Allocate a VM table entry and insert a pointer to the new vm.
+ *
+ * Return a unique handle to the protected VM on success,
+ * negative error code on failure.
+ */
+static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
+ struct pkvm_hyp_vm *hyp_vm)
+{
+ struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
+ int idx;
+
+ hyp_assert_lock_held(&vm_table_lock);
+
+ /*
+ * Initializing protected state might have failed, yet a malicious
+ * host could trigger this function. Thus, ensure that 'vm_table'
+ * exists.
+ */
+ if (unlikely(!vm_table))
+ return -EINVAL;
+
+ idx = find_free_vm_table_entry(host_kvm);
+ if (idx < 0)
+ return idx;
+
+ hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);
+
+ /* VMID 0 is reserved for the host */
+ atomic64_set(&mmu->vmid.id, idx + 1);
+
+ mmu->arch = &hyp_vm->kvm.arch;
+ mmu->pgt = &hyp_vm->pgt;
+
+ vm_table[idx] = hyp_vm;
+ return hyp_vm->kvm.arch.pkvm.handle;
+}
+
+/*
+ * Deallocate and remove the VM table entry corresponding to the handle.
+ */
+static void remove_vm_table_entry(pkvm_handle_t handle)
+{
+ hyp_assert_lock_held(&vm_table_lock);
+ vm_table[vm_handle_to_idx(handle)] = NULL;
+}
+
+static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
+{
+ return size_add(sizeof(struct pkvm_hyp_vm),
+ size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
+}
+
+static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
+{
+ void *va = (void *)kern_hyp_va(host_va);
+
+ if (!PAGE_ALIGNED(va))
+ return NULL;
+
+ if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
+ PAGE_ALIGN(size) >> PAGE_SHIFT))
+ return NULL;
+
+ return va;
+}
+
+static void *map_donated_memory(unsigned long host_va, size_t size)
+{
+ void *va = map_donated_memory_noclear(host_va, size);
+
+ if (va)
+ memset(va, 0, size);
+
+ return va;
+}
+
+static void __unmap_donated_memory(void *va, size_t size)
+{
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
+ PAGE_ALIGN(size) >> PAGE_SHIFT));
+}
+
+static void unmap_donated_memory(void *va, size_t size)
+{
+ if (!va)
+ return;
+
+ memset(va, 0, size);
+ __unmap_donated_memory(va, size);
+}
+
+static void unmap_donated_memory_noclear(void *va, size_t size)
+{
+ if (!va)
+ return;
+
+ __unmap_donated_memory(va, size);
+}
+
+/*
+ * Initialize the hypervisor copy of the protected VM state using the
+ * memory donated by the host.
+ *
+ * Unmaps the donated memory from the host at stage 2.
+ *
+ * host_kvm: A pointer to the host's struct kvm.
+ * vm_hva: The host va of the area being donated for the VM state.
+ * Must be page aligned.
+ * pgd_hva: The host va of the area being donated for the stage-2 PGD for
+ * the VM. Must be page aligned. Its size is implied by the VM's
+ * VTCR.
+ *
+ * Return a unique handle to the protected VM on success,
+ * negative error code on failure.
+ */
+int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
+ unsigned long pgd_hva)
+{
+ struct pkvm_hyp_vm *hyp_vm = NULL;
+ size_t vm_size, pgd_size;
+ unsigned int nr_vcpus;
+ void *pgd = NULL;
+ int ret;
+
+ ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
+ if (ret)
+ return ret;
+
+ nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
+ if (nr_vcpus < 1) {
+ ret = -EINVAL;
+ goto err_unpin_kvm;
+ }
+
+ vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
+ pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr);
+
+ ret = -ENOMEM;
+
+ hyp_vm = map_donated_memory(vm_hva, vm_size);
+ if (!hyp_vm)
+ goto err_remove_mappings;
+
+ pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
+ if (!pgd)
+ goto err_remove_mappings;
+
+ init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);
+
+ hyp_spin_lock(&vm_table_lock);
+ ret = insert_vm_table_entry(host_kvm, hyp_vm);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
+ if (ret)
+ goto err_remove_vm_table_entry;
+ hyp_spin_unlock(&vm_table_lock);
+
+ return hyp_vm->kvm.arch.pkvm.handle;
+
+err_remove_vm_table_entry:
+ remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
+err_unlock:
+ hyp_spin_unlock(&vm_table_lock);
+err_remove_mappings:
+ unmap_donated_memory(hyp_vm, vm_size);
+ unmap_donated_memory(pgd, pgd_size);
+err_unpin_kvm:
+ hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
+ return ret;
+}
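
/*
 * The error path of __pkvm_init_vm() above is the usual kernel goto
 * ladder: resources are released in reverse order of acquisition, and
 * jumping to a label skips the releases for anything not yet acquired.
 * A minimal standalone copy of the shape (the success path returns
 * before the ladder, keeping what it built):
 */
#include <stdlib.h>

static int init(void)
{
	void *a, *b;

	a = malloc(32);
	if (!a)
		return -1;
	b = malloc(32);
	if (!b)
		goto err_free_a;	/* 'a' exists, 'b' does not */

	free(b);			/* demo only: release and report success */
	free(a);
	return 0;

err_free_a:
	free(a);
	return -1;
}

int main(void)
{
	return init() ? 1 : 0;
}
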
+
+/*
+ * Initialize the hypervisor copy of the protected vCPU state using the
+ * memory donated by the host.
+ *
+ * handle: The handle for the protected vm.
+ * host_vcpu: A pointer to the corresponding host vcpu.
+ * vcpu_hva: The host va of the area being donated for the vcpu state.
+ * Must be page aligned. The size of the area must be equal to
+ * the page-aligned size of 'struct pkvm_hyp_vcpu'.
+ * Return 0 on success, negative error code on failure.
+ */
+int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+ unsigned long vcpu_hva)
+{
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ struct pkvm_hyp_vm *hyp_vm;
+ unsigned int idx;
+ int ret;
+
+ hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
+ if (!hyp_vcpu)
+ return -ENOMEM;
+
+ hyp_spin_lock(&vm_table_lock);
+
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ idx = hyp_vm->nr_vcpus;
+ if (idx >= hyp_vm->kvm.created_vcpus) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
+ if (ret)
+ goto unlock;
+
+ hyp_vm->vcpus[idx] = hyp_vcpu;
+ hyp_vm->nr_vcpus++;
+unlock:
+ hyp_spin_unlock(&vm_table_lock);
+
+ if (ret)
+ unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+
+ return ret;
+}
+
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+ size = PAGE_ALIGN(size);
+ memset(addr, 0, size);
+
+ for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+ push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+ unmap_donated_memory_noclear(addr, size);
+}
+
+int __pkvm_teardown_vm(pkvm_handle_t handle)
+{
+ struct kvm_hyp_memcache *mc;
+ struct pkvm_hyp_vm *hyp_vm;
+ struct kvm *host_kvm;
+ unsigned int idx;
+ size_t vm_size;
+ int err;
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (WARN_ON(hyp_page_count(hyp_vm))) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ host_kvm = hyp_vm->host_kvm;
+
+ /* Ensure the VMID is clean before it can be reallocated */
+ __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
+ remove_vm_table_entry(handle);
+ hyp_spin_unlock(&vm_table_lock);
+
+ /* Reclaim guest pages (including page-table pages) */
+ mc = &host_kvm->arch.pkvm.teardown_mc;
+ reclaim_guest_pages(hyp_vm, mc);
+ unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
+
+ /* Push the metadata pages to the teardown memcache */
+ for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
+ struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+
+ teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
+ }
+
+ vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
+ teardown_donated_memory(mc, hyp_vm, vm_size);
+ hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
+ return 0;
+
+err_unlock:
+ hyp_spin_unlock(&vm_table_lock);
+ return err;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index e8d4ea2fcfa0..110f04627785 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -16,6 +16,7 @@
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
unsigned long hyp_nr_cpus;
@@ -24,6 +25,7 @@ unsigned long hyp_nr_cpus;
(unsigned long)__per_cpu_start)
static void *vmemmap_base;
+static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
@@ -31,16 +33,20 @@ static struct hyp_pool hpool;
static int divide_memory_pool(void *virt, unsigned long size)
{
- unsigned long vstart, vend, nr_pages;
+ unsigned long nr_pages;
hyp_early_alloc_init(virt, size);
- hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
- nr_pages = (vend - vstart) >> PAGE_SHIFT;
+ nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
vmemmap_base = hyp_early_alloc_contig(nr_pages);
if (!vmemmap_base)
return -ENOMEM;
+ nr_pages = hyp_vm_table_pages();
+ vm_table_base = hyp_early_alloc_contig(nr_pages);
+ if (!vm_table_base)
+ return -ENOMEM;
+
nr_pages = hyp_s1_pgtable_pages();
hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
if (!hyp_pgt_base)
@@ -78,7 +84,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
- ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
+ ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
if (ret)
return ret;
@@ -138,20 +144,17 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
}
/*
- * Map the host's .bss and .rodata sections RO in the hypervisor, but
- * transfer the ownership from the host to the hypervisor itself to
- * make sure it can't be donated or shared with another entity.
+ * Map the host sections RO in the hypervisor, but transfer the
+ * ownership from the host to the hypervisor itself to make sure they
+ * can't be donated or shared with another entity.
*
* The ownership transition requires matching changes in the host
* stage-2. This will be done later (see finalize_host_mappings()) once
* the hyp_vmemmap is addressable.
*/
prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
- ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
- if (ret)
- return ret;
-
- ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
+ ret = pkvm_create_mappings(&kvm_vgic_global_state,
+ &kvm_vgic_global_state + 1, prot);
if (ret)
return ret;
@@ -186,33 +189,20 @@ static void hpool_put_page(void *addr)
hyp_put_page(&hpool, addr);
}
-static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
- kvm_pte_t pte = *ptep;
phys_addr_t phys;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
- /*
- * Fix-up the refcount for the page-table pages as the early allocator
- * was unable to access the hyp_vmemmap and so the buddy allocator has
- * initialised the refcount to '1'.
- */
- mm_ops->get_page(ptep);
- if (flag != KVM_PGTABLE_WALK_LEAF)
- return 0;
-
- if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
+ if (ctx->level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
- phys = kvm_pte_to_phys(pte);
+ phys = kvm_pte_to_phys(ctx->old);
if (!addr_is_memory(phys))
return -EINVAL;
@@ -220,10 +210,10 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
* Adjust the host stage-2 mappings to match the ownership attributes
* configured in the hypervisor stage-1.
*/
- state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
- return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
+ return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
break;
@@ -237,12 +227,25 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}
-static int finalize_host_mappings(void)
+static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ /*
+ * Fix-up the refcount for the page-table pages as the early allocator
+ * was unable to access the hyp_vmemmap and so the buddy allocator has
+ * initialised the refcount to '1'.
+ */
+ if (kvm_pte_valid(ctx->old))
+ ctx->mm_ops->get_page(ctx->ptep);
+
+ return 0;
+}
+
+static int fix_host_ownership(void)
{
struct kvm_pgtable_walker walker = {
- .cb = finalize_host_mappings_walker,
- .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pkvm_pgtable.mm_ops,
+ .cb = fix_host_ownership_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
};
int i, ret;
@@ -258,6 +261,18 @@ static int finalize_host_mappings(void)
return 0;
}
+static int fix_hyp_pgtable_refcnt(void)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = fix_hyp_pgtable_refcnt_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = pkvm_pgtable.mm_ops,
+ };
+
+ return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
+ &walker);
+}
+
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -287,10 +302,19 @@ void __noreturn __pkvm_init_finalise(void)
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
- ret = finalize_host_mappings();
+ ret = fix_host_ownership();
+ if (ret)
+ goto out;
+
+ ret = fix_hyp_pgtable_refcnt();
+ if (ret)
+ goto out;
+
+ ret = hyp_create_pcpu_fixmap();
if (ret)
goto out;
+ pkvm_hyp_vm_table_init(vm_table_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index cdf8e76b0be1..b11cf2c618a6 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -49,35 +49,38 @@
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
#define KVM_MAX_OWNER_ID 1
+/*
+ * Used to indicate a pte for which a 'break-before-make' sequence is in
+ * progress.
+ */
+#define KVM_INVALID_PTE_LOCKED BIT(10)
+
struct kvm_pgtable_walk_data {
- struct kvm_pgtable *pgt;
struct kvm_pgtable_walker *walker;
u64 addr;
u64 end;
};
-#define KVM_PHYS_INVALID (-1ULL)
-
static bool kvm_phys_is_valid(u64 phys)
{
return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
}
-static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+static bool kvm_block_mapping_supported(const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
{
- u64 granule = kvm_granule_size(level);
+ u64 granule = kvm_granule_size(ctx->level);
- if (!kvm_level_supports_block_mapping(level))
+ if (!kvm_level_supports_block_mapping(ctx->level))
return false;
- if (granule > (end - addr))
+ if (granule > (ctx->end - ctx->addr))
return false;
if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
return false;
- return IS_ALIGNED(addr, granule);
+ return IS_ALIGNED(ctx->addr, granule);
}
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
@@ -88,7 +91,7 @@ static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
return (data->addr >> shift) & mask;
}
-static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
+static u32 kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
u64 mask = BIT(pgt->ia_bits) - 1;
@@ -96,11 +99,6 @@ static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
return (addr & mask) >> shift;
}
-static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
-{
- return __kvm_pgd_page_idx(data->pgt, data->addr);
-}
-
static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
struct kvm_pgtable pgt = {
@@ -108,7 +106,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
.start_level = start_level,
};
- return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
+ return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}
static bool kvm_pte_table(kvm_pte_t pte, u32 level)
@@ -122,16 +120,6 @@ static bool kvm_pte_table(kvm_pte_t pte, u32 level)
return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}
-static kvm_pte_t kvm_phys_to_pte(u64 pa)
-{
- kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;
-
- if (PAGE_SHIFT == 16)
- pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);
-
- return pte;
-}
-
static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
@@ -142,16 +130,13 @@ static void kvm_clear_pte(kvm_pte_t *ptep)
WRITE_ONCE(*ptep, 0);
}
-static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
- struct kvm_pgtable_mm_ops *mm_ops)
+static kvm_pte_t kvm_init_table_pte(kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
{
- kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
+ kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
pte |= KVM_PTE_VALID;
-
- WARN_ON(kvm_pte_valid(old));
- smp_store_release(ptep, pte);
+ return pte;
}
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
@@ -172,36 +157,47 @@ static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}
-static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
- u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag)
+static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data,
+ const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
struct kvm_pgtable_walker *walker = data->walker;
- return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
+
+ /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
+ WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
+ return walker->cb(ctx, visit);
}
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *pgtable, u32 level);
+ struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level);
static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *ptep, u32 level)
+ struct kvm_pgtable_mm_ops *mm_ops,
+ kvm_pteref_t pteref, u32 level)
{
- int ret = 0;
- u64 addr = data->addr;
- kvm_pte_t *childp, pte = *ptep;
- bool table = kvm_pte_table(pte, level);
enum kvm_pgtable_walk_flags flags = data->walker->flags;
+ kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
+ struct kvm_pgtable_visit_ctx ctx = {
+ .ptep = ptep,
+ .old = READ_ONCE(*ptep),
+ .arg = data->walker->arg,
+ .mm_ops = mm_ops,
+ .addr = data->addr,
+ .end = data->end,
+ .level = level,
+ .flags = flags,
+ };
+ int ret = 0;
+ kvm_pteref_t childp;
+ bool table = kvm_pte_table(ctx.old, level);
- if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_TABLE_PRE);
- }
+ if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
- if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_LEAF);
- pte = *ptep;
- table = kvm_pte_table(pte, level);
+ if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+ ctx.old = READ_ONCE(*ptep);
+ table = kvm_pte_table(ctx.old, level);
}
if (ret)
@@ -213,22 +209,20 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
goto out;
}
- childp = kvm_pte_follow(pte, data->pgt->mm_ops);
- ret = __kvm_pgtable_walk(data, childp, level + 1);
+ childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
+ ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
if (ret)
goto out;
- if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
- ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
- KVM_PGTABLE_WALK_TABLE_POST);
- }
+ if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
+ ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
out:
return ret;
}
static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
- kvm_pte_t *pgtable, u32 level)
+ struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, u32 level)
{
u32 idx;
int ret = 0;
@@ -237,12 +231,12 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
return -EINVAL;
for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
- kvm_pte_t *ptep = &pgtable[idx];
+ kvm_pteref_t pteref = &pgtable[idx];
if (data->addr >= data->end)
break;
- ret = __kvm_pgtable_visit(data, ptep, level);
+ ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
if (ret)
break;
}
@@ -250,11 +244,10 @@ static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
return ret;
}
-static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
+static int _kvm_pgtable_walk(struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
{
u32 idx;
int ret = 0;
- struct kvm_pgtable *pgt = data->pgt;
u64 limit = BIT(pgt->ia_bits);
if (data->addr > limit || data->end > limit)
@@ -263,10 +256,10 @@ static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
if (!pgt->pgd)
return -EINVAL;
- for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
- kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];
+ for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
+ kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
- ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
+ ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
if (ret)
break;
}
@@ -278,13 +271,20 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_pgtable_walker *walker)
{
struct kvm_pgtable_walk_data walk_data = {
- .pgt = pgt,
.addr = ALIGN_DOWN(addr, PAGE_SIZE),
.end = PAGE_ALIGN(walk_data.addr + size),
.walker = walker,
};
+ int r;
- return _kvm_pgtable_walk(&walk_data);
+ r = kvm_pgtable_walk_begin(walker);
+ if (r)
+ return r;
+
+ r = _kvm_pgtable_walk(pgt, &walk_data);
+ kvm_pgtable_walk_end(walker);
+
+ return r;
}
struct leaf_walk_data {
@@ -292,13 +292,13 @@ struct leaf_walk_data {
u32 level;
};
-static int leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int leaf_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct leaf_walk_data *data = arg;
+ struct leaf_walk_data *data = ctx->arg;
- data->pte = *ptep;
- data->level = level;
+ data->pte = ctx->old;
+ data->level = ctx->level;
return 0;
}
@@ -329,7 +329,6 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
struct hyp_map_data {
u64 phys;
kvm_pte_t attr;
- struct kvm_pgtable_mm_ops *mm_ops;
};
static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
@@ -383,47 +382,49 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
return prot;
}
-static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep, struct hyp_map_data *data)
+static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
+ struct hyp_map_data *data)
{
- kvm_pte_t new, old = *ptep;
- u64 granule = kvm_granule_size(level), phys = data->phys;
+ kvm_pte_t new;
+ u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
- if (!kvm_block_mapping_supported(addr, end, phys, level))
+ if (!kvm_block_mapping_supported(ctx, phys))
return false;
data->phys += granule;
- new = kvm_init_valid_leaf_pte(phys, data->attr, level);
- if (old == new)
+ new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
+ if (ctx->old == new)
return true;
- if (!kvm_pte_valid(old))
- data->mm_ops->get_page(ptep);
- else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
+ if (!kvm_pte_valid(ctx->old))
+ ctx->mm_ops->get_page(ctx->ptep);
+ else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
return false;
- smp_store_release(ptep, new);
+ smp_store_release(ctx->ptep, new);
return true;
}
-static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t *childp;
- struct hyp_map_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t *childp, new;
+ struct hyp_map_data *data = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
+ if (hyp_map_walker_try_leaf(ctx, data))
return 0;
- if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
+ if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
if (!childp)
return -ENOMEM;
- kvm_set_table_pte(ptep, childp, mm_ops);
- mm_ops->get_page(ptep);
+ new = kvm_init_table_pte(childp, mm_ops);
+ mm_ops->get_page(ctx->ptep);
+ smp_store_release(ctx->ptep, new);
+
return 0;
}
@@ -433,7 +434,6 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
int ret;
struct hyp_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
- .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = hyp_map_walker,
@@ -451,44 +451,39 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret;
}
-struct hyp_unmap_data {
- u64 unmapped;
- struct kvm_pgtable_mm_ops *mm_ops;
-};
-
-static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t pte = *ptep, *childp = NULL;
- u64 granule = kvm_granule_size(level);
- struct hyp_unmap_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t *childp = NULL;
+ u64 granule = kvm_granule_size(ctx->level);
+ u64 *unmapped = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return -EINVAL;
- if (kvm_pte_table(pte, level)) {
- childp = kvm_pte_follow(pte, mm_ops);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ childp = kvm_pte_follow(ctx->old, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
- kvm_clear_pte(ptep);
+ kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
+ __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
} else {
- if (end - addr < granule)
+ if (ctx->end - ctx->addr < granule)
return -EINVAL;
- kvm_clear_pte(ptep);
+ kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
- data->unmapped += granule;
+ __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ *unmapped += granule;
}
dsb(ish);
isb();
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
if (childp)
mm_ops->put_page(childp);
@@ -498,12 +493,10 @@ static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
- struct hyp_unmap_data unmap_data = {
- .mm_ops = pgt->mm_ops,
- };
+ u64 unmapped = 0;
struct kvm_pgtable_walker walker = {
.cb = hyp_unmap_walker,
- .arg = &unmap_data,
+ .arg = &unmapped,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
@@ -511,7 +504,7 @@ u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
return 0;
kvm_pgtable_walk(pgt, addr, size, &walker);
- return unmap_data.unmapped;
+ return unmapped;
}
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
@@ -519,7 +512,7 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
{
u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
- pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
+ pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
if (!pgt->pgd)
return -ENOMEM;
@@ -532,19 +525,18 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
return 0;
}
-static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int hyp_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
- kvm_pte_t pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
- if (kvm_pte_table(pte, level))
- mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
+ if (kvm_pte_table(ctx->old, ctx->level))
+ mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
return 0;
}
@@ -554,11 +546,10 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
- pgt->mm_ops->put_page(pgt->pgd);
+ pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
pgt->pgd = NULL;
}
@@ -573,8 +564,6 @@ struct stage2_map_data {
struct kvm_s2_mmu *mmu;
void *memcache;
- struct kvm_pgtable_mm_ops *mm_ops;
-
/* Force mappings to page granularity */
bool force_pte;
};
@@ -682,19 +671,92 @@ static bool stage2_pte_is_counted(kvm_pte_t pte)
return !!pte;
}
-static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
- u32 level, struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_pte_is_locked(kvm_pte_t pte)
+{
+ return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
+}
+
+static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
+{
+ if (!kvm_pgtable_walk_shared(ctx)) {
+ WRITE_ONCE(*ctx->ptep, new);
+ return true;
+ }
+
+ return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
+}
+
+/**
+ * stage2_try_break_pte() - Invalidates a pte according to the
+ * 'break-before-make' requirements of the
+ * architecture.
+ *
+ * @ctx: context of the visited pte.
+ * @mmu: stage-2 mmu
+ *
+ * Returns: true if the pte was successfully broken.
+ *
+ * If the removed pte was valid, performs the necessary serialization and TLB
+ * invalidation for the old value. For counted ptes, drops the reference count
+ * on the containing table page.
+ */
+static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ struct kvm_s2_mmu *mmu)
+{
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+ if (stage2_pte_is_locked(ctx->old)) {
+ /*
+ * Should never occur if this walker has exclusive access to the
+ * page tables.
+ */
+ WARN_ON(!kvm_pgtable_walk_shared(ctx));
+ return false;
+ }
+
+ if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
+ return false;
+
+ /*
+ * Perform the appropriate TLB invalidation based on the evicted pte
+ * value (if any).
+ */
+ if (kvm_pte_table(ctx->old, ctx->level))
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ else if (kvm_pte_valid(ctx->old))
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+ if (stage2_pte_is_counted(ctx->old))
+ mm_ops->put_page(ctx->ptep);
+
+ return true;
+}
+
+static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
+{
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+ WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
+
+ if (stage2_pte_is_counted(new))
+ mm_ops->get_page(ctx->ptep);
+
+ smp_store_release(ctx->ptep, new);
+}
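
/*
 * A single-threaded toy of the break-before-make protocol encoded by
 * stage2_try_break_pte()/stage2_make_pte() above: the PTE is first parked
 * in a LOCKED-invalid state so concurrent shared walkers back off, the TLB
 * entry for the old value is invalidated, and only then is the new value
 * published. Names below are local stand-ins for the kernel's.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define VALID	(1ULL << 0)
#define LOCKED	(1ULL << 10)	/* invalid + locked: BBM in progress */

static uint64_t pte = 0x80000000ULL | VALID;

static bool try_break(void)
{
	if (pte & LOCKED)
		return false;	/* someone else is mid-BBM: back off */
	pte = LOCKED;		/* break: now invalid, marked locked */
	/* real code: TLB invalidation for the evicted value goes here */
	return true;
}

static void make(uint64_t new)
{
	assert(pte == LOCKED);	/* only publish over a broken PTE */
	pte = new;		/* make: a release store in the kernel */
}

int main(void)
{
	assert(try_break());
	assert(!try_break());	/* a second breaker must retry */
	make(0x90000000ULL | VALID);
	assert(pte & VALID);
	return 0;
}
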
+
+static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
/*
* Clear the existing PTE, and perform break-before-make with
* TLB maintenance if it was valid.
*/
- if (kvm_pte_valid(*ptep)) {
- kvm_clear_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
+ if (kvm_pte_valid(ctx->old)) {
+ kvm_clear_pte(ctx->ptep);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
}
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
}
static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
@@ -708,44 +770,42 @@ static bool stage2_pte_executable(kvm_pte_t pte)
return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}
-static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
+static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- if (data->force_pte && (level < (KVM_PGTABLE_MAX_LEVELS - 1)))
+ if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
return false;
- return kvm_block_mapping_supported(addr, end, data->phys, level);
+ return kvm_block_mapping_supported(ctx, data->phys);
}
-static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
+static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- kvm_pte_t new, old = *ptep;
- u64 granule = kvm_granule_size(level), phys = data->phys;
+ kvm_pte_t new;
+ u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
struct kvm_pgtable *pgt = data->mmu->pgt;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!stage2_leaf_mapping_allowed(addr, end, level, data))
+ if (!stage2_leaf_mapping_allowed(ctx, data))
return -E2BIG;
if (kvm_phys_is_valid(phys))
- new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+ new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
else
new = kvm_init_invalid_leaf_owner(data->owner_id);
- if (stage2_pte_is_counted(old)) {
- /*
- * Skip updating the PTE if we are trying to recreate the exact
- * same mapping or only change the access permissions. Instead,
- * the vCPU will exit one more time from guest if still needed
- * and then go through the path of relaxing permissions.
- */
- if (!stage2_pte_needs_update(old, new))
- return -EAGAIN;
+ /*
+ * Skip updating the PTE if we are trying to recreate the exact
+ * same mapping or only change the access permissions. Instead,
+ * the vCPU will exit one more time from guest if still needed
+ * and then go through the path of relaxing permissions.
+ */
+ if (!stage2_pte_needs_update(ctx->old, new))
+ return -EAGAIN;
- stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
- }
+ if (!stage2_try_break_pte(ctx, data->mmu))
+ return -EAGAIN;
/* Perform CMOs before installation of the guest stage-2 PTE */
if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
@@ -755,56 +815,43 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
- smp_store_release(ptep, new);
- if (stage2_pte_is_counted(new))
- mm_ops->get_page(ptep);
+ stage2_make_pte(ctx, new);
+
if (kvm_phys_is_valid(phys))
data->phys += granule;
return 0;
}
-static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
+static int stage2_map_walk_table_pre(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- if (data->anchor)
- return 0;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
+ int ret;
- if (!stage2_leaf_mapping_allowed(addr, end, level, data))
+ if (!stage2_leaf_mapping_allowed(ctx, data))
return 0;
- data->childp = kvm_pte_follow(*ptep, data->mm_ops);
- kvm_clear_pte(ptep);
+ ret = stage2_map_walker_try_leaf(ctx, data);
+ if (ret)
+ return ret;
- /*
- * Invalidate the whole stage-2, as we may have numerous leaf
- * entries below us which would otherwise need invalidating
- * individually.
- */
- kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
- data->anchor = ptep;
+ mm_ops->free_removed_table(childp, ctx->level);
return 0;
}
-static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
struct stage2_map_data *data)
{
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
- kvm_pte_t *childp, pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp, new;
int ret;
- if (data->anchor) {
- if (stage2_pte_is_counted(pte))
- mm_ops->put_page(ptep);
-
- return 0;
- }
-
- ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
+ ret = stage2_map_walker_try_leaf(ctx, data);
if (ret != -E2BIG)
return ret;
- if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
+ if (WARN_ON(ctx->level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
if (!data->memcache)
@@ -814,99 +861,62 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
if (!childp)
return -ENOMEM;
+ if (!stage2_try_break_pte(ctx, data->mmu)) {
+ mm_ops->put_page(childp);
+ return -EAGAIN;
+ }
+
/*
* If we've run into an existing block mapping then replace it with
* a table. Accesses beyond 'end' that fall within the new table
* will be mapped lazily.
*/
- if (stage2_pte_is_counted(pte))
- stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
-
- kvm_set_table_pte(ptep, childp, mm_ops);
- mm_ops->get_page(ptep);
+ new = kvm_init_table_pte(childp, mm_ops);
+ stage2_make_pte(ctx, new);
return 0;
}
-static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- struct stage2_map_data *data)
-{
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
- kvm_pte_t *childp;
- int ret = 0;
-
- if (!data->anchor)
- return 0;
-
- if (data->anchor == ptep) {
- childp = data->childp;
- data->anchor = NULL;
- data->childp = NULL;
- ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
- } else {
- childp = kvm_pte_follow(*ptep, mm_ops);
- }
-
- mm_ops->put_page(childp);
- mm_ops->put_page(ptep);
-
- return ret;
-}
-
/*
- * This is a little fiddly, as we use all three of the walk flags. The idea
- * is that the TABLE_PRE callback runs for table entries on the way down,
- * looking for table entries which we could conceivably replace with a
- * block entry for this mapping. If it finds one, then it sets the 'anchor'
- * field in 'struct stage2_map_data' to point at the table entry, before
- * clearing the entry to zero and descending into the now detached table.
+ * The TABLE_PRE callback runs for table entries on the way down, looking
+ * for table entries which we could conceivably replace with a block entry
+ * for this mapping. If it finds one it replaces the entry and calls
+ * kvm_pgtable_mm_ops::free_removed_table() to tear down the detached table.
*
- * The behaviour of the LEAF callback then depends on whether or not the
- * anchor has been set. If not, then we're not using a block mapping higher
- * up the table and we perform the mapping at the existing leaves instead.
- * If, on the other hand, the anchor _is_ set, then we drop references to
- * all valid leaves so that the pages beneath the anchor can be freed.
- *
- * Finally, the TABLE_POST callback does nothing if the anchor has not
- * been set, but otherwise frees the page-table pages while walking back up
- * the page-table, installing the block entry when it revisits the anchor
- * pointer and clearing the anchor to NULL.
+ * Otherwise, the LEAF callback performs the mapping at the existing leaves
+ * instead.
*/
-static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag, void * const arg)
+static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct stage2_map_data *data = arg;
+ struct stage2_map_data *data = ctx->arg;
- switch (flag) {
+ switch (visit) {
case KVM_PGTABLE_WALK_TABLE_PRE:
- return stage2_map_walk_table_pre(addr, end, level, ptep, data);
+ return stage2_map_walk_table_pre(ctx, data);
case KVM_PGTABLE_WALK_LEAF:
- return stage2_map_walk_leaf(addr, end, level, ptep, data);
- case KVM_PGTABLE_WALK_TABLE_POST:
- return stage2_map_walk_table_post(addr, end, level, ptep, data);
+ return stage2_map_walk_leaf(ctx, data);
+ default:
+ return -EINVAL;
}
-
- return -EINVAL;
}
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- void *mc)
+ void *mc, enum kvm_pgtable_walk_flags flags)
{
int ret;
struct stage2_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
.mmu = pgt->mmu,
.memcache = mc,
- .mm_ops = pgt->mm_ops,
.force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
- .flags = KVM_PGTABLE_WALK_TABLE_PRE |
- KVM_PGTABLE_WALK_LEAF |
- KVM_PGTABLE_WALK_TABLE_POST,
+ .flags = flags |
+ KVM_PGTABLE_WALK_TABLE_PRE |
+ KVM_PGTABLE_WALK_LEAF,
.arg = &map_data,
};
@@ -930,15 +940,13 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
.phys = KVM_PHYS_INVALID,
.mmu = pgt->mmu,
.memcache = mc,
- .mm_ops = pgt->mm_ops,
.owner_id = owner_id,
.force_pte = true,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
.flags = KVM_PGTABLE_WALK_TABLE_PRE |
- KVM_PGTABLE_WALK_LEAF |
- KVM_PGTABLE_WALK_TABLE_POST,
+ KVM_PGTABLE_WALK_LEAF,
.arg = &map_data,
};
@@ -949,30 +957,29 @@ int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable *pgt = ctx->arg;
struct kvm_s2_mmu *mmu = pgt->mmu;
- struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
- kvm_pte_t pte = *ptep, *childp = NULL;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+ kvm_pte_t *childp = NULL;
bool need_flush = false;
- if (!kvm_pte_valid(pte)) {
- if (stage2_pte_is_counted(pte)) {
- kvm_clear_pte(ptep);
- mm_ops->put_page(ptep);
+ if (!kvm_pte_valid(ctx->old)) {
+ if (stage2_pte_is_counted(ctx->old)) {
+ kvm_clear_pte(ctx->ptep);
+ mm_ops->put_page(ctx->ptep);
}
return 0;
}
- if (kvm_pte_table(pte, level)) {
- childp = kvm_pte_follow(pte, mm_ops);
+ if (kvm_pte_table(ctx->old, ctx->level)) {
+ childp = kvm_pte_follow(ctx->old, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
- } else if (stage2_pte_cacheable(pgt, pte)) {
+ } else if (stage2_pte_cacheable(pgt, ctx->old)) {
need_flush = !stage2_has_fwb(pgt);
}
@@ -981,11 +988,11 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* block entry and rely on the remaining portions being faulted
* back lazily.
*/
- stage2_put_pte(ptep, mmu, addr, level, mm_ops);
+ stage2_put_pte(ctx, mmu, mm_ops);
if (need_flush && mm_ops->dcache_clean_inval_poc)
- mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
+ kvm_granule_size(ctx->level));
if (childp)
mm_ops->put_page(childp);
@@ -1009,21 +1016,19 @@ struct stage2_attr_data {
kvm_pte_t attr_clr;
kvm_pte_t pte;
u32 level;
- struct kvm_pgtable_mm_ops *mm_ops;
};
-static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_attr_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- kvm_pte_t pte = *ptep;
- struct stage2_attr_data *data = arg;
- struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t pte = ctx->old;
+ struct stage2_attr_data *data = ctx->arg;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(ctx->old))
return 0;
- data->level = level;
+ data->level = ctx->level;
data->pte = pte;
pte &= ~data->attr_clr;
pte |= data->attr_set;
@@ -1039,10 +1044,12 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* stage-2 PTE if we are going to add executable permission.
*/
if (mm_ops->icache_inval_pou &&
- stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
+ stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
- WRITE_ONCE(*ptep, pte);
+ kvm_granule_size(ctx->level));
+
+ if (!stage2_try_set_pte(ctx, pte))
+ return -EAGAIN;
}
return 0;
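The attribute walker is pure bit surgery on the leaf descriptor: clear attr_clr, OR in attr_set, then publish with the same try-set/-EAGAIN discipline as the map path. A small standalone sketch of the masking step, using illustrative bit positions for the access flag and the stage-2 write permission rather than the exact architectural layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative bit positions, not the full descriptor layout. */
	const uint64_t AF     = 1ULL << 10;	/* access flag */
	const uint64_t S2AP_W = 1ULL << 7;	/* write permission */

	uint64_t pte = 0x40000000ULL | S2AP_W;	/* writable, not accessed */

	/* e.g. write-protect and mark young in one pass */
	uint64_t attr_set = AF, attr_clr = S2AP_W;

	pte &= ~attr_clr;
	pte |= attr_set;
	printf("updated pte = %#llx\n", (unsigned long long)pte);
	return 0;
}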
@@ -1051,19 +1058,18 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
u64 size, kvm_pte_t attr_set,
kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
- u32 *level)
+ u32 *level, enum kvm_pgtable_walk_flags flags)
{
int ret;
kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
struct stage2_attr_data data = {
.attr_set = attr_set & attr_mask,
.attr_clr = attr_clr & attr_mask,
- .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_attr_walker,
.arg = &data,
- .flags = KVM_PGTABLE_WALK_LEAF,
+ .flags = flags | KVM_PGTABLE_WALK_LEAF,
};
ret = kvm_pgtable_walk(pgt, addr, size, &walker);
@@ -1082,14 +1088,14 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
return stage2_update_leaf_attrs(pgt, addr, size, 0,
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
- NULL, NULL);
+ NULL, NULL, 0);
}
kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
- &pte, NULL);
+ &pte, NULL, 0);
dsb(ishst);
return pte;
}
@@ -1098,7 +1104,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
- &pte, NULL);
+ &pte, NULL, 0);
/*
* "But where's the TLBI?!", you scream.
* "Over in the core code", I sigh.
@@ -1111,7 +1117,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
kvm_pte_t pte = 0;
- stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
+ stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}
@@ -1134,26 +1140,25 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
if (prot & KVM_PGTABLE_PROT_X)
clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
- ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
+ ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
+ KVM_PGTABLE_WALK_SHARED);
if (!ret)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
return ret;
}
-static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable *pgt = ctx->arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
- kvm_pte_t pte = *ptep;
- if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
+ if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
return 0;
if (mm_ops->dcache_clean_inval_poc)
- mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
- kvm_granule_size(level));
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
+ kvm_granule_size(ctx->level));
return 0;
}
@@ -1184,7 +1189,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
- pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
+ pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
if (!pgt->pgd)
return -ENOMEM;
@@ -1200,20 +1205,27 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
return 0;
}
-static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+size_t kvm_pgtable_stage2_pgd_size(u64 vtcr)
+{
+ u32 ia_bits = VTCR_EL2_IPA(vtcr);
+ u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
+ u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
+
+ return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
+}
+
+static int stage2_free_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
- kvm_pte_t pte = *ptep;
+ struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
- if (!stage2_pte_is_counted(pte))
+ if (!stage2_pte_is_counted(ctx->old))
return 0;
- mm_ops->put_page(ptep);
+ mm_ops->put_page(ctx->ptep);
- if (kvm_pte_table(pte, level))
- mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
+ if (kvm_pte_table(ctx->old, ctx->level))
+ mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
return 0;
}
@@ -1225,11 +1237,33 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
.cb = stage2_free_walker,
.flags = KVM_PGTABLE_WALK_LEAF |
KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
- pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
+ pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
pgt->pgd = NULL;
}
+
+void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, u32 level)
+{
+ kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
+ struct kvm_pgtable_walker walker = {
+ .cb = stage2_free_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF |
+ KVM_PGTABLE_WALK_TABLE_POST,
+ };
+ struct kvm_pgtable_walk_data data = {
+ .walker = &walker,
+
+ /*
+ * At this point the IPA really doesn't matter, as the page
+ * table being traversed has already been removed from the stage
+ * 2. Set an appropriate range to cover the entire page table.
+ */
+ .addr = 0,
+ .end = kvm_granule_size(level),
+ };
+
+ WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+}
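The [0, kvm_granule_size(level)) range works because a table at a given level maps exactly one granule of the level above it. Assuming 4KiB pages and 9 bits of index per level (the usual 48-bit configuration), the granule sizes fall out of a one-line shift, sketched below:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4KiB pages */
#define BITS_PER_LEVEL	(PAGE_SHIFT - 3)	/* 512 entries per table */
#define MAX_LEVELS	4

/* Size mapped by a single entry at @level (level 3 = leaf PTE). */
static uint64_t granule_size(uint32_t level)
{
	return 1ULL << (PAGE_SHIFT + (MAX_LEVELS - 1 - level) * BITS_PER_LEVEL);
}

int main(void)
{
	for (uint32_t level = 0; level < MAX_LEVELS; level++)
		printf("level %u: %#llx bytes\n", level,
		       (unsigned long long)granule_size(level));
	return 0;	/* 512GiB, 1GiB, 2MiB, 4KiB */
}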
diff --git a/arch/arm64/kvm/hyp/vhe/Makefile b/arch/arm64/kvm/hyp/vhe/Makefile
index 96bec0ecf9dd..3b9e5464b5b3 100644
--- a/arch/arm64/kvm/hyp/vhe/Makefile
+++ b/arch/arm64/kvm/hyp/vhe/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
+# Makefile for Kernel-based Virtual Machine module, HYP/VHE part
#
asflags-y := -D__KVM_VHE_HYPERVISOR__
diff --git a/arch/arm64/kvm/irq.h b/arch/arm64/kvm/irq.h
deleted file mode 100644
index 0d257de42c10..000000000000
--- a/arch/arm64/kvm/irq.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * irq.h: in kernel interrupt controller related definitions
- * Copyright (c) 2016 Red Hat, Inc.
- *
- * This header is included by irqchip.c. However, on ARM, interrupt
- * controller declarations are located in include/kvm/arm_vgic.h since
- * they are mostly shared between arm and arm64.
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include <kvm/arm_vgic.h>
-
-#endif
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 60ee3d9f01f8..a3ee3b605c9b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -128,6 +128,25 @@ static void kvm_s2_free_pages_exact(void *virt, size_t size)
free_pages_exact(virt, size);
}
+static struct kvm_pgtable_mm_ops kvm_s2_mm_ops;
+
+static void stage2_free_removed_table_rcu_cb(struct rcu_head *head)
+{
+ struct page *page = container_of(head, struct page, rcu_head);
+ void *pgtable = page_to_virt(page);
+ u32 level = page_private(page);
+
+ kvm_pgtable_stage2_free_removed(&kvm_s2_mm_ops, pgtable, level);
+}
+
+static void stage2_free_removed_table(void *addr, u32 level)
+{
+ struct page *page = virt_to_page(addr);
+
+ set_page_private(page, (unsigned long)level);
+ call_rcu(&page->rcu_head, stage2_free_removed_table_rcu_cb);
+}
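Because readers may still be traversing the detached table under RCU, the level is stashed in the page (via page_private()) so the eventual callback knows where to start the teardown walk. A userspace approximation of that "carry metadata with the deferred free" pattern, with a plain list standing in for the RCU grace period:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct page + page_private(): the deferred work
 * carries everything the callback will need. */
struct deferred_free {
	void *pgtable;
	unsigned int level;
	struct deferred_free *next;
};

static struct deferred_free *pending;

/* call_rcu() analogue: queue now, free later. */
static void defer_free(void *pgtable, unsigned int level)
{
	struct deferred_free *d = malloc(sizeof(*d));
	d->pgtable = pgtable;
	d->level = level;
	d->next = pending;
	pending = d;
}

/* Grace-period analogue: no walkers left, run the callbacks. */
static void flush_deferred(void)
{
	while (pending) {
		struct deferred_free *d = pending;
		pending = d->next;
		printf("tearing down table %p, walking from level %u\n",
		       d->pgtable, d->level + 1);
		free(d->pgtable);
		free(d);
	}
}

int main(void)
{
	defer_free(malloc(4096), 2);
	flush_deferred();
	return 0;
}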
+
static void kvm_host_get_page(void *addr)
{
get_page(virt_to_page(addr));
@@ -640,8 +659,8 @@ static struct kvm_pgtable_mm_ops kvm_user_mm_ops = {
static int get_user_mapping_size(struct kvm *kvm, u64 addr)
{
struct kvm_pgtable pgt = {
- .pgd = (kvm_pte_t *)kvm->mm->pgd,
- .ia_bits = VA_BITS,
+ .pgd = (kvm_pteref_t)kvm->mm->pgd,
+ .ia_bits = vabits_actual,
.start_level = (KVM_PGTABLE_MAX_LEVELS -
CONFIG_PGTABLE_LEVELS),
.mm_ops = &kvm_user_mm_ops,
@@ -662,6 +681,7 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.zalloc_page = stage2_memcache_zalloc_page,
.zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
.free_pages_exact = kvm_s2_free_pages_exact,
+ .free_removed_table = stage2_free_removed_table,
.get_page = kvm_host_get_page,
.put_page = kvm_s2_put_page,
.page_count = kvm_host_page_count,
@@ -675,15 +695,42 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
* kvm_init_stage2_mmu - Initialise a S2 MMU structure
* @kvm: The pointer to the KVM structure
* @mmu: The pointer to the s2 MMU structure
+ * @type: The machine type of the virtual machine
*
* Allocates only the stage-2 HW PGD level table(s).
* Note we don't need locking here as this is only called when the VM is
* created, which can only be done once.
*/
-int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
{
+ u32 kvm_ipa_limit = get_kvm_ipa_limit();
int cpu, err;
struct kvm_pgtable *pgt;
+ u64 mmfr0, mmfr1;
+ u32 phys_shift;
+
+ if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+ return -EINVAL;
+
+ phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+ if (is_protected_kvm_enabled()) {
+ phys_shift = kvm_ipa_limit;
+ } else if (phys_shift) {
+ if (phys_shift > kvm_ipa_limit ||
+ phys_shift < ARM64_MIN_PARANGE_BITS)
+ return -EINVAL;
+ } else {
+ phys_shift = KVM_PHYS_SHIFT;
+ if (phys_shift > kvm_ipa_limit) {
+ pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+ current->comm);
+ return -EINVAL;
+ }
+ }
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
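With kvm_arm_setup_stage2() folded in here, the IPA size negotiation happens at MMU init: the low byte of the VM type selects the IPA width, zero means the 40-bit legacy default, and anything outside [32, host limit] is rejected. A compact model of that policy (the mask and KVM_PHYS_SHIFT are the uapi values; the host limit is an assumption, and the protected-mode override is omitted):

#include <stdint.h>
#include <stdio.h>

#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK	0xffULL
#define KVM_VM_TYPE_ARM_IPA_SIZE(x)	((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
#define KVM_PHYS_SHIFT			40	/* legacy default */
#define ARM64_MIN_PARANGE_BITS		32

static int select_phys_shift(unsigned long type, uint32_t ipa_limit,
			     uint32_t *phys_shift)
{
	uint32_t shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);

	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
		return -1;

	if (!shift)
		shift = KVM_PHYS_SHIFT;	/* old VMMs pass type == 0 */

	if (shift > ipa_limit || shift < ARM64_MIN_PARANGE_BITS)
		return -1;

	*phys_shift = shift;
	return 0;
}

int main(void)
{
	uint32_t shift;
	uint32_t ipa_limit = 48;	/* assumed host limit */

	if (!select_phys_shift(40, ipa_limit, &shift))
		printf("guest IPA space: 2^%u bytes\n", shift);
	return 0;
}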
if (mmu->pgt != NULL) {
kvm_err("kvm_arch already initialized?\n");
@@ -807,6 +854,32 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
}
}
+static void hyp_mc_free_fn(void *addr, void *unused)
+{
+ free_page((unsigned long)addr);
+}
+
+static void *hyp_mc_alloc_fn(void *unused)
+{
+ return (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
+}
+
+void free_hyp_memcache(struct kvm_hyp_memcache *mc)
+{
+ if (is_protected_kvm_enabled())
+ __free_hyp_memcache(mc, hyp_mc_free_fn,
+ kvm_host_va, NULL);
+}
+
+int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages)
+{
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn,
+ kvm_host_pa, NULL);
+}
+
/**
* kvm_phys_addr_ioremap - map a device range to guest IPA
*
@@ -841,7 +914,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
write_lock(&kvm->mmu_lock);
ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
- &cache);
+ &cache, 0);
write_unlock(&kvm->mmu_lock);
if (ret)
break;
@@ -1091,32 +1164,26 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
* - mmap_lock protects between a VM faulting a page in and the VMM performing
* an mprotect() to add VM_MTE
*/
-static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
- unsigned long size)
+static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+ unsigned long size)
{
unsigned long i, nr_pages = size >> PAGE_SHIFT;
- struct page *page;
+ struct page *page = pfn_to_page(pfn);
if (!kvm_has_mte(kvm))
- return 0;
-
- /*
- * pfn_to_online_page() is used to reject ZONE_DEVICE pages
- * that may not support tags.
- */
- page = pfn_to_online_page(pfn);
-
- if (!page)
- return -EFAULT;
+ return;
for (i = 0; i < nr_pages; i++, page++) {
- if (!test_bit(PG_mte_tagged, &page->flags)) {
+ if (try_page_mte_tagging(page)) {
mte_clear_page_tags(page_address(page));
- set_bit(PG_mte_tagged, &page->flags);
+ set_page_mte_tagged(page);
}
}
+}
- return 0;
+static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & VM_MTE_ALLOWED;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1127,7 +1194,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool write_fault, writable, force_pte = false;
bool exec_fault;
bool device = false;
- bool shared;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1136,7 +1202,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn_t gfn;
kvm_pfn_t pfn;
bool logging_active = memslot_is_logging(memslot);
- bool use_read_lock = false;
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1147,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
- if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+ if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
kvm_err("Unexpected L2 read permission error\n");
return -EFAULT;
}
@@ -1171,14 +1236,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
- use_read_lock = (fault_status == FSC_PERM && write_fault &&
- fault_granule == PAGE_SIZE);
} else {
vma_shift = get_vma_page_shift(vma, hva);
}
- shared = (vma->vm_flags & VM_SHARED);
-
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SHIFT:
@@ -1216,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+ if (fault_status != ESR_ELx_FSC_PERM ||
+ (logging_active && write_fault)) {
ret = kvm_mmu_topup_memory_cache(memcache,
kvm_mmu_cache_min_pages(kvm));
if (ret)
@@ -1239,7 +1301,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
smp_rmb();
- pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
write_fault, &writable, NULL);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift);
@@ -1271,15 +1333,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && device)
return -ENOEXEC;
- /*
- * To reduce MMU contentions and enhance concurrency during dirty
- * logging dirty logging, only acquire read lock for permission
- * relaxation.
- */
- if (use_read_lock)
- read_lock(&kvm->mmu_lock);
- else
- write_lock(&kvm->mmu_lock);
+ read_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
@@ -1289,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* backed by a THP and thus use block mapping if possible.
*/
if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
- if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+ if (fault_status == ESR_ELx_FSC_PERM &&
+ fault_granule > PAGE_SIZE)
vma_pagesize = fault_granule;
else
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1297,14 +1352,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
&fault_ipa);
}
- if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
- /* Check the VMM hasn't introduced a new VM_SHARED VMA */
- if (!shared)
- ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
- else
+ if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
+ /* Check the VMM hasn't introduced a new disallowed VMA */
+ if (kvm_vma_mte_allowed(vma)) {
+ sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ } else {
ret = -EFAULT;
- if (ret)
goto out_unlock;
+ }
}
if (writable)
@@ -1323,15 +1378,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* permissions only if vma_pagesize equals fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
+ if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
- } else {
- WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
-
+ else
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
__pfn_to_phys(pfn), prot,
- memcache);
- }
+ memcache, KVM_PGTABLE_WALK_SHARED);
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) {
@@ -1340,10 +1392,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
}
out_unlock:
- if (use_read_lock)
- read_unlock(&kvm->mmu_lock);
- else
- write_unlock(&kvm->mmu_lock);
+ read_unlock(&kvm->mmu_lock);
kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
@@ -1394,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (fault_status == FSC_FAULT) {
+ if (fault_status == ESR_ELx_FSC_FAULT) {
/* Beyond sanitised PARange (which is the IPA limit) */
if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
kvm_inject_size_fault(vcpu);
@@ -1429,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
kvm_vcpu_get_hfar(vcpu), fault_ipa);
/* Check the stage-2 fault is trans. fault or write fault */
- if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
- fault_status != FSC_ACCESS) {
+ if (fault_status != ESR_ELx_FSC_FAULT &&
+ fault_status != ESR_ELx_FSC_PERM &&
+ fault_status != ESR_ELx_FSC_ACCESS) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1492,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
/* Userspace should not be able to register out-of-bounds IPAs */
VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
- if (fault_status == FSC_ACCESS) {
+ if (fault_status == ESR_ELx_FSC_ACCESS) {
handle_access_fault(vcpu, fault_ipa);
ret = 1;
goto out_unlock;
@@ -1526,15 +1576,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_pfn_t pfn = pte_pfn(range->pte);
- int ret;
if (!kvm->arch.mmu.pgt)
return false;
WARN_ON(range->end - range->start != 1);
- ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
- if (ret)
+ /*
+ * If the page isn't tagged, defer to user_mem_abort() for sanitising
+ * the MTE tags. The S2 pte should have been unmapped by
+ * mmu_notifier_invalidate_range_end().
+ */
+ if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
return false;
/*
@@ -1549,7 +1602,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
*/
kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
PAGE_SIZE, __pfn_to_phys(pfn),
- KVM_PGTABLE_PROT_R, NULL);
+ KVM_PGTABLE_PROT_R, NULL, 0);
return false;
}
@@ -1618,6 +1671,8 @@ static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
int kvm_mmu_init(u32 *hyp_va_bits)
{
int err;
+ u32 idmap_bits;
+ u32 kernel_bits;
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -1631,7 +1686,31 @@ int kvm_mmu_init(u32 *hyp_va_bits)
*/
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
- *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+ /*
+ * The ID map may be configured to use an extended virtual address
+ * range. This is only the case if system RAM is out of range for the
+ * currently configured page size and VA_BITS_MIN, in which case we will
+ * also need the extended virtual range for the HYP ID map, or we won't
+ * be able to enable the EL2 MMU.
+ *
+ * However, in some cases the ID map may be configured for fewer than
+ * the number of VA bits used by the regular kernel stage 1. This
+ * happens when VA_BITS=52 and the kernel image is placed in PA space
+ * below 48 bits.
+ *
+ * At EL2, there is only one TTBR register, and we can't switch between
+ * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
+ * line: we need to use the extended range with *both* our translation
+ * tables.
+ *
+ * So use the maximum of the idmap VA bits and the regular kernel stage
+ * 1 VA bits to assure that the hypervisor can both ID map its code page
+ * and map any kernel memory.
+ */
+ idmap_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+ kernel_bits = vabits_actual;
+ *hyp_va_bits = max(idmap_bits, kernel_bits);
+
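The arithmetic behind both terms is the usual TCR translation: a T0SZ field of n configures 64 - n bits of virtual address. A two-line illustration, with both input values assumed and the TCR field extraction simplified away:

#include <stdio.h>

int main(void)
{
	unsigned int idmap_t0sz = 16;		/* assumed: 48-bit ID map */
	unsigned int vabits_actual = 52;	/* assumed: 52-bit kernel VAs */

	unsigned int idmap_bits = 64 - idmap_t0sz;
	unsigned int hyp_va_bits = idmap_bits > vabits_actual ?
				   idmap_bits : vabits_actual;

	printf("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
	return 0;
}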
kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
kvm_debug("HYP VA range: %lx:%lx\n",
@@ -1740,12 +1819,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (!vma)
break;
- /*
- * VM_SHARED mappings are not allowed with MTE to avoid races
- * when updating the PG_mte_tagged page flag, see
- * sanitise_mte_tags for more details.
- */
- if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+ if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) {
ret = -EINVAL;
break;
}
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index ebecb7c045f4..cf56958b1492 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include <linux/memblock.h>
+#include <linux/mutex.h>
#include <linux/sort.h>
#include <asm/kvm_pkvm.h>
@@ -53,7 +54,7 @@ static int __init register_memblock_regions(void)
void __init kvm_hyp_reserve(void)
{
- u64 nr_pages, prev, hyp_mem_pages = 0;
+ u64 hyp_mem_pages = 0;
int ret;
if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
@@ -71,21 +72,8 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += hyp_s1_pgtable_pages();
hyp_mem_pages += host_s2_pgtable_pages();
-
- /*
- * The hyp_vmemmap needs to be backed by pages, but these pages
- * themselves need to be present in the vmemmap, so compute the number
- * of pages needed by looking for a fixed point.
- */
- nr_pages = 0;
- do {
- prev = nr_pages;
- nr_pages = hyp_mem_pages + prev;
- nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
- PAGE_SIZE);
- nr_pages += __hyp_pgtable_max_pages(nr_pages);
- } while (nr_pages != prev);
- hyp_mem_pages += nr_pages;
+ hyp_mem_pages += hyp_vm_table_pages();
+ hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once
@@ -107,3 +95,121 @@ void __init kvm_hyp_reserve(void)
kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
hyp_mem_base);
}
+
+/*
+ * Allocates and donates memory for hypervisor VM structs at EL2.
+ *
+ * Allocates space for the VM state, which includes the hyp vm as well as
+ * the hyp vcpus.
+ *
+ * Stores an opaque handle in the kvm struct for future reference.
+ *
+ * Return 0 on success, negative error code on failure.
+ */
+static int __pkvm_create_hyp_vm(struct kvm *host_kvm)
+{
+ size_t pgd_sz, hyp_vm_sz, hyp_vcpu_sz;
+ struct kvm_vcpu *host_vcpu;
+ pkvm_handle_t handle;
+ void *pgd, *hyp_vm;
+ unsigned long idx;
+ int ret;
+
+ if (host_kvm->created_vcpus < 1)
+ return -EINVAL;
+
+ pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr);
+
+ /*
+ * The PGD pages will be reclaimed using a hyp_memcache which implies
+ * page granularity. So, use alloc_pages_exact() to get individual
+ * refcounts.
+ */
+ pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT);
+ if (!pgd)
+ return -ENOMEM;
+
+ /* Allocate memory to donate to hyp for vm and vcpu pointers. */
+ hyp_vm_sz = PAGE_ALIGN(size_add(PKVM_HYP_VM_SIZE,
+ size_mul(sizeof(void *),
+ host_kvm->created_vcpus)));
+ hyp_vm = alloc_pages_exact(hyp_vm_sz, GFP_KERNEL_ACCOUNT);
+ if (!hyp_vm) {
+ ret = -ENOMEM;
+ goto free_pgd;
+ }
+
+ /* Donate the VM memory to hyp and let hyp initialize it. */
+ ret = kvm_call_hyp_nvhe(__pkvm_init_vm, host_kvm, hyp_vm, pgd);
+ if (ret < 0)
+ goto free_vm;
+
+ handle = ret;
+
+ host_kvm->arch.pkvm.handle = handle;
+
+ /* Donate memory for the vcpus at hyp and initialize it. */
+ hyp_vcpu_sz = PAGE_ALIGN(PKVM_HYP_VCPU_SIZE);
+ kvm_for_each_vcpu(idx, host_vcpu, host_kvm) {
+ void *hyp_vcpu;
+
+ /* The vcpus are expected to be indexed sequentially, starting at 0. */
+ if (WARN_ON(host_vcpu->vcpu_idx != idx)) {
+ ret = -EINVAL;
+ goto destroy_vm;
+ }
+
+ hyp_vcpu = alloc_pages_exact(hyp_vcpu_sz, GFP_KERNEL_ACCOUNT);
+ if (!hyp_vcpu) {
+ ret = -ENOMEM;
+ goto destroy_vm;
+ }
+
+ ret = kvm_call_hyp_nvhe(__pkvm_init_vcpu, handle, host_vcpu,
+ hyp_vcpu);
+ if (ret) {
+ free_pages_exact(hyp_vcpu, hyp_vcpu_sz);
+ goto destroy_vm;
+ }
+ }
+
+ return 0;
+
+destroy_vm:
+ pkvm_destroy_hyp_vm(host_kvm);
+ return ret;
+free_vm:
+ free_pages_exact(hyp_vm, hyp_vm_sz);
+free_pgd:
+ free_pages_exact(pgd, pgd_sz);
+ return ret;
+}
+
+int pkvm_create_hyp_vm(struct kvm *host_kvm)
+{
+ int ret = 0;
+
+ mutex_lock(&host_kvm->lock);
+ if (!host_kvm->arch.pkvm.handle)
+ ret = __pkvm_create_hyp_vm(host_kvm);
+ mutex_unlock(&host_kvm->lock);
+
+ return ret;
+}
+
+void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
+{
+ if (host_kvm->arch.pkvm.handle) {
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_teardown_vm,
+ host_kvm->arch.pkvm.handle));
+ }
+
+ host_kvm->arch.pkvm.handle = 0;
+ free_hyp_memcache(&host_kvm->arch.pkvm.teardown_mc);
+}
+
+int pkvm_init_host_vm(struct kvm *host_kvm)
+{
+ mutex_init(&host_kvm->lock);
+ return 0;
+}
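The hyp_vm_sz computation leans on the overflow.h helpers: size_add() and size_mul() saturate at SIZE_MAX instead of wrapping, so an absurd created_vcpus value turns into a failed allocation rather than an undersized donation. A userspace rendition of that idea using the compiler overflow builtins (a sketch, not the kernel implementations):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Saturating analogues of the kernel's size_mul()/size_add(). */
static size_t sat_mul(size_t a, size_t b)
{
	size_t r;
	return __builtin_mul_overflow(a, b, &r) ? SIZE_MAX : r;
}

static size_t sat_add(size_t a, size_t b)
{
	size_t r;
	return __builtin_add_overflow(a, b, &r) ? SIZE_MAX : r;
}

int main(void)
{
	size_t vm_size = 4096;		/* assumed stand-in for PKVM_HYP_VM_SIZE */
	size_t nr_vcpus = SIZE_MAX;	/* hostile input */

	size_t sz = sat_add(vm_size, sat_mul(sizeof(void *), nr_vcpus));
	printf("%zu => the allocation fails cleanly\n", sz);
	return 0;
}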
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 0003c7d37533..24908400e190 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -15,16 +15,25 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
+
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);
-static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
-static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
-static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
+static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
+static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+
+static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
+{
+ return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
+}
-#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
+static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
+{
+ return &vcpu->arch.pmu.pmc[cnt_idx];
+}
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
@@ -47,113 +56,46 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
}
/**
- * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
+ * kvm_pmc_is_64bit - determine if counter is 64bit
+ * @pmc: counter context
*/
-static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
+static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
- return (select_idx == ARMV8_PMU_CYCLE_IDX &&
- __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+ return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
+ kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
}
-static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
{
- struct kvm_pmu *pmu;
- struct kvm_vcpu_arch *vcpu_arch;
+ u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0);
- pmc -= pmc->idx;
- pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
- vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
- return container_of(vcpu_arch, struct kvm_vcpu, arch);
+ return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
+ (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
}
-/**
- * kvm_pmu_pmc_is_chained - determine if the pmc is chained
- * @pmc: The PMU counter pointer
- */
-static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
+static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
{
- struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
-
- return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
+ return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
+ !kvm_pmc_has_64bit_overflow(pmc));
}
-/**
- * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
- * @select_idx: The counter index
- */
-static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
-{
- return select_idx & 0x1;
-}
-
-/**
- * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
- * @pmc: The PMU counter pointer
- *
- * When a pair of PMCs are chained together we use the low counter (canonical)
- * to hold the underlying perf event.
- */
-static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
-{
- if (kvm_pmu_pmc_is_chained(pmc) &&
- kvm_pmu_idx_is_high_counter(pmc->idx))
- return pmc - 1;
-
- return pmc;
-}
-static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
+static u32 counter_index_to_reg(u64 idx)
{
- if (kvm_pmu_idx_is_high_counter(pmc->idx))
- return pmc - 1;
- else
- return pmc + 1;
+ return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}
-/**
- * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
- * @vcpu: The vcpu pointer
- * @select_idx: The counter index
- */
-static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
+static u32 counter_index_to_evtreg(u64 idx)
{
- u64 eventsel, reg;
-
- select_idx |= 0x1;
-
- if (select_idx == ARMV8_PMU_CYCLE_IDX)
- return false;
-
- reg = PMEVTYPER0_EL0 + select_idx;
- eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);
-
- return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
+ return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}
-/**
- * kvm_pmu_get_pair_counter_value - get PMU counter value
- * @vcpu: The vcpu pointer
- * @pmc: The PMU counter pointer
- */
-static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
- struct kvm_pmc *pmc)
+static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
{
- u64 counter, counter_high, reg, enabled, running;
-
- if (kvm_pmu_pmc_is_chained(pmc)) {
- pmc = kvm_pmu_get_canonical_pmc(pmc);
- reg = PMEVCNTR0_EL0 + pmc->idx;
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ u64 counter, reg, enabled, running;
- counter = __vcpu_sys_reg(vcpu, reg);
- counter_high = __vcpu_sys_reg(vcpu, reg + 1);
-
- counter = lower_32_bits(counter) | (counter_high << 32);
- } else {
- reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
- counter = __vcpu_sys_reg(vcpu, reg);
- }
+ reg = counter_index_to_reg(pmc->idx);
+ counter = __vcpu_sys_reg(vcpu, reg);
/*
* The real counter value is equal to the value of counter register plus
@@ -163,6 +105,9 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
counter += perf_event_read_value(pmc->perf_event, &enabled,
&running);
+ if (!kvm_pmc_is_64bit(pmc))
+ counter = lower_32_bits(counter);
+
return counter;
}
@@ -173,22 +118,37 @@ static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
*/
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
- u64 counter;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc = &pmu->pmc[select_idx];
-
if (!kvm_vcpu_has_pmu(vcpu))
return 0;
- counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+ return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
+}
- if (kvm_pmu_pmc_is_chained(pmc) &&
- kvm_pmu_idx_is_high_counter(select_idx))
- counter = upper_32_bits(counter);
- else if (select_idx != ARMV8_PMU_CYCLE_IDX)
- counter = lower_32_bits(counter);
+static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
+{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ u64 reg;
- return counter;
+ kvm_pmu_release_perf_event(pmc);
+
+ reg = counter_index_to_reg(pmc->idx);
+
+ if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
+ !force) {
+ /*
+ * Even with PMUv3p5, AArch32 cannot write to the top
+ * 32bit of the counters. The only possible course of
+ * action is to use PMCR.P, which will reset them to
+ * 0 (the only use of the 'force' parameter).
+ */
+ val = (__vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32)) |
+       lower_32_bits(val);
+ }
+
+ __vcpu_sys_reg(vcpu, reg) = val;
+
+ /* Recreate the perf event to reflect the updated sample_period */
+ kvm_pmu_create_perf_event(pmc);
}
/**
@@ -199,17 +159,10 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
*/
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
- u64 reg;
-
if (!kvm_vcpu_has_pmu(vcpu))
return;
- reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
- __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
-
- /* Recreate the perf event to reflect the updated sample_period */
- kvm_pmu_create_perf_event(vcpu, select_idx);
+ kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
}
/**
@@ -218,7 +171,6 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
*/
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
- pmc = kvm_pmu_get_canonical_pmc(pmc);
if (pmc->perf_event) {
perf_event_disable(pmc->perf_event);
perf_event_release_kernel(pmc->perf_event);
@@ -232,29 +184,20 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
*
* If this counter has been configured to monitor some event, release it here.
*/
-static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
+static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
{
- u64 counter, reg, val;
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ u64 reg, val;
- pmc = kvm_pmu_get_canonical_pmc(pmc);
if (!pmc->perf_event)
return;
- counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
+ val = kvm_pmu_get_pmc_value(pmc);
- if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
- reg = PMCCNTR_EL0;
- val = counter;
- } else {
- reg = PMEVCNTR0_EL0 + pmc->idx;
- val = lower_32_bits(counter);
- }
+ reg = counter_index_to_reg(pmc->idx);
__vcpu_sys_reg(vcpu, reg) = val;
- if (kvm_pmu_pmc_is_chained(pmc))
- __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
-
kvm_pmu_release_perf_event(pmc);
}
@@ -280,13 +223,10 @@ void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
int i;
for_each_set_bit(i, &mask, 32)
- kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
-
- bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
+ kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
}
/**
@@ -297,10 +237,9 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int i;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
- kvm_pmu_release_perf_event(&pmu->pmc[i]);
+ kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
irq_work_sync(&vcpu->arch.pmu.overflow_work);
}
@@ -325,9 +264,6 @@ u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
int i;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc;
-
if (!kvm_vcpu_has_pmu(vcpu))
return;
@@ -335,17 +271,16 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
return;
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ struct kvm_pmc *pmc;
+
if (!(val & BIT(i)))
continue;
- pmc = &pmu->pmc[i];
-
- /* A change in the enable state may affect the chain state */
- kvm_pmu_update_pmc_chained(vcpu, i);
- kvm_pmu_create_perf_event(vcpu, i);
+ pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
- /* At this point, pmc must be the canonical */
- if (pmc->perf_event) {
+ if (!pmc->perf_event) {
+ kvm_pmu_create_perf_event(pmc);
+ } else {
perf_event_enable(pmc->perf_event);
if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
kvm_debug("fail to enable perf event\n");
@@ -363,23 +298,18 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
int i;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc;
if (!kvm_vcpu_has_pmu(vcpu) || !val)
return;
for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ struct kvm_pmc *pmc;
+
if (!(val & BIT(i)))
continue;
- pmc = &pmu->pmc[i];
-
- /* A change in the enable state may affect the chain state */
- kvm_pmu_update_pmc_chained(vcpu, i);
- kvm_pmu_create_perf_event(vcpu, i);
+ pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
- /* At this point, pmc must be the canonical */
if (pmc->perf_event)
perf_event_disable(pmc->perf_event);
}
@@ -476,14 +406,69 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
struct kvm_vcpu *vcpu;
- struct kvm_pmu *pmu;
-
- pmu = container_of(work, struct kvm_pmu, overflow_work);
- vcpu = kvm_pmc_to_vcpu(pmu->pmc);
+ vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
kvm_vcpu_kick(vcpu);
}
+/*
+ * Perform an increment on any of the counters described in @mask,
+ * generating the overflow if required, and propagate it as a chained
+ * event if possible.
+ */
+static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
+ unsigned long mask, u32 event)
+{
+ int i;
+
+ if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ return;
+
+ /* Weed out disabled counters */
+ mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+
+ for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
+ struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+ u64 type, reg;
+
+ /* Filter on event type */
+ type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
+ type &= kvm_pmu_event_mask(vcpu->kvm);
+ if (type != event)
+ continue;
+
+ /* Increment this counter */
+ reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
+ if (!kvm_pmc_is_64bit(pmc))
+ reg = lower_32_bits(reg);
+ __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
+
+ /* No overflow? move on */
+ if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
+ continue;
+
+ /* Mark overflow */
+ __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
+
+ if (kvm_pmu_counter_can_chain(pmc))
+ kvm_pmu_counter_increment(vcpu, BIT(i + 1),
+ ARMV8_PMUV3_PERFCTR_CHAIN);
+ }
+}
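Chaining is now emulated rather than delegated to perf: when an even counter wraps, the odd counter above it is bumped through a recursive CHAIN increment. The carry logic reduces to the sketch below (32-bit wrap assumed, i.e. no 64-bit overflow configured):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t low = 0xffffffffULL;	/* even counter, about to wrap */
	uint64_t high = 5;		/* odd counter chained above it */

	low = (uint32_t)(low + 1);	/* increment with 32-bit overflow */
	if (!low) {
		/* overflow: the PMOVSSET bit is set, then CHAIN fires */
		high = (uint32_t)(high + 1);
	}
	printf("low=%llu high=%llu\n",
	       (unsigned long long)low, (unsigned long long)high);
	return 0;
}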
+
+/* Compute the sample period for a given counter value */
+static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
+{
+ u64 val;
+
+ if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
+ val = (-counter) & GENMASK(63, 0);
+ else
+ val = (-counter) & GENMASK(31, 0);
+
+ return val;
+}
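compute_period() is the usual perf trick of arming the event N counts before the wrap: the two's-complement negation of the current value, masked to the counter's overflow width. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

/* Events remaining until overflow at the given width. */
static uint64_t compute_period(uint64_t counter, int overflow_64bit)
{
	return overflow_64bit ? -counter
			      : (-counter) & 0xffffffffULL;
}

int main(void)
{
	/* 32-bit overflow: 0xfffffff0 overflows after 16 more events */
	printf("%llu\n",
	       (unsigned long long)compute_period(0xfffffff0ULL, 0));
	return 0;
}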
+
/**
* When the perf event overflows, set the overflow status and inform the vcpu.
*/
@@ -503,10 +488,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
* Reset the sample period to the architectural limit,
* i.e. the point where the counter overflows.
*/
- period = -(local64_read(&perf_event->count));
-
- if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
- period &= GENMASK(31, 0);
+ period = compute_period(pmc, local64_read(&perf_event->count));
local64_set(&perf_event->hw.period_left, 0);
perf_event->attr.sample_period = period;
@@ -514,6 +496,10 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
+ if (kvm_pmu_counter_can_chain(pmc))
+ kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
+ ARMV8_PMUV3_PERFCTR_CHAIN);
+
if (kvm_pmu_overflow_status(vcpu)) {
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
@@ -533,50 +519,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
*/
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- int i;
-
- if (!kvm_vcpu_has_pmu(vcpu))
- return;
-
- if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
- return;
-
- /* Weed out disabled counters */
- val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
-
- for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
- u64 type, reg;
-
- if (!(val & BIT(i)))
- continue;
-
- /* PMSWINC only applies to ... SW_INC! */
- type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
- type &= kvm_pmu_event_mask(vcpu->kvm);
- if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
- continue;
-
- /* increment this even SW_INC counter */
- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
- reg = lower_32_bits(reg);
- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
-
- if (reg) /* no overflow on the low part */
- continue;
-
- if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
- /* increment the high counter */
- reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
- reg = lower_32_bits(reg);
- __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
- if (!reg) /* mark overflow on the high counter */
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
- } else {
- /* mark overflow on low counter */
- __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
- }
- }
+ kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}
/**
@@ -591,6 +534,12 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
if (!kvm_vcpu_has_pmu(vcpu))
return;
+ /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
+ if (!kvm_pmu_is_3p5(vcpu))
+ val &= ~ARMV8_PMU_PMCR_LP;
+
+ __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+
if (val & ARMV8_PMU_PMCR_E) {
kvm_pmu_enable_counter_mask(vcpu,
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
@@ -606,49 +555,44 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
- kvm_pmu_set_counter_value(vcpu, i, 0);
+ kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
}
}
-static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
- (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
+ (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
}
/**
* kvm_pmu_create_perf_event - create a perf event for a counter
- * @vcpu: The vcpu pointer
- * @select_idx: The number of selected counter
+ * @pmc: Counter context
*/
-static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
+static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc;
struct perf_event *event;
struct perf_event_attr attr;
- u64 eventsel, counter, reg, data;
+ u64 eventsel, reg, data;
- /*
- * For chained counters the event type and filtering attributes are
- * obtained from the low/even counter. We also use this counter to
- * determine if the event is enabled/disabled.
- */
- pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
-
- reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
+ reg = counter_index_to_evtreg(pmc->idx);
data = __vcpu_sys_reg(vcpu, reg);
- kvm_pmu_stop_counter(vcpu, pmc);
+ kvm_pmu_stop_counter(pmc);
if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
else
eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
- /* Software increment event doesn't need to be backed by a perf event */
- if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
+ /*
+ * Neither SW increment nor chained events need to be backed
+ * by a perf event.
+ */
+ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
+ eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
return;
/*
@@ -663,37 +607,25 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
attr.type = arm_pmu->pmu.type;
attr.size = sizeof(attr);
attr.pinned = 1;
- attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
+ attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
attr.exclude_hv = 1; /* Don't count EL2 events */
attr.exclude_host = 1; /* Don't count host events */
attr.config = eventsel;
- counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
-
- if (kvm_pmu_pmc_is_chained(pmc)) {
- /**
- * The initial sample period (overflow count) of an event. For
- * chained counters we only support overflow interrupts on the
- * high counter.
- */
- attr.sample_period = (-counter) & GENMASK(63, 0);
- attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+ /*
+ * If counting with a 64bit counter, advertise it to the perf
+ * code, carefully dealing with the initial sample period
+ * which also depends on the overflow.
+ */
+ if (kvm_pmc_is_64bit(pmc))
+ attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
- event = perf_event_create_kernel_counter(&attr, -1, current,
- kvm_pmu_perf_overflow,
- pmc + 1);
- } else {
- /* The initial sample period (overflow count) of an event. */
- if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
- attr.sample_period = (-counter) & GENMASK(63, 0);
- else
- attr.sample_period = (-counter) & GENMASK(31, 0);
+ attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
- event = perf_event_create_kernel_counter(&attr, -1, current,
+ event = perf_event_create_kernel_counter(&attr, -1, current,
kvm_pmu_perf_overflow, pmc);
- }
if (IS_ERR(event)) {
pr_err_once("kvm: pmu event creation failed %ld\n",
@@ -705,41 +637,6 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
}
/**
- * kvm_pmu_update_pmc_chained - update chained bitmap
- * @vcpu: The vcpu pointer
- * @select_idx: The number of selected counter
- *
- * Update the chained bitmap based on the event type written in the
- * typer register and the enable state of the odd register.
- */
-static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
-{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
- bool new_state, old_state;
-
- old_state = kvm_pmu_pmc_is_chained(pmc);
- new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
- kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);
-
- if (old_state == new_state)
- return;
-
- canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
- kvm_pmu_stop_counter(vcpu, canonical_pmc);
- if (new_state) {
- /*
- * During promotion from !chained to chained we must ensure
- * the adjacent counter is stopped and its event destroyed
- */
- kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
- set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
- return;
- }
- clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
-}
-
-/**
* kvm_pmu_set_counter_event_type - set selected counter to monitor some event
* @vcpu: The vcpu pointer
* @data: The data guest writes to PMXEVTYPER_EL0
@@ -752,6 +649,7 @@ static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx)
{
+ struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
u64 reg, mask;
if (!kvm_vcpu_has_pmu(vcpu))
@@ -761,20 +659,19 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
mask &= ~ARMV8_PMU_EVTYPE_EVENT;
mask |= kvm_pmu_event_mask(vcpu->kvm);
- reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
- ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;
+ reg = counter_index_to_evtreg(pmc->idx);
__vcpu_sys_reg(vcpu, reg) = data & mask;
- kvm_pmu_update_pmc_chained(vcpu, select_idx);
- kvm_pmu_create_perf_event(vcpu, select_idx);
+ kvm_pmu_create_perf_event(pmc);
}
void kvm_host_pmu_init(struct arm_pmu *pmu)
{
struct arm_pmu_entry *entry;
- if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
+ pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
return;
mutex_lock(&arm_pmus_lock);
@@ -827,7 +724,7 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void)
if (event->pmu) {
pmu = to_arm_pmu(event->pmu);
- if (pmu->pmuver == 0 ||
+ if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
pmu = NULL;
}
@@ -849,6 +746,8 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
+ /* always support CHAIN */
+ val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
base = 0;
} else {
val = read_sysreg(pmceid1_el0);
@@ -1150,3 +1049,14 @@ int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -ENXIO;
}
+
+u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+ u64 tmp;
+
+ tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
+ tmp = cpuid_feature_cap_perfmon_field(tmp,
+ ID_AA64DFR0_EL1_PMUVer_SHIFT,
+ ID_AA64DFR0_EL1_PMUVer_V3P5);
+ return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
+}
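cpuid_feature_cap_perfmon_field() clamps the sanitised PMUVer field to PMUv3p5 before it is handed back as the userspace-visible limit. Ignoring the IMP_DEF special case that helper also handles, the clamp-and-extract amounts to the following (field position and values per ID_AA64DFR0_EL1, bits [11:8]):

#include <stdint.h>
#include <stdio.h>

#define PMUVER_SHIFT	8		/* ID_AA64DFR0_EL1.PMUVer */
#define PMUVER_MASK	(0xfULL << PMUVER_SHIFT)
#define PMUVER_V3P5	6		/* ID_AA64DFR0_EL1_PMUVer_V3P5 */

static uint8_t pmuver_limit(uint64_t dfr0)
{
	uint8_t ver = (dfr0 & PMUVER_MASK) >> PMUVER_SHIFT;

	if (ver > PMUVER_V3P5)		/* cap what we expose */
		ver = PMUVER_V3P5;
	return ver;
}

int main(void)
{
	uint64_t dfr0 = 7ULL << PMUVER_SHIFT;	/* host has PMUv3p7 */
	printf("exposed PMUVer: %u\n", pmuver_limit(dfr0));
	return 0;
}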
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5ae18472205a..e0267f672b8a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -395,32 +395,3 @@ int kvm_set_ipa_limit(void)
return 0;
}
-
-int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
-{
- u64 mmfr0, mmfr1;
- u32 phys_shift;
-
- if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
- return -EINVAL;
-
- phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
- if (phys_shift) {
- if (phys_shift > kvm_ipa_limit ||
- phys_shift < ARM64_MIN_PARANGE_BITS)
- return -EINVAL;
- } else {
- phys_shift = KVM_PHYS_SHIFT;
- if (phys_shift > kvm_ipa_limit) {
- pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
- current->comm);
- return -EINVAL;
- }
- }
-
- mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
- kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
-
- return 0;
-}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 608e4f25161d..c6cbfe6b854b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -639,22 +639,18 @@ static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
- u64 pmcr, val;
+ u64 pmcr;
/* No PMU available, PMCR_EL0 may UNDEF... */
if (!kvm_arm_support_pmu_v3())
return;
- pmcr = read_sysreg(pmcr_el0);
- /*
- * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
- * except PMCR.E resetting to zero.
- */
- val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
- | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+ /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
+ pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
if (!kvm_supports_32bit_el0())
- val |= ARMV8_PMU_PMCR_LC;
- __vcpu_sys_reg(vcpu, r->reg) = val;
+ pmcr |= ARMV8_PMU_PMCR_LC;
+
+ __vcpu_sys_reg(vcpu, r->reg) = pmcr;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
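After this reset, the only PMCR_EL0 state that survives is N, the number of implemented event counters, plus LC when 32-bit EL0 is unsupported. For reference, N occupies bits [15:11]; an illustrative accessor (not part of the patch):

/* Illustrative only: extract PMCR_EL0.N, bits [15:11]. */
static inline u8 pmcr_num_counters(u64 pmcr)
{
	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
}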
@@ -697,13 +693,15 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return false;
if (p->is_write) {
- /* Only update writeable bits of PMCR */
+ /*
+ * Only update writeable bits of PMCR (continuing into
+ * kvm_pmu_handle_pmcr() as well)
+ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
kvm_vcpu_pmu_restore_guest(vcpu);
} else {
@@ -1062,6 +1060,40 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
return true;
}
+static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
+{
+ if (kvm_vcpu_has_pmu(vcpu))
+ return vcpu->kvm->arch.dfr0_pmuver.imp;
+
+ return vcpu->kvm->arch.dfr0_pmuver.unimp;
+}
+
+static u8 perfmon_to_pmuver(u8 perfmon)
+{
+ switch (perfmon) {
+ case ID_DFR0_EL1_PerfMon_PMUv3:
+ return ID_AA64DFR0_EL1_PMUVer_IMP;
+ case ID_DFR0_EL1_PerfMon_IMPDEF:
+ return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
+ default:
+ /* Anything ARMv8.1+ and NI have the same value. For now. */
+ return perfmon;
+ }
+}
+
+static u8 pmuver_to_perfmon(u8 pmuver)
+{
+ switch (pmuver) {
+ case ID_AA64DFR0_EL1_PMUVer_IMP:
+ return ID_DFR0_EL1_PerfMon_PMUv3;
+ case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
+ return ID_DFR0_EL1_PerfMon_IMPDEF;
+ default:
+ /* Anything ARMv8.1+ and NI have the same value. For now. */
+ return pmuver;
+ }
+}
+
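The two helpers translate between the AArch32 ID_DFR0_EL1.PerfMon and AArch64 ID_AA64DFR0_EL1.PMUVer encodings, which only disagree for the PMUv3/IMP pair; IMPDEF and everything from v8.1 up share numeric values on both sides. An illustrative self-check (not part of the patch):

/* Illustrative: the remapped pair and two encodings that coincide. */
static void check_pmuver_mapping(void)
{
	WARN_ON(pmuver_to_perfmon(ID_AA64DFR0_EL1_PMUVer_IMP) !=
		ID_DFR0_EL1_PerfMon_PMUv3);
	WARN_ON(perfmon_to_pmuver(ID_DFR0_EL1_PerfMon_IMPDEF) !=
		ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
	WARN_ON(perfmon_to_pmuver(ID_DFR0_EL1_PerfMon_PMUv3p1) !=
		ID_AA64DFR0_EL1_PMUVer_V3P1);
}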
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
{
@@ -1111,18 +1143,17 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r
/* Limit debug to ARMv8.0 */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
- /* Limit guests to PMUv3 for ARMv8.4 */
- val = cpuid_feature_cap_perfmon_field(val,
- ID_AA64DFR0_EL1_PMUVer_SHIFT,
- kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_EL1_PMUVer_V3P4 : 0);
+ /* Set PMUver to the required version */
+ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+ vcpu_pmuver(vcpu));
/* Hide SPE from guests */
val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
break;
case SYS_ID_DFR0_EL1:
- /* Limit guests to PMUv3 for ARMv8.4 */
- val = cpuid_feature_cap_perfmon_field(val,
- ID_DFR0_EL1_PerfMon_SHIFT,
- kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_EL1_PerfMon_PMUv3p4 : 0);
+ val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
+ pmuver_to_perfmon(vcpu_pmuver(vcpu)));
break;
}
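Instead of capping the hardware field, read_id_reg() now substitutes whichever PMUVer/PerfMon value the VM has recorded. From userspace, the guest's view is observable with KVM_GET_ONE_REG; a hypothetical VMM snippet:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical: read the guest's view of ID_AA64DFR0_EL1. */
int read_id_aa64dfr0(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
		.addr = (uint64_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}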
@@ -1222,6 +1253,85 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
return 0;
}
+static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ u64 val)
+{
+ u8 pmuver, host_pmuver;
+ bool valid_pmu;
+
+ host_pmuver = kvm_arm_pmu_get_pmuver_limit();
+
+ /*
+ * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
+	 * as it doesn't promise more than what the HW gives us. We do
+	 * allow an IMPDEF PMU, though, but only when no PMU is supported
+ * (KVM backward compatibility handling).
+ */
+ pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
+ if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
+ return -EINVAL;
+
+ valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
+
+	/* Make sure the register view and the PMU support match */
+ if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+ return -EINVAL;
+
+	/* Only the PMUVer field may differ; anything else is an error */
+ val ^= read_id_reg(vcpu, rd);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+ if (val)
+ return -EINVAL;
+
+ if (valid_pmu)
+ vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
+ else
+ vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
+
+ return 0;
+}
+
+static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ u64 val)
+{
+ u8 perfmon, host_perfmon;
+ bool valid_pmu;
+
+ host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+
+ /*
+ * Allow DFR0_EL1.PerfMon to be set from userspace as long as
+ * it doesn't promise more than what the HW gives us on the
+	 * AArch64 side (as everything is emulated with that), and
+	 * that the value describes an actual PMUv3.
+ */
+ perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
+ if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
+ (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
+ return -EINVAL;
+
+ valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);
+
+	/* Make sure the register view and the PMU support match */
+ if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+ return -EINVAL;
+
+	/* Only the PerfMon field may differ; anything else is an error */
+ val ^= read_id_reg(vcpu, rd);
+ val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
+ if (val)
+ return -EINVAL;
+
+ if (valid_pmu)
+ vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
+ else
+ vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
+
+ return 0;
+}
+
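Together the two setters give userspace a tightly constrained read-modify-write interface: only the PMU version field may change, it must not exceed the host limit, and it must agree with whether the vcpu actually has a PMU. A hypothetical VMM flow, typically done before the first KVM_RUN (field position hardcoded for illustration; PMUVer is bits [11:8]):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical: advertise plain PMUv3 (PMUVer == IMP) to the guest. */
int limit_guest_pmuver(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val &= ~(0xfULL << 8);	/* clear PMUVer, bits [11:8] */
	val |= 0x1ULL << 8;	/* ID_AA64DFR0_EL1_PMUVer_IMP */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}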
/*
* cpufeature ID register user accessors
*
@@ -1443,7 +1553,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* CRm=1 */
AA32_ID_SANITISED(ID_PFR0_EL1),
AA32_ID_SANITISED(ID_PFR1_EL1),
- AA32_ID_SANITISED(ID_DFR0_EL1),
+ { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
+ .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
+ .visibility = aa32_id_visibility, },
ID_HIDDEN(ID_AFR0_EL1),
AA32_ID_SANITISED(ID_MMFR0_EL1),
AA32_ID_SANITISED(ID_MMFR1_EL1),
@@ -1483,7 +1595,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(4,7),
/* CRm=5 */
- ID_SANITISED(ID_AA64DFR0_EL1),
+ { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
+ .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
ID_SANITISED(ID_AA64DFR1_EL1),
ID_UNALLOCATED(5,2),
ID_UNALLOCATED(5,3),
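These table entries swap the stock ID_SANITISED()/AA32_ID_SANITISED() descriptors for open-coded ones that differ only in .set_user. For comparison, the generic macro in this file expands roughly to (sketch):

/* Rough expansion of the ID_SANITISED() entries being replaced. */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
}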
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 733b53055f97..2642e9ce2819 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -2187,7 +2187,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
- return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
+ return vgic_write_guest_lock(kvm, gpa, &val, ite_esz);
}
/**
@@ -2339,7 +2339,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
(itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
(dev->num_eventid_bits - 1));
val = cpu_to_le64(val);
- return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
+ return vgic_write_guest_lock(kvm, ptr, &val, dte_esz);
}
/**
@@ -2526,7 +2526,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
collection->collection_id);
val = cpu_to_le64(val);
- return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
+ return vgic_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
/*
@@ -2607,7 +2607,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
*/
val = 0;
BUG_ON(cte_esz > sizeof(val));
- ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
+ ret = vgic_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
return ret;
}
@@ -2775,6 +2775,23 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
return ret;
}
+/*
+ * kvm_arch_allow_write_without_running_vcpu - allow writing guest memory
+ * without a running VCPU when the dirty ring is enabled.
+ *
+ * A running VCPU is normally required to track dirty guest pages when the
+ * dirty ring is enabled; otherwise, the backup bitmap is used instead.
+ * While the vgic/its tables are being saved no VCPU is running, so the
+ * backup bitmap tracks the guest pages dirtied during that period.
+ */
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ return dist->table_write_in_progress;
+}
+
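This hook is the arm64 opt-in for KVM's dirty-ring backup bitmap: writes performed via vgic_write_guest_lock() without a running VCPU land in the per-slot bitmap rather than in a ring. A hypothetical VMM sequence enabling both (capability names as in the 6.2-era UAPI; treat as a sketch):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical: enable the dirty ring plus its backup bitmap. */
int enable_dirty_ring(int vm_fd, uint64_t ring_bytes)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_DIRTY_LOG_RING_ACQ_REL;
	cap.args[0] = ring_bytes;
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap))
		return -1;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}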
static int vgic_its_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 826ff6f2a4e7..684bdfaad4a9 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -339,7 +339,7 @@ retry:
if (status) {
/* clear consumed data */
val &= ~(1 << bit_nr);
- ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+ ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
return ret;
}
@@ -350,26 +350,23 @@ retry:
* The deactivation of the doorbell interrupt will trigger the
* unmapping of the associated vPE.
*/
-static void unmap_all_vpes(struct vgic_dist *dist)
+static void unmap_all_vpes(struct kvm *kvm)
{
- struct irq_desc *desc;
+ struct vgic_dist *dist = &kvm->arch.vgic;
int i;
- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
- irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
- }
+ for (i = 0; i < dist->its_vm.nr_vpes; i++)
+ free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}
-static void map_all_vpes(struct vgic_dist *dist)
+static void map_all_vpes(struct kvm *kvm)
{
- struct irq_desc *desc;
+ struct vgic_dist *dist = &kvm->arch.vgic;
int i;
- for (i = 0; i < dist->its_vm.nr_vpes; i++) {
- desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
- irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
- }
+ for (i = 0; i < dist->its_vm.nr_vpes; i++)
+ WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
+ dist->its_vm.vpes[i]->irq));
}
/**
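Merely deactivating the doorbell IRQs is no longer considered enough: the save path presumably wants a hard guarantee that the handler cannot fire and touch vPE state mid-save, so the IRQs are now freed outright and re-requested afterwards through the new vgic_v4_request_vpe_irq() helper. A simplified sketch of the handler being torn down and re-installed (details trimmed):

/* Simplified sketch of vgic_v4_doorbell_handler(); details trimmed. */
static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* A vLPI became pending while the vPE was non-resident: kick it. */
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}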
@@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
* and enabling of the doorbells have already been done.
*/
if (kvm_vgic_global_state.has_gicv4_1) {
- unmap_all_vpes(dist);
+ unmap_all_vpes(kvm);
vlpi_avail = true;
}
@@ -437,14 +434,14 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
else
val &= ~(1 << bit_nr);
- ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
+ ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
goto out;
}
out:
if (vlpi_avail)
- map_all_vpes(dist);
+ map_all_vpes(kvm);
return ret;
}
@@ -616,6 +613,8 @@ static const struct midr_range broken_seis[] = {
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
{},
};
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index ad06ba6c9b00..a413718be92b 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
*val = !!(*ptr & mask);
}
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
+{
+ return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
+}
+
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
@@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm)
irq_flags &= ~IRQ_NOAUTOEN;
irq_set_status_flags(irq, irq_flags);
- ret = request_irq(irq, vgic_v4_doorbell_handler,
- 0, "vcpu", vcpu);
+ ret = vgic_v4_request_vpe_irq(vcpu, irq);
if (ret) {
kvm_err("failed to allocate vcpu IRQ%d\n", irq);
/*
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0c8da72953f0..7f7f3c5ed85a 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -6,6 +6,7 @@
#define __KVM_ARM_VGIC_NEW_H__
#include <linux/irqchip/arm-gic-common.h>
+#include <asm/kvm_mmu.h>
#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
#define IMPLEMENTER_ARM 0x43b
@@ -131,6 +132,19 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
return vgic_irq_get_lr_count(irq) > 1;
}
+static inline int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa,
+ const void *data, unsigned long len)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret;
+
+ dist->table_write_in_progress = true;
+ ret = kvm_write_guest_lock(kvm, gpa, data, len);
+ dist->table_write_in_progress = false;
+
+ return ret;
+}
+
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
@@ -331,5 +345,6 @@ int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
+int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
#endif
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 24913271e898..8dd5a8fe64b4 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -21,9 +21,12 @@ void copy_highpage(struct page *to, struct page *from)
copy_page(kto, kfrom);
- if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
- set_bit(PG_mte_tagged, &to->flags);
+ if (system_supports_mte() && page_mte_tagged(from)) {
+ page_kasan_tag_reset(to);
+ /* It's a new page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(to));
mte_copy_page_tags(kto, kfrom);
+ set_page_mte_tagged(to);
}
}
EXPORT_SYMBOL(copy_highpage);
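copy_highpage() now goes through the new MTE page-flag helpers, which pair the PG_mte_tagged bit with memory barriers, and a separate lock bit so that racing taggers serialise. A rough sketch of the helpers assumed here (see asm/mte.h; the lock-bit handling inside try_page_mte_tagging() is elided):

static inline void set_page_mte_tagged(struct page *page)
{
	/* Ensure the tags are visible before the flag is. */
	smp_wmb();
	set_bit(PG_mte_tagged, &page->flags);
}

static inline bool page_mte_tagged(struct page *page)
{
	bool ret = test_bit(PG_mte_tagged, &page->flags);

	/* Pairs with the barrier in set_page_mte_tagged(). */
	if (ret)
		smp_rmb();
	return ret;
}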
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 74f76514a48d..596f46dabe4e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,6 +30,7 @@
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
+#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
@@ -397,6 +398,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr,
msg = "paging request";
}
+ if (efi_runtime_fixup_exception(regs, msg))
+ return;
+
die_kernel_fault(msg, addr, esr, regs);
}
@@ -939,6 +943,8 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
void tag_clear_highpage(struct page *page)
{
+ /* Newly allocated page, shouldn't have been tagged yet */
+ WARN_ON_ONCE(!try_page_mte_tagging(page));
mte_zero_clear_page_tags(page_address(page));
- set_bit(PG_mte_tagged, &page->flags);
+ set_page_mte_tagged(page);
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index cd8d96e1fa1a..95364e8bdc19 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -562,7 +562,7 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
- if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
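The one-word change here is a real bug fix: CONFIG_ARM64_WORKAROUND_2645198 is not a Kconfig symbol (the build option is CONFIG_ARM64_ERRATUM_2645198; ARM64_WORKAROUND_2645198 is the cpucap), and IS_ENABLED() silently yields 0 for undefined symbols, so the erratum handling was being compiled out. Illustrative:

#include <linux/kconfig.h>

/* IS_ENABLED() is 1 only for symbols Kconfig defines (=y or =m); an
 * undefined (for example, misspelled) symbol evaluates to 0 with no
 * compiler diagnostic.
 */
static inline bool erratum_2645198_built_in(void)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198);
}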
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2368e4daa23d..d77c9f56b7b4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -814,53 +814,6 @@ void __init paging_init(void)
create_idmap();
}
-/*
- * Check whether a kernel address is valid (derived from arch/x86/).
- */
-int kern_addr_valid(unsigned long addr)
-{
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp, pud;
- pmd_t *pmdp, pmd;
- pte_t *ptep, pte;
-
- addr = arch_kasan_reset_tag(addr);
- if ((((long)addr) >> VA_BITS) != -1UL)
- return 0;
-
- pgdp = pgd_offset_k(addr);
- if (pgd_none(READ_ONCE(*pgdp)))
- return 0;
-
- p4dp = p4d_offset(pgdp, addr);
- if (p4d_none(READ_ONCE(*p4dp)))
- return 0;
-
- pudp = pud_offset(p4dp, addr);
- pud = READ_ONCE(*pudp);
- if (pud_none(pud))
- return 0;
-
- if (pud_sect(pud))
- return pfn_valid(pud_pfn(pud));
-
- pmdp = pmd_offset(pudp, addr);
- pmd = READ_ONCE(*pmdp);
- if (pmd_none(pmd))
- return 0;
-
- if (pmd_sect(pmd))
- return pfn_valid(pmd_pfn(pmd));
-
- ptep = pte_offset_kernel(pmdp, addr);
- pte = READ_ONCE(*ptep);
- if (pte_none(pte))
- return 0;
-
- return pfn_valid(pte_pfn(pte));
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
struct vmem_altmap *altmap)
@@ -1184,53 +1137,28 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
+void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
+ unsigned long addr, unsigned long next)
+{
+ pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ unsigned long addr, unsigned long next)
+{
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
+ return 1;
+}
+
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
- unsigned long addr = start;
- unsigned long next;
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
-
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
return vmemmap_populate_basepages(start, end, node, altmap);
-
- do {
- next = pmd_addr_end(addr, end);
-
- pgdp = vmemmap_pgd_populate(addr, node);
- if (!pgdp)
- return -ENOMEM;
-
- p4dp = vmemmap_p4d_populate(pgdp, addr, node);
- if (!p4dp)
- return -ENOMEM;
-
- pudp = vmemmap_pud_populate(p4dp, addr, node);
- if (!pudp)
- return -ENOMEM;
-
- pmdp = pmd_offset(pudp, addr);
- if (pmd_none(READ_ONCE(*pmdp))) {
- void *p = NULL;
-
- p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
- if (!p) {
- if (vmemmap_populate_basepages(addr, next, node, altmap))
- return -ENOMEM;
- continue;
- }
-
- pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
- } else
- vmemmap_verify((pte_t *)pmdp, node, addr, next);
- } while (addr = next, addr != end);
-
- return 0;
+ else
+ return vmemmap_populate_hugepages(start, end, node, altmap);
}
#ifdef CONFIG_MEMORY_HOTPLUG
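The PMD-level population loop moves to the generic vmemmap_populate_hugepages(), with the two hooks above supplying the arch-specific pieces. Roughly how the generic loop uses them (a sketch of the mm/sparse-vmemmap.c helper; error paths trimmed):

int __meminit vmemmap_populate_hugepages(unsigned long start,
					 unsigned long end, int node,
					 struct vmem_altmap *altmap)
{
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd;

		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		p4d = pgd ? vmemmap_p4d_populate(pgd, addr, node) : NULL;
		pud = p4d ? vmemmap_pud_populate(p4d, addr, node) : NULL;
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(READ_ONCE(*pmd))) {
			void *p = vmemmap_alloc_block_buf(PMD_SIZE, node,
							  altmap);

			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;

		/* Fall back to base pages for this PMD-sized range. */
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}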
@@ -1705,7 +1633,7 @@ early_initcall(prevent_bootmem_remove_init);
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
- if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index bed803d8e158..cd508ba80ab1 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -24,7 +24,7 @@ int mte_save_tags(struct page *page)
{
void *tag_storage, *ret;
- if (!test_bit(PG_mte_tagged, &page->flags))
+ if (!page_mte_tagged(page))
return 0;
tag_storage = mte_allocate_tag_storage();
@@ -46,21 +46,17 @@ int mte_save_tags(struct page *page)
return 0;
}
-bool mte_restore_tags(swp_entry_t entry, struct page *page)
+void mte_restore_tags(swp_entry_t entry, struct page *page)
{
void *tags = xa_load(&mte_pages, entry.val);
if (!tags)
- return false;
+ return;
- /*
- * Test PG_mte_tagged again in case it was racing with another
- * set_pte_at().
- */
- if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+ if (try_page_mte_tagging(page)) {
mte_restore_page_tags(page_address(page), tags);
-
- return true;
+ set_page_mte_tagged(page);
+ }
}
void mte_invalidate_tags(int type, pgoff_t offset)
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 5922178d7a06..79dd201c59d8 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -202,8 +202,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
/*
* This function is used to determine if a linear map page has been marked as
- * not-valid. Walk the page table and check the PTE_VALID bit. This is based
- * on kern_addr_valid(), which almost does what we need.
+ * not-valid. Walk the page table and check the PTE_VALID bit.
*
* Because this is only called on the kernel linear map, p?d_sect() implies
* p?d_present(). When debug_pagealloc is enabled, sections mappings are
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 30f76178608b..62f805f427b7 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1649,13 +1649,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
struct bpf_prog *p = l->link.prog;
int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
- if (p->aux->sleepable) {
- enter_prog = (u64)__bpf_prog_enter_sleepable;
- exit_prog = (u64)__bpf_prog_exit_sleepable;
- } else {
- enter_prog = (u64)__bpf_prog_enter;
- exit_prog = (u64)__bpf_prog_exit;
- }
+ enter_prog = (u64)bpf_trampoline_enter(p);
+ exit_prog = (u64)bpf_trampoline_exit(p);
if (l->cookie == 0) {
/* if cookie is zero, one instruction is enough to store it */