Diffstat (limited to 'arch/arm64')
38 files changed, 359 insertions, 185 deletions
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 55bc8546d9c7..130569f90c54 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -82,8 +82,8 @@ endif
 # compiler to generate them and consequently to break the single image contract
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
-ifneq ($(CONFIG_AS_HAS_ARMV8_4), y)
-branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
+ifeq ($(CONFIG_AS_HAS_PAC), y)
+asm-arch := armv8.3-a
 endif
 endif
 
@@ -91,7 +91,12 @@ KBUILD_CFLAGS += $(branch-prot-flags-y)
 
 ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 # make sure to pass the newest target architecture to -march.
-KBUILD_CFLAGS	+= -Wa,-march=armv8.4-a
+asm-arch := armv8.4-a
+endif
+
+ifdef asm-arch
+KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
+		   -DARM64_ASM_ARCH='"$(asm-arch)"'
 endif
 
 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
@@ -165,6 +170,8 @@ zinstall install:
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+	$(if $(CONFIG_COMPAT_VDSO), \
+		$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
 
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
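The two -march bumps above now funnel through a single asm-arch variable, which is also exported to C code as the string macro ARM64_ASM_ARCH. A condensed sketch of how that string gets consumed (the real definitions land in the compiler.h and tlbflush.h hunks further down; the example function here is illustrative, not part of the patch):

    /* sketch: ARM64_ASM_ARCH arrives via -DARM64_ASM_ARCH='"armv8.4-a"' */
    #ifdef ARM64_ASM_ARCH
    #define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
    #else
    #define ARM64_ASM_PREAMBLE
    #endif

    /* Leading with the preamble lets a non-integrated assembler accept
     * mnemonics newer than its default -march for just this statement. */
    static inline void flush_tlb_all_example(void)
    {
    	asm volatile(ARM64_ASM_PREAMBLE
    		     "tlbi vmalle1is\n"
    		     "dsb ish\n"
    		     "isb" : : : "memory");
    }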
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 9edfae5944f7..24ef18fe77df 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -417,10 +417,10 @@
 		ti,intr-trigger-type = <1>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <56>;
-		ti,sci-rm-range-girq = <0x1>;
+		ti,sci-dev-id = <100>;
+		ti,interrupt-ranges = <0 392 32>;
 	};
 
 	main_navss {
@@ -438,10 +438,11 @@
 		ti,intr-trigger-type = <4>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <56>;
-		ti,sci-rm-range-girq = <0x0>, <0x2>;
+		ti,sci-dev-id = <182>;
+		ti,interrupt-ranges = <0 64 64>,
+				      <64 448 64>;
 	};
 
 	inta_main_udmass: interrupt-controller@33d00000 {
@@ -452,8 +453,7 @@
 		msi-controller;
 		ti,sci = <&dmsc>;
 		ti,sci-dev-id = <179>;
-		ti,sci-rm-range-vint = <0x0>;
-		ti,sci-rm-range-global-event = <0x1>;
+		ti,interrupt-ranges = <0 0 256>;
 	};
 
 	secure_proxy_main: mailbox@32c00000 {
@@ -589,7 +589,7 @@
 		      <0x0 0x33000000 0x0 0x40000>;
 		reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
 		ti,num-rings = <818>;
-		ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+		ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
 		ti,dma-ring-reset-quirk;
 		ti,sci = <&dmsc>;
 		ti,sci-dev-id = <187>;
@@ -609,11 +609,11 @@
 		ti,sci-dev-id = <188>;
 		ti,ringacc = <&ringacc>;
 
-		ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
-					<0x2>; /* TX_CHAN */
-		ti,sci-rm-range-rchan = <0x4>, /* RX_HCHAN */
-					<0x5>; /* RX_CHAN */
-		ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
+		ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+					<0xd>; /* TX_CHAN */
+		ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+					<0xa>; /* RX_CHAN */
+		ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
 	};
 
 	cpts@310d0000 {
@@ -622,7 +622,7 @@
 		reg-names = "cpts";
 		clocks = <&main_cpts_mux>;
 		clock-names = "cpts";
-		interrupts-extended = <&intr_main_navss 163 0>;
+		interrupts-extended = <&intr_main_navss 391>;
 		interrupt-names = "cpts";
 		ti,cpts-periodic-outputs = <6>;
 		ti,cpts-ext-ts-inputs = <8>;
@@ -645,8 +645,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&intr_main_gpio>;
-		interrupts = <57 256>, <57 257>, <57 258>, <57 259>, <57 260>,
-			     <57 261>;
+		interrupts = <192>, <193>, <194>, <195>, <196>, <197>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <96>;
@@ -661,8 +660,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&intr_main_gpio>;
-		interrupts = <58 256>, <58 257>, <58 258>, <58 259>, <58 260>,
-			     <58 261>;
+		interrupts = <200>, <201>, <202>, <203>, <204>, <205>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <90>;
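Across these K3 device-tree updates the ti,sci-dst-id / ti,sci-rm-range-girq pair gives way to ti,interrupt-ranges: each <local-base parent-base length> triple declares which window of GIC SPIs the interrupt router's outputs occupy (so <0 392 32> above claims SPIs 392-423 for router outputs 0-31), while client nodes such as the GPIO banks now pass a single cell naming their router input line. A decode helper for the output-to-SPI arithmetic, as a sketch (hypothetical function, not code from the irq-ti-sci-intr driver that implements this binding):

    #include <linux/types.h>

    struct ti_intr_range { u32 local_base, parent_base, length; };

    /* Return the parent (GIC SPI) number for a router output, or -1. */
    static int ti_intr_xlate(const struct ti_intr_range *r, int nr, u32 out)
    {
    	int i;

    	for (i = 0; i < nr; i++)
    		if (out >= r[i].local_base &&
    		    out < r[i].local_base + r[i].length)
    			return r[i].parent_base + (out - r[i].local_base);
    	return -1;
    }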
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 8c1abcfe0860..51ca4b4d4c21 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -134,7 +134,7 @@
 		      <0x0 0x2a500000 0x0 0x40000>;
 		reg-names = "rt", "fifos", "proxy_gcfg", "proxy_target";
 		ti,num-rings = <286>;
-		ti,sci-rm-range-gp-rings = <0x2>; /* GP ring range */
+		ti,sci-rm-range-gp-rings = <0x1>; /* GP ring range */
 		ti,dma-ring-reset-quirk;
 		ti,sci = <&dmsc>;
 		ti,sci-dev-id = <195>;
@@ -154,11 +154,11 @@
 		ti,sci-dev-id = <194>;
 		ti,ringacc = <&mcu_ringacc>;
 
-		ti,sci-rm-range-tchan = <0x1>, /* TX_HCHAN */
-					<0x2>; /* TX_CHAN */
-		ti,sci-rm-range-rchan = <0x3>, /* RX_HCHAN */
-					<0x4>; /* RX_CHAN */
-		ti,sci-rm-range-rflow = <0x5>; /* GP RFLOW */
+		ti,sci-rm-range-tchan = <0xf>, /* TX_HCHAN */
+					<0xd>; /* TX_CHAN */
+		ti,sci-rm-range-rchan = <0xb>, /* RX_HCHAN */
+					<0xa>; /* RX_CHAN */
+		ti,sci-rm-range-rflow = <0x0>; /* GP RFLOW */
 	};
 };
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index 5f55b9e82cf1..a1ffe88d9664 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -74,10 +74,10 @@
 		ti,intr-trigger-type = <1>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <56>;
-		ti,sci-rm-range-girq = <0x4>;
+		ti,sci-dev-id = <156>;
+		ti,interrupt-ranges = <0 712 16>;
 	};
 
 	wkup_gpio0: wkup_gpio0@42110000 {
@@ -86,7 +86,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&intr_wkup_gpio>;
-		interrupts = <59 128>, <59 129>, <59 130>, <59 131>;
+		interrupts = <60>, <61>, <62>, <63>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <56>;
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index 611e66207010..b8a8a0fcb8af 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -384,7 +384,7 @@
 };
 
 &mailbox0_cluster0 {
-	interrupts = <164 0>;
+	interrupts = <436>;
 
 	mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
 		ti,mbox-tx = <1 0 0>;
@@ -393,7 +393,7 @@
 };
 
 &mailbox0_cluster1 {
-	interrupts = <165 0>;
+	interrupts = <432>;
 
 	mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
 		ti,mbox-tx = <1 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
index 8bc1e6ecc50e..e8fc01d97ada 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721e-common-proc-board.dts
@@ -287,7 +287,7 @@
 };
 
 &mailbox0_cluster0 {
-	interrupts = <214 0>;
+	interrupts = <436>;
 
 	mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
 		ti,mbox-rx = <0 0 0>;
@@ -301,7 +301,7 @@
 };
 
 &mailbox0_cluster1 {
-	interrupts = <215 0>;
+	interrupts = <432>;
 
 	mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
 		ti,mbox-rx = <0 0 0>;
@@ -315,7 +315,7 @@
 };
 
 &mailbox0_cluster2 {
-	interrupts = <216 0>;
+	interrupts = <428>;
 
 	mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
 		ti,mbox-rx = <0 0 0>;
@@ -329,7 +329,7 @@
 };
 
 &mailbox0_cluster3 {
-	interrupts = <217 0>;
+	interrupts = <424>;
 
 	mbox_c66_0: mbox-c66-0 {
 		ti,mbox-rx = <0 0 0>;
@@ -343,7 +343,7 @@
 };
 
 &mailbox0_cluster4 {
-	interrupts = <218 0>;
+	interrupts = <420>;
 
 	mbox_c71_0: mbox-c71-0 {
 		ti,mbox-rx = <0 0 0>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index d14060207f00..12ceea9b3c9a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -80,10 +80,10 @@
 		ti,intr-trigger-type = <1>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <14>;
-		ti,sci-rm-range-girq = <0x1>;
+		ti,sci-dev-id = <131>;
+		ti,interrupt-ranges = <8 392 56>;
 	};
 
 	main_navss {
@@ -101,10 +101,12 @@
 		ti,intr-trigger-type = <4>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <14>;
-		ti,sci-rm-range-girq = <0>, <2>;
+		ti,sci-dev-id = <213>;
+		ti,interrupt-ranges = <0 64 64>,
+				      <64 448 64>,
+				      <128 672 64>;
 	};
 
 	main_udmass_inta: interrupt-controller@33d00000 {
@@ -115,8 +117,7 @@
 		msi-controller;
 		ti,sci = <&dmsc>;
 		ti,sci-dev-id = <209>;
-		ti,sci-rm-range-vint = <0xa>;
-		ti,sci-rm-range-global-event = <0xd>;
+		ti,interrupt-ranges = <0 0 256>;
 	};
 
 	secure_proxy_main: mailbox@32c00000 {
@@ -296,7 +297,7 @@
 		reg-names = "cpts";
 		clocks = <&k3_clks 201 1>;
 		clock-names = "cpts";
-		interrupts-extended = <&main_navss_intr 201 0>;
+		interrupts-extended = <&main_navss_intr 391>;
 		interrupt-names = "cpts";
 		ti,cpts-periodic-outputs = <6>;
 		ti,cpts-ext-ts-inputs = <8>;
@@ -688,8 +689,8 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <105 0>, <105 1>, <105 2>, <105 3>,
-			     <105 4>, <105 5>, <105 6>, <105 7>;
+		interrupts = <256>, <257>, <258>, <259>,
+			     <260>, <261>, <262>, <263>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <128>;
@@ -705,7 +706,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <106 0>, <106 1>, <106 2>;
+		interrupts = <288>, <289>, <290>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <36>;
@@ -721,8 +722,8 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <107 0>, <107 1>, <107 2>, <107 3>,
-			     <107 4>, <107 5>, <107 6>, <107 7>;
+		interrupts = <264>, <265>, <266>, <267>,
+			     <268>, <269>, <270>, <271>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <128>;
@@ -738,7 +739,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <108 0>, <108 1>, <108 2>;
+		interrupts = <292>, <293>, <294>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <36>;
@@ -754,8 +755,8 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <109 0>, <109 1>, <109 2>, <109 3>,
-			     <109 4>, <109 5>, <109 6>, <109 7>;
+		interrupts = <272>, <273>, <274>, <275>,
+			     <276>, <277>, <278>, <279>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <128>;
@@ -771,7 +772,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <110 0>, <110 1>, <110 2>;
+		interrupts = <296>, <297>, <298>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <36>;
@@ -787,8 +788,8 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <111 0>, <111 1>, <111 2>, <111 3>,
-			     <111 4>, <111 5>, <111 6>, <111 7>;
+		interrupts = <280>, <281>, <282>, <283>,
+			     <284>, <285>, <286>, <287>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <128>;
@@ -804,7 +805,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&main_gpio_intr>;
-		interrupts = <112 0>, <112 1>, <112 2>;
+		interrupts = <300>, <301>, <302>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <36>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 30a735bcd0c8..c4a48e8d420a 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -101,10 +101,10 @@
 		ti,intr-trigger-type = <1>;
 		interrupt-controller;
 		interrupt-parent = <&gic500>;
-		#interrupt-cells = <2>;
+		#interrupt-cells = <1>;
 		ti,sci = <&dmsc>;
-		ti,sci-dst-id = <14>;
-		ti,sci-rm-range-girq = <0x5>;
+		ti,sci-dev-id = <137>;
+		ti,interrupt-ranges = <16 960 16>;
 	};
 
 	wkup_gpio0: gpio@42110000 {
@@ -113,8 +113,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&wkup_gpio_intr>;
-		interrupts = <113 0>, <113 1>, <113 2>,
-			     <113 3>, <113 4>, <113 5>;
+		interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <84>;
@@ -130,8 +129,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		interrupt-parent = <&wkup_gpio_intr>;
-		interrupts = <114 0>, <114 1>, <114 2>,
-			     <114 3>, <114 4>, <114 5>;
+		interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
 		ti,ngpio = <84>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9f39c8049a84..22b02fc47138 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -211,6 +211,7 @@ CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIE_RCAR_HOST=y
+CONFIG_PCIE_RCAR_EP=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCIE_ALTERA=y
@@ -227,6 +228,9 @@ CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCIE_KIRIN=y
 CONFIG_PCIE_HISI_STB=y
 CONFIG_PCIE_TEGRA194_HOST=m
+CONFIG_PCI_ENDPOINT=y
+CONFIG_PCI_ENDPOINT_CONFIGFS=y
+CONFIG_PCI_EPF_TEST=m
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_FW_LOADER_USER_HELPER=y
@@ -234,6 +238,7 @@ CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
 CONFIG_HISILICON_LPC=y
 CONFIG_SIMPLE_PM_BUS=y
 CONFIG_FSL_MC_BUS=y
+CONFIG_TEGRA_ACONNECT=m
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
@@ -257,6 +262,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_NVME=m
 CONFIG_SRAM=y
+CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_EEPROM_AT24=m
 CONFIG_EEPROM_AT25=m
 CONFIG_UACCE=m
@@ -456,6 +462,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_RPCIF=m
 CONFIG_SPI_QCOM_QSPI=m
 CONFIG_SPI_QUP=y
 CONFIG_SPI_QCOM_GENI=m
@@ -687,6 +694,12 @@ CONFIG_SND_SOC_RK3399_GRU_SOUND=m
 CONFIG_SND_SOC_SAMSUNG=y
 CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SUN4I_SPDIF=m
+CONFIG_SND_SOC_TEGRA=m
+CONFIG_SND_SOC_TEGRA210_AHUB=m
+CONFIG_SND_SOC_TEGRA210_DMIC=m
+CONFIG_SND_SOC_TEGRA210_I2S=m
+CONFIG_SND_SOC_TEGRA186_DSPK=m
+CONFIG_SND_SOC_TEGRA210_ADMAIF=m
 CONFIG_SND_SOC_AK4613=m
 CONFIG_SND_SOC_ES7134=m
 CONFIG_SND_SOC_ES7241=m
@@ -803,6 +816,7 @@ CONFIG_MV_XOR_V2=y
 CONFIG_OWL_DMA=y
 CONFIG_PL330_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_TEGRA210_ADMA=m
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_QCOM_HIDMA_MGMT=y
 CONFIG_QCOM_HIDMA=y
@@ -920,6 +934,7 @@ CONFIG_ARCH_K3_J721E_SOC=y
 CONFIG_TI_SCI_PM_DOMAINS=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_RENESAS_RPCIF=m
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_MAX9611=m
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 51a7ce87cdfe..6fb2e6bcc392 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -2,6 +2,12 @@
 #ifndef __ASM_COMPILER_H
 #define __ASM_COMPILER_H
 
+#ifdef ARM64_ASM_ARCH
+#define ARM64_ASM_PREAMBLE ".arch " ARM64_ASM_ARCH "\n"
+#else
+#define ARM64_ASM_PREAMBLE
+#endif
+
 /*
  * The EL0/EL1 pointer bits used by a pointer authentication code.
  * This is dependent on TBI0/TBI1 being enabled, or bits 63:56 would also apply.
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa4b6521ef14..ff328e5bbb75 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -95,6 +95,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return res;
 }
 
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 51c1d9918999..1da8e3dc4455 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -71,11 +71,12 @@
  * IMO:		Override CPSR.I and enable signaling with VI
  * FMO:		Override CPSR.F and enable signaling with VF
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ * PTW:		Take a stage2 fault if a stage1 walk steps in device memory
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
 			 HCR_BSU_IS | HCR_FB | HCR_TAC | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
-			 HCR_FMO | HCR_IMO)
+			 HCR_FMO | HCR_IMO | HCR_PTW )
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
 #define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index fb1a922b31ba..6f98fbd0ac81 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -169,6 +169,34 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 		*__hyp_this_cpu_ptr(sym);				\
 	 })
 
+#define __KVM_EXTABLE(from, to)						\
+	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
+	"	.align		3\n"					\
+	"	.long		(" #from " - .), (" #to " - .)\n"	\
+	"	.popsection\n"
+
+
+#define __kvm_at(at_op, addr)						\
+( { 									\
+	int __kvm_at_err = 0;						\
+	u64 spsr, elr;							\
+	asm volatile(							\
+	"	mrs	%1, spsr_el2\n"					\
+	"	mrs	%2, elr_el2\n"					\
+	"1:	at	"at_op", %3\n"					\
+	"	isb\n"							\
+	"	b	9f\n"						\
+	"2:	msr	spsr_el2, %1\n"					\
+	"	msr	elr_el2, %2\n"					\
+	"	mov	%w0, %4\n"					\
+	"9:\n"								\
+	__KVM_EXTABLE(1b, 2b)						\
+	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
+	: "r" (addr), "i" (-EFAULT));					\
+	__kvm_at_err;							\
+} )
+
+
 #else /* __ASSEMBLY__ */
 
 .macro hyp_adr_this_cpu reg, sym, tmp
@@ -193,6 +221,21 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 .endm
 
+/*
+ * KVM extable for unexpected exceptions.
+ * In the same format _asm_extable, but output to a different section so that
+ * it can be mapped to EL2. The KVM version is not sorted. The caller must
+ * ensure:
+ * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
+ * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
+ */
+.macro	_kvm_extable, from, to
+	.pushsection	__kvm_ex_table, "a"
+	.align		3
+	.long	(\from - .), (\to - .)
+	.popsection
+.endm
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
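__KVM_EXTABLE and _kvm_extable store each entry as a pair of 32-bit offsets relative to the entry itself (the "- ." in the .long directives), so the table stays valid at whatever EL2 address the hyp text happens to be mapped. Decoding is plain pointer-plus-delta, exactly as the lookup added in the hyp/switch.h hunk later in this diff performs; isolated here as a sketch:

    /* Matches the layout of struct exception_table_entry (asm/extable.h);
     * the helper names are illustrative. */
    struct kvm_ex_entry { int insn, fixup; };

    static unsigned long ex_insn_addr(const struct kvm_ex_entry *e)
    {
    	return (unsigned long)&e->insn + e->insn;	/* faulting insn */
    }

    static unsigned long ex_fixup_addr(const struct kvm_ex_entry *e)
    {
    	return (unsigned long)&e->fixup + e->fixup;	/* recovery label */
    }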
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 65568b23868a..e52c927aade5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -473,7 +473,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end);
+			unsigned long start, unsigned long end, unsigned flags);
 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index d493174415db..cc3f5a33ff9c 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,14 +28,16 @@
  * not. The macros handles invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				\
+#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			\
+			       "tlbi " #op "\n"				\
 		   ALTERNATIVE("nop\n			nop",		\
 			       "dsb ish\n		tlbi " #op,	\
 			       ARM64_WORKAROUND_REPEAT_TLBI,		\
 			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
 			    : : )
 
-#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			\
+#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			\
+			       "tlbi " #op ", %0\n"			\
 		   ALTERNATIVE("nop\n			nop",		\
 			       "dsb ish\n		tlbi " #op ", %0",	\
 			       ARM64_WORKAROUND_REPEAT_TLBI,		\
 			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 455966401102..a85174d05473 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -322,7 +322,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 			 */
 			if (memblock_is_map_memory(phys))
 				return (void __iomem *)__phys_to_virt(phys);
-			/* fall through */
+			fallthrough;
 
 		default:
 			if (region->attribute & EFI_MEMORY_WB)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6bd1d3ad037a..c332d49780dc 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -910,6 +910,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM erratum 1418040",
 		.capability = ARM64_WORKAROUND_1418040,
 		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
+			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
 	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a389b999482e..6424584be01e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -686,7 +686,7 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
 	case FTR_HIGHER_OR_ZERO_SAFE:
 		if (!cur || !new)
 			break;
-		/* Fallthrough */
+		fallthrough;
 	case FTR_HIGHER_SAFE:
 		ret = new > cur ? new : cur;
 		break;
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 393c6fb1f1cb..d0076c2159e6 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -327,7 +327,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 		set_bit(ICACHEF_VPIPT, &__icache_flags);
 		break;
 	default:
-		/* Fallthrough */
 	case ICACHE_POLICY_VIPT:
 		/* Assume aliasing */
 		set_bit(ICACHEF_ALIASING, &__icache_flags);
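Much of the remaining churn in this pull is mechanical: /* Fall through */ comments become the fallthrough pseudo-keyword, so -Wimplicit-fallthrough is satisfied by the compiler itself rather than by comment parsing. Roughly how the keyword is defined and used (simplified from include/linux/compiler_attributes.h; the guard shown is an approximation of the real __has_attribute check):

    #if defined(__has_attribute) && __has_attribute(__fallthrough__)
    # define fallthrough __attribute__((__fallthrough__))
    #else
    # define fallthrough do {} while (0) /* fallthrough */
    #endif

    int classify(int n)
    {
    	switch (n) {
    	case 0:
    		n++;
    		fallthrough;	/* deliberate: 0 is then handled like 1 */
    	case 1:
    		return n + 1;
    	default:
    		return 0;
    	}
    }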
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 2646178c8329..55af8b504b65 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -170,19 +170,6 @@ alternative_cb_end
 	stp	x28, x29, [sp, #16 * 14]
 
 	.if	\el == 0
-	.if	\regsize == 32
-	/*
-	 * If we're returning from a 32-bit task on a system affected by
-	 * 1418040 then re-enable userspace access to the virtual counter.
-	 */
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	orr	x0, x0, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-	.endif
 	clear_gp_regs
 	mrs	x21, sp_el0
 	ldr_this_cpu	tsk, __entry_task, x20
@@ -294,14 +281,6 @@ alternative_else_nop_endif
 	tst	x22, #PSR_MODE32_BIT		// native task?
 	b.eq	3f
 
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if ARM64_WORKAROUND_1418040
-	mrs	x0, cntkctl_el1
-	bic	x0, x0, #2		// ARCH_TIMER_USR_VCT_ACCESS_EN
-	msr	cntkctl_el1, x0
-alternative_else_nop_endif
-#endif
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index af234a1e08b7..712e97c03e54 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -257,7 +257,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
 		 * level.
 		 */
 		enable_debug_monitors(dbg_el);
-		/* Fall through */
+		fallthrough;
 	case HW_BREAKPOINT_RESTORE:
 		/* Setup the address register. */
 		write_wb_reg(val_reg, i, info->address);
@@ -541,13 +541,13 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
 		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
 
-		/* Fallthrough */
+		fallthrough;
 	case 3:
 		/* Allow single byte watchpoint. */
 		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
 			break;
 
-		/* Fallthrough */
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9e897c500237..8982b68289b7 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -103,6 +103,10 @@ KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 KVM_NVHE_ALIAS(gic_pmr_sync);
 #endif
 
+/* EL2 exception handling */
+KVM_NVHE_ALIAS(__start___kvm_ex_table);
+KVM_NVHE_ALIAS(__stop___kvm_ex_table);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1cd1a4d0ed30..2a1ad95d9b2c 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -315,21 +315,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		/* MOVW instruction relocations. */
 		case R_AARCH64_MOVW_UABS_G0_NC:
 			overflow_check = false;
-			/* Fall through */
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G0:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G1_NC:
 			overflow_check = false;
-			/* Fall through */
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G1:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
 					      AARCH64_INSN_IMM_MOVKZ);
 			break;
 		case R_AARCH64_MOVW_UABS_G2_NC:
 			overflow_check = false;
-			/* Fall through */
+			fallthrough;
 		case R_AARCH64_MOVW_UABS_G2:
 			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
 					      AARCH64_INSN_IMM_MOVKZ);
@@ -397,7 +397,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			break;
 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
 			overflow_check = false;
-			/* Fall through */
+			fallthrough;
 		case R_AARCH64_ADR_PREL_PG_HI21:
 			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
 			if (ovf && ovf != -ERANGE)
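In the module.c hunk the fallthrough is doing real work: each R_AARCH64_MOVW_UABS_Gn_NC case merely clears overflow_check and then falls into the checked Gn case, which patches bits [16n+15:16n] of the symbol value into a MOVZ/MOVK immediate. The group extraction on its own, as a sketch (the kernel performs this inside reloc_insn_movw(); the helper name is illustrative):

    #include <linux/types.h>

    /* 16-bit immediate that a MOVZ/MOVK for the group at 'lsb' carries;
     * lsb is 0, 16 or 32 in the cases above. */
    static inline u16 movw_group(u64 val, int lsb)
    {
    	return (u16)(val >> lsb);
    }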
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 84ec630b8ab5..f1804496b935 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -123,10 +123,8 @@ void arch_cpu_idle(void)
 	 * This should do all the clock switching and wait for interrupt
 	 * tricks
 	 */
-	trace_cpu_idle_rcuidle(1, smp_processor_id());
 	cpu_do_idle();
 	local_irq_enable();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -516,6 +514,39 @@ static void entry_task_switch(struct task_struct *next)
 }
 
 /*
+ * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
+ * Assuming the virtual counter is enabled at the beginning of times:
+ *
+ * - disable access when switching from a 64bit task to a 32bit task
+ * - enable access when switching from a 32bit task to a 64bit task
+ */
+static void erratum_1418040_thread_switch(struct task_struct *prev,
+					  struct task_struct *next)
+{
+	bool prev32, next32;
+	u64 val;
+
+	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
+	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+		return;
+
+	prev32 = is_compat_thread(task_thread_info(prev));
+	next32 = is_compat_thread(task_thread_info(next));
+
+	if (prev32 == next32)
+		return;
+
+	val = read_sysreg(cntkctl_el1);
+
+	if (!next32)
+		val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+	else
+		val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+	write_sysreg(val, cntkctl_el1);
+}
+
+/*
  * Thread switching.
  */
 __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
@@ -530,6 +561,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ssbs_thread_switch(next);
+	erratum_1418040_thread_switch(prev, next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03957a1ae6c0..355ee9eed4dd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -151,7 +151,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 			break;
 		}
 		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
-		/* Fall through */
+		fallthrough;
 	case CPU_STUCK_IN_KERNEL:
 		pr_crit("CPU%u: is stuck in kernel\n", cpu);
 		if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 5139a5f19256..d6adb4677c25 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -208,7 +208,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
       cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
 
 # Install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
+quiet_cmd_vdso_install = INSTALL32 $@
       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
 
 vdso.so: $(obj)/vdso.so.dbg
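With the entry.S hack removed earlier in this diff, the erratum 1418040 workaround now lives entirely in the __switch_to() path above: CNTKCTL_EL1.EL0VCTEN is flipped only when a 32-bit and a 64-bit task actually exchange the CPU, instead of on every EL0 entry and exit. A hypothetical self-check of the invariant this maintains (not part of the patch; helper names are borrowed from the hunk, ARCH_TIMER_USR_VCT_ACCESS_EN comes from clocksource/arm_arch_timer.h):

    /* On an affected CPU, a compat task must never have EL0 access to
     * the virtual counter; a native task always may. */
    static void check_vct_state(struct task_struct *next)
    {
    	bool next32 = is_compat_thread(task_thread_info(next));
    	u64 val = read_sysreg(cntkctl_el1);

    	WARN_ON(next32 == !!(val & ARCH_TIMER_USR_VCT_ACCESS_EN));
    }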
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ec8e894684a7..7cba7623fcec 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -20,6 +20,13 @@ ENTRY(_text)
 
 jiffies = jiffies_64;
 
+
+#define HYPERVISOR_EXTABLE					\
+	. = ALIGN(SZ_8);					\
+	__start___kvm_ex_table = .;				\
+	*(__kvm_ex_table)					\
+	__stop___kvm_ex_table = .;
+
 #define HYPERVISOR_TEXT					\
 	/*						\
 	 * Align to 4 KB so that			\
@@ -35,6 +42,7 @@ jiffies = jiffies_64;
 	__hyp_idmap_text_end = .;			\
 	__hyp_text_start = .;				\
 	*(.hyp.text)					\
+	HYPERVISOR_EXTABLE				\
 	__hyp_text_end = .;
 
 #define IDMAP_TEXT					\
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 691d21e4c717..46dc3d75cf13 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1640,6 +1640,10 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+			 "Only trusted guests should be used on this system.\n");
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
 		if (ret < 0) {
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
-		/* fall through */
+		fallthrough;
 	case ESR_ELx_EC_SOFTSTP_LOW:
 	case ESR_ELx_EC_BREAKPT_LOW:
 	case ESR_ELx_EC_BKPT32:
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 
 	isb
 
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
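The hyp/entry.S rework above stops exporting the abort_guest_exit_* labels (el2_error used to compare ELR_EL2 against them by hand); instead, both instructions of the SError probe window are registered with _kvm_extable against a shared local fixup, which re-masks aborts and reports the deferred SError through the exit code. The convention the caller sees, as a C sketch (ARM_EXIT_WITH_SERROR_BIT is the real flag from asm/kvm_asm.h; the predicate itself is illustrative):

    static inline bool exit_took_serror(u64 exit_code)
    {
    	return exit_code & (1UL << ARM_EXIT_WITH_SERROR_BIT);
    }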
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..46b4dab933d0 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp	x2, x3,   [sp, #-16]!
+	stp	x4, x5,   [sp, #-16]!
+	stp	x6, x7,   [sp, #-16]!
+	stp	x8, x9,   [sp, #-16]!
+	stp	x10, x11, [sp, #-16]!
+	stp	x12, x13, [sp, #-16]!
+	stp	x14, x15, [sp, #-16]!
+	stp	x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp	x16, x17, [sp], #16
+	ldp	x14, x15, [sp], #16
+	ldp	x12, x13, [sp], #16
+	ldp	x10, x11, [sp], #16
+	ldp	x8, x9,   [sp], #16
+	ldp	x6, x7,   [sp], #16
+	ldp	x4, x5,   [sp], #16
+	ldp	x2, x3,   [sp], #16
+	ldp	x0, x1,   [sp], #16
+.endm
+
 .text
 
 .macro do_el2_call
@@ -143,13 +167,19 @@ el1_error:
 	b	__guest_exit
 
 el2_sync:
-	/* Check for illegal exception return, otherwise panic */
 	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
 
-	/* if this was something else, then panic! */
-	tst	x0, #PSR_IL_BIT
-	b.eq	__hyp_panic
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
+	eret
+
+1:
 	/* Let's attempt a recovery from the illegal exception return */
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IL
 
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
 #define save_debug(ptr,reg,nr)						\
 	switch (nr) {							\
 	case 15:	ptr[15] = read_debug(reg, 15);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 14:	ptr[14] = read_debug(reg, 14);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 13:	ptr[13] = read_debug(reg, 13);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 12:	ptr[12] = read_debug(reg, 12);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 11:	ptr[11] = read_debug(reg, 11);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 10:	ptr[10] = read_debug(reg, 10);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 9:		ptr[9] = read_debug(reg, 9);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 8:		ptr[8] = read_debug(reg, 8);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 7:		ptr[7] = read_debug(reg, 7);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 6:		ptr[6] = read_debug(reg, 6);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 5:		ptr[5] = read_debug(reg, 5);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 4:		ptr[4] = read_debug(reg, 4);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 3:		ptr[3] = read_debug(reg, 3);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 2:		ptr[2] = read_debug(reg, 2);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 1:		ptr[1] = read_debug(reg, 1);			\
-			/* Fall through */				\
+			fallthrough;					\
 	default:	ptr[0] = read_debug(reg, 0);			\
 	}
 
 #define restore_debug(ptr,reg,nr)					\
 	switch (nr) {							\
 	case 15:	write_debug(ptr[15], reg, 15);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 14:	write_debug(ptr[14], reg, 14);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 13:	write_debug(ptr[13], reg, 13);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 12:	write_debug(ptr[12], reg, 12);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 11:	write_debug(ptr[11], reg, 11);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 10:	write_debug(ptr[10], reg, 10);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 9:		write_debug(ptr[9], reg, 9);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 8:		write_debug(ptr[8], reg, 8);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 7:		write_debug(ptr[7], reg, 7);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 6:		write_debug(ptr[6], reg, 6);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 5:		write_debug(ptr[5], reg, 5);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 4:		write_debug(ptr[4], reg, 4);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 3:		write_debug(ptr[3], reg, 3);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 2:		write_debug(ptr[2], reg, 2);			\
-			/* Fall through */				\
+			fallthrough;					\
 	case 1:		write_debug(ptr[1], reg, 1);			\
-			/* Fall through */				\
+			fallthrough;					\
 	default:	write_debug(ptr[0], reg, 0);			\
 	}
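In the el2_sync path above, the tst/b.eq pair becomes a single tbnz on bit 20 of SPSR_EL2, which is the same bit the old code tested symbolically as PSR_IL_BIT, so a genuinely unexpected exception now reaches kvm_unexpected_el2_exception() instead of panicking outright. The bit test, spelled out (PSR_IL_BIT is the real define from the arm64 ptrace UAPI header; the wrapper function is illustrative):

    #include <linux/types.h>

    #define PSR_IL_BIT	(1 << 20)	/* Illegal Execution state */

    static inline bool illegal_exception_return(u64 spsr)
    {
    	return spsr & PSR_IL_BIT;	/* what "tbnz x0, #20, 1f" checks */
    }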
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..5b6b8fa00f0a 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 
 extern const char __hyp_panic_string[];
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * saved the guest context yet, and we may return early...
 	 */
 	par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = SYS_PAR_EL1_F;
 
 	/* back to the guest */
 	write_sysreg(par, par_el1);
 
 	if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -508,4 +512,31 @@ static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline void __kvm_unexpected_el2_exception(void)
+{
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
+
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
+
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
+}
+
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..0970442d2dbc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -270,3 +270,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 		       read_sysreg(hpfar_el2), par, vcpu);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
 		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
-		/* Fall through */
+		fallthrough;
 	default:
 		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
 	}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
 		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
-		/* Fall through */
+		fallthrough;
 	default:
 		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
 	}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
-		/* Fall through */
+		fallthrough;
 	default:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
 	}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
-		/* Fall through */
+		fallthrough;
 	default:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
 	}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..c1da4f86ccac 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -217,3 +217,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	__hyp_call_panic(spsr, elr, par, host_ctxt);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0121ef2c7c8d..ba00bcc0c884 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+				 bool may_block)
 {
 	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 		 * If the range is too large, release the kvm->mmu_lock
 		 * to prevent starvation and lockup detector warnings.
 		 */
-		if (next != end)
+		if (may_block && next != end)
 			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+	__unmap_stage2_range(mmu, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			      phys_addr_t addr, phys_addr_t end)
 {
@@ -2208,18 +2214,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	unsigned flags = *(unsigned *)data;
+	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
 
 	trace_kvm_unmap_hva_range(start, end);
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
 	return 0;
 }
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a206655a39a5..9b11c096a042 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -45,7 +45,7 @@ static u32 get_cpu_asid_bits(void)
 	default:
 		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
 			smp_processor_id(), fld);
-		/* Fallthrough */
+		fallthrough;
 	case 0:
 		asid = 8;
 		break;
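Finally, the mmu.c changes above thread the mmu-notifier flags down to the stage-2 page-table walk, so kvm->mmu_lock is only dropped (via cond_resched_lock()) when the caller marked the invalidation range blockable. The shape of the contract, condensed as a sketch (mirrors kvm_unmap_hva_handler from the hunk; MMU_NOTIFIER_RANGE_BLOCKABLE is the real flag from linux/mmu_notifier.h):

    static int unmap_handler_sketch(struct kvm *kvm, gpa_t gpa, u64 size,
    				    void *data)
    {
    	unsigned flags = *(unsigned *)data;	/* threaded through the walk */

    	/* may_block == false keeps mmu_lock held throughout, since a
    	 * non-blockable notifier caller must not sleep mid-invalidate. */
    	__unmap_stage2_range(&kvm->arch.mmu, gpa, size,
    			     flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
    	return 0;
    }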