Diffstat (limited to 'arch')
247 files changed, 4046 insertions, 3795 deletions
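One recurring piece of this series: both arch/arm/include/uapi/asm/kvm.h and arch/arm64/include/uapi/asm/kvm.h below add a KVM "firmware" pseudo-register space (KVM_REG_ARM_FW_REG()) whose first entry is KVM_REG_ARM_PSCI_VERSION, and the guest.c changes wire it into the GET/SET_ONE_REG paths. A minimal userspace sketch of reading that register (an illustration only, not code from the series), assuming a vCPU fd that has already been created and initialised via KVM_CREATE_VM/KVM_CREATE_VCPU/KVM_ARM_VCPU_INIT:

/*
 * Hedged sketch: read KVM_REG_ARM_PSCI_VERSION through KVM_GET_ONE_REG.
 * Error handling is deliberately minimal.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET_ONE_REG; pulls in asm/kvm.h */

static int read_psci_version(int vcpu_fd, uint64_t *version)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_PSCI_VERSION,	/* KVM_REG_ARM_FW_REG(0) */
		.addr = (uint64_t)(uintptr_t)version,
	};

	/* 0 on success; *version then holds the PSCI version exposed to the guest. */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

The same register id can also be written back with KVM_SET_ONE_REG, which is what the new kvm_arm_set_fw_reg() path in guest.c services.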
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile index 22a4c5d4702f..a83c4f5e928b 100644 --- a/arch/arc/boot/dts/Makefile +++ b/arch/arc/boot/dts/Makefile @@ -9,8 +9,6 @@ endif obj-y += $(builtindtb-y).dtb.o dtb-y := $(builtindtb-y).dtb -.SECONDARY: $(obj)/$(builtindtb-y).dtb.S - # for CONFIG_OF_ALL_DTBS test dtstree := $(srctree)/$(src) dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts index 8bbb6f85d161..4785fbcc41ed 100644 --- a/arch/arm/boot/dts/gemini-nas4220b.dts +++ b/arch/arm/boot/dts/gemini-nas4220b.dts @@ -134,37 +134,37 @@ function = "gmii"; groups = "gmii_gmac0_grp"; }; - /* Settings come from OpenWRT */ + /* Settings come from OpenWRT, pins on SL3516 */ conf0 { - pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV"; skew-delay = <0>; }; conf1 { - pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; + pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC"; skew-delay = <15>; }; conf2 { - pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; + pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN"; skew-delay = <7>; }; conf3 { - pins = "V7 GMAC0 TXC"; + pins = "U8 GMAC0 TXC"; skew-delay = <11>; }; conf4 { - pins = "P10 GMAC1 TXC"; + pins = "V11 GMAC1 TXC"; skew-delay = <10>; }; conf5 { /* The data lines all have default skew */ - pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", - "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", - "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", - "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", - "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", - "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", - "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", - "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; + pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1", + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3", + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1", + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3", + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1", + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3", + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1", + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3"; skew-delay = <7>; }; /* Set up drive strength on GMAC0 to 16 mA */ diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 475904894b86..e554b6e039f3 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -163,10 +163,10 @@ cm2: cm2@8000 { compatible = "ti,omap4-cm2", "simple-bus"; - reg = <0x8000 0x3000>; + reg = <0x8000 0x2000>; #address-cells = <1>; #size-cells = <1>; - ranges = <0 0x8000 0x3000>; + ranges = <0 0x8000 0x2000>; cm2_clocks: clocks { #address-cells = <1>; @@ -250,11 +250,11 @@ prm: prm@6000 { compatible = "ti,omap4-prm"; - reg = <0x6000 0x3000>; + reg = <0x6000 0x2000>; interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <1>; - ranges = <0 0x6000 0x3000>; + ranges = <0 0x6000 0x2000>; prm_clocks: clocks { #address-cells = <1>; diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig index 2a63fa10c813..553777ac2814 100644 --- a/arch/arm/configs/gemini_defconfig +++ b/arch/arm/configs/gemini_defconfig @@ -1,6 +1,7 @@ # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_USER_NS=y CONFIG_RELAY=y @@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y CONFIG_PCI=y CONFIG_PREEMPT=y CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_CMA=y CONFIG_CMDLINE="console=ttyS0,115200n8" CONFIG_KEXEC=y CONFIG_BINFMT_MISC=y CONFIG_PM=y +CONFIG_NET=y +CONFIG_UNIX=y +CONFIG_INET=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y 
+CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_CFI_STAA=y @@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_ATA=y CONFIG_PATA_FTIDE010=y +CONFIG_NETDEVICES=y +CONFIG_GEMINI_ETHERNET=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_GPIO=y +CONFIG_REALTEK_PHY=y CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set @@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -# CONFIG_HWMON is not set +CONFIG_I2C_GPIO=y +CONFIG_SPI=y +CONFIG_SPI_GPIO=y +CONFIG_SENSORS_GPIO_FAN=y +CONFIG_SENSORS_LM75=y +CONFIG_THERMAL=y CONFIG_WATCHDOG=y -CONFIG_GEMINI_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_DRM=y +CONFIG_DRM_PANEL_ILITEK_IL9322=y +CONFIG_DRM_TVE200=y +CONFIG_LOGO=y CONFIG_USB=y CONFIG_USB_MON=y CONFIG_USB_FOTG210_HCD=y @@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_DISK=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_RTC_CLASS=y CONFIG_DMADEVICES=y diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 2620ce790db0..371fca4e1ab7 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_DENALI_DT=y CONFIG_MTD_SPI_NOR=y +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set CONFIG_SPI_CADENCE_QUADSPI=y CONFIG_OF_OVERLAY=y CONFIG_OF_CONFIGFS=y diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 3304e671918d..8de542c48ade 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -67,4 +67,4 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl $(call cmd,perl) endif -.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S +targets += sha256-core.S sha512-core.S diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c6a749568dd6..c7c28c885a19 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -77,6 +77,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; int max_vcpus; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 2ba95d6fe852..caae4843cb70 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -195,6 +195,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_VFP_FPINST 0x1009 #define KVM_REG_ARM_VFP_FPINST2 0x100A +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 1e0784ebbfd6..a18f33edc471 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -22,6 +22,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> +#include <kvm/arm_psci.h> #include <asm/cputype.h> #include <linux/uaccess.h> #include <asm/kvm.h> @@ -176,6 +177,7 @@ static unsigned long num_core_regs(void) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) + + kvm_arm_get_fw_num_regs(vcpu) + 
NUM_TIMER_REGS; } @@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 4603c30fef73..0d9ce58bc464 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) -# For rule to generate ti-emif-asm-offsets.h dependency -include drivers/memory/Makefile.asm-offsets - -arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h -arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h +$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h diff --git a/arch/arm/mach-omap2/pm-asm-offsets.c b/arch/arm/mach-omap2/pm-asm-offsets.c index 6d4392da7c11..b9846b19e5e2 100644 --- a/arch/arm/mach-omap2/pm-asm-offsets.c +++ b/arch/arm/mach-omap2/pm-asm-offsets.c @@ -7,9 +7,12 @@ #include <linux/kbuild.h> #include <linux/platform_data/pm33xx.h> +#include <linux/ti-emif-sram.h> int main(void) { + ti_emif_asm_offsets(); + DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, offsetof(struct am33xx_pm_sram_data, wfi_flags)); DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET, diff --git a/arch/arm/mach-omap2/sleep33xx.S b/arch/arm/mach-omap2/sleep33xx.S index 218d79930b04..322b3bb868b4 100644 --- a/arch/arm/mach-omap2/sleep33xx.S +++ b/arch/arm/mach-omap2/sleep33xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-emif-asm-offsets.h> #include <generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/ti-emif-sram.h> diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S index b24be624e8b9..8903814a6677 100644 --- a/arch/arm/mach-omap2/sleep43xx.S +++ b/arch/arm/mach-omap2/sleep43xx.S @@ -6,7 +6,6 @@ * Dave Gerlach, Vaibhav Bedia */ -#include <generated/ti-emif-asm-offsets.h> #include <generated/ti-pm-asm-offsets.h> #include <linux/linkage.h> #include <linux/ti-emif-sram.h> diff --git a/arch/arm/mach-s3c24xx/mach-jive.c b/arch/arm/mach-s3c24xx/mach-jive.c index 59589a4a0d4b..885e8f12e4b9 100644 --- a/arch/arm/mach-s3c24xx/mach-jive.c +++ b/arch/arm/mach-s3c24xx/mach-jive.c @@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = { .dev_id = "spi_gpio", .table = { GPIO_LOOKUP("GPIOB", 4, - "gpio-sck", GPIO_ACTIVE_HIGH), + "sck", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("GPIOB", 9, - "gpio-mosi", 
GPIO_ACTIVE_HIGH), + "mosi", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("GPIOH", 10, "cs", GPIO_ACTIVE_HIGH), { }, diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 15402861bb59..87f7d2f9f17c 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) +ifeq ($(cc-name),clang) +KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128 +else KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) +endif ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) KBUILD_CPPFLAGS += -mbig-endian diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index 4eef36b22538..88e712ea757a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -212,3 +212,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index 22bf37404ff1..3e3eb31748a3 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -271,3 +271,15 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; + +&usb2_phy0 { + /* + * even though the schematics don't show it: + * HDMI_5V is also used as supply for the USB VBUS. + */ + phy-supply = <&hdmi_5v>; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 69c721a70e44..6739697be1de 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -215,3 +215,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index 0a0953fbc7d4..0cfd701809de 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -185,3 +185,7 @@ pinctrl-0 = <&uart_ao_a_pins>; pinctrl-names = "default"; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index e1a39cbed8c9..dba365ed4bd5 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -20,6 +20,67 @@ no-map; }; }; + + soc { + usb0: usb@c9000000 { + status = "disabled"; + compatible = "amlogic,meson-gxl-dwc3"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks = <&clkc CLKID_USB>; + clock-names = "usb_general"; + resets = <&reset RESET_USB_OTG>; + reset-names = "usb_otg"; + + dwc3: dwc3@c9000000 { + compatible = "snps,dwc3"; + reg = <0x0 0xc9000000 0x0 0x100000>; + interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; + dr_mode = "host"; + maximum-speed = "high-speed"; + snps,dis_u2_susphy_quirk; + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>; + }; + }; + }; +}; + +&apb { + usb2_phy0: phy@78000 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78000 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; + + usb2_phy1: 
phy@78020 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78020 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; + + usb3_phy: phy@78080 { + compatible = "amlogic,meson-gxl-usb3-phy"; + #phy-cells = <0>; + reg = <0x0 0x78080 0x0 0x20>; + interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>; + clock-names = "phy", "peripheral"; + resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>; + reset-names = "phy", "peripheral"; + status = "okay"; + }; }; &ethmac { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 4fd46c1546a7..0868da476e41 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -406,3 +406,7 @@ status = "okay"; vref-supply = <&vddio_ao18>; }; + +&usb0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index d076a7c425dd..247888d68a3a 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -80,6 +80,19 @@ }; }; +&apb { + usb2_phy2: phy@78040 { + compatible = "amlogic,meson-gxl-usb2-phy"; + #phy-cells = <0>; + reg = <0x0 0x78040 0x0 0x20>; + clocks = <&clkc CLKID_USB>; + clock-names = "phy"; + resets = <&reset RESET_USB_OTG>; + reset-names = "phy"; + status = "okay"; + }; +}; + &clkc_AO { compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; }; @@ -100,3 +113,7 @@ &hdmi_tx { compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; }; + +&dwc3 { + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>; +}; diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi index 2ac43221ddb6..69804c5f1197 100644 --- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi @@ -56,8 +56,6 @@ gpio_keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; power-button { debounce_interval = <50>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi index 4b5465da81d8..8c68e0c26f1b 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi @@ -36,11 +36,11 @@ #size-cells = <1>; ranges = <0x0 0x0 0x67d00000 0x00800000>; - sata0: ahci@210000 { + sata0: ahci@0 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00210000 0x1000>; + reg = <0x00000000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -52,9 +52,9 @@ }; }; - sata_phy0: sata_phy@212100 { + sata_phy0: sata_phy@2100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00212100 0x1000>; + reg = <0x00002100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -66,11 +66,11 @@ }; }; - sata1: ahci@310000 { + sata1: ahci@10000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00310000 0x1000>; + reg = <0x00010000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -82,9 +82,9 @@ }; }; - 
sata_phy1: sata_phy@312100 { + sata_phy1: sata_phy@12100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00312100 0x1000>; + reg = <0x00012100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -96,11 +96,11 @@ }; }; - sata2: ahci@120000 { + sata2: ahci@20000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00120000 0x1000>; + reg = <0x00020000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -112,9 +112,9 @@ }; }; - sata_phy2: sata_phy@122100 { + sata_phy2: sata_phy@22100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00122100 0x1000>; + reg = <0x00022100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -126,11 +126,11 @@ }; }; - sata3: ahci@130000 { + sata3: ahci@30000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00130000 0x1000>; + reg = <0x00030000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -142,9 +142,9 @@ }; }; - sata_phy3: sata_phy@132100 { + sata_phy3: sata_phy@32100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00132100 0x1000>; + reg = <0x00032100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -156,11 +156,11 @@ }; }; - sata4: ahci@330000 { + sata4: ahci@100000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00330000 0x1000>; + reg = <0x00100000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -172,9 +172,9 @@ }; }; - sata_phy4: sata_phy@332100 { + sata_phy4: sata_phy@102100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00332100 0x1000>; + reg = <0x00102100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -186,11 +186,11 @@ }; }; - sata5: ahci@400000 { + sata5: ahci@110000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00400000 0x1000>; + reg = <0x00110000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -202,9 +202,9 @@ }; }; - sata_phy5: sata_phy@402100 { + sata_phy5: sata_phy@112100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00402100 0x1000>; + reg = <0x00112100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -216,11 +216,11 @@ }; }; - sata6: ahci@410000 { + sata6: ahci@120000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00410000 0x1000>; + reg = <0x00120000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -232,9 +232,9 @@ }; }; - sata_phy6: sata_phy@412100 { + sata_phy6: sata_phy@122100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00412100 0x1000>; + reg = <0x00122100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; @@ -246,11 +246,11 @@ }; }; - sata7: ahci@420000 { + sata7: ahci@130000 { compatible = "brcm,iproc-ahci", "generic-ahci"; - reg = <0x00420000 0x1000>; + reg = <0x00130000 0x1000>; reg-names = "ahci"; - interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 
#address-cells = <1>; #size-cells = <0>; status = "disabled"; @@ -262,9 +262,9 @@ }; }; - sata_phy7: sata_phy@422100 { + sata_phy7: sata_phy@132100 { compatible = "brcm,iproc-sr-sata-phy"; - reg = <0x00422100 0x1000>; + reg = <0x00132100 0x1000>; reg-names = "phy"; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 8df9f326f449..f35ac684b1c0 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -78,4 +78,4 @@ $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl $(call cmd,perlasm) endif -.PRECIOUS: $(obj)/sha256-core.S $(obj)/sha512-core.S +targets += sha256-core.S sha512-core.S diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 053d83e8db6f..0bcc98dbba56 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -565,4 +565,140 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #endif .endm + /* + * frame_push - Push @regcount callee saved registers to the stack, + * starting at x19, as well as x29/x30, and set x29 to + * the new value of sp. Add @extra bytes of stack space + * for locals. + */ + .macro frame_push, regcount:req, extra + __frame st, \regcount, \extra + .endm + + /* + * frame_pop - Pop the callee saved registers from the stack that were + * pushed in the most recent call to frame_push, as well + * as x29/x30 and any extra stack space that may have been + * allocated. + */ + .macro frame_pop + __frame ld + .endm + + .macro __frame_regs, reg1, reg2, op, num + .if .Lframe_regcount == \num + \op\()r \reg1, [sp, #(\num + 1) * 8] + .elseif .Lframe_regcount > \num + \op\()p \reg1, \reg2, [sp, #(\num + 1) * 8] + .endif + .endm + + .macro __frame, op, regcount, extra=0 + .ifc \op, st + .if (\regcount) < 0 || (\regcount) > 10 + .error "regcount should be in the range [0 ... 10]" + .endif + .if ((\extra) % 16) != 0 + .error "extra should be a multiple of 16 bytes" + .endif + .ifdef .Lframe_regcount + .if .Lframe_regcount != -1 + .error "frame_push/frame_pop may not be nested" + .endif + .endif + .set .Lframe_regcount, \regcount + .set .Lframe_extra, \extra + .set .Lframe_local_offset, ((\regcount + 3) / 2) * 16 + stp x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]! + mov x29, sp + .endif + + __frame_regs x19, x20, \op, 1 + __frame_regs x21, x22, \op, 3 + __frame_regs x23, x24, \op, 5 + __frame_regs x25, x26, \op, 7 + __frame_regs x27, x28, \op, 9 + + .ifc \op, ld + .if .Lframe_regcount == -1 + .error "frame_push/frame_pop may not be nested" + .endif + ldp x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra + .set .Lframe_regcount, -1 + .endif + .endm + +/* + * Check whether to yield to another runnable task from kernel mode NEON code + * (which runs with preemption disabled). + * + * if_will_cond_yield_neon + * // pre-yield patchup code + * do_cond_yield_neon + * // post-yield patchup code + * endif_yield_neon <label> + * + * where <label> is optional, and marks the point where execution will resume + * after a yield has been performed. If omitted, execution resumes right after + * the endif_yield_neon invocation. Note that the entire sequence, including + * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT + * is not defined. 
+ * + * As a convenience, in the case where no patchup code is required, the above + * sequence may be abbreviated to + * + * cond_yield_neon <label> + * + * Note that the patchup code does not support assembler directives that change + * the output section, any use of such directives is undefined. + * + * The yield itself consists of the following: + * - Check whether the preempt count is exactly 1, in which case disabling + * preemption once will make the task preemptible. If this is not the case, + * yielding is pointless. + * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable + * kernel mode NEON (which will trigger a reschedule), and branch to the + * yield fixup code. + * + * This macro sequence may clobber all CPU state that is not guaranteed by the + * AAPCS to be preserved across an ordinary function call. + */ + + .macro cond_yield_neon, lbl + if_will_cond_yield_neon + do_cond_yield_neon + endif_yield_neon \lbl + .endm + + .macro if_will_cond_yield_neon +#ifdef CONFIG_PREEMPT + get_thread_info x0 + ldr w1, [x0, #TSK_TI_PREEMPT] + ldr x0, [x0, #TSK_TI_FLAGS] + cmp w1, #PREEMPT_DISABLE_OFFSET + csel x0, x0, xzr, eq + tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling? + /* fall through to endif_yield_neon */ + .subsection 1 +.Lyield_\@ : +#else + .section ".discard.cond_yield_neon", "ax" +#endif + .endm + + .macro do_cond_yield_neon + bl kernel_neon_end + bl kernel_neon_begin + .endm + + .macro endif_yield_neon, lbl + .ifnb \lbl + b \lbl + .else + b .Lyield_out_\@ + .endif + .previous +.Lyield_out_\@ : + .endm + #endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index a311880feb0f..bc51b72fafd4 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -43,13 +43,12 @@ #define ARM64_SVE 22 #define ARM64_UNMAP_KERNEL_AT_EL0 23 #define ARM64_HARDEN_BRANCH_PREDICTOR 24 -#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25 -#define ARM64_HAS_RAS_EXTN 26 -#define ARM64_WORKAROUND_843419 27 -#define ARM64_HAS_CACHE_IDC 28 -#define ARM64_HAS_CACHE_DIC 29 -#define ARM64_HW_DBM 30 +#define ARM64_HAS_RAS_EXTN 25 +#define ARM64_WORKAROUND_843419 26 +#define ARM64_HAS_CACHE_IDC 27 +#define ARM64_HAS_CACHE_DIC 28 +#define ARM64_HW_DBM 29 -#define ARM64_NCAPS 31 +#define ARM64_NCAPS 30 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index d53d40704416..f6648a3e4152 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -71,8 +71,6 @@ extern u32 __kvm_get_mdcr_el2(void); extern u32 __init_stage2_translation(void); -extern void __qcom_hyp_sanitize_btac_predictors(void); - #else /* __ASSEMBLY__ */ .macro get_host_ctxt reg, tmp diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index ab46bc70add6..469de8acd06f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -75,6 +75,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; + + /* Mandated version of PSCI */ + u32 psci_version; }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index b6dbbe3123a9..97d0ef12e2ff 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -39,7 +39,7 @@ struct mod_arch_specific { u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, Elf64_Sym *sym); -u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); 
+u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); #ifdef CONFIG_RANDOMIZE_BASE extern u64 module_alloc_base; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7e2c27e63cd8..7c4c8f318ba9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } -extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); +extern void __sync_icache_dcache(pte_t pteval); /* * PTE bits configuration in the presence of hardware Dirty Bit Management @@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t old_pte; if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) - __sync_icache_dcache(pte, addr); + __sync_icache_dcache(pte); /* * If the existing pte is valid, check for potential race with diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9abbf3044654..04b3256f8e6d 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -206,6 +206,12 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) +/* KVM-as-firmware specific pseudo-registers */ +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_FW | ((r) & 0xffff)) +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) + /* Device Control API: ARM VGIC */ #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 9b55a3f24be7..bf825f38d206 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -55,8 +55,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o -arm64-obj-$(CONFIG_KVM_INDIRECT_VECTORS)+= bpi.o - obj-y += $(arm64-obj-y) vdso/ probes/ obj-m += $(arm64-obj-m) head-y := head.o diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 78e1b0a70aaf..5bdda651bd05 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -23,6 +23,7 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/kvm_host.h> +#include <linux/preempt.h> #include <linux/suspend.h> #include <asm/cpufeature.h> #include <asm/fixmap.h> @@ -93,6 +94,8 @@ int main(void) DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); + DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET); + BLANK(); DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW); diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S deleted file mode 100644 index bb0b67722e86..000000000000 --- a/arch/arm64/kernel/bpi.S +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Contains CPU specific branch predictor invalidation sequences - * - * Copyright (C) 2018 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/linkage.h> -#include <linux/arm-smccc.h> - -#include <asm/alternative.h> -#include <asm/mmu.h> - -.macro hyp_ventry - .align 7 -1: .rept 27 - nop - .endr -/* - * The default sequence is to directly branch to the KVM vectors, - * using the computed offset. This applies for VHE as well as - * !ARM64_HARDEN_EL2_VECTORS. - * - * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced - * with: - * - * stp x0, x1, [sp, #-16]! - * movz x0, #(addr & 0xffff) - * movk x0, #((addr >> 16) & 0xffff), lsl #16 - * movk x0, #((addr >> 32) & 0xffff), lsl #32 - * br x0 - * - * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4. - * See kvm_patch_vector_branch for details. - */ -alternative_cb kvm_patch_vector_branch - b __kvm_hyp_vector + (1b - 0b) - nop - nop - nop - nop -alternative_cb_end -.endm - -.macro generate_vectors -0: - .rept 16 - hyp_ventry - .endr - .org 0b + SZ_2K // Safety measure -.endm - - - .text - .pushsection .hyp.text, "ax" - - .align 11 -ENTRY(__bp_harden_hyp_vecs_start) - .rept BP_HARDEN_EL2_SLOTS - generate_vectors - .endr -ENTRY(__bp_harden_hyp_vecs_end) - - .popsection - -ENTRY(__qcom_hyp_sanitize_link_stack_start) - stp x29, x30, [sp, #-16]! - .rept 16 - bl . + 4 - .endr - ldp x29, x30, [sp], #16 -ENTRY(__qcom_hyp_sanitize_link_stack_end) - -.macro smccc_workaround_1 inst - sub sp, sp, #(8 * 4) - stp x2, x3, [sp, #(8 * 0)] - stp x0, x1, [sp, #(8 * 2)] - mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 - \inst #0 - ldp x2, x3, [sp, #(8 * 0)] - ldp x0, x1, [sp, #(8 * 2)] - add sp, sp, #(8 * 4) -.endm - -ENTRY(__smccc_workaround_1_smc_start) - smccc_workaround_1 smc -ENTRY(__smccc_workaround_1_smc_end) - -ENTRY(__smccc_workaround_1_hvc_start) - smccc_workaround_1 hvc -ENTRY(__smccc_workaround_1_hvc_end) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 9262ec57f5ab..a900befadfe8 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -86,13 +86,9 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1); DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); -#ifdef CONFIG_KVM -extern char __qcom_hyp_sanitize_link_stack_start[]; -extern char __qcom_hyp_sanitize_link_stack_end[]; +#ifdef CONFIG_KVM_INDIRECT_VECTORS extern char __smccc_workaround_1_smc_start[]; extern char __smccc_workaround_1_smc_end[]; -extern char __smccc_workaround_1_hvc_start[]; -extern char __smccc_workaround_1_hvc_end[]; static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, const char *hyp_vecs_end) @@ -132,12 +128,8 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, spin_unlock(&bp_lock); } #else -#define __qcom_hyp_sanitize_link_stack_start NULL -#define __qcom_hyp_sanitize_link_stack_end NULL #define __smccc_workaround_1_smc_start NULL #define __smccc_workaround_1_smc_end NULL -#define __smccc_workaround_1_hvc_start NULL -#define __smccc_workaround_1_hvc_end NULL static void __install_bp_hardening_cb(bp_hardening_cb_t fn, const char *hyp_vecs_start, @@ -145,7 +137,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, { __this_cpu_write(bp_hardening_data.fn, fn); } -#endif /* CONFIG_KVM */ +#endif /* CONFIG_KVM_INDIRECT_VECTORS */ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, bp_hardening_cb_t fn, @@ -178,12 +170,25 @@ static 
void call_hvc_arch_workaround_1(void) arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); } +static void qcom_link_stack_sanitization(void) +{ + u64 tmp; + + asm volatile("mov %0, x30 \n" + ".rept 16 \n" + "bl . + 4 \n" + ".endr \n" + "mov x30, %0 \n" + : "=&r" (tmp)); +} + static void enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) { bp_hardening_cb_t cb; void *smccc_start, *smccc_end; struct arm_smccc_res res; + u32 midr = read_cpuid_id(); if (!entry->matches(entry, SCOPE_LOCAL_CPU)) return; @@ -198,8 +203,9 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) if ((int)res.a0 < 0) return; cb = call_hvc_arch_workaround_1; - smccc_start = __smccc_workaround_1_hvc_start; - smccc_end = __smccc_workaround_1_hvc_end; + /* This is a guest, no need to patch KVM vectors */ + smccc_start = NULL; + smccc_end = NULL; break; case PSCI_CONDUIT_SMC: @@ -216,30 +222,14 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry) return; } + if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || + ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) + cb = qcom_link_stack_sanitization; + install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); return; } - -static void qcom_link_stack_sanitization(void) -{ - u64 tmp; - - asm volatile("mov %0, x30 \n" - ".rept 16 \n" - "bl . + 4 \n" - ".endr \n" - "mov x30, %0 \n" - : "=&r" (tmp)); -} - -static void -qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry) -{ - install_bp_hardening_cb(entry, qcom_link_stack_sanitization, - __qcom_hyp_sanitize_link_stack_start, - __qcom_hyp_sanitize_link_stack_end); -} #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \ @@ -324,33 +314,23 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = { MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), - {}, -}; - -static const struct midr_range qcom_bp_harden_cpus[] = { MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1), MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR), {}, }; -static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = { - { - CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), - .cpu_enable = enable_smccc_arch_workaround_1, - }, - { - CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus), - .cpu_enable = qcom_enable_link_stack_sanitization, - }, +#endif + +#ifdef CONFIG_HARDEN_EL2_VECTORS + +static const struct midr_range arm64_harden_el2_vectors[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), {}, }; #endif -#ifndef ERRATA_MIDR_ALL_VERSIONS -#define ERRATA_MIDR_ALL_VERSIONS(x) MIDR_ALL_VERSIONS(x) -#endif - const struct arm64_cpu_capabilities arm64_errata[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \ @@ -495,25 +475,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = { { .capability = ARM64_HARDEN_BRANCH_PREDICTOR, .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, - .matches = multi_entry_cap_matches, - .cpu_enable = multi_entry_cap_cpu_enable, - .match_list = arm64_bp_harden_list, - }, - { - .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT, - ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus), + .cpu_enable = enable_smccc_arch_workaround_1, + ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus), }, #endif #ifdef CONFIG_HARDEN_EL2_VECTORS { - .desc = "Cortex-A57 EL2 vector hardening", - .capability = ARM64_HARDEN_EL2_VECTORS, - ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - }, - { - .desc = "Cortex-A72 EL2 
vector hardening", + .desc = "EL2 vector hardening", .capability = ARM64_HARDEN_EL2_VECTORS, - ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), }, #endif { diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 536d572e5596..9d1b06d67c53 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, static const struct midr_range kpti_safe_list[] = { MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), + { /* sentinel */ } }; char const *str = "command line option"; diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index fa3637284a3d..f0690c2ca3e0 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, } #ifdef CONFIG_ARM64_ERRATUM_843419 -u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) +u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) { struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : &mod->arch.init; diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 719fde8dcc19..155fd91e78f4 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val) insn &= ~BIT(31); } else { /* out of range for ADR -> emit a veneer */ - val = module_emit_adrp_veneer(mod, place, val & ~0xfff); + val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff); if (!val) return -ENOEXEC; insn = aarch64_insn_gen_branch_imm((u64)place, val, diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 71d99af24ef2..7ff81fed46e1 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -25,6 +25,7 @@ #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/mm.h> +#include <linux/nospec.h> #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> @@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, switch (note_type) { case NT_ARM_HW_BREAK: - if (idx < ARM_MAX_BRP) - bp = tsk->thread.debug.hbp_break[idx]; + if (idx >= ARM_MAX_BRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_BRP); + bp = tsk->thread.debug.hbp_break[idx]; break; case NT_ARM_HW_WATCH: - if (idx < ARM_MAX_WRP) - bp = tsk->thread.debug.hbp_watch[idx]; + if (idx >= ARM_MAX_WRP) + goto out; + idx = array_index_nospec(idx, ARM_MAX_WRP); + bp = tsk->thread.debug.hbp_watch[idx]; break; } +out: return bp; } @@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata; - mm_segment_t old_fs = get_fs(); - set_fs(KERNEL_DS); /* Watchpoint */ if (num < 0) { ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); @@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num, } else { ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); } - set_fs(old_fs); if (!ret) ret = put_user(kdata, data); @@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num, { int ret; u32 kdata = 0; - mm_segment_t old_fs = get_fs(); if (num == 0) return 0; @@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct 
task_struct *tsk, compat_long_t num, if (ret) return ret; - set_fs(KERNEL_DS); if (num < 0) ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); else ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); - set_fs(old_fs); return ret; } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ba964da31a25..8bbdc17e49df 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -277,7 +277,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size) * If we were single stepping, we want to get the step exception after * we return from the trap. */ - user_fastforward_single_step(current); + if (user_mode(regs)) + user_fastforward_single_step(current); } static LIST_HEAD(undef_hook); @@ -366,7 +367,7 @@ void force_signal_inject(int signal, int code, unsigned long address) } /* Force signals we don't understand to SIGKILL */ - if (WARN_ON(signal != SIGKILL || + if (WARN_ON(signal != SIGKILL && siginfo_layout(signal, code) != SIL_FAULT)) { signal = SIGKILL; } diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 959e50d2588c..56a0260ceb11 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> +#include <kvm/arm_psci.h> #include <asm/cputype.h> #include <linux/uaccess.h> #include <asm/kvm.h> @@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) { return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) - + NUM_TIMER_REGS; + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; } /** @@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) uindices++; } + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); + if (ret) + return ret; + uindices += kvm_arm_get_fw_num_regs(vcpu); + ret = copy_timer_indices(vcpu, uindices); if (ret) return ret; @@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return get_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_get_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return get_timer_reg(vcpu, reg); @@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) return set_core_reg(vcpu, reg); + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) + return kvm_arm_set_fw_reg(vcpu, reg); + if (is_timer_reg(reg->id)) return set_timer_reg(vcpu, reg); diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 1f458f7c3b44..e41a161d313a 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -209,15 +209,3 @@ alternative_endif eret ENDPROC(__fpsimd_guest_restore) - -ENTRY(__qcom_hyp_sanitize_btac_predictors) - /** - * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700) - * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls - * b15-b0: contains SiP functionID - */ - movz x0, #0x1700 - movk x0, #0xc200, lsl #16 - smc #0 - ret -ENDPROC(__qcom_hyp_sanitize_btac_predictors) diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index 87dfecce82b1..bffece27b5c1 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 - ARM Ltd + * Copyright (C) 2015-2018 - ARM Ltd * Author: Marc 
Zyngier <marc.zyngier@arm.com> * * This program is free software; you can redistribute it and/or modify @@ -24,6 +24,7 @@ #include <asm/kvm_arm.h> #include <asm/kvm_asm.h> #include <asm/kvm_mmu.h> +#include <asm/mmu.h> .text .pushsection .hyp.text, "ax" @@ -237,3 +238,64 @@ ENTRY(__kvm_hyp_vector) invalid_vect el1_fiq_invalid // FIQ 32-bit EL1 valid_vect el1_error // Error 32-bit EL1 ENDPROC(__kvm_hyp_vector) + +#ifdef CONFIG_KVM_INDIRECT_VECTORS +.macro hyp_ventry + .align 7 +1: .rept 27 + nop + .endr +/* + * The default sequence is to directly branch to the KVM vectors, + * using the computed offset. This applies for VHE as well as + * !ARM64_HARDEN_EL2_VECTORS. + * + * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced + * with: + * + * stp x0, x1, [sp, #-16]! + * movz x0, #(addr & 0xffff) + * movk x0, #((addr >> 16) & 0xffff), lsl #16 + * movk x0, #((addr >> 32) & 0xffff), lsl #32 + * br x0 + * + * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4. + * See kvm_patch_vector_branch for details. + */ +alternative_cb kvm_patch_vector_branch + b __kvm_hyp_vector + (1b - 0b) + nop + nop + nop + nop +alternative_cb_end +.endm + +.macro generate_vectors +0: + .rept 16 + hyp_ventry + .endr + .org 0b + SZ_2K // Safety measure +.endm + + .align 11 +ENTRY(__bp_harden_hyp_vecs_start) + .rept BP_HARDEN_EL2_SLOTS + generate_vectors + .endr +ENTRY(__bp_harden_hyp_vecs_end) + + .popsection + +ENTRY(__smccc_workaround_1_smc_start) + sub sp, sp, #(8 * 4) + stp x2, x3, [sp, #(8 * 0)] + stp x0, x1, [sp, #(8 * 2)] + mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 + smc #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +ENTRY(__smccc_workaround_1_smc_end) +#endif diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 07b572173265..d9645236e474 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -472,16 +472,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) /* And we're baaack! 
*/ } while (fixup_guest_exit(vcpu, &exit_code)); - if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) { - u32 midr = read_cpuid_id(); - - /* Apply BTAC predictors mitigation to all Falkor chips */ - if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) || - ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) { - __qcom_hyp_sanitize_btac_predictors(); - } - } - fp_enabled = __fpsimd_enabled_nvhe(); __sysreg_save_state_nvhe(guest_ctxt); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 806b0b126a64..6e3b969391fd 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) if (id == SYS_ID_AA64PFR0_EL1) { if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) - pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", - task_pid_nr(current)); + kvm_debug("SVE unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); } else if (id == SYS_ID_AA64MMFR1_EL1) { if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) - pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n", - task_pid_nr(current)); + kvm_debug("LORegions unsupported for guests, suppressing\n"); val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); } diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 0ead8a1d1679..137710f4dac3 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \ -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ -fcall-saved-x18 -fomit-frame-pointer CFLAGS_REMOVE_atomic_ll_sc.o := -pg +GCOV_PROFILE_atomic_ll_sc.o := n +KASAN_SANITIZE_atomic_ll_sc.o := n +KCOV_INSTRUMENT_atomic_ll_sc.o := n +UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index e36ed5087b5c..1059884f9a6f 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, flush_ptrace_access(vma, page, uaddr, dst, len); } -void __sync_icache_dcache(pte_t pte, unsigned long addr) +void __sync_icache_dcache(pte_t pte) { struct page *page = pte_page(pte); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index dabfc1ecda3d..12145874c02b 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -204,7 +204,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, (void *)mod_shadow_start); @@ -224,7 +224,7 @@ void __init kasan_init(void) kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), (unsigned long)kasan_mem_to_shadow(end), - pfn_to_nid(virt_to_pfn(start))); + early_pfn_to_nid(virt_to_pfn(start))); } /* diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h index 9e8621d94ee9..e17262ad125e 100644 --- a/arch/hexagon/include/asm/io.h +++ b/arch/hexagon/include/asm/io.h @@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, memcpy((void *) dst, src, count); } +static inline void memset_io(volatile void __iomem *addr, int value, + size_t size) +{ + memset((void __force *)addr, value, size); +} + #define PCI_IO_ADDR (volatile void __iomem *) /* diff --git 
a/arch/hexagon/lib/checksum.c b/arch/hexagon/lib/checksum.c index 617506d1a559..7cd0a2259269 100644 --- a/arch/hexagon/lib/checksum.c +++ b/arch/hexagon/lib/checksum.c @@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) memcpy(dst, src, len); return csum_partial(dst, len, sum); } +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts index 1bd105428f61..65af3f6ba81c 100644 --- a/arch/mips/boot/dts/img/boston.dts +++ b/arch/mips/boot/dts/img/boston.dts @@ -51,6 +51,8 @@ ranges = <0x02000000 0 0x40000000 0x40000000 0 0x40000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci0_intc 1>, <0 0 0 2 &pci0_intc 2>, @@ -79,6 +81,8 @@ ranges = <0x02000000 0 0x20000000 0x20000000 0 0x20000000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci1_intc 1>, <0 0 0 2 &pci1_intc 2>, @@ -107,6 +111,8 @@ ranges = <0x02000000 0 0x16000000 0x16000000 0 0x100000>; + bus-range = <0x00 0xff>; + interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pci2_intc 1>, <0 0 0 2 &pci2_intc 2>, diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 0cbf3af37eca..a7d0b836f2f7 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr) #if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT) #define war_io_reorder_wmb() wmb() #else -#define war_io_reorder_wmb() do { } while (0) +#define war_io_reorder_wmb() barrier() #endif #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ @@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ BUG(); \ } \ \ + /* prevent prefetching of coherent DMA data prematurely */ \ + rmb(); \ return pfx##ioswab##bwlq(__mem, __val); \ } diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index b71306947290..06629011a434 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size) { __kernel_size_t res; +#ifdef CONFIG_CPU_MICROMIPS +/* micromips memset / bzero also clobbers t7 & t8 */ +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31" +#else +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" +#endif /* CONFIG_CPU_MICROMIPS */ + if (eva_kernel_access()) { __asm__ __volatile__( "move\t$4, %1\n\t" @@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } else { might_fault(); __asm__ __volatile__( @@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size) "move\t%0, $6" : "=r" (res) : "r" (addr), "r" (size) - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); + : bzero_clobbers); } return res; diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index a1456664d6c2..f7327979a8f8 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -219,7 +219,7 @@ 1: PTR_ADDIU a0, 1 /* fill bytewise */ R10KCBARRIER(0(ra)) bne t1, a0, 1b - sb a1, -1(a0) + EX(sb, a1, -1(a0), .Lsmall_fixup\@) 2: jr ra /* done */ move a2, zero @@ -252,13 +252,18 @@ PTR_L t0, TI_TASK($28) andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) - LONG_ADDU a2, t1 + LONG_ADDU a2, a0 jr ra LONG_SUBU a2, t0 .Llast_fixup\@: jr ra - andi v1, a2, STORMASK 
+ nop + +.Lsmall_fixup\@: + PTR_SUBU a2, t1, a0 + jr ra + PTR_ADDIU a2, 1 .endm diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c index 1e4658eee13f..5a4875cac1ec 100644 --- a/arch/mips/mm/gup.c +++ b/arch/mips/mm/gup.c @@ -178,6 +178,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, /* * Like get_user_pages_fast() except its IRQ-safe in that it won't fall * back to the regular GUP. + * Note a difference with get_user_pages_fast: this always returns the + * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index 9a3ee389631e..11c5a58ab333 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -17,8 +17,6 @@ * (at your option) any later version. */ -#define __ARCH_HAVE_MMU - #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7e0bb9836b58..fc5a574c3482 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -338,6 +338,7 @@ source "mm/Kconfig" config COMPAT def_bool y depends on 64BIT + select COMPAT_BINFMT_ELF if BINFMT_ELF config SYSVIPC_COMPAT def_bool y diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index e2364ff59180..34ac503e28ad 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -123,6 +123,9 @@ INSTALL_TARGETS = zinstall install PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) +# Default kernel to build +all: bzImage + zImage: vmlinuz Image: vmlinux diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index c22db5323244..57b8b2a2fd4e 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -193,6 +193,12 @@ struct compat_shmid64_ds { }; /* + * The type of struct elf_prstatus.pr_reg in compatible core dumps. + */ +#define COMPAT_ELF_NGREG 80 +typedef compat_ulong_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; + +/* * A pointer passed in from user mode. This should not * be used for syscall parameters, just declare them * as pointers because the syscall entry code will have diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h index 382d75a2ee4f..f019d3ec0c1c 100644 --- a/arch/parisc/include/asm/elf.h +++ b/arch/parisc/include/asm/elf.h @@ -6,7 +6,7 @@ * ELF register definitions.. */ -#include <asm/ptrace.h> +#include <linux/types.h> #define EM_PARISC 15 @@ -169,16 +169,12 @@ typedef struct elf64_fdesc { __u64 gp; } Elf64_Fdesc; -#ifdef __KERNEL__ - #ifdef CONFIG_64BIT #define Elf_Fdesc Elf64_Fdesc #else #define Elf_Fdesc Elf32_Fdesc #endif /*CONFIG_64BIT*/ -#endif /*__KERNEL__*/ - /* Legal values for p_type field of Elf32_Phdr/Elf64_Phdr. */ #define PT_HP_TLS (PT_LOOS + 0x0) @@ -213,44 +209,44 @@ typedef struct elf64_fdesc { #define PF_HP_SBP 0x08000000 /* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ + +#define ELF_PLATFORM ("PARISC") + +/* * The following definitions are those for 32-bit ELF binaries on a 32-bit * kernel and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries - * on a 64-bit kernel, arch/parisc/kernel/binfmt_elf32.c defines these - * macros appropriately and then #includes binfmt_elf.c, which then includes - * this file. 
+ * on a 64-bit kernel, fs/compat_binfmt_elf.c defines ELF_CLASS and then + * #includes binfmt_elf.c, which then includes this file. */ #ifndef ELF_CLASS -/* - * This is used to ensure we don't load something for the wrong architecture. - * - * Note that this header file is used by default in fs/binfmt_elf.c. So - * the following macros are for the default case. However, for the 64 - * bit kernel we also support 32 bit parisc binaries. To do that - * arch/parisc/kernel/binfmt_elf32.c defines its own set of these - * macros, and then it includes fs/binfmt_elf.c to provide an alternate - * elf binary handler for 32 bit binaries (on the 64 bit kernel). - */ #ifdef CONFIG_64BIT -#define ELF_CLASS ELFCLASS64 +#define ELF_CLASS ELFCLASS64 #else #define ELF_CLASS ELFCLASS32 #endif typedef unsigned long elf_greg_t; -/* - * This yields a string that ld.so will use to load implementation - * specific libraries for optimization. This is more specific in - * intent than poking at uname or /proc/cpuinfo. - */ - -#define ELF_PLATFORM ("PARISC\0") - #define SET_PERSONALITY(ex) \ +({ \ set_personality((current->personality & ~PER_MASK) | PER_LINUX); \ current->thread.map_base = DEFAULT_MAP_BASE; \ - current->thread.task_size = DEFAULT_TASK_SIZE \ + current->thread.task_size = DEFAULT_TASK_SIZE; \ + }) + +#endif /* ! ELF_CLASS */ + +#define COMPAT_SET_PERSONALITY(ex) \ +({ \ + set_thread_flag(TIF_32BIT); \ + current->thread.map_base = DEFAULT_MAP_BASE32; \ + current->thread.task_size = DEFAULT_TASK_SIZE32; \ + }) /* * Fill in general registers in a core dump. This saves pretty @@ -277,10 +273,12 @@ typedef unsigned long elf_greg_t; #define ELF_CORE_COPY_REGS(dst, pt) \ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \ - memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \ - memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \ - memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \ - memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \ + { int i; \ + for (i = 0; i < 32; i++) dst[i] = pt->gr[i]; \ + for (i = 0; i < 8; i++) dst[32 + i] = pt->sr[i]; \ + } \ + dst[40] = pt->iaoq[0]; dst[41] = pt->iaoq[1]; \ + dst[42] = pt->iasq[0]; dst[43] = pt->iasq[1]; \ dst[44] = pt->sar; dst[45] = pt->iir; \ dst[46] = pt->isr; dst[47] = pt->ior; \ dst[48] = mfctl(22); dst[49] = mfctl(0); \ @@ -292,7 +290,7 @@ typedef unsigned long elf_greg_t; dst[60] = mfctl(12); dst[61] = mfctl(13); \ dst[62] = mfctl(10); dst[63] = mfctl(15); -#endif /* ! ELF_CLASS */ +#define CORE_DUMP_USE_REGSET #define ELF_NGREG 80 /* We only need 64 at present, but leave space for expansion. */ @@ -310,7 +308,10 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); struct pt_regs; /* forward declaration... */ -#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS) +#define elf_check_arch(x) \ + ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS) +#define compat_elf_check_arch(x) \ + ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELFCLASS32) /* * These are used to set parameters in the core dumps. 
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h index be40331f757d..4a1062e05aaf 100644 --- a/arch/parisc/include/uapi/asm/siginfo.h +++ b/arch/parisc/include/uapi/asm/siginfo.h @@ -8,11 +8,4 @@ #include <asm-generic/siginfo.h> -/* - * SIGFPE si_codes - */ -#ifdef __KERNEL__ -#define FPE_FIXME 0 /* Broken dup of SI_USER */ -#endif /* __KERNEL__ */ - #endif diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index eafd06ab59ef..e5de34d00b1a 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -23,7 +23,7 @@ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PA11) += pci-dma.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += module.o -obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o +obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o obj-$(CONFIG_STACKTRACE)+= stacktrace.o obj-$(CONFIG_AUDIT) += audit.o obj64-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c deleted file mode 100644 index 20dfa081ed0b..000000000000 --- a/arch/parisc/kernel/binfmt_elf32.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels - * - * Copyright (C) 2000 John Marvin - * Copyright (C) 2000 Hewlett Packard Co. - * - * Heavily inspired from various other efforts to do the same thing - * (ia64,sparc64/mips64) - */ - -/* Make sure include/asm-parisc/elf.h does the right thing */ - -#define ELF_CLASS ELFCLASS32 - -#define ELF_CORE_COPY_REGS(dst, pt) \ - memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \ - { int i; \ - for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \ - for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \ - } \ - dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \ - dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \ - dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \ - dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \ - dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \ - dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \ - dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \ - dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \ - dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \ - dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \ - dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \ - dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15); - - -typedef unsigned int elf_greg_t; - -#include <linux/spinlock.h> -#include <asm/processor.h> -#include <linux/module.h> -#include <linux/elfcore.h> -#include <linux/compat.h> /* struct compat_timeval */ - -#define elf_prstatus elf_prstatus32 -struct elf_prstatus32 -{ - struct elf_siginfo pr_info; /* Info associated with signal */ - short pr_cursig; /* Current signal */ - unsigned int pr_sigpend; /* Set of pending signals */ - unsigned int pr_sighold; /* Set of held signals */ - pid_t pr_pid; - pid_t pr_ppid; - pid_t pr_pgrp; - pid_t pr_sid; - struct compat_timeval pr_utime; /* User time */ - struct compat_timeval pr_stime; /* System time */ - struct compat_timeval pr_cutime; /* Cumulative user time */ - struct compat_timeval pr_cstime; /* Cumulative system time */ - elf_gregset_t pr_reg; /* GP registers */ - int pr_fpvalid; /* True if math co-processor being used. 
*/ -}; - -#define elf_prpsinfo elf_prpsinfo32 -struct elf_prpsinfo32 -{ - char pr_state; /* numeric process state */ - char pr_sname; /* char for pr_state */ - char pr_zomb; /* zombie */ - char pr_nice; /* nice val */ - unsigned int pr_flag; /* flags */ - u16 pr_uid; - u16 pr_gid; - pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; - /* Lots missing */ - char pr_fname[16]; /* filename of executable */ - char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ -}; - -#define init_elf_binfmt init_elf32_binfmt - -#define ELF_PLATFORM ("PARISC32\0") - -/* - * We should probably use this macro to set a flag somewhere to indicate - * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we - * could set a processor dependent flag in the thread_struct. - */ - -#undef SET_PERSONALITY -#define SET_PERSONALITY(ex) \ - set_thread_flag(TIF_32BIT); \ - current->thread.map_base = DEFAULT_MAP_BASE32; \ - current->thread.task_size = DEFAULT_TASK_SIZE32 \ - -#undef ns_to_timeval -#define ns_to_timeval ns_to_compat_timeval - -#include "../../../fs/binfmt_elf.c" diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index a99da95fc9fd..bddd2acebdcc 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -254,7 +254,7 @@ parisc_cache_init(void) } } -void disable_sr_hashing(void) +void __init disable_sr_hashing(void) { int srhash_type, retval; unsigned long space_bits; diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 3b8507f71050..ee5a78a151a6 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data) * Checks all the children of @parent for a matching @id. If none * found, it allocates a new device and returns it. */ -static struct parisc_device * alloc_tree_node(struct device *parent, char id) +static struct parisc_device * __init alloc_tree_node( + struct device *parent, char id) { struct match_id_data d = { .id = id, @@ -825,8 +826,8 @@ static void walk_lower_bus(struct parisc_device *dev) * devices which are not physically connected (such as extra serial & * keyboard ports). This problem is not yet solved. */ -static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, - struct device *parent) +static void __init walk_native_bus(unsigned long io_io_low, + unsigned long io_io_high, struct device *parent) { int i, devices_found = 0; unsigned long hpa = io_io_low; diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 67b0f7532e83..22e6374ece44 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -38,9 +38,10 @@ #include <asm/cache.h> #include <asm/ldcw.h> #include <linux/linkage.h> +#include <linux/init.h> - .text - .align 128 + .section .text.hot + .align 16 ENTRY_CFI(flush_tlb_all_local) .proc @@ -328,8 +329,6 @@ fdsync: .procend ENDPROC_CFI(flush_data_cache_local) - .align 16 - /* Macros to serialize TLB purge operations on SMP. */ .macro tlb_lock la,flags,tmp @@ -1216,6 +1215,8 @@ ENTRY_CFI(flush_kernel_icache_range_asm) .procend ENDPROC_CFI(flush_kernel_icache_range_asm) + __INIT + /* align should cover use of rfi in disable_sr_hashing_asm and * srdis_done. 
*/ diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 13ee3569959a..ae684ac6efb6 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -174,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev) * pcibios_init_bridge() initializes cache line and default latency * for pci controllers and pci-pci bridges */ -void __init pcibios_init_bridge(struct pci_dev *dev) +void __ref pcibios_init_bridge(struct pci_dev *dev) { unsigned short bridge_ctl, bridge_ctl_new; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index bbe46571ff96..b931745815e0 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -112,14 +112,6 @@ void machine_restart(char *cmd) } -void machine_halt(void) -{ - /* - ** The LED/ChassisCodes are updated by the led_halt() - ** function, called by the reboot notifier chain. - */ -} - void (*chassis_power_off)(void); /* @@ -158,6 +150,11 @@ void machine_power_off(void) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); +void machine_halt(void) +{ + machine_power_off(); +} + void flush_thread(void) { /* Only needs to handle fpu stuff or perf monitors. diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index c3830400ca28..a1e772f909cb 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -205,7 +205,7 @@ static int __init rtc_init(void) device_initcall(rtc_init); #endif -void read_persistent_clock(struct timespec *ts) +void read_persistent_clock64(struct timespec64 *ts) { static struct pdc_tod tod_data; if (pdc_tod_read(&tod_data) == 0) { diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index c919e6c0a687..71d31274d782 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -627,9 +627,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs) on condition */ if(user_mode(regs)){ si.si_signo = SIGFPE; - /* Set to zero, and let the userspace app figure it out from - the insn pointed to by si_addr */ - si.si_code = FPE_FIXME; + /* Let userspace app figure it out from the insn pointed + * to by si_addr. + */ + si.si_code = FPE_CONDTRAP; si.si_addr = (void __user *) regs->iaoq[0]; force_sig_info(SIGFPE, &si, current); return; @@ -836,6 +837,17 @@ void __init initialize_ivt(const void *iva) if (pdc_instr(&instr) == PDC_OK) ivap[0] = instr; + /* + * Rules for the checksum of the HPMC handler: + * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed + * its own IVA). + * 2. The word at IVA + 32 is nonzero. + * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and + * Address (IVA + 56) are word-aligned. + * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of + * the Length/4 words starting at Address is zero. + */ + /* Compute Checksum for HPMC handler */ length = os_hpmc_size; ivap[7] = length; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index cab32ee824d2..2607d2d33405 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -516,7 +516,7 @@ static void __init map_pages(unsigned long start_vaddr, } } -void free_initmem(void) +void __ref free_initmem(void) { unsigned long init_begin = (unsigned long)__init_begin; unsigned long init_end = (unsigned long)__init_end; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 73ce5dd07642..c32a181a7cbb 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -552,6 +552,9 @@ config KEXEC_FILE for kernel and initramfs as opposed to a list of segments as is the case for the older kexec call. 
+config ARCH_HAS_KEXEC_PURGATORY + def_bool KEXEC_FILE + config RELOCATABLE bool "Build a relocatable kernel" depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE)) diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 931dda8be87c..66fcab13c8b4 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -545,18 +545,37 @@ enum { #ifdef CONFIG_PPC_BOOK3E #define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500) #else + +#ifdef CONFIG_PPC_DT_CPU_FTRS +#define CPU_FTRS_DT_CPU_BASE \ + (CPU_FTR_LWSYNC | \ + CPU_FTR_FPU_UNAVAILABLE | \ + CPU_FTR_NODSISRALIGN | \ + CPU_FTR_NOEXECUTE | \ + CPU_FTR_COHERENT_ICACHE | \ + CPU_FTR_STCX_CHECKS_ADDRESS | \ + CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ + CPU_FTR_DAWR | \ + CPU_FTR_ARCH_206 | \ + CPU_FTR_ARCH_207S) +#else +#define CPU_FTRS_DT_CPU_BASE (~0ul) +#endif + #ifdef CONFIG_CPU_LITTLE_ENDIAN #define CPU_FTRS_ALWAYS \ (CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \ CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER8_DD1 & \ - CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1) + CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \ + CPU_FTRS_DT_CPU_BASE) #else #define CPU_FTRS_ALWAYS \ (CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & \ - CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1) + CPU_FTRS_POWER9 & CPU_FTRS_POWER9_DD1 & CPU_FTRS_POWER9_DD2_1 & \ + CPU_FTRS_DT_CPU_BASE) #endif /* CONFIG_CPU_LITTLE_ENDIAN */ #endif #else diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index d8b1e8e7e035..4a585cba1787 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -95,7 +95,7 @@ static inline bool kdump_in_progress(void) } #ifdef CONFIG_KEXEC_FILE -extern struct kexec_file_ops kexec_elf64_ops; +extern const struct kexec_file_ops kexec_elf64_ops; #ifdef CONFIG_IMA_KEXEC #define ARCH_HAS_KIMAGE_ARCH diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 7e28442827f1..4f6573934792 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -15,9 +15,19 @@ #ifdef CC_USING_MPROFILE_KERNEL -#define MODULE_ARCH_VERMAGIC "mprofile-kernel" +#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel " +#else +#define MODULE_ARCH_VERMAGIC_FTRACE "" #endif +#ifdef CONFIG_RELOCATABLE +#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable " +#else +#define MODULE_ARCH_VERMAGIC_RELOCATABLE "" +#endif + +#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE + #ifndef __powerpc64__ /* * Thanks to Paul M for explaining this. 
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 7159e1a6a61a..03e1a920491e 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -21,6 +21,9 @@ /* We calculate number of sg entries based on PAGE_SIZE */ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) +/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */ +#define OPAL_BUSY_DELAY_MS 10 + /* /sys/firmware/opal */ extern struct kobject *opal_kobj; diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h index d1c2d2e658cf..2f3ff7a27881 100644 --- a/arch/powerpc/include/asm/powernv.h +++ b/arch/powerpc/include/asm/powernv.h @@ -15,7 +15,7 @@ extern void powernv_set_nmmu_ptcr(unsigned long ptcr); extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, unsigned long flags, - struct npu_context *(*cb)(struct npu_context *, void *), + void (*cb)(struct npu_context *, void *), void *priv); extern void pnv_npu2_destroy_context(struct npu_context *context, struct pci_dev *gpdev); diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index e88fbb1fdb8f..8ab51f6ca03a 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -53,18 +53,6 @@ struct dt_cpu_feature { int disabled; }; -#define CPU_FTRS_BASE \ - (CPU_FTR_LWSYNC | \ - CPU_FTR_FPU_UNAVAILABLE |\ - CPU_FTR_NODSISRALIGN |\ - CPU_FTR_NOEXECUTE |\ - CPU_FTR_COHERENT_ICACHE | \ - CPU_FTR_STCX_CHECKS_ADDRESS |\ - CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ - CPU_FTR_DAWR | \ - CPU_FTR_ARCH_206 |\ - CPU_FTR_ARCH_207S) - #define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8) #define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \ @@ -124,7 +112,7 @@ static char dt_cpu_name[64]; static struct cpu_spec __initdata base_cpu_spec = { .cpu_name = NULL, - .cpu_features = CPU_FTRS_BASE, + .cpu_features = CPU_FTRS_DT_CPU_BASE, .cpu_user_features = COMMON_USER_BASE, .cpu_user_features2 = COMMON_USER2_BASE, .mmu_features = 0, diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 2d4956e97aa9..ee5a67d57aab 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev) eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]); /* PCI Command: 0x4 */ - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]); + eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] | + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); /* Check the PCIe link is ready */ eeh_bridge_check_link(edev); diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 79d005445c6c..e734f6e45abc 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE lbz r0,HSTATE_HWTHREAD_STATE(r13) cmpwi r0,KVM_HWTHREAD_IN_KERNEL - beq 1f + beq 0f li r0,KVM_HWTHREAD_IN_KERNEL stb r0,HSTATE_HWTHREAD_STATE(r13) /* Order setting hwthread_state vs. 
testing hwthread_req */ sync - lbz r0,HSTATE_HWTHREAD_REQ(r13) +0: lbz r0,HSTATE_HWTHREAD_REQ(r13) cmpwi r0,0 beq 1f b kvm_start_guest diff --git a/arch/powerpc/kernel/kexec_elf_64.c b/arch/powerpc/kernel/kexec_elf_64.c index 9a42309b091a..ba4f18a43ee8 100644 --- a/arch/powerpc/kernel/kexec_elf_64.c +++ b/arch/powerpc/kernel/kexec_elf_64.c @@ -572,7 +572,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, { int ret; unsigned int fdt_size; - unsigned long kernel_load_addr, purgatory_load_addr; + unsigned long kernel_load_addr; unsigned long initrd_load_addr = 0, fdt_load_addr; void *fdt; const void *slave_code; @@ -580,6 +580,8 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, struct elf_info elf_info; struct kexec_buf kbuf = { .image = image, .buf_min = 0, .buf_max = ppc64_rma_size }; + struct kexec_buf pbuf = { .image = image, .buf_min = 0, + .buf_max = ppc64_rma_size, .top_down = true }; ret = build_elf_exec_info(kernel_buf, kernel_len, &ehdr, &elf_info); if (ret) @@ -591,14 +593,13 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr); - ret = kexec_load_purgatory(image, 0, ppc64_rma_size, true, - &purgatory_load_addr); + ret = kexec_load_purgatory(image, &pbuf); if (ret) { pr_err("Loading purgatory failed.\n"); goto out; } - pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr); + pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); if (initrd != NULL) { kbuf.buffer = initrd; @@ -657,7 +658,7 @@ out: return ret ? ERR_PTR(ret) : fdt; } -struct kexec_file_ops kexec_elf64_ops = { +const struct kexec_file_ops kexec_elf64_ops = { .probe = elf64_probe, .load = elf64_load, }; diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c index 45e0b7d5f200..0bd23dc789a4 100644 --- a/arch/powerpc/kernel/machine_kexec_file_64.c +++ b/arch/powerpc/kernel/machine_kexec_file_64.c @@ -31,52 +31,19 @@ #define SLAVE_CODE_SIZE 256 -static struct kexec_file_ops *kexec_file_loaders[] = { +const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_elf64_ops, + NULL }; int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) { - int i, ret = -ENOEXEC; - struct kexec_file_ops *fops; - /* We don't support crash kernels yet. 
*/ if (image->type == KEXEC_TYPE_CRASH) return -EOPNOTSUPP; - for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { - fops = kexec_file_loaders[i]; - if (!fops || !fops->probe) - continue; - - ret = fops->probe(buf, buf_len); - if (!ret) { - image->fops = fops; - return ret; - } - } - - return ret; -} - -void *arch_kexec_kernel_image_load(struct kimage *image) -{ - if (!image->fops || !image->fops->load) - return ERR_PTR(-ENOEXEC); - - return image->fops->load(image, image->kernel_buf, - image->kernel_buf_len, image->initrd_buf, - image->initrd_buf_len, image->cmdline_buf, - image->cmdline_buf_len); -} - -int arch_kimage_file_post_load_cleanup(struct kimage *image) -{ - if (!image->fops || !image->fops->cleanup) - return 0; - - return image->fops->cleanup(image->image_loader_data); + return kexec_image_probe_default(image, buf, buf_len); } /** diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index fe6fc63251fe..38c5b4764bfe 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs, if (pfn != ULONG_MAX) { *phys_addr = (pfn << PAGE_SHIFT); - handled = 1; } } } @@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs, * kernel/exception-64s.h */ if (get_paca()->in_mce < MAX_MCE_DEPTH) - if (!mce_find_instr_ea_and_pfn(regs, addr, - phys_addr)) - handled = 1; + mce_find_instr_ea_and_pfn(regs, addr, phys_addr); } found = 1; } @@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs, const struct mce_ierror_table itable[]) { struct mce_error_info mce_err = { 0 }; - uint64_t addr, phys_addr; + uint64_t addr, phys_addr = ULONG_MAX; uint64_t srr1 = regs->msr; long handled; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 66f2b6299c40..b78f142a4148 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -880,7 +880,7 @@ void rfi_flush_enable(bool enable) rfi_flush = enable; } -static void init_fallback_flush(void) +static void __ref init_fallback_flush(void) { u64 l1d_size, limit; int cpu; @@ -890,6 +890,17 @@ static void init_fallback_flush(void) return; l1d_size = ppc64_caches.l1d.size; + + /* + * If there is no d-cache-size property in the device tree, l1d_size + * could be zero. That leads to the loop in the asm wrapping around to + * 2^64-1, and then walking off the end of the fallback area and + * eventually causing a page fault which is fatal. Just default to + * something vaguely sane. + */ + if (!l1d_size) + l1d_size = (64 * 1024); + limit = min(ppc64_bolted_size(), ppc64_rma_size); /* diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index e16ec7b3b427..9ca7148b5881 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) #endif #ifdef CONFIG_NMI_IPI -static void stop_this_cpu(struct pt_regs *regs) -#else +static void nmi_stop_this_cpu(struct pt_regs *regs) +{ + /* + * This is a special case because it never returns, so the NMI IPI + * handling would never mark it as done, which makes any later + * smp_send_nmi_ipi() call spin forever. Mark it done now. + * + * IRQs are already hard disabled by the smp_handle_nmi_ipi. 
+ */ + nmi_ipi_lock(); + nmi_ipi_busy_count--; + nmi_ipi_unlock(); + + /* Remove this CPU */ + set_cpu_online(smp_processor_id(), false); + + spin_begin(); + while (1) + spin_cpu_relax(); +} + +void smp_send_stop(void) +{ + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000); +} + +#else /* CONFIG_NMI_IPI */ + static void stop_this_cpu(void *dummy) -#endif { /* Remove this CPU */ set_cpu_online(smp_processor_id(), false); @@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy) void smp_send_stop(void) { -#ifdef CONFIG_NMI_IPI - smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); -#else + static bool stopped = false; + + /* + * Prevent waiting on csd lock from a previous smp_send_stop. + * This is racy, but in general callers try to do the right + * thing and only fire off one smp_send_stop (e.g., see + * kernel/panic.c) + */ + if (stopped) + return; + + stopped = true; + smp_call_function(stop_this_cpu, NULL, 0); -#endif } +#endif /* CONFIG_NMI_IPI */ struct thread_info *current_set[NR_CPUS]; diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a2ef0c0e6c31..0904492e7032 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1613,6 +1613,22 @@ void facility_unavailable_exception(struct pt_regs *regs) value = mfspr(SPRN_FSCR); status = value >> 56; + if ((hv || status >= 2) && + (status < ARRAY_SIZE(facility_strings)) && + facility_strings[status]) + facility = facility_strings[status]; + + /* We should not have taken this interrupt in kernel */ + if (!user_mode(regs)) { + pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n", + facility, status, regs->nip); + die("Unexpected facility unavailable exception", regs, SIGABRT); + } + + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + if (status == FSCR_DSCR_LG) { /* * User is accessing the DSCR register using the problem @@ -1679,25 +1695,11 @@ void facility_unavailable_exception(struct pt_regs *regs) return; } - if ((hv || status >= 2) && - (status < ARRAY_SIZE(facility_strings)) && - facility_strings[status]) - facility = facility_strings[status]; - - /* We restore the interrupt state now */ - if (!arch_irq_disabled_regs(regs)) - local_irq_enable(); - pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n", hv ? 
"Hypervisor " : "", facility, status, regs->nip, regs->msr); out: - if (user_mode(regs)) { - _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; - } - - die("Unexpected facility unavailable exception", regs, SIGABRT); + _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); } #endif diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index e1c083fbe434..78e6a392330f 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -470,8 +470,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (kvm->arch.lpid)); - trace_tlbie(kvm->arch.lpid, 0, rbvalues[i], - kvm->arch.lpid, 0, 0, 0); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { @@ -492,8 +490,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (0)); - trace_tlbie(kvm->arch.lpid, 1, rbvalues[i], - 0, 0, 0, 0); } asm volatile("ptesync" : : : "memory"); } diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 6038e2e7aee0..876d4f294fdd 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); } +#ifdef CONFIG_ALTIVEC +void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu) +{ + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL); +} +#endif + void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 35f80ab7cbd8..288fe4f0db4e 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest, unsigned int *target = (unsigned int *)branch_target(src); /* Branch within the section doesn't need translating */ - if (target < alt_start || target >= alt_end) { + if (target < alt_start || target > alt_end) { instr = translate_branch(dest, src); if (!instr) return 1; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 737f8a4632cc..c3c39b02b2ba 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap * start, start + size, rc); return -EFAULT; } + flush_inval_dcache_range(start, start + size); return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); } @@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap /* Remove htab bolted mappings for this section of memory */ start = (unsigned long)__va(start); + flush_inval_dcache_range(start, start + size); ret = remove_section_mapping(start, start + size); /* Ensure all vmalloc mappings are flushed in case they also diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 9cd87d11fe4e..205fe557ca10 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -35,6 +35,7 @@ #include <asm/mmu.h> #include <asm/copro.h> #include <asm/hugetlb.h> +#include <asm/mmu_context.h> static DEFINE_SPINLOCK(slice_convert_lock); diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 2fba6170ab3f..a5d7309c2d05 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -33,13 +33,12 
@@ static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is, { unsigned long rb; unsigned long rs; - unsigned int r = 1; /* radix format */ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); - asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) - : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r) + asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1) + : : "r"(rb), "r"(rs), "i"(ric), "i"(prs) : "memory"); } diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 9033c8194eda..ccc421503363 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), - idr_get_cursor(&task_active_pid_ns(current)->idr)); + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index de470caf0784..fc222a0c2ac4 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = { .open = simple_open, }; -static void flush_memory_region(u64 base, u64 size) -{ - unsigned long line_size = ppc64_caches.l1d.size; - u64 end = base + size; - u64 addr; - - base = round_down(base, line_size); - end = round_up(end, line_size); - - for (addr = base; addr < end; addr += line_size) - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); -} - static int check_memblock_online(struct memory_block *mem, void *arg) { if (mem->state != MEM_ONLINE) @@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages) walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, change_memblock_state); - /* RCU grace period? */ - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), - nr_pages << PAGE_SHIFT); - lock_device_hotplug(); remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); unlock_device_hotplug(); diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 69a4f9e8bd55..525e966dce34 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -34,6 +34,19 @@ #define npu_to_phb(x) container_of(x, struct pnv_phb, npu) /* + * spinlock to protect initialisation of an npu_context for a particular + * mm_struct. + */ +static DEFINE_SPINLOCK(npu_context_lock); + +/* + * When an address shootdown range exceeds this threshold we invalidate the + * entire TLB on the GPU for the given PID rather than each specific address in + * the range. + */ +#define ATSD_THRESHOLD (2*1024*1024) + +/* * Other types of TCE cache invalidation are not functional in the * hardware. 
*/ @@ -401,7 +414,7 @@ struct npu_context { bool nmmu_flush; /* Callback to stop translation requests on a given GPU */ - struct npu_context *(*release_cb)(struct npu_context *, void *); + void (*release_cb)(struct npu_context *context, void *priv); /* * Private pointer passed to the above callback for usage by @@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct npu_context *npu_context = mn_to_npu_context(mn); unsigned long address; - for (address = start; address < end; address += PAGE_SIZE) - mmio_invalidate(npu_context, 1, address, false); + if (end - start > ATSD_THRESHOLD) { + /* + * Just invalidate the entire PID if the address range is too + * large. + */ + mmio_invalidate(npu_context, 0, 0, true); + } else { + for (address = start; address < end; address += PAGE_SIZE) + mmio_invalidate(npu_context, 1, address, false); - /* Do the flush only on the final addess == end */ - mmio_invalidate(npu_context, 1, address, true); + /* Do the flush only on the final addess == end */ + mmio_invalidate(npu_context, 1, address, true); + } } static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { @@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { * Returns an error if there no contexts are currently available or a * npu_context which should be passed to pnv_npu2_handle_fault(). * - * mmap_sem must be held in write mode. + * mmap_sem must be held in write mode and must not be called from interrupt + * context. */ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, unsigned long flags, - struct npu_context *(*cb)(struct npu_context *, void *), + void (*cb)(struct npu_context *, void *), void *priv) { int rc; @@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, /* * Setup the NPU context table for a particular GPU. These need to be * per-GPU as we need the tables to filter ATSDs when there are no - * active contexts on a particular GPU. + * active contexts on a particular GPU. It is safe for these to be + * called concurrently with destroy as the OPAL call takes appropriate + * locks and refcounts on init/destroy. */ rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); @@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, * We store the npu pci device so we can more easily get at the * associated npus. */ + spin_lock(&npu_context_lock); npu_context = mm->context.npu_context; + if (npu_context) { + if (npu_context->release_cb != cb || + npu_context->priv != priv) { + spin_unlock(&npu_context_lock); + opal_npu_destroy_context(nphb->opal_id, mm->context.id, + PCI_DEVID(gpdev->bus->number, + gpdev->devfn)); + return ERR_PTR(-EINVAL); + } + + WARN_ON(!kref_get_unless_zero(&npu_context->kref)); + } + spin_unlock(&npu_context_lock); + if (!npu_context) { + /* + * We can set up these fields without holding the + * npu_context_lock as the npu_context hasn't been returned to + * the caller meaning it can't be destroyed. Parallel allocation + * is protected against by mmap_sem. 
+ */ rc = -ENOMEM; npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); if (npu_context) { @@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, } mm->context.npu_context = npu_context; - } else { - WARN_ON(!kref_get_unless_zero(&npu_context->kref)); } npu_context->release_cb = cb; @@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref) mm_context_remove_copro(npu_context->mm); npu_context->mm->context.npu_context = NULL; - mmu_notifier_unregister(&npu_context->mn, - npu_context->mm); - - kfree(npu_context); } +/* + * Destroy a context on the given GPU. May free the npu_context if it is no + * longer active on any GPUs. Must not be called from interrupt context. + */ void pnv_npu2_destroy_context(struct npu_context *npu_context, struct pci_dev *gpdev) { + int removed; struct pnv_phb *nphb; struct npu *npu; struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); @@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context, WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, PCI_DEVID(gpdev->bus->number, gpdev->devfn)); - kref_put(&npu_context->kref, pnv_npu2_release_context); + spin_lock(&npu_context_lock); + removed = kref_put(&npu_context->kref, pnv_npu2_release_context); + spin_unlock(&npu_context_lock); + + /* + * We need to do this outside of pnv_npu2_release_context so that it is + * outside the spinlock as mmu_notifier_destroy uses SRCU. + */ + if (removed) { + mmu_notifier_unregister(&npu_context->mn, + npu_context->mm); + + kfree(npu_context); + } + } EXPORT_SYMBOL(pnv_npu2_destroy_context); diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c index ba2ff06a2c98..1bceb95f422d 100644 --- a/arch/powerpc/platforms/powernv/opal-nvram.c +++ b/arch/powerpc/platforms/powernv/opal-nvram.c @@ -11,6 +11,7 @@ #define DEBUG +#include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> @@ -56,8 +57,12 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_write_nvram(__pa(buf), count, off); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); + } else if (rc == OPAL_BUSY) { + msleep(OPAL_BUSY_DELAY_MS); + } } if (rc) diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c index f8868864f373..aa2a5139462e 100644 --- a/arch/powerpc/platforms/powernv/opal-rtc.c +++ b/arch/powerpc/platforms/powernv/opal-rtc.c @@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void) while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); - if (rc == OPAL_BUSY_EVENT) + if (rc == OPAL_BUSY_EVENT) { + mdelay(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); - else if (rc == OPAL_BUSY) - mdelay(10); + } else if (rc == OPAL_BUSY) { + mdelay(OPAL_BUSY_DELAY_MS); + } } if (rc != OPAL_SUCCESS) return 0; diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c index d22aeb0b69e1..b48454be5b98 100644 --- a/arch/powerpc/sysdev/xive/native.c +++ b/arch/powerpc/sysdev/xive/native.c @@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc) if (xive_pool_vps == XIVE_INVALID_VP) return; + /* Check if pool VP already active, if it is, pull it */ + if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & 
TM_QW2W2_VP) + in_be64(xive_tima + TM_SPC_PULL_POOL_CTX); + /* Enable the pool VP */ vp = xive_pool_vps + cpu; pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp); diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 23d8acca5c90..cd4fd85fde84 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -11,6 +11,7 @@ config RISCV select ARCH_WANT_FRAME_POINTERS select CLONE_BACKWARDS select COMMON_CLK + select DMA_DIRECT_OPS select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES select GENERIC_IRQ_SHOW @@ -89,9 +90,6 @@ config PGTABLE_LEVELS config HAVE_KPROBES def_bool n -config DMA_DIRECT_OPS - def_bool y - menu "Platform type" choice diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild index 1e5fd280fb4d..4286a5f83876 100644 --- a/arch/riscv/include/asm/Kbuild +++ b/arch/riscv/include/asm/Kbuild @@ -15,7 +15,6 @@ generic-y += fcntl.h generic-y += futex.h generic-y += hardirq.h generic-y += hash.h -generic-y += handle_irq.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 324568d33921..f6561b783b61 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions. # Make sure only to export the intended __vdso_xxx symbol offsets. quiet_cmd_vdsold = VDSOLD $@ - cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \ -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ $(CROSS_COMPILE)objcopy \ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild index 9fdff3fe1a42..e63940bb57cd 100644 --- a/arch/s390/Kbuild +++ b/arch/s390/Kbuild @@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/ obj-y += net/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_NUMA) += numa/ +obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/ diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 32a0d5b958bf..199ac3e4da1d 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -47,10 +47,6 @@ config PGSTE config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config KEXEC - def_bool y - select KEXEC_CORE - config AUDIT_ARCH def_bool y @@ -290,12 +286,12 @@ config MARCH_Z13 older machines. config MARCH_Z14 - bool "IBM z14" + bool "IBM z14 ZR1 and z14" select HAVE_MARCH_Z14_FEATURES help - Select this to enable optimizations for IBM z14 (3906 series). - The kernel will be slightly faster but will not work on older - machines. + Select this to enable optimizations for IBM z14 ZR1 and z14 (3907 + and 3906 series). The kernel will be slightly faster but will not + work on older machines. endchoice @@ -525,6 +521,26 @@ source kernel/Kconfig.preempt source kernel/Kconfig.hz +config KEXEC + def_bool y + select KEXEC_CORE + +config KEXEC_FILE + bool "kexec file based system call" + select KEXEC_CORE + select BUILD_BIN2C + depends on CRYPTO + depends on CRYPTO_SHA256 + depends on CRYPTO_SHA256_S390 + help + Enable the kexec file based system call. In contrast to the normal + kexec system call this system call takes file descriptors for the + kernel and initramfs as arguments. 
+ +config ARCH_HAS_KEXEC_PURGATORY + def_bool y + depends on KEXEC_FILE + config ARCH_RANDOM def_bool y prompt "s390 architectural random number generation API" diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index da9dad35c28e..d1fa37fcce83 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -3,12 +3,6 @@ # Makefile for the linux s390-specific parts of the memory manager. # -COMPILE_VERSION := __linux_compile_version_id__`hostname | \ - tr -c '[0-9A-Za-z]' '_'`__`date | \ - tr -c '[0-9A-Za-z]' '_'`_t - -ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. - targets := image targets += bzImage subdir- := compressed diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore index ae06b9b4c02f..2088cc140629 100644 --- a/arch/s390/boot/compressed/.gitignore +++ b/arch/s390/boot/compressed/.gitignore @@ -1,3 +1,4 @@ sizes.h vmlinux vmlinux.lds +vmlinux.bin.full diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 63838a17e56a..511b2cc9b91a 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c @@ -119,34 +119,12 @@ static void error(char *x) asm volatile("lpsw %0" : : "Q" (psw)); } -/* - * Safe guard the ipl parameter block against a memory area that will be - * overwritten. The validity check for the ipl parameter block is complex - * (see cio_get_iplinfo and ipl_save_parameters) but if the pointer to - * the ipl parameter block intersects with the passed memory area we can - * safely assume that we can read from that memory. In that case just copy - * the memory to IPL_PARMBLOCK_ORIGIN even if there is no ipl parameter - * block. - */ -static void check_ipl_parmblock(void *start, unsigned long size) -{ - void *src, *dst; - - src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr; - if (src + PAGE_SIZE <= start || src >= start + size) - return; - dst = (void *) IPL_PARMBLOCK_ORIGIN; - memmove(dst, src, PAGE_SIZE); - S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; -} - unsigned long decompress_kernel(void) { void *output, *kernel_end; output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE); kernel_end = output + SZ__bss_start; - check_ipl_parmblock((void *) 0, (unsigned long) kernel_end); #ifdef CONFIG_BLK_DEV_INITRD /* @@ -156,7 +134,6 @@ unsigned long decompress_kernel(void) * current bss section.. 
*/ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) { - check_ipl_parmblock(kernel_end, INITRD_SIZE); memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE); INITRD_START = (unsigned long) kernel_end; } diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/debug_defconfig index 5af8458951cf..6176fe9795ca 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -24,13 +24,13 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_SYSFS_SYSCALL is not set +CONFIG_CHECKPOINT_RESTORE=y CONFIG_BPF_SYSCALL=y CONFIG_USERFAULTFD=y # CONFIG_COMPAT_BRK is not set @@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=256 +CONFIG_NR_CPUS=512 CONFIG_NUMA=y CONFIG_PREEMPT=y CONFIG_HZ_100=y +CONFIG_KEXEC_FILE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y @@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NET_SCTPPROBE=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m @@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_CSUM=m CONFIG_DNS_RESOLVER=y +CONFIG_OPENVSWITCH=m CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m CONFIG_DEVTMPFS=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=0 @@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_BLK_DEV_RAM_DAX=y CONFIG_VIRTIO_BLK=y CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m CONFIG_ENCLOSURE_SERVICES=m CONFIG_GENWQE=m CONFIG_RAID_ATTRS=m @@ -461,6 +461,7 @@ CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_INPUT_EVDEV=y # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO is not set @@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m CONFIG_DIAG288_WATCHDOG=m +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FRAMEBUFFER_CONSOLE=y # CONFIG_HID is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m @@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m CONFIG_VFIO=m CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=m CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y @@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_S390_PTDUMP=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y @@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y CONFIG_IMA_APPRAISE=y -CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_MCRYPTD=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m 
CONFIG_CRYPTO_CRC32_S390=y -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m -CONFIG_X509_CERTIFICATE_PARSER=m +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y CONFIG_CRC7=m CONFIG_CRC8=m CONFIG_RANDOM32_SELFTEST=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig deleted file mode 100644 index d52eafe57ae8..000000000000 --- a/arch/s390/configs/gcov_defconfig +++ /dev/null @@ -1,661 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_AUDIT=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_NUMA_BALANCING=y -# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_BLK_CGROUP=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_SYSFS_SYSCALL is not set -CONFIG_BPF_SYSCALL=y -CONFIG_USERFAULTFD=y -# CONFIG_COMPAT_BRK is not set -CONFIG_PROFILING=y -CONFIG_OPROFILE=m -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -CONFIG_GCOV_KERNEL=y -CONFIG_GCOV_PROFILE_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_THROTTLING=y -CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_SQ=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_IBM_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_CFQ_GROUP_IOSCHED=y -CONFIG_DEFAULT_DEADLINE=y -CONFIG_LIVEPATCH=y -CONFIG_TUNE_ZEC12=y -CONFIG_NR_CPUS=512 -CONFIG_NUMA=y -CONFIG_HZ_100=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_KSM=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_CLEANCACHE=y -CONFIG_FRONTSWAP=y -CONFIG_MEM_SOFT_DIRTY=y -CONFIG_ZSWAP=y -CONFIG_ZBUD=m -CONFIG_ZSMALLOC=m -CONFIG_ZSMALLOC_STAT=y -CONFIG_DEFERRED_STRUCT_PAGE_INIT=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_PCI=y -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_S390=y -CONFIG_CHSC_SCH=y -CONFIG_CRASH_DUMP=y -CONFIG_BINFMT_MISC=m -CONFIG_HIBERNATION=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_DIAG=m -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=m -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_INET_XFRM_MODE_BEET=m -CONFIG_INET_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m 
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m -CONFIG_INET6_XFRM_MODE_TUNNEL=m -CONFIG_INET6_XFRM_MODE_BEET=m -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_TABLES=m -CONFIG_NFT_EXTHDR=m -CONFIG_NFT_META=m -CONFIG_NFT_CT=m -CONFIG_NFT_COUNTER=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_NAT=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NETFILTER_XT_SET=m -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_SET=m -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_PROTO_TCP=y 
-CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_PE_SIP=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_NF_TABLES_IPV4=m -CONFIG_NFT_CHAIN_ROUTE_IPV4=m -CONFIG_NF_TABLES_ARP=m -CONFIG_NFT_CHAIN_NAT_IPV4=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_NF_TABLES_IPV6=m -CONFIG_NFT_CHAIN_ROUTE_IPV6=m -CONFIG_NFT_CHAIN_NAT_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_NF_TABLES_BRIDGE=m -CONFIG_NET_SCTPPROBE=m -CONFIG_RDS=m -CONFIG_RDS_RDMA=m -CONFIG_RDS_TCP=m -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -CONFIG_DNS_RESOLVER=y -CONFIG_NETLINK_DIAG=m -CONFIG_CGROUP_NET_PRIO=y -CONFIG_BPF_JIT=y -CONFIG_NET_PKTGEN=m -CONFIG_NET_TCPPROBE=m -CONFIG_DEVTMPFS=y -CONFIG_DMA_CMA=y -CONFIG_CMA_SIZE_MBYTES=0 -CONFIG_CONNECTOR=y -CONFIG_ZRAM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_DRBD=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=32768 -CONFIG_BLK_DEV_RAM_DAX=y -CONFIG_VIRTIO_BLK=y -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_GENWQE=m -CONFIG_RAID_ATTRS=m -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y 
-CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_SCSI_SAS_LIBSAS=m
-CONFIG_SCSI_SRP_ATTRS=m
-CONFIG_ISCSI_TCP=m
-CONFIG_SCSI_DEBUG=m
-CONFIG_ZFCP=y
-CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=y
-CONFIG_SCSI_DH_RDAC=m
-CONFIG_SCSI_DH_HP_SW=m
-CONFIG_SCSI_DH_EMC=m
-CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_LOG_USERSPACE=m
-CONFIG_DM_RAID=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_QL=m
-CONFIG_DM_MULTIPATH_ST=m
-CONFIG_DM_DELAY=m
-CONFIG_DM_UEVENT=y
-CONFIG_DM_FLAKEY=m
-CONFIG_DM_VERITY=m
-CONFIG_DM_SWITCH=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
-CONFIG_TUN=m
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=m
-CONFIG_NLMON=m
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-CONFIG_MLX4_EN=m
-CONFIG_MLX5_CORE=m
-CONFIG_MLX5_CORE_EN=y
-# CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPTP=m
-CONFIG_PPPOL2TP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_HW_RANDOM_VIRTIO=m
-CONFIG_RAW_DRIVER=m
-CONFIG_HANGCHECK_TIMER=m
-CONFIG_TN3270_FS=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_SOFT_WATCHDOG=m
-CONFIG_DIAG288_WATCHDOG=m
-# CONFIG_HID is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_INFINIBAND=m
-CONFIG_INFINIBAND_USER_ACCESS=m
-CONFIG_MLX4_INFINIBAND=m
-CONFIG_MLX5_INFINIBAND=m
-CONFIG_VFIO=m
-CONFIG_VFIO_PCI=m
-CONFIG_VIRTIO_BALLOON=m
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_JBD2_DEBUG=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-CONFIG_JFS_STATISTICS=y
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
-CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
-CONFIG_OCFS2_FS=m
-CONFIG_BTRFS_FS=y
-CONFIG_BTRFS_FS_POSIX_ACL=y
-CONFIG_NILFS2_FS=m
-CONFIG_FS_DAX=y
-CONFIG_EXPORTFS_BLOCK_OPS=y
-CONFIG_FANOTIFY=y
-CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QFMT_V1=m
-CONFIG_QFMT_V2=m
-CONFIG_AUTOFS4_FS=m
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=m
-CONFIG_OVERLAY_FS=m
-CONFIG_OVERLAY_FS_REDIRECT_DIR=y
-CONFIG_FSCACHE=m
-CONFIG_CACHEFILES=m
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_CONFIGFS_FS=m
-CONFIG_ECRYPT_FS=m
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_SQUASHFS_XATTR=y
-CONFIG_SQUASHFS_LZO=y
-CONFIG_SQUASHFS_XZ=y
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=m
-CONFIG_NFS_SWAP=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NFSD_V4=y
-CONFIG_NFSD_V4_SECURITY_LABEL=y
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_STATS2=y
-CONFIG_CIFS_WEAK_PW_HASH=y
-CONFIG_CIFS_UPCALL=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-# CONFIG_CIFS_DEBUG is not set
-CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_NLS_DEFAULT="utf8"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_RCU_TORTURE_TEST=m
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-CONFIG_HIST_TRIGGERS=y
-CONFIG_LKDTM=m
-CONFIG_PERCPU_TEST=m
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
-CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=m
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_INTEGRITY_SIGNATURE=y
-CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
-CONFIG_IMA=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_APPRAISE=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_ZCRYPT=m
-CONFIG_PKEY=m
-CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_CRC7=m
-CONFIG_CRC8=m
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 20ed149e1137..c105bcc6d7a6 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -25,13 +25,13 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
@@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
-CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
+CONFIG_OPENVSWITCH=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
-CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
@@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_NVME=m
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
@@ -455,6 +458,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
+CONFIG_DRM=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
@@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_INPUT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
-CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index fa9b7dd1a513..ad47abd08630 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -329,7 +329,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
- .cra_priority = 400, /* combo: aes + ecb */
+ .cra_priority = 401, /* combo: aes + ecb + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -426,7 +426,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
- .cra_priority = 400, /* combo: aes + cbc */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -633,7 +633,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
static struct crypto_alg xts_aes_alg = {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-s390",
- .cra_priority = 400, /* combo: aes + xts */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = AES_BLOCK_SIZE,
@@ -763,7 +763,7 @@ static int ctr_aes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_aes_alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-s390",
- .cra_priority = 400, /* combo: aes + ctr */
+ .cra_priority = 402, /* ecb-aes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = 1,
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 003932db8d12..80b27294c1de 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -138,7 +138,7 @@ static int ecb_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ecb_paes_alg = {
.cra_name = "ecb(paes)",
.cra_driver_name = "ecb-paes-s390",
- .cra_priority = 400, /* combo: aes + ecb */
+ .cra_priority = 401, /* combo: aes + ecb + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
@@ -241,7 +241,7 @@ static int cbc_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg cbc_paes_alg = {
.cra_name = "cbc(paes)",
.cra_driver_name = "cbc-paes-s390",
- .cra_priority = 400, /* combo: aes + cbc */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
@@ -377,7 +377,7 @@ static int xts_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg xts_paes_alg = {
.cra_name = "xts(paes)",
.cra_driver_name = "xts-paes-s390",
- .cra_priority = 400, /* combo: aes + xts */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_pxts_ctx),
@@ -523,7 +523,7 @@ static int ctr_paes_decrypt(struct blkcipher_desc *desc,
static struct crypto_alg ctr_paes_alg = {
.cra_name = "ctr(paes)",
.cra_driver_name = "ctr-paes-s390",
- .cra_priority = 400, /* combo: aes + ctr */
+ .cra_priority = 402, /* ecb-paes-s390 + 1 */
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct s390_paes_ctx),
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 46a3178d8bc6..f40600eb1762 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_CPU_ISOLATION is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -23,12 +24,12 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+# CONFIG_VT is not set
CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
@@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
-CONFIG_KPROBES_SANITY_TEST=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CFB=m CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m @@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SALSA20=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SPECK=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 43bbe63e2992..06b513d192b9 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb) if (sb->s_root) hypfs_delete_tree(sb->s_root); - if (sb_info->update_file) + if (sb_info && sb_info->update_file) hypfs_remove(sb_info->update_file); kfree(sb->s_fs_info); sb->s_fs_info = NULL; diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h index cfce6835b109..c1bedb4c8de0 100644 --- a/arch/s390/include/asm/ap.h +++ b/arch/s390/include/asm/ap.h @@ -20,9 +20,9 @@ */ typedef unsigned int ap_qid_t; -#define AP_MKQID(_card, _queue) (((_card) & 63) << 8 | ((_queue) & 255)) -#define AP_QID_CARD(_qid) (((_qid) >> 8) & 63) -#define AP_QID_QUEUE(_qid) ((_qid) & 255) +#define AP_MKQID(_card, _queue) (((_card) & 0xff) << 8 | ((_queue) & 0xff)) +#define AP_QID_CARD(_qid) (((_qid) >> 8) & 0xff) +#define AP_QID_QUEUE(_qid) ((_qid) & 0xff) /** * struct ap_queue_status - Holds the AP queue status. diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 847a04262b9c..225667652069 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -328,16 +328,6 @@ static inline u8 pathmask_to_pos(u8 mask) void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); -extern void reipl_ccw_dev(struct ccw_dev_id *id); - -struct cio_iplinfo { - u8 ssid; - u16 devno; - int is_qdio; -}; - -extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo); - /* Function from drivers/s390/cio/chsc.c */ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta); int chsc_sstpi(void *page, void *result, size_t size); diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 186c7b5f5511..ae5135704616 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -15,8 +15,6 @@ #define NSS_NAME_SIZE 8 -#define IPL_PARMBLOCK_ORIGIN 0x2000 - #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ sizeof(struct ipl_block_fcp)) @@ -29,10 +27,6 @@ #define IPL_MAX_SUPPORTED_VERSION (0) -#define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ - IPL_PARMBLOCK_ORIGIN) -#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) - struct ipl_list_hdr { u32 len; u8 reserved1[3]; @@ -83,33 +77,21 @@ struct ipl_parameter_block { union { struct ipl_block_fcp fcp; struct ipl_block_ccw ccw; + char raw[PAGE_SIZE - sizeof(struct ipl_list_hdr)]; } ipl_info; } __packed __aligned(PAGE_SIZE); -/* - * IPL validity flags - */ -extern u32 ipl_flags; - struct save_area; struct save_area * __init save_area_alloc(bool is_boot_cpu); struct save_area * __init save_area_boot_cpu(void); void __init save_area_add_regs(struct save_area *, void *regs); void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs); -extern void do_reipl(void); -extern void do_halt(void); -extern void do_poff(void); -extern void ipl_verify_parameters(void); -extern void ipl_update_parameters(void); +extern void s390_reset_system(void); +extern void ipl_store_parameters(void); extern size_t append_ipl_vmparm(char *, size_t); extern size_t append_ipl_scpdata(char *, size_t); -enum { - IPL_DEVNO_VALID = 1, - 
IPL_PARMBLOCK_VALID = 2, -}; - enum ipl_type { IPL_TYPE_UNKNOWN = 1, IPL_TYPE_CCW = 2, @@ -138,6 +120,7 @@ struct ipl_info extern struct ipl_info ipl_info; extern void setup_ipl(void); +extern void set_os_info_reipl_block(void); /* * DIAG 308 support diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 1d708a419326..825dd0f7f221 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h @@ -46,4 +46,27 @@ static inline void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs) { } +struct kimage; +struct s390_load_data { + /* Pointer to the kernel buffer. Used to register cmdline etc.. */ + void *kernel_buf; + + /* Total size of loaded segments in memory. Used as an offset. */ + size_t memsz; + + /* Load address of initrd. Used to register INITRD_START in kernel. */ + unsigned long initrd_load_addr; +}; + +int kexec_file_add_purgatory(struct kimage *image, + struct s390_load_data *data); +int kexec_file_add_initrd(struct kimage *image, + struct s390_load_data *data, + char *initrd, unsigned long initrd_len); +int *kexec_file_update_kernel(struct kimage *iamge, + struct s390_load_data *data); + +extern const struct kexec_file_ops s390_kexec_image_ops; +extern const struct kexec_file_ops s390_kexec_elf_ops; + #endif /*_S390_KEXEC_H */ diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h index 35bf28fe4c64..b4bd8c41e9d3 100644 --- a/arch/s390/include/asm/nospec-branch.h +++ b/arch/s390/include/asm/nospec-branch.h @@ -9,6 +9,7 @@ extern int nospec_disable; void nospec_init_branches(void); +void nospec_auto_detect(void); void nospec_revert(s32 *start, s32 *end); #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/asm/purgatory.h b/arch/s390/include/asm/purgatory.h new file mode 100644 index 000000000000..e297bcfc476f --- /dev/null +++ b/arch/s390/include/asm/purgatory.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#ifndef _S390_PURGATORY_H_ +#define _S390_PURGATORY_H_ +#ifndef __ASSEMBLY__ + +#include <linux/purgatory.h> + +int verify_sha256_digest(void); + +#endif /* __ASSEMBLY__ */ +#endif /* _S390_PURGATORY_H_ */ diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h deleted file mode 100644 index 6450b31ade03..000000000000 --- a/arch/s390/include/asm/reset.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright IBM Corp. 2006 - * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> - */ - -#ifndef _ASM_S390_RESET_H -#define _ASM_S390_RESET_H - -#include <linux/list.h> - -struct reset_call { - struct list_head list; - void (*fn)(void); -}; - -extern void register_reset_call(struct reset_call *reset); -extern void unregister_reset_call(struct reset_call *reset); -extern void s390_reset_system(void); -#endif /* _ASM_S390_RESET_H */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 124154fdfc97..9c30ebe046f3 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * S390 version - * Copyright IBM Corp. 1999, 2010 + * Copyright IBM Corp. 
1999, 2017 */ #ifndef _ASM_S390_SETUP_H #define _ASM_S390_SETUP_H @@ -37,17 +37,31 @@ #define LPP_MAGIC _BITUL(31) #define LPP_PID_MASK _AC(0xffffffff, UL) +/* Offsets to entry points in kernel/head.S */ + +#define STARTUP_NORMAL_OFFSET 0x10000 +#define STARTUP_KDUMP_OFFSET 0x10010 + +/* Offsets to parameters in kernel/head.S */ + +#define IPL_DEVICE_OFFSET 0x10400 +#define INITRD_START_OFFSET 0x10408 +#define INITRD_SIZE_OFFSET 0x10410 +#define OLDMEM_BASE_OFFSET 0x10418 +#define OLDMEM_SIZE_OFFSET 0x10420 +#define COMMAND_LINE_OFFSET 0x10480 + #ifndef __ASSEMBLY__ #include <asm/lowcore.h> #include <asm/types.h> -#define IPL_DEVICE (*(unsigned long *) (0x10400)) -#define INITRD_START (*(unsigned long *) (0x10408)) -#define INITRD_SIZE (*(unsigned long *) (0x10410)) -#define OLDMEM_BASE (*(unsigned long *) (0x10418)) -#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) -#define COMMAND_LINE ((char *) (0x10480)) +#define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET)) +#define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET)) +#define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET)) +#define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET)) +#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET)) +#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET)) extern int memory_end_set; extern unsigned long memory_end; @@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void); #else /* __ASSEMBLY__ */ -#define IPL_DEVICE 0x10400 -#define INITRD_START 0x10408 -#define INITRD_SIZE 0x10410 -#define OLDMEM_BASE 0x10418 -#define OLDMEM_SIZE 0x10420 -#define COMMAND_LINE 0x10480 +#define IPL_DEVICE (IPL_DEVICE_OFFSET) +#define INITRD_START (INITRD_START_OFFSET) +#define INITRD_SIZE (INITRD_SIZE_OFFSET) +#define OLDMEM_BASE (OLDMEM_BASE_OFFSET) +#define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET) +#define COMMAND_LINE (COMMAND_LINE_OFFSET) #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_SETUP_H */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 83ba57533ce6..3c883c368eb0 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -45,6 +45,9 @@ struct thread_info { void arch_release_task_struct(struct task_struct *tsk); int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +void arch_setup_new_exec(void); +#define arch_setup_new_exec arch_setup_new_exec + #endif /* diff --git a/arch/s390/include/uapi/asm/signal.h b/arch/s390/include/uapi/asm/signal.h index c57f9d28d894..9a14a611ed82 100644 --- a/arch/s390/include/uapi/asm/signal.h +++ b/arch/s390/include/uapi/asm/signal.h @@ -97,22 +97,31 @@ typedef unsigned long sigset_t; #include <asm-generic/signal-defs.h> #ifndef __KERNEL__ -/* Here we must cater to libcs that poke about in kernel headers. */ +/* + * There are two system calls in regard to sigaction, sys_rt_sigaction + * and sys_sigaction. Internally the kernel uses the struct old_sigaction + * for the older sys_sigaction system call, and the kernel version of the + * struct sigaction for the newer sys_rt_sigaction. + * + * The uapi definition for struct sigaction has made a strange distinction + * between 31-bit and 64-bit in the past. For 64-bit the uapi structure + * looks like the kernel struct sigaction, but for 31-bit it used to + * look like the kernel struct old_sigaction. That practically made the + * structure unusable for either system call. To get around this problem + * the glibc always had its own definitions for the sigaction structures. 
+ * + * The current struct sigaction uapi definition below is suitable for the + * sys_rt_sigaction system call only. + */ struct sigaction { union { __sighandler_t _sa_handler; void (*_sa_sigaction)(int, struct siginfo *, void *); } _u; -#ifndef __s390x__ /* lovely */ - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -#else /* __s390x__ */ unsigned long sa_flags; void (*sa_restorer)(void); sigset_t sa_mask; -#endif /* __s390x__ */ }; #define sa_handler _u._sa_handler diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index d568307321fc..b62e0614e440 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -203,9 +203,9 @@ struct ep11_urb { } __attribute__((packed)); /** - * struct zcrypt_device_status + * struct zcrypt_device_status_ext * @hwtype: raw hardware type - * @qid: 6 bit device index, 8 bit domain + * @qid: 8 bit device index, 8 bit domain * @functions: AP device function bit field 'abcdef' * a, b, c = reserved * d = CCA coprocessor @@ -214,28 +214,23 @@ struct ep11_urb { * @online online status * @reserved reserved */ -struct zcrypt_device_status { +struct zcrypt_device_status_ext { unsigned int hwtype:8; - unsigned int qid:14; + unsigned int qid:16; unsigned int online:1; unsigned int functions:6; - unsigned int reserved:3; + unsigned int reserved:1; }; -#define MAX_ZDEV_CARDIDS 64 -#define MAX_ZDEV_DOMAINS 256 +#define MAX_ZDEV_CARDIDS_EXT 256 +#define MAX_ZDEV_DOMAINS_EXT 256 -/** - * Maximum number of zcrypt devices - */ -#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS) +/* Maximum number of zcrypt devices */ +#define MAX_ZDEV_ENTRIES_EXT (MAX_ZDEV_CARDIDS_EXT * MAX_ZDEV_DOMAINS_EXT) -/** - * zcrypt_device_matrix - * Device matrix of all zcrypt devices - */ -struct zcrypt_device_matrix { - struct zcrypt_device_status device[MAX_ZDEV_ENTRIES]; +/* Device matrix of all zcrypt devices */ +struct zcrypt_device_matrix_ext { + struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT]; }; #define AUTOSELECT ((unsigned int)0xFFFFFFFF) @@ -270,71 +265,35 @@ struct zcrypt_device_matrix { * ZSENDEP11CPRB * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. * - * Z90STAT_STATUS_MASK - * Return an 64 element array of unsigned chars for the status of - * all devices. + * ZCRYPT_DEVICE_STATUS + * The given struct zcrypt_device_matrix_ext is updated with + * status information for each currently known apqn. + * + * ZCRYPT_STATUS_MASK + * Return an MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the + * status of all devices. * 0x01: PCICA * 0x02: PCICC * 0x03: PCIXCC_MCL2 * 0x04: PCIXCC_MCL3 * 0x05: CEX2C * 0x06: CEX2A - * 0x0d: device is disabled via the proc filesystem - * - * Z90STAT_QDEPTH_MASK - * Return an 64 element array of unsigned chars for the queue - * depth of all devices. - * - * Z90STAT_PERDEV_REQCNT - * Return an 64 element array of unsigned integers for the number - * of successfully completed requests per device since the device - * was detected and made available. - * - * Z90STAT_REQUESTQ_COUNT - * Return an integer count of the number of entries waiting to be - * sent to a device. - * - * Z90STAT_PENDINGQ_COUNT - * Return an integer count of the number of entries sent to all - * devices awaiting the reply. - * - * Z90STAT_TOTALOPEN_COUNT - * Return an integer count of the number of open file handles. - * - * Z90STAT_DOMAIN_INDEX - * Return the integer value of the Cryptographic Domain. 
- * - * The following ioctls are deprecated and should be no longer used: - * - * Z90STAT_TOTALCOUNT - * Return an integer count of all device types together. - * - * Z90STAT_PCICACOUNT - * Return an integer count of all PCICAs. - * - * Z90STAT_PCICCCOUNT - * Return an integer count of all PCICCs. - * - * Z90STAT_PCIXCCMCL2COUNT - * Return an integer count of all MCL2 PCIXCCs. - * - * Z90STAT_PCIXCCMCL3COUNT - * Return an integer count of all MCL3 PCIXCCs. - * - * Z90STAT_CEX2CCOUNT - * Return an integer count of all CEX2Cs. + * 0x07: CEX3C + * 0x08: CEX3A + * 0x0a: CEX4 + * 0x0b: CEX5 + * 0x0c: CEX6 + * 0x0d: device is disabled * - * Z90STAT_CEX2ACOUNT - * Return an integer count of all CEX2As. + * ZCRYPT_QDEPTH_MASK + * Return an MAX_ZDEV_CARDIDS_EXT element array of unsigned chars for the + * queue depth of all devices. * - * ICAZ90STATUS - * Return some device driver status in a ica_z90_status struct - * This takes an ica_z90_status struct as its arg. + * ZCRYPT_PERDEV_REQCNT + * Return an MAX_ZDEV_CARDIDS_EXT element array of unsigned integers for + * the number of successfully completed requests per device since the + * device was detected and made available. * - * Z90STAT_PCIXCCCOUNT - * Return an integer count of all PCIXCCs (MCL2 + MCL3). - * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from - * MCL2 PCIXCCs. */ /** @@ -344,22 +303,56 @@ struct zcrypt_device_matrix { #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) #define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) -#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) -/* New status calls */ -#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) -#define Z90STAT_PCICACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x41, int) -#define Z90STAT_PCICCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x42, int) -#define Z90STAT_PCIXCCMCL2COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4b, int) -#define Z90STAT_PCIXCCMCL3COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4c, int) -#define Z90STAT_CEX2CCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4d, int) -#define Z90STAT_CEX2ACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4e, int) +#define ZCRYPT_DEVICE_STATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x5f, 0) +#define ZCRYPT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x58, char[MAX_ZDEV_CARDIDS_EXT]) +#define ZCRYPT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x59, char[MAX_ZDEV_CARDIDS_EXT]) +#define ZCRYPT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x5a, int[MAX_ZDEV_CARDIDS_EXT]) + +/* + * Only deprecated defines, structs and ioctls below this line. 
+ */ + +/* Deprecated: use MAX_ZDEV_CARDIDS_EXT */ +#define MAX_ZDEV_CARDIDS 64 +/* Deprecated: use MAX_ZDEV_DOMAINS_EXT */ +#define MAX_ZDEV_DOMAINS 256 + +/* Deprecated: use MAX_ZDEV_ENTRIES_EXT */ +#define MAX_ZDEV_ENTRIES (MAX_ZDEV_CARDIDS * MAX_ZDEV_DOMAINS) + +/* Deprecated: use struct zcrypt_device_status_ext */ +struct zcrypt_device_status { + unsigned int hwtype:8; + unsigned int qid:14; + unsigned int online:1; + unsigned int functions:6; + unsigned int reserved:3; +}; + +/* Deprecated: use struct zcrypt_device_matrix_ext */ +struct zcrypt_device_matrix { + struct zcrypt_device_status device[MAX_ZDEV_ENTRIES]; +}; + +/* Deprecated: use ZCRYPT_DEVICE_STATUS */ +#define ZDEVICESTATUS _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x4f, 0) +/* Deprecated: use ZCRYPT_STATUS_MASK */ +#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) +/* Deprecated: use ZCRYPT_QDEPTH_MASK */ +#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64]) +/* Deprecated: use ZCRYPT_PERDEV_REQCNT */ +#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64]) + +/* Deprecated: use sysfs to query these values */ #define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int) #define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int) #define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int) #define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int) -#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) -#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64]) -#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64]) + +/* + * The ioctl number ranges 0x40 - 0x42 and 0x4b - 0x4e had been used in the + * past, don't assign new ioctls for these. + */ #endif /* __ASM_S390_ZCRYPT_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b06a6f79c1ec..84ea6225efb4 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o +obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o + obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index cfe2c45c5180..eb2a5c0443cd 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -10,6 +10,7 @@ #include <linux/kbuild.h> #include <linux/kvm_host.h> #include <linux/sched.h> +#include <linux/purgatory.h> #include <asm/idle.h> #include <asm/vdso.h> #include <asm/pgtable.h> @@ -204,5 +205,9 @@ int main(void) OFFSET(__GMAP_ASCE, gmap, asce); OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); + /* kexec_sha_region */ + OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start); + OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len); + DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region)); return 0; } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 18c1eeb847b2..6f2a193ccccc 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -279,7 +279,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set, if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask, set, sizeof(compat_sigset_t))) return -EFAULT; - if 
(__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs)) + if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs)) return -EFAULT; /* Store registers needed to create the signal frame */ diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 11e9d8b5c1b0..607c5e9fba3d 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); +COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index b00b515baa53..32daa0f84325 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -342,16 +342,6 @@ static __init void memmove_early(void *dst, const void *src, size_t n) S390_lowcore.program_new_psw = old; } -static __init noinline void ipl_save_parameters(void) -{ - void *src, *dst; - - src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr; - dst = (void *) IPL_PARMBLOCK_ORIGIN; - memmove_early(dst, src, PAGE_SIZE); - S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN; -} - static __init noinline void rescue_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD @@ -421,10 +411,8 @@ static void __init setup_boot_command_line(void) void __init startup_init(void) { reset_tod_clock(); - ipl_save_parameters(); rescue_initrd(); clear_bss_section(); - ipl_verify_parameters(); time_early_init(); init_kernel_storage_key(); lockdep_off(); @@ -432,7 +420,7 @@ void __init startup_init(void) setup_facility_list(); detect_machine_type(); setup_arch_string(); - ipl_update_parameters(); + ipl_store_parameters(); setup_boot_command_line(); detect_diag9c(); detect_diag44(); diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 34477c1aee6d..4296d7e61fb6 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -24,9 +24,7 @@ #include <asm/smp.h> #include <asm/setup.h> #include <asm/cpcmd.h> -#include <asm/cio.h> #include <asm/ebcdic.h> -#include <asm/reset.h> #include <asm/sclp.h> #include <asm/checksum.h> #include <asm/debug.h> @@ -119,39 +117,12 @@ static char *dump_type_str(enum dump_type type) } } -static u8 ipl_ssid; -static u16 ipl_devno; -u32 ipl_flags; - -enum ipl_method { - REIPL_METHOD_CCW_CIO, - REIPL_METHOD_CCW_DIAG, - REIPL_METHOD_CCW_VM, - REIPL_METHOD_FCP_RO_DIAG, - REIPL_METHOD_FCP_RW_DIAG, - REIPL_METHOD_FCP_RO_VM, - REIPL_METHOD_FCP_DUMP, - REIPL_METHOD_NSS, - REIPL_METHOD_NSS_DIAG, - REIPL_METHOD_DEFAULT, -}; - -enum dump_method { - DUMP_METHOD_NONE, - DUMP_METHOD_CCW_CIO, - DUMP_METHOD_CCW_DIAG, - DUMP_METHOD_CCW_VM, - DUMP_METHOD_FCP_DIAG, -}; - -static int diag308_set_works; - +static int ipl_block_valid; static struct ipl_parameter_block ipl_block; static int reipl_capabilities = IPL_TYPE_UNKNOWN; static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; -static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; static struct ipl_parameter_block *reipl_block_fcp; static struct ipl_parameter_block *reipl_block_ccw; static struct ipl_parameter_block *reipl_block_nss; @@ -159,7 +130,6 @@ static struct 
ipl_parameter_block *reipl_block_actual; static int dump_capabilities = DUMP_TYPE_NONE; static enum dump_type dump_type = DUMP_TYPE_NONE; -static enum dump_method dump_method = DUMP_METHOD_NONE; static struct ipl_parameter_block *dump_block_fcp; static struct ipl_parameter_block *dump_block_ccw; @@ -260,33 +230,25 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store) -static void make_attrs_ro(struct attribute **attrs) -{ - while (*attrs) { - (*attrs)->mode = S_IRUGO; - attrs++; - } -} - /* * ipl section */ static __init enum ipl_type get_ipl_type(void) { - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - - if (!(ipl_flags & IPL_DEVNO_VALID)) + if (!ipl_block_valid) return IPL_TYPE_UNKNOWN; - if (!(ipl_flags & IPL_PARMBLOCK_VALID)) + + switch (ipl_block.hdr.pbt) { + case DIAG308_IPL_TYPE_CCW: return IPL_TYPE_CCW; - if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION) - return IPL_TYPE_UNKNOWN; - if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) - return IPL_TYPE_UNKNOWN; - if (ipl->ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) - return IPL_TYPE_FCP_DUMP; - return IPL_TYPE_FCP; + case DIAG308_IPL_TYPE_FCP: + if (ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) + return IPL_TYPE_FCP_DUMP; + else + return IPL_TYPE_FCP; + } + return IPL_TYPE_UNKNOWN; } struct ipl_info ipl_info; @@ -338,7 +300,7 @@ size_t append_ipl_vmparm(char *dest, size_t size) size_t rc; rc = 0; - if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) + if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW) rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); else dest[0] = 0; @@ -401,7 +363,7 @@ size_t append_ipl_scpdata(char *dest, size_t len) size_t rc; rc = 0; - if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) + if (ipl_block_valid && ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); else dest[0] = 0; @@ -415,14 +377,14 @@ static struct kobj_attribute sys_ipl_vm_parm_attr = static ssize_t sys_ipl_device_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - switch (ipl_info.type) { case IPL_TYPE_CCW: - return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno); + return sprintf(page, "0.%x.%04x\n", ipl_block.ipl_info.ccw.ssid, + ipl_block.ipl_info.ccw.devno); case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: - return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); + return sprintf(page, "0.0.%04x\n", + ipl_block.ipl_info.fcp.devno); default: return 0; } @@ -435,8 +397,8 @@ static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START, - IPL_PARMBLOCK_SIZE); + return memory_read_from_buffer(buf, count, &off, &ipl_block, + ipl_block.hdr.len); } static struct bin_attribute ipl_parameter_attr = __BIN_ATTR(binary_parameter, S_IRUGO, ipl_parameter_read, NULL, @@ -446,8 +408,8 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; - void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; + unsigned int size = ipl_block.ipl_info.fcp.scp_data_len; + void *scp_data = &ipl_block.ipl_info.fcp.scp_data; return memory_read_from_buffer(buf, count, &off, scp_data, size); } @@ -462,14 +424,14 @@ static struct 
bin_attribute *ipl_fcp_bin_attrs[] = { /* FCP ipl device attributes */ -DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->ipl_info.fcp.wwpn); -DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->ipl_info.fcp.lun); -DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->ipl_info.fcp.bootprog); -DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); +DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", + (unsigned long long)ipl_block.ipl_info.fcp.wwpn); +DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", + (unsigned long long)ipl_block.ipl_info.fcp.lun); +DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", + (unsigned long long)ipl_block.ipl_info.fcp.bootprog); +DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", + (unsigned long long)ipl_block.ipl_info.fcp.br_lba); static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -545,10 +507,6 @@ static void __ipl_run(void *unused) { __bpon(); diag308(DIAG308_LOAD_CLEAR, NULL); - if (MACHINE_IS_VM) - __cpcmd("IPL", NULL, 0, NULL); - else if (ipl_info.type == IPL_TYPE_CCW) - reipl_ccw_dev(&ipl_info.data.ccw.dev_id); } static void ipl_run(struct shutdown_trigger *trigger) @@ -776,6 +734,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, /* copy and convert to ebcdic */ memcpy(ipb->hdr.loadparm, buf, lp_len); ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); + ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID; return len; } @@ -938,11 +897,10 @@ static struct attribute_group reipl_nss_attr_group = { .attrs = reipl_nss_attrs, }; -static void set_reipl_block_actual(struct ipl_parameter_block *reipl_block) +void set_os_info_reipl_block(void) { - reipl_block_actual = reipl_block; os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual, - reipl_block->hdr.len); + reipl_block_actual->hdr.len); } /* reipl type */ @@ -954,38 +912,16 @@ static int reipl_set_type(enum ipl_type type) switch(type) { case IPL_TYPE_CCW: - if (diag308_set_works) - reipl_method = REIPL_METHOD_CCW_DIAG; - else if (MACHINE_IS_VM) - reipl_method = REIPL_METHOD_CCW_VM; - else - reipl_method = REIPL_METHOD_CCW_CIO; - set_reipl_block_actual(reipl_block_ccw); + reipl_block_actual = reipl_block_ccw; break; case IPL_TYPE_FCP: - if (diag308_set_works) - reipl_method = REIPL_METHOD_FCP_RW_DIAG; - else if (MACHINE_IS_VM) - reipl_method = REIPL_METHOD_FCP_RO_VM; - else - reipl_method = REIPL_METHOD_FCP_RO_DIAG; - set_reipl_block_actual(reipl_block_fcp); - break; - case IPL_TYPE_FCP_DUMP: - reipl_method = REIPL_METHOD_FCP_DUMP; + reipl_block_actual = reipl_block_fcp; break; case IPL_TYPE_NSS: - if (diag308_set_works) - reipl_method = REIPL_METHOD_NSS_DIAG; - else - reipl_method = REIPL_METHOD_NSS; - set_reipl_block_actual(reipl_block_nss); - break; - case IPL_TYPE_UNKNOWN: - reipl_method = REIPL_METHOD_DEFAULT; + reipl_block_actual = reipl_block_nss; break; default: - BUG(); + break; } reipl_type = type; return 0; @@ -1018,77 +954,25 @@ static struct kobj_attribute reipl_type_attr = static struct kset *reipl_kset; static struct kset *reipl_fcp_kset; -static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, - const enum ipl_method m) -{ - char loadparm[LOADPARM_LEN + 1] = {}; - char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; - char nss_name[NSS_NAME_SIZE + 1] = {}; - size_t pos = 0; - - reipl_get_ascii_loadparm(loadparm, ipb); - reipl_get_ascii_nss_name(nss_name, ipb); - 
reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); - - switch (m) { - case REIPL_METHOD_CCW_VM: - pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno); - break; - case REIPL_METHOD_NSS: - pos = sprintf(dst, "IPL %s", nss_name); - break; - default: - break; - } - if (strlen(loadparm) > 0) - pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm); - if (strlen(vmparm) > 0) - sprintf(dst + pos, " PARM %s", vmparm); -} - static void __reipl_run(void *unused) { - struct ccw_dev_id devid; - static char buf[128]; - - switch (reipl_method) { - case REIPL_METHOD_CCW_CIO: - devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid; - devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - reipl_ccw_dev(&devid); - break; - case REIPL_METHOD_CCW_VM: - get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM); - __cpcmd(buf, NULL, 0, NULL); - break; - case REIPL_METHOD_CCW_DIAG: + switch (reipl_type) { + case IPL_TYPE_CCW: diag308(DIAG308_SET, reipl_block_ccw); diag308(DIAG308_LOAD_CLEAR, NULL); break; - case REIPL_METHOD_FCP_RW_DIAG: + case IPL_TYPE_FCP: diag308(DIAG308_SET, reipl_block_fcp); diag308(DIAG308_LOAD_CLEAR, NULL); break; - case REIPL_METHOD_FCP_RO_DIAG: - diag308(DIAG308_LOAD_CLEAR, NULL); - break; - case REIPL_METHOD_FCP_RO_VM: - __cpcmd("IPL", NULL, 0, NULL); - break; - case REIPL_METHOD_NSS_DIAG: + case IPL_TYPE_NSS: diag308(DIAG308_SET, reipl_block_nss); diag308(DIAG308_LOAD_CLEAR, NULL); break; - case REIPL_METHOD_NSS: - get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS); - __cpcmd(buf, NULL, 0, NULL); - break; - case REIPL_METHOD_DEFAULT: - if (MACHINE_IS_VM) - __cpcmd("IPL", NULL, 0, NULL); + case IPL_TYPE_UNKNOWN: diag308(DIAG308_LOAD_CLEAR, NULL); break; - case REIPL_METHOD_FCP_DUMP: + case IPL_TYPE_FCP_DUMP: break; } disabled_wait((unsigned long) __builtin_return_address(0)); @@ -1119,7 +1003,7 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb) ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; /* VM PARM */ - if (MACHINE_IS_VM && diag308_set_works && + if (MACHINE_IS_VM && ipl_block_valid && (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) { ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID; @@ -1141,9 +1025,6 @@ static int __init reipl_nss_init(void) if (!reipl_block_nss) return -ENOMEM; - if (!diag308_set_works) - sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO; - rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group); if (rc) return rc; @@ -1161,24 +1042,16 @@ static int __init reipl_ccw_init(void) if (!reipl_block_ccw) return -ENOMEM; - if (MACHINE_IS_VM) { - if (!diag308_set_works) - sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO; - rc = sysfs_create_group(&reipl_kset->kobj, - &reipl_ccw_attr_group_vm); - } else { - if(!diag308_set_works) - sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; - rc = sysfs_create_group(&reipl_kset->kobj, - &reipl_ccw_attr_group_lpar); - } + rc = sysfs_create_group(&reipl_kset->kobj, + MACHINE_IS_VM ? 
&reipl_ccw_attr_group_vm + : &reipl_ccw_attr_group_lpar); if (rc) return rc; reipl_block_ccw_init(reipl_block_ccw); if (ipl_info.type == IPL_TYPE_CCW) { - reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid; - reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; + reipl_block_ccw->ipl_info.ccw.ssid = ipl_block.ipl_info.ccw.ssid; + reipl_block_ccw->ipl_info.ccw.devno = ipl_block.ipl_info.ccw.devno; reipl_block_ccw_fill_parms(reipl_block_ccw); } @@ -1190,14 +1063,6 @@ static int __init reipl_fcp_init(void) { int rc; - if (!diag308_set_works) { - if (ipl_info.type == IPL_TYPE_FCP) { - make_attrs_ro(reipl_fcp_attrs); - sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO; - } else - return 0; - } - reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); if (!reipl_block_fcp) return -ENOMEM; @@ -1218,7 +1083,7 @@ static int __init reipl_fcp_init(void) } if (ipl_info.type == IPL_TYPE_FCP) { - memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); + memcpy(reipl_block_fcp, &ipl_block, sizeof(ipl_block)); /* * Fix loadparm: There are systems where the (SCSI) LOADPARM * is invalid in the SCSI IPL parameter block, so take it @@ -1340,21 +1205,6 @@ static int dump_set_type(enum dump_type type) { if (!(dump_capabilities & type)) return -EINVAL; - switch (type) { - case DUMP_TYPE_CCW: - if (diag308_set_works) - dump_method = DUMP_METHOD_CCW_DIAG; - else if (MACHINE_IS_VM) - dump_method = DUMP_METHOD_CCW_VM; - else - dump_method = DUMP_METHOD_CCW_CIO; - break; - case DUMP_TYPE_FCP: - dump_method = DUMP_METHOD_FCP_DIAG; - break; - default: - dump_method = DUMP_METHOD_NONE; - } dump_type = type; return 0; } @@ -1397,25 +1247,11 @@ static void diag308_dump(void *dump_block) static void __dump_run(void *unused) { - struct ccw_dev_id devid; - static char buf[100]; - - switch (dump_method) { - case DUMP_METHOD_CCW_CIO: - devid.ssid = dump_block_ccw->ipl_info.ccw.ssid; - devid.devno = dump_block_ccw->ipl_info.ccw.devno; - reipl_ccw_dev(&devid); - break; - case DUMP_METHOD_CCW_VM: - sprintf(buf, "STORE STATUS"); - __cpcmd(buf, NULL, 0, NULL); - sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); - __cpcmd(buf, NULL, 0, NULL); - break; - case DUMP_METHOD_CCW_DIAG: + switch (dump_type) { + case DUMP_TYPE_CCW: diag308_dump(dump_block_ccw); break; - case DUMP_METHOD_FCP_DIAG: + case DUMP_TYPE_FCP: diag308_dump(dump_block_fcp); break; default: @@ -1425,7 +1261,7 @@ static void __dump_run(void *unused) static void dump_run(struct shutdown_trigger *trigger) { - if (dump_method == DUMP_METHOD_NONE) + if (dump_type == DUMP_TYPE_NONE) return; smp_send_stop(); smp_call_ipl_cpu(__dump_run, NULL); @@ -1457,8 +1293,6 @@ static int __init dump_fcp_init(void) if (!sclp_ipl_info.has_dump) return 0; /* LDIPL DUMP is not installed */ - if (!diag308_set_works) - return 0; dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); if (!dump_block_fcp) return -ENOMEM; @@ -1516,18 +1350,9 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) dump_run(trigger); } -static int __init dump_reipl_init(void) -{ - if (!diag308_set_works) - return -EOPNOTSUPP; - else - return 0; -} - static struct shutdown_action __refdata dump_reipl_action = { .name = SHUTDOWN_ACTION_DUMP_REIPL_STR, .fn = dump_reipl_run, - .init = dump_reipl_init, }; /* @@ -1838,10 +1663,8 @@ static int __init s390_ipl_init(void) * case the system is booted from HMC. Fortunately in this case * READ SCP info provides the correct value. 
*/ - if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && - diag308_set_works) - memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, - LOADPARM_LEN); + if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid) + memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, LOADPARM_LEN); shutdown_actions_init(); shutdown_triggers_init(); return 0; @@ -1921,19 +1744,20 @@ static struct notifier_block on_panic_nb = { void __init setup_ipl(void) { + BUILD_BUG_ON(sizeof(struct ipl_parameter_block) != PAGE_SIZE); + ipl_info.type = get_ipl_type(); switch (ipl_info.type) { case IPL_TYPE_CCW: - ipl_info.data.ccw.dev_id.ssid = ipl_ssid; - ipl_info.data.ccw.dev_id.devno = ipl_devno; + ipl_info.data.ccw.dev_id.ssid = ipl_block.ipl_info.ccw.ssid; + ipl_info.data.ccw.dev_id.devno = ipl_block.ipl_info.ccw.devno; break; case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: ipl_info.data.fcp.dev_id.ssid = 0; - ipl_info.data.fcp.dev_id.devno = - IPL_PARMBLOCK_START->ipl_info.fcp.devno; - ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; - ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; + ipl_info.data.fcp.dev_id.devno = ipl_block.ipl_info.fcp.devno; + ipl_info.data.fcp.wwpn = ipl_block.ipl_info.fcp.wwpn; + ipl_info.data.fcp.lun = ipl_block.ipl_info.fcp.lun; break; case IPL_TYPE_NSS: case IPL_TYPE_UNKNOWN: @@ -1943,85 +1767,21 @@ void __init setup_ipl(void) atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); } -void __init ipl_update_parameters(void) +void __init ipl_store_parameters(void) { int rc; rc = diag308(DIAG308_STORE, &ipl_block); - if ((rc == DIAG308_RC_OK) || (rc == DIAG308_RC_NOCONFIG)) - diag308_set_works = 1; -} - -void __init ipl_verify_parameters(void) -{ - struct cio_iplinfo iplinfo; - - if (cio_get_iplinfo(&iplinfo)) - return; - - ipl_ssid = iplinfo.ssid; - ipl_devno = iplinfo.devno; - ipl_flags |= IPL_DEVNO_VALID; - if (!iplinfo.is_qdio) - return; - ipl_flags |= IPL_PARMBLOCK_VALID; -} - -static LIST_HEAD(rcall); -static DEFINE_MUTEX(rcall_mutex); - -void register_reset_call(struct reset_call *reset) -{ - mutex_lock(&rcall_mutex); - list_add(&reset->list, &rcall); - mutex_unlock(&rcall_mutex); -} -EXPORT_SYMBOL_GPL(register_reset_call); - -void unregister_reset_call(struct reset_call *reset) -{ - mutex_lock(&rcall_mutex); - list_del(&reset->list); - mutex_unlock(&rcall_mutex); -} -EXPORT_SYMBOL_GPL(unregister_reset_call); - -static void do_reset_calls(void) -{ - struct reset_call *reset; - - if (diag308_set_works) { - diag308_reset(); - return; - } - list_for_each_entry(reset, &rcall, list) - reset->fn(); + if (rc == DIAG308_RC_OK && ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION) + ipl_block_valid = 1; } void s390_reset_system(void) { - struct lowcore *lc; - - lc = (struct lowcore *)(unsigned long) store_prefix(); - - /* Stack for interrupt/machine check handler */ - lc->panic_stack = S390_lowcore.panic_stack; - /* Disable prefixing */ set_prefix(0); /* Disable lowcore protection */ - __ctl_clear_bit(0,28); - - /* Set new machine check handler */ - S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; - S390_lowcore.mcck_new_psw.addr = - (unsigned long) s390_base_mcck_handler; - - /* Set new program check handler */ - S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; - S390_lowcore.program_new_psw.addr = - (unsigned long) s390_base_pgm_handler; - - do_reset_calls(); + __ctl_clear_bit(0, 28); + diag308_reset(); } diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c new file mode 
100644 index 000000000000..5a286b012043 --- /dev/null +++ b/arch/s390/kernel/kexec_elf.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ELF loader for kexec_file_load system call. + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +static int kexec_file_add_elf_kernel(struct kimage *image, + struct s390_load_data *data, + char *kernel, unsigned long kernel_len) +{ + struct kexec_buf buf; + const Elf_Ehdr *ehdr; + const Elf_Phdr *phdr; + int i, ret; + + ehdr = (Elf_Ehdr *)kernel; + buf.image = image; + + phdr = (void *)ehdr + ehdr->e_phoff; + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + if (phdr->p_type != PT_LOAD) + continue; + + buf.buffer = kernel + phdr->p_offset; + buf.bufsz = phdr->p_filesz; + + buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); + buf.memsz = phdr->p_memsz; + + if (phdr->p_paddr == 0) { + data->kernel_buf = buf.buffer; + data->memsz += STARTUP_NORMAL_OFFSET; + + buf.buffer += STARTUP_NORMAL_OFFSET; + buf.bufsz -= STARTUP_NORMAL_OFFSET; + + buf.mem += STARTUP_NORMAL_OFFSET; + buf.memsz -= STARTUP_NORMAL_OFFSET; + } + + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + + ret = kexec_add_buffer(&buf); + if (ret) + return ret; + + data->memsz += buf.memsz; + } + + return 0; +} + +static void *s390_elf_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct s390_load_data data = {0}; + const Elf_Ehdr *ehdr; + const Elf_Phdr *phdr; + size_t size; + int i, ret; + + /* image->fobs->probe already checked for valid ELF magic number. */ + ehdr = (Elf_Ehdr *)kernel; + + if (ehdr->e_type != ET_EXEC || + ehdr->e_ident[EI_CLASS] != ELFCLASS64 || + !elf_check_arch(ehdr)) + return ERR_PTR(-EINVAL); + + if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr)) + return ERR_PTR(-EINVAL); + + size = ehdr->e_ehsize + ehdr->e_phoff; + size += ehdr->e_phentsize * ehdr->e_phnum; + if (size > kernel_len) + return ERR_PTR(-EINVAL); + + phdr = (void *)ehdr + ehdr->e_phoff; + size = ALIGN(size, phdr->p_align); + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { + if (phdr->p_type == PT_INTERP) + return ERR_PTR(-EINVAL); + + if (phdr->p_offset > kernel_len) + return ERR_PTR(-EINVAL); + + size += ALIGN(phdr->p_filesz, phdr->p_align); + } + + if (size > kernel_len) + return ERR_PTR(-EINVAL); + + ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len); + if (ret) + return ERR_PTR(ret); + + if (!data.memsz) + return ERR_PTR(-EINVAL); + + if (initrd) { + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); + if (ret) + return ERR_PTR(ret); + } + + ret = kexec_file_add_purgatory(image, &data); + if (ret) + return ERR_PTR(ret); + + return kexec_file_update_kernel(image, &data); +} + +static int s390_elf_probe(const char *buf, unsigned long len) +{ + const Elf_Ehdr *ehdr; + + if (len < sizeof(Elf_Ehdr)) + return -ENOEXEC; + + ehdr = (Elf_Ehdr *)buf; + + /* Only check the ELF magic number here and do proper validity check + * in the loader. Any check here that fails would send the erroneous + * ELF file to the image loader that does not care what it gets. + * (Most likely) causing behavior not intended by the user. 
+ */ + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) + return -ENOEXEC; + + return 0; +} + +const struct kexec_file_ops s390_kexec_elf_ops = { + .probe = s390_elf_probe, + .load = s390_elf_load, +}; diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c new file mode 100644 index 000000000000..3800852595e8 --- /dev/null +++ b/arch/s390/kernel/kexec_image.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Image loader for kexec_file_load system call. + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +static int kexec_file_add_image_kernel(struct kimage *image, + struct s390_load_data *data, + char *kernel, unsigned long kernel_len) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + buf.buffer = kernel + STARTUP_NORMAL_OFFSET; + buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET; + + buf.mem = STARTUP_NORMAL_OFFSET; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + buf.memsz = buf.bufsz; + + ret = kexec_add_buffer(&buf); + + data->kernel_buf = kernel; + data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET; + + return ret; +} + +static void *s390_image_load(struct kimage *image, + char *kernel, unsigned long kernel_len, + char *initrd, unsigned long initrd_len, + char *cmdline, unsigned long cmdline_len) +{ + struct s390_load_data data = {0}; + int ret; + + ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len); + if (ret) + return ERR_PTR(ret); + + if (initrd) { + ret = kexec_file_add_initrd(image, &data, initrd, initrd_len); + if (ret) + return ERR_PTR(ret); + } + + ret = kexec_file_add_purgatory(image, &data); + if (ret) + return ERR_PTR(ret); + + return kexec_file_update_kernel(image, &data); +} + +static int s390_image_probe(const char *buf, unsigned long len) +{ + /* Can't reliably tell if an image is valid. Therefore give the + * user whatever he wants. + */ + return 0; +} + +const struct kexec_file_ops s390_kexec_image_ops = { + .probe = s390_image_probe, + .load = s390_image_load, +}; diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index a80050bbe2e4..b7020e721ae3 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -20,7 +20,6 @@ #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/smp.h> -#include <asm/reset.h> #include <asm/ipl.h> #include <asm/diag.h> #include <asm/elf.h> @@ -253,6 +252,7 @@ void machine_shutdown(void) void machine_crash_shutdown(struct pt_regs *regs) { + set_os_info_reipl_block(); } /* diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c new file mode 100644 index 000000000000..f413f57f8d20 --- /dev/null +++ b/arch/s390/kernel/machine_kexec_file.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * s390 code for kexec_file_load system call + * + * Copyright IBM Corp. 
2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/elf.h> +#include <linux/kexec.h> +#include <asm/setup.h> + +const struct kexec_file_ops * const kexec_file_loaders[] = { + &s390_kexec_elf_ops, + &s390_kexec_image_ops, + NULL, +}; + +int *kexec_file_update_kernel(struct kimage *image, + struct s390_load_data *data) +{ + unsigned long *loc; + + if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) + return ERR_PTR(-EINVAL); + + if (image->cmdline_buf_len) + memcpy(data->kernel_buf + COMMAND_LINE_OFFSET, + image->cmdline_buf, image->cmdline_buf_len); + + if (image->type == KEXEC_TYPE_CRASH) { + loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET); + *loc = crashk_res.start; + + loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET); + *loc = crashk_res.end - crashk_res.start + 1; + } + + if (image->initrd_buf) { + loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET); + *loc = data->initrd_load_addr; + + loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET); + *loc = image->initrd_buf_len; + } + + return NULL; +} + +static int kexec_file_update_purgatory(struct kimage *image) +{ + u64 entry, type; + int ret; + + if (image->type == KEXEC_TYPE_CRASH) { + entry = STARTUP_KDUMP_OFFSET; + type = KEXEC_TYPE_CRASH; + } else { + entry = STARTUP_NORMAL_OFFSET; + type = KEXEC_TYPE_DEFAULT; + } + + ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry, + sizeof(entry), false); + if (ret) + return ret; + + ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type, + sizeof(type), false); + if (ret) + return ret; + + if (image->type == KEXEC_TYPE_CRASH) { + u64 crash_size; + + ret = kexec_purgatory_get_set_symbol(image, "crash_start", + &crashk_res.start, + sizeof(crashk_res.start), + false); + if (ret) + return ret; + + crash_size = crashk_res.end - crashk_res.start + 1; + ret = kexec_purgatory_get_set_symbol(image, "crash_size", + &crash_size, + sizeof(crash_size), + false); + } + return ret; +} + +int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + data->memsz = ALIGN(data->memsz, PAGE_SIZE); + buf.mem = data->memsz; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + + ret = kexec_load_purgatory(image, &buf); + if (ret) + return ret; + + ret = kexec_file_update_purgatory(image); + return ret; +} + +int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data, + char *initrd, unsigned long initrd_len) +{ + struct kexec_buf buf; + int ret; + + buf.image = image; + + buf.buffer = initrd; + buf.bufsz = initrd_len; + + data->memsz = ALIGN(data->memsz, PAGE_SIZE); + buf.mem = data->memsz; + if (image->type == KEXEC_TYPE_CRASH) + buf.mem += crashk_res.start; + buf.memsz = buf.bufsz; + + data->initrd_load_addr = buf.mem; + data->memsz += buf.memsz; + + ret = kexec_add_buffer(&buf); + return ret; +} + +/* + * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole + * and provide kbuf->mem by hand. 
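The helpers above build the load layout incrementally through data->memsz: the kernel image sits first, the initrd is placed page-aligned behind it (data->initrd_load_addr), and the purgatory page-aligned behind that, with the whole layout shifted by crashk_res.start for a crash kernel. A rough userspace sketch of that arithmetic, with made-up sizes and with the STARTUP_NORMAL_OFFSET handling folded into the image size for brevity:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long crash_base = 0x10000000UL;	/* crashk_res.start; 0 for a normal kexec */
	unsigned long memsz = 0;

	unsigned long kernel_mem = memsz;		/* kernel image goes first */
	memsz += 0x900000;				/* hypothetical image size */

	memsz = ALIGN(memsz, PAGE_SIZE);
	unsigned long initrd_mem = memsz;		/* becomes initrd_load_addr */
	memsz += 0x2000000;				/* hypothetical initrd size */

	memsz = ALIGN(memsz, PAGE_SIZE);
	unsigned long purgatory_mem = memsz;		/* purgatory loaded last */

	printf("kernel    @ %#lx\n", crash_base + kernel_mem);
	printf("initrd    @ %#lx\n", crash_base + initrd_mem);
	printf("purgatory @ %#lx\n", crash_base + purgatory_mem);
	return 0;
}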
+ */ +int arch_kexec_walk_mem(struct kexec_buf *kbuf, + int (*func)(struct resource *, void *)) +{ + return 1; +} + +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab) +{ + Elf_Rela *relas; + int i; + + relas = (void *)pi->ehdr + relsec->sh_offset; + + for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { + const Elf_Sym *sym; /* symbol to relocate */ + unsigned long addr; /* final location after relocation */ + unsigned long val; /* relocated symbol value */ + void *loc; /* tmp location to modify */ + + sym = (void *)pi->ehdr + symtab->sh_offset; + sym += ELF64_R_SYM(relas[i].r_info); + + if (sym->st_shndx == SHN_UNDEF) + return -ENOEXEC; + + if (sym->st_shndx == SHN_COMMON) + return -ENOEXEC; + + if (sym->st_shndx >= pi->ehdr->e_shnum && + sym->st_shndx != SHN_ABS) + return -ENOEXEC; + + loc = pi->purgatory_buf; + loc += section->sh_offset; + loc += relas[i].r_offset; + + val = sym->st_value; + if (sym->st_shndx != SHN_ABS) + val += pi->sechdrs[sym->st_shndx].sh_addr; + val += relas[i].r_addend; + + addr = section->sh_addr + relas[i].r_offset; + + switch (ELF64_R_TYPE(relas[i].r_info)) { + case R_390_8: /* Direct 8 bit. */ + *(u8 *)loc = val; + break; + case R_390_12: /* Direct 12 bit. */ + *(u16 *)loc &= 0xf000; + *(u16 *)loc |= val & 0xfff; + break; + case R_390_16: /* Direct 16 bit. */ + *(u16 *)loc = val; + break; + case R_390_20: /* Direct 20 bit. */ + *(u32 *)loc &= 0xf00000ff; + *(u32 *)loc |= (val & 0xfff) << 16; /* DL */ + *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */ + break; + case R_390_32: /* Direct 32 bit. */ + *(u32 *)loc = val; + break; + case R_390_64: /* Direct 64 bit. */ + *(u64 *)loc = val; + break; + case R_390_PC16: /* PC relative 16 bit. */ + *(u16 *)loc = (val - addr); + break; + case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */ + *(u16 *)loc = (val - addr) >> 1; + break; + case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */ + *(u32 *)loc = (val - addr) >> 1; + break; + case R_390_PC32: /* PC relative 32 bit. */ + *(u32 *)loc = (val - addr); + break; + case R_390_PC64: /* PC relative 64 bit. */ + *(u64 *)loc = (val - addr); + break; + default: + break; + } + } + return 0; +} + +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len) +{ + /* A kernel must be at least large enough to contain head.S. During + * load memory in head.S will be accessed, e.g. to register the next + * command line. If the next kernel were smaller the current kernel + * will panic at load. 
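The *DBL relocation cases in the switch above store halved PC-relative displacements, because the s390 instruction encodings count the offset in halfwords. A standalone illustration of the R_390_PC32DBL arithmetic with made-up addresses (not kernel code, just the same computation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: target vs. relocation site. */
	uint64_t val  = 0x20000;	/* sym->st_value + section address + r_addend */
	uint64_t addr = 0x10004;	/* section->sh_addr + r_offset */
	uint32_t field;

	/* R_390_PC32DBL: 32-bit PC-relative offset, shifted right by one
	 * because the hardware multiplies the encoded field by 2. */
	field = (uint32_t)((val - addr) >> 1);

	printf("encoded field = %#x (reaches +/- 4 GiB from the instruction)\n", field);
	return 0;
}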
+ * + * 0x11000 = sizeof(head.S) + */ + if (buf_len < 0x11000) + return -ENOEXEC; + + return kexec_image_probe_default(image, buf, buf_len); +} diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 5a83be955c70..0dc8ac8548ee 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr, apply_alternatives(aseg, aseg + s->sh_size); if (IS_ENABLED(CONFIG_EXPOLINE) && - (!strcmp(".nospec_call_table", secname))) + (!strncmp(".s390_indirect", secname, 14))) nospec_revert(aseg, aseg + s->sh_size); if (IS_ENABLED(CONFIG_EXPOLINE) && - (!strcmp(".nospec_return_table", secname))) + (!strncmp(".s390_return", secname, 12))) nospec_revert(aseg, aseg + s->sh_size); } diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index 14867ec5f726..46d49a11663f 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/module.h> #include <linux/device.h> +#include <linux/cpu.h> #include <asm/nospec-branch.h> static int __init nobp_setup_early(char *str) @@ -72,7 +73,7 @@ static int __init nospectre_v2_setup_early(char *str) } early_param("nospectre_v2", nospectre_v2_setup_early); -static int __init spectre_v2_auto_early(void) +void __init nospec_auto_detect(void) { if (IS_ENABLED(CC_USING_EXPOLINE)) { /* @@ -87,11 +88,7 @@ static int __init spectre_v2_auto_early(void) * nobp setting decides what is done, this depends on the * CONFIG_KERNEL_NP option and the nobp/nospec parameters. */ - return 0; } -#ifdef CONFIG_EXPOLINE_AUTO -early_initcall(spectre_v2_auto_early); -#endif static int __init spectre_v2_setup_early(char *str) { @@ -102,7 +99,7 @@ static int __init spectre_v2_setup_early(char *str) if (str && !strncmp(str, "off", 3)) nospec_disable = 1; if (str && !strncmp(str, "auto", 4)) - spectre_v2_auto_early(); + nospec_auto_detect(); return 0; } early_param("spectre_v2", spectre_v2_setup_early); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index c5bc3f209652..feebb2944882 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); -CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); +CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080); CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); @@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db); CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); -CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); +CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080); CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); @@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { }; static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { - CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), + CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES), 
CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), @@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { }; static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { - CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), + CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES), CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), @@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void) model = cpumcf_z13_pmu_event_attr; break; case 0x3906: + case 0x3907: model = cpumcf_z14_pmu_event_attr; break; default: diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 70576a2f69cf..6e758bb6cd29 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -29,6 +29,7 @@ #include <linux/random.h> #include <linux/export.h> #include <linux/init_task.h> +#include <asm/cpu_mf.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/vtimer.h> @@ -48,6 +49,15 @@ void flush_thread(void) { } +void arch_setup_new_exec(void) +{ + if (S390_lowcore.current_pid != current->pid) { + S390_lowcore.current_pid = current->pid; + if (test_facility(40)) + lpp(&S390_lowcore.lpp); + } +} + void arch_release_task_struct(struct task_struct *tsk) { runtime_instr_release(tsk); diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index a40ebd1d29d0..73cc3750f0d3 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -75,90 +75,3 @@ ENTRY(store_status) .align 8 .Lclkcmp: .quad 0x0000000000000000 .previous - -# -# do_reipl_asm -# Parameter: r2 = schid of reipl device -# - -ENTRY(do_reipl_asm) - basr %r13,0 -.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) -.Lpg1: lgr %r3,%r2 - larl %r2,.Lstatus - brasl %r14,store_status - -.Lstatus: lctlg %c6,%c6,.Lall-.Lpg0(%r13) - lgr %r1,%r2 - mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) - stsch .Lschib-.Lpg0(%r13) - oi .Lschib+5-.Lpg0(%r13),0x84 -.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01 - msch .Lschib-.Lpg0(%r13) - lghi %r0,5 -.Lssch: ssch .Liplorb-.Lpg0(%r13) - jz .L001 - brct %r0,.Lssch - bas %r14,.Ldisab-.Lpg0(%r13) -.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13) -.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13) -.Lcont: c %r1,__LC_SUBCHANNEL_ID - jnz .Ltpi - clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13) - jnz .Ltpi - tsch .Liplirb-.Lpg0(%r13) - tm .Liplirb+9-.Lpg0(%r13),0xbf - jz .L002 - bas %r14,.Ldisab-.Lpg0(%r13) -.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 - jz .L003 - bas %r14,.Ldisab-.Lpg0(%r13) -.L003: st %r1,__LC_SUBCHANNEL_ID - lhi %r1,0 # mode 0 = esa - slr %r0,%r0 # set cpuid to zero - sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode - lpsw 0 -.Ldisab: sll %r14,1 - srl %r14,1 # need to kill hi bit to avoid specification exceptions. - st %r14,.Ldispsw+12-.Lpg0(%r13) - lpswe .Ldispsw-.Lpg0(%r13) - .align 8 -.Lall: .quad 0x00000000ff000000 - .align 16 -/* - * These addresses have to be 31 bit otherwise - * the sigp will throw a specifcation exception - * when switching to ESA mode as bit 31 be set - * in the ESA psw. - * Bit 31 of the addresses has to be 0 for the - * 31bit lpswe instruction a fact they appear to have - * omitted from the pop. 
- */ -.Lnewpsw: .quad 0x0000000080000000 - .quad .Lpg1 -.Lpcnew: .quad 0x0000000080000000 - .quad .Lecs -.Lionew: .quad 0x0000000080000000 - .quad .Lcont -.Lwaitpsw: .quad 0x0202000080000000 - .quad .Ltpi -.Ldispsw: .quad 0x0002000080000000 - .quad 0x0000000000000000 -.Liplccws: .long 0x02000000,0x60000018 - .long 0x08000008,0x20000001 -.Liplorb: .long 0x0049504c,0x0040ff80 - .long 0x00000000+.Liplccws -.Lschib: .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 -.Liplirb: .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 - .long 0x00000000,0x00000000 diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index 9c2c96da23d0..c97c2d40fe15 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S @@ -29,33 +29,6 @@ ENTRY(relocate_kernel) basr %r13,0 # base address .base: - stctg %c0,%c15,ctlregs-.base(%r13) - stmg %r0,%r15,gprregs-.base(%r13) - lghi %r0,3 - sllg %r0,%r0,31 - stg %r0,0x1d0(%r0) - la %r0,.back_pgm-.base(%r13) - stg %r0,0x1d8(%r0) - la %r1,load_psw-.base(%r13) - mvc 0(8,%r0),0(%r1) - la %r0,.back-.base(%r13) - st %r0,4(%r0) - oi 4(%r0),0x80 - lghi %r0,0 - diag %r0,%r0,0x308 - .back: - lhi %r1,1 # mode 1 = esame - sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode - sam64 # switch to 64 bit addressing mode - basr %r13,0 - .back_base: - oi have_diag308-.back_base(%r13),0x01 - lctlg %c0,%c15,ctlregs-.back_base(%r13) - lmg %r0,%r15,gprregs-.back_base(%r13) - j .top - .back_pgm: - lmg %r0,%r15,gprregs-.base(%r13) - .top: lghi %r7,PAGE_SIZE # load PAGE_SIZE in r7 lghi %r9,PAGE_SIZE # load PAGE_SIZE in r9 lg %r5,0(%r2) # read another word for indirection page @@ -64,55 +37,36 @@ ENTRY(relocate_kernel) je .indir_check # NO, goto "indir_check" lgr %r6,%r5 # r6 = r5 nill %r6,0xf000 # mask it out and... - j .top # ...next iteration + j .base # ...next iteration .indir_check: tml %r5,0x2 # is it a indirection page? je .done_test # NO, goto "done_test" nill %r5,0xf000 # YES, mask out, lgr %r2,%r5 # move it into the right register, - j .top # and read next... + j .base # and read next... .done_test: tml %r5,0x4 # is it the done indicator? je .source_test # NO! Well, then it should be the source indicator... j .done # ok, lets finish it here... .source_test: tml %r5,0x8 # it should be a source indicator... - je .top # NO, ignore it... + je .base # NO, ignore it... lgr %r8,%r5 # r8 = r5 nill %r8,0xf000 # masking 0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0 jo 0b - j .top + j .base .done: sgr %r0,%r0 # clear register r0 la %r4,load_psw-.base(%r13) # load psw-address into the register o %r3,4(%r4) # or load address into psw st %r3,4(%r4) mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0 - tm have_diag308-.base(%r13),0x01 - jno .no_diag308 diag %r0,%r0,0x308 - .no_diag308: - sam31 # 31 bit mode - sr %r1,%r1 # erase register r1 - sr %r2,%r2 # erase register r2 - sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero - lpsw 0 # hopefully start new kernel... 
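The flag tests that remain in the relocate_kernel loop (tml %r5,0x2 / 0x4 / 0x8) follow the generic kexec indirection-list encoding (the IND_* values from include/linux/kexec.h). In C, the walk the assembly performs looks roughly like the sketch below; it uses plain pointers and a fixed 4 KiB page size purely for illustration:

#include <string.h>

#define IND_DESTINATION	0x1	/* values as defined by the generic kexec code */
#define IND_INDIRECTION	0x2
#define IND_DONE	0x4
#define IND_SOURCE	0x8
#define PAGE_SIZE	4096UL

static void walk_kimage_list(unsigned long *entry)
{
	unsigned char *dst = NULL;

	for (;;) {
		unsigned long e = *entry++;

		if (e & IND_DESTINATION) {
			dst = (unsigned char *)(e & ~(PAGE_SIZE - 1));	/* next destination page */
		} else if (e & IND_INDIRECTION) {
			entry = (unsigned long *)(e & ~(PAGE_SIZE - 1));/* continue in another list page */
		} else if (e & IND_DONE) {
			break;						/* list complete */
		} else if (e & IND_SOURCE) {
			memcpy(dst, (void *)(e & ~(PAGE_SIZE - 1)), PAGE_SIZE);
			dst += PAGE_SIZE;
		}
	}
}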
.align 8 load_psw: .long 0x00080000,0x80000000 - ctlregs: - .rept 16 - .quad 0 - .endr - gprregs: - .rept 16 - .quad 0 - .endr - have_diag308: - .byte 0 - .align 8 relocate_kernel_end: .align 8 .globl relocate_kernel_len diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7b58a712f818..d82a9ec64ea9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -821,6 +821,7 @@ static int __init setup_hwcaps(void) strcpy(elf_platform, "z13"); break; case 0x3906: + case 0x3907: strcpy(elf_platform, "z14"); break; } @@ -894,6 +895,9 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; + if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) + nospec_auto_detect(); + parse_early_param(); #ifdef CONFIG_CRASH_DUMP /* Deactivate elfcorehdr= kernel parameter */ diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index b38d48464368..8b210ead7956 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -388,3 +388,4 @@ 378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage 379 common statx sys_statx compat_sys_statx 380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi +381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index d9d1f512f019..5007fac01bb5 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, return orig; } +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, + struct pt_regs *regs) +{ + if (ctx == RP_CHECK_CHAIN_CALL) + return user_stack_pointer(regs) <= ret->stack; + else + return user_stack_pointer(regs) < ret->stack; +} + /* Instruction Emulation */ static void adjust_psw_addr(psw_t *psw, unsigned long len) diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 05c8abd864f1..2809d11c7a28 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -220,6 +220,8 @@ static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, /* * Like get_user_pages_fast() except its IRQ-safe in that it won't fall * back to the regular GUP. + * Note a difference with get_user_pages_fast: this always returns the + * number of pages pinned, 0 if no pages were pinned. 
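With syscall 381 wired up, the new s390 loaders become reachable through kexec_file_load(2). A minimal, hypothetical userspace invocation is sketched below; the paths are placeholders, kexec-tools would normally issue this call, and __NR_kexec_file_load assumes kernel headers that already define it for s390:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	static const char cmdline[] = "root=/dev/ram0";		/* example only */
	int kernel_fd = open("/boot/image", O_RDONLY);		/* placeholder paths */
	int initrd_fd = open("/boot/initrd", O_RDONLY);

	if (kernel_fd < 0 || initrd_fd < 0)
		return 1;

	/* long kexec_file_load(int kernel_fd, int initrd_fd,
	 *                      unsigned long cmdline_len, const char *cmdline,
	 *                      unsigned long flags);
	 * cmdline_len counts the trailing NUL; flags could also be
	 * KEXEC_FILE_ON_CRASH (from <linux/kexec.h>) to target the crash region.
	 */
	if (syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
		    sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	return 0;	/* the loaded kernel is started later via reboot(LINUX_REBOOT_CMD_KEXEC) */
}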
*/ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) diff --git a/arch/s390/purgatory/.gitignore b/arch/s390/purgatory/.gitignore new file mode 100644 index 000000000000..e9e66f178a6d --- /dev/null +++ b/arch/s390/purgatory/.gitignore @@ -0,0 +1,2 @@ +kexec-purgatory.c +purgatory.ro diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile new file mode 100644 index 000000000000..e9525bc1b4a6 --- /dev/null +++ b/arch/s390/purgatory/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 + +OBJECT_FILES_NON_STANDARD := y + +purgatory-y := head.o purgatory.o string.o sha256.o mem.o + +targets += $(purgatory-y) purgatory.ro kexec-purgatory.c +PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) + +$(obj)/sha256.o: $(srctree)/lib/sha256.c + $(call if_changed_rule,cc_o_c) + +$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S + $(call if_changed_rule,as_o_S) + +$(obj)/string.o: $(srctree)/arch/s390/lib/string.c + $(call if_changed_rule,cc_o_c) + +LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib +LDFLAGS_purgatory.ro += -z nodefaultlib +KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes +KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare +KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding +KBUILD_CFLAGS += -c -MD -Os -m64 +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) + +$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE + $(call if_changed,ld) + +CMD_BIN2C = $(objtree)/scripts/basic/bin2c +quiet_cmd_bin2c = BIN2C $@ + cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@ + +$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE + $(call if_changed,bin2c) + +obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S new file mode 100644 index 000000000000..660c96a05a9b --- /dev/null +++ b/arch/s390/purgatory/head.S @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Purgatory setup code + * + * Copyright IBM Corp. 2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> +#include <asm/sigp.h> + +/* The purgatory is the code running between two kernels. It's main purpose + * is to verify that the next kernel was not corrupted after load and to + * start it. + * + * If the next kernel is a crash kernel there are some peculiarities to + * consider: + * + * First the purgatory is called twice. Once only to verify the + * sha digest. So if the crash kernel got corrupted the old kernel can try + * to trigger a stand-alone dumper. And once to actually load the crash kernel. + * + * Second the purgatory also has to swap the crash memory region with its + * destination at address 0. As the purgatory is part of crash memory this + * requires some finesse. The tactic here is that the purgatory first copies + * itself to the end of the destination and then swaps the rest of the + * memory running from there. 
+ */ + +#define bufsz purgatory_end-stack + +.macro MEMCPY dst,src,len + lgr %r0,\dst + lgr %r1,\len + lgr %r2,\src + lgr %r3,\len + +20: mvcle %r0,%r2,0 + jo 20b +.endm + +.macro MEMSWAP dst,src,buf,len +10: cghi \len,bufsz + jh 11f + lgr %r4,\len + j 12f +11: lghi %r4,bufsz + +12: MEMCPY \buf,\dst,%r4 + MEMCPY \dst,\src,%r4 + MEMCPY \src,\buf,%r4 + + agr \dst,%r4 + agr \src,%r4 + sgr \len,%r4 + + cghi \len,0 + jh 10b +.endm + +.macro START_NEXT_KERNEL base + lg %r4,kernel_entry-\base(%r13) + lg %r5,load_psw_mask-\base(%r13) + ogr %r4,%r5 + stg %r4,0(%r0) + + xgr %r0,%r0 + diag %r0,%r0,0x308 +.endm + +.text +.align PAGE_SIZE +ENTRY(purgatory_start) + /* The purgatory might be called after a diag308 so better set + * architecture and addressing mode. + */ + lhi %r1,1 + sigp %r1,%r0,SIGP_SET_ARCHITECTURE + sam64 + + larl %r5,gprregs + stmg %r6,%r15,0(%r5) + + basr %r13,0 +.base_crash: + + /* Setup stack */ + larl %r15,purgatory_end + aghi %r15,-160 + + /* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called + * directly with a flag passed in %r2 whether the purgatory shall do + * checksum verification only (%r2 = 0 -> verification only). + * + * Check now and preserve over C function call by storing in + * %r10 whith + * 1 -> checksum verification only + * 0 -> load new kernel + */ + lghi %r10,0 + lg %r11,kernel_type-.base_crash(%r13) + cghi %r11,1 /* KEXEC_TYPE_CRASH */ + jne .do_checksum_verification + cghi %r2,0 /* checksum verification only */ + jne .do_checksum_verification + lghi %r10,1 + +.do_checksum_verification: + brasl %r14,verify_sha256_digest + + cghi %r10,1 /* checksum verification only */ + je .return_old_kernel + cghi %r2,0 /* checksum match */ + jne .disabled_wait + + /* If the next kernel is a crash kernel the purgatory has to swap + * the mem regions first. + */ + cghi %r11,1 /* KEXEC_TYPE_CRASH */ + je .start_crash_kernel + + /* start normal kernel */ + START_NEXT_KERNEL .base_crash + +.return_old_kernel: + lmg %r6,%r15,gprregs-.base_crash(%r13) + br %r14 + +.disabled_wait: + lpswe disabled_wait_psw-.base_crash(%r13) + +.start_crash_kernel: + /* Location of purgatory_start in crash memory */ + lgr %r8,%r13 + aghi %r8,-(.base_crash-purgatory_start) + + /* Destination for this code i.e. end of memory to be swapped. */ + lg %r9,crash_size-.base_crash(%r13) + aghi %r9,-(purgatory_end-purgatory_start) + + /* Destination in crash memory, i.e. same as r9 but in crash memory. */ + lg %r10,crash_start-.base_crash(%r13) + agr %r10,%r9 + + /* Buffer location (in crash memory) and size. As the purgatory is + * behind the point of no return it can re-use the stack as buffer. + */ + lghi %r11,bufsz + larl %r12,stack + + MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */ + MEMCPY %r9,%r8,%r11 /* self -> dst */ + + /* Jump to new location. */ + lgr %r7,%r9 + aghi %r7,.jump_to_dst-purgatory_start + br %r7 + +.jump_to_dst: + basr %r13,0 +.base_dst: + + /* clear buffer */ + MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */ + + /* Load new buffer location after jump */ + larl %r7,stack + aghi %r10,stack-purgatory_start + MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */ + + /* Now the code is set up to run from its designated location. Start + * swapping the rest of crash memory now. 
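The MEMSWAP macro above exchanges two regions through a small bounce buffer, one bufsz-sized chunk per round. The same idea as a plain C sketch (userspace, memcpy standing in for mvcle):

#include <stddef.h>
#include <string.h>

/* Swap len bytes between dst and src using a bounce buffer of bufsz bytes:
 * take min(len, bufsz) per round, rotate the data through buf, then advance.
 */
static void memswap(unsigned char *dst, unsigned char *src,
		    unsigned char *buf, size_t bufsz, size_t len)
{
	while (len) {
		size_t n = len < bufsz ? len : bufsz;

		memcpy(buf, dst, n);	/* dst -> buf */
		memcpy(dst, src, n);	/* src -> dst */
		memcpy(src, buf, n);	/* buf -> src */

		dst += n;
		src += n;
		len -= n;
	}
}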
+ * + * The registers will be used as follow: + * + * %r0-%r4 reserved for macros defined above + * %r5-%r6 tmp registers + * %r7 pointer to current struct sha region + * %r8 index to iterate over all sha regions + * %r9 pointer in crash memory + * %r10 pointer in old kernel + * %r11 total size (still) to be moved + * %r12 pointer to buffer + */ + lgr %r12,%r7 + lgr %r11,%r9 + lghi %r10,0 + lg %r9,crash_start-.base_dst(%r13) + lghi %r8,16 /* KEXEC_SEGMENTS_MAX */ + larl %r7,purgatory_sha_regions + + j .loop_first + + /* Loop over all purgatory_sha_regions. */ +.loop_next: + aghi %r8,-1 + cghi %r8,0 + je .loop_out + + aghi %r7,__KEXEC_SHA_REGION_SIZE + +.loop_first: + lg %r5,__KEXEC_SHA_REGION_START(%r7) + cghi %r5,0 + je .loop_next + + /* Copy [end last sha region, start current sha region) */ + /* Note: kexec_sha_region->start points in crash memory */ + sgr %r5,%r9 + MEMCPY %r9,%r10,%r5 + + agr %r9,%r5 + agr %r10,%r5 + sgr %r11,%r5 + + /* Swap sha region */ + lg %r6,__KEXEC_SHA_REGION_LEN(%r7) + MEMSWAP %r9,%r10,%r12,%r6 + sg %r11,__KEXEC_SHA_REGION_LEN(%r7) + j .loop_next + +.loop_out: + /* Copy rest of crash memory */ + MEMCPY %r9,%r10,%r11 + + /* start crash kernel */ + START_NEXT_KERNEL .base_dst + + +load_psw_mask: + .long 0x00080000,0x80000000 + + .align 8 +disabled_wait_psw: + .quad 0x0002000180000000 + .quad 0x0000000000000000 + .do_checksum_verification + +gprregs: + .rept 10 + .quad 0 + .endr + +purgatory_sha256_digest: + .global purgatory_sha256_digest + .rept 32 /* SHA256_DIGEST_SIZE */ + .byte 0 + .endr + +purgatory_sha_regions: + .global purgatory_sha_regions + .rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */ + .byte 0 + .endr + +kernel_entry: + .global kernel_entry + .quad 0 + +kernel_type: + .global kernel_type + .quad 0 + +crash_start: + .global crash_start + .quad 0 + +crash_size: + .global crash_size + .quad 0 + + .align PAGE_SIZE +stack: + /* The buffer to move this code must be as big as the code. */ + .skip stack-purgatory_start + .align PAGE_SIZE +purgatory_end: diff --git a/arch/s390/purgatory/purgatory.c b/arch/s390/purgatory/purgatory.c new file mode 100644 index 000000000000..4e2beb3c29b7 --- /dev/null +++ b/arch/s390/purgatory/purgatory.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Purgatory code running between two kernels. + * + * Copyright IBM Corp. 
2018 + * + * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com> + */ + +#include <linux/kexec.h> +#include <linux/sha256.h> +#include <linux/string.h> +#include <asm/purgatory.h> + +struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX]; +u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE]; + +u64 kernel_entry; +u64 kernel_type; + +u64 crash_start; +u64 crash_size; + +int verify_sha256_digest(void) +{ + struct kexec_sha_region *ptr, *end; + u8 digest[SHA256_DIGEST_SIZE]; + struct sha256_state sctx; + + sha256_init(&sctx); + end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); + + for (ptr = purgatory_sha_regions; ptr < end; ptr++) + sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); + + sha256_final(&sctx, digest); + + if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) + return 1; + + return 0; +} diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c index 4feb7c86f4ac..46b2481eec90 100644 --- a/arch/sh/boards/of-generic.c +++ b/arch/sh/boards/of-generic.c @@ -126,12 +126,6 @@ static void __init sh_of_setup(char **cmdline_p) { struct device_node *root; -#ifdef CONFIG_USE_BUILTIN_DTB - unflatten_and_copy_device_tree(); -#else - unflatten_device_tree(); -#endif - board_time_init = sh_of_time_init; sh_mv.mv_name = "Unknown SH model"; diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 5976a2c8a3e3..e5b7437ab4af 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -49,6 +49,8 @@ static void pcibios_scanbus(struct pci_channel *hose) for (i = 0; i < hose->nr_resources; i++) { res = hose->resources + i; offset = 0; + if (res->flags & IORESOURCE_DISABLED) + continue; if (res->flags & IORESOURCE_IO) offset = hose->io_offset; else if (res->flags & IORESOURCE_MEM) @@ -102,6 +104,9 @@ int register_pci_controller(struct pci_channel *hose) for (i = 0; i < hose->nr_resources; i++) { struct resource *res = hose->resources + i; + if (res->flags & IORESOURCE_DISABLED) + continue; + if (res->flags & IORESOURCE_IO) { if (request_resource(&ioport_resource, res) < 0) goto out; diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c index 0167a7352719..382e7ecf4c82 100644 --- a/arch/sh/drivers/pci/pcie-sh7786.c +++ b/arch/sh/drivers/pci/pcie-sh7786.c @@ -19,6 +19,7 @@ #include <linux/clk.h> #include <linux/sh_clk.h> #include <linux/sh_intc.h> +#include <cpu/sh7786.h> #include "pcie-sh7786.h" #include <asm/sizes.h> @@ -32,6 +33,7 @@ struct sh7786_pcie_port { static struct sh7786_pcie_port *sh7786_pcie_ports; static unsigned int nr_ports; +static unsigned long dma_pfn_offset; static struct sh7786_pcie_hwops { int (*core_init)(void); @@ -40,73 +42,73 @@ static struct sh7786_pcie_hwops { static struct resource sh7786_pci0_resources[] = { { - .name = "PCIe0 IO", + .name = "PCIe0 MEM 0", .start = 0xfd000000, .end = 0xfd000000 + SZ_8M - 1, - .flags = IORESOURCE_IO, + .flags = IORESOURCE_MEM, }, { - .name = "PCIe0 MEM 0", + .name = "PCIe0 MEM 1", .start = 0xc0000000, .end = 0xc0000000 + SZ_512M - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { - .name = "PCIe0 MEM 1", + .name = "PCIe0 MEM 2", .start = 0x10000000, .end = 0x10000000 + SZ_64M - 1, .flags = IORESOURCE_MEM, }, { - .name = "PCIe0 MEM 2", + .name = "PCIe0 IO", .start = 0xfe100000, .end = 0xfe100000 + SZ_1M - 1, - .flags = IORESOURCE_MEM, + .flags = IORESOURCE_IO, }, }; static struct resource sh7786_pci1_resources[] = { { - .name = "PCIe1 IO", + .name = "PCIe1 MEM 0", .start = 0xfd800000, .end = 0xfd800000 + SZ_8M - 1, - .flags = 
IORESOURCE_IO, + .flags = IORESOURCE_MEM, }, { - .name = "PCIe1 MEM 0", + .name = "PCIe1 MEM 1", .start = 0xa0000000, .end = 0xa0000000 + SZ_512M - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { - .name = "PCIe1 MEM 1", + .name = "PCIe1 MEM 2", .start = 0x30000000, .end = 0x30000000 + SZ_256M - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { - .name = "PCIe1 MEM 2", + .name = "PCIe1 IO", .start = 0xfe300000, .end = 0xfe300000 + SZ_1M - 1, - .flags = IORESOURCE_MEM, + .flags = IORESOURCE_IO, }, }; static struct resource sh7786_pci2_resources[] = { { - .name = "PCIe2 IO", + .name = "PCIe2 MEM 0", .start = 0xfc800000, .end = 0xfc800000 + SZ_4M - 1, - .flags = IORESOURCE_IO, + .flags = IORESOURCE_MEM, }, { - .name = "PCIe2 MEM 0", + .name = "PCIe2 MEM 1", .start = 0x80000000, .end = 0x80000000 + SZ_512M - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { - .name = "PCIe2 MEM 1", + .name = "PCIe2 MEM 2", .start = 0x20000000, .end = 0x20000000 + SZ_256M - 1, .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, }, { - .name = "PCIe2 MEM 2", + .name = "PCIe2 IO", .start = 0xfcd00000, .end = 0xfcd00000 + SZ_1M - 1, - .flags = IORESOURCE_MEM, + .flags = IORESOURCE_IO, }, }; @@ -301,7 +303,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port) { struct pci_channel *chan = port->hose; unsigned int data; - phys_addr_t memphys; + phys_addr_t memstart, memend; size_t memsize; int ret, i, win; @@ -357,15 +359,26 @@ static int __init pcie_init(struct sh7786_pcie_port *port) data |= (0xff << 16); pci_write_reg(chan, data, SH4A_PCIEMACCTLR); - memphys = __pa(memory_start); - memsize = roundup_pow_of_two(memory_end - memory_start); + memstart = __pa(memory_start); + memend = __pa(memory_end); + memsize = roundup_pow_of_two(memend - memstart); + + /* + * The start address must be aligned on its size. So we round + * it down, and then recalculate the size so that it covers + * the entire memory. + */ + memstart = ALIGN_DOWN(memstart, memsize); + memsize = roundup_pow_of_two(memend - memstart); + + dma_pfn_offset = memstart >> PAGE_SHIFT; /* * If there's more than 512MB of memory, we need to roll over to * LAR1/LAMR1. */ if (memsize > SZ_512M) { - pci_write_reg(chan, memphys + SZ_512M, SH4A_PCIELAR1); + pci_write_reg(chan, memstart + SZ_512M, SH4A_PCIELAR1); pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1, SH4A_PCIELAMR1); memsize = SZ_512M; @@ -381,7 +394,7 @@ static int __init pcie_init(struct sh7786_pcie_port *port) * LAR0/LAMR0 covers up to the first 512MB, which is enough to * cover all of lowmem on most platforms. */ - pci_write_reg(chan, memphys, SH4A_PCIELAR0); + pci_write_reg(chan, memstart, SH4A_PCIELAR0); pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0); /* Finish initialization */ @@ -438,6 +451,9 @@ static int __init pcie_init(struct sh7786_pcie_port *port) * mode, so just skip them entirely. 
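To make the LAR/LAMR alignment fix concrete, assume a board with RAM at physical 0x48000000..0x58000000 (made-up numbers): the window size first rounds up to 0x10000000, the base is aligned down to 0x40000000, and the window is then re-grown to 0x20000000 so it still covers all of memory; dma_pfn_offset records the aligned base in pages. A compilable sketch of that arithmetic:

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	/* Hypothetical SH7786 board: RAM at 0x48000000..0x58000000 */
	unsigned long memstart = 0x48000000UL;
	unsigned long memend   = 0x58000000UL;
	unsigned long memsize  = roundup_pow_of_two(memend - memstart);

	memstart = ALIGN_DOWN(memstart, memsize);		/* -> 0x40000000 */
	memsize  = roundup_pow_of_two(memend - memstart);	/* regrown -> 0x20000000 */

	printf("LAR base %#lx, window size %#lx, dma_pfn_offset %#lx\n",
	       memstart, memsize, memstart >> 12 /* PAGE_SHIFT */);
	return 0;
}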
*/ if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode()) + res->flags |= IORESOURCE_DISABLED; + + if (res->flags & IORESOURCE_DISABLED) continue; pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win)); @@ -472,6 +488,11 @@ int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) return evt2irq(0xae0); } +void pcibios_bus_add_device(struct pci_dev *pdev) +{ + pdev->dev.dma_pfn_offset = dma_pfn_offset; +} + static int __init sh7786_pcie_core_init(void) { /* Return the number of ports */ @@ -527,6 +548,7 @@ static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = { static int __init sh7786_pcie_init(void) { struct clk *platclk; + u32 mm_sel; int i; printk(KERN_NOTICE "PCI: Starting initialization.\n"); @@ -560,6 +582,16 @@ static int __init sh7786_pcie_init(void) clk_enable(platclk); + mm_sel = sh7786_mm_sel(); + + /* + * Depending on the MMSELR register value, the PCIe0 MEM 1 + * area may not be available. See Table 13.11 of the SH7786 + * datasheet. + */ + if (mm_sel != 1 && mm_sel != 2 && mm_sel != 5 && mm_sel != 6) + sh7786_pci0_resources[2].flags |= IORESOURCE_DISABLED; + printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports); for (i = 0; i < nr_ports; i++) { diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h index 15bf07bfa96b..6d192f4908a7 100644 --- a/arch/sh/include/asm/futex.h +++ b/arch/sh/include/asm/futex.h @@ -37,10 +37,7 @@ static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, pagefault_disable(); do { - if (op == FUTEX_OP_SET) - ret = oldval = 0; - else - ret = get_user(oldval, uaddr); + ret = get_user(oldval, uaddr); if (ret) break; diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h index 0df09e638f09..96b8cb1f754a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7786.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h @@ -14,6 +14,8 @@ #ifndef __CPU_SH7786_H__ #define __CPU_SH7786_H__ +#include <linux/io.h> + enum { /* PA */ GPIO_PA7, GPIO_PA6, GPIO_PA5, GPIO_PA4, @@ -131,4 +133,9 @@ enum { GPIO_FN_IRL7, GPIO_FN_IRL6, GPIO_FN_IRL5, GPIO_FN_IRL4, }; +static inline u32 sh7786_mm_sel(void) +{ + return __raw_readl(0xFC400020) & 0x7; +} + #endif /* __CPU_SH7786_H__ */ diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 62b485107eae..178457d7620c 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -16,7 +16,8 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, enum dma_data_direction dir, unsigned long attrs) { - dma_addr_t addr = page_to_phys(page) + offset; + dma_addr_t addr = page_to_phys(page) + offset + - PFN_PHYS(dev->dma_pfn_offset); WARN_ON(size == 0); @@ -36,12 +37,14 @@ static int nommu_map_sg(struct device *dev, struct scatterlist *sg, WARN_ON(nents == 0 || sg[0].length == 0); for_each_sg(sg, s, nents, i) { + dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset); + BUG_ON(!sg_page(s)); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) sh_sync_dma_for_device(sg_virt(s), s->length, dir); - s->dma_address = sg_phys(s); + s->dma_address = sg_phys(s) - offset; s->dma_length = s->length; } diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index c001f782c5f1..28cc61216b64 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -255,7 +255,7 @@ debug_trap: mov.l @r8, r8 jsr @r8 nop - bra __restore_all + bra ret_from_exception nop CFI_ENDPROC diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index b95c411d0333..d34e998b809f 100644 --- a/arch/sh/kernel/setup.c 
+++ b/arch/sh/kernel/setup.c @@ -330,6 +330,14 @@ void __init setup_arch(char **cmdline_p) /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); +#ifdef CONFIG_OF_FLATTREE +#ifdef CONFIG_USE_BUILTIN_DTB + unflatten_and_copy_device_tree(); +#else + unflatten_device_tree(); +#endif +#endif + paging_init(); #ifdef CONFIG_DUMMY_CONSOLE diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 6ea3aab508f2..8ce98691d822 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -59,7 +59,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order); - *dma_handle = virt_to_phys(ret); + *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset); return ret_nocache; } @@ -69,7 +69,7 @@ void dma_generic_free_coherent(struct device *dev, size_t size, unsigned long attrs) { int order = get_order(size); - unsigned long pfn = dma_handle >> PAGE_SHIFT; + unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset; int k; for (k = 0; k < (1 << order); k++) diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index 8045b5bb7075..56c86ca98ecf 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c @@ -160,6 +160,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, /* * Like get_user_pages_fast() except its IRQ-safe in that it won't fall * back to the regular GUP. + * Note a difference with get_user_pages_fast: this always returns the + * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h index 722951908b0a..4f6676fe4bcc 100644 --- a/arch/sparc/include/uapi/asm/oradax.h +++ b/arch/sparc/include/uapi/asm/oradax.h @@ -3,7 +3,7 @@ * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or + * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1a0fa10cb6b7..32bae68e34c1 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, if (err) { printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", dev_name(&vdev->dev), err); - kfree(vdev); + put_device(&vdev->dev); return NULL; } if (vdev->dp) diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 357b6047653a..aee6dba83d0e 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -193,6 +193,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, return 1; } +/* + * Note a difference with get_user_pages_fast: this always returns the + * number of pages pinned, 0 if no pages were pinned. 
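The dma-nommu and consistent.c hunks apply dev->dma_pfn_offset symmetrically: mapping and allocation subtract PFN_PHYS(dma_pfn_offset) from the CPU physical address to form the bus address, and the free path adds it back to recover the page frame. A minimal sketch of both directions (PAGE_SHIFT assumed to be 12):

#define PFN_PHYS(pfn)	((unsigned long)(pfn) << 12)

/* map/alloc direction, as in nommu_map_page() and dma_generic_alloc_coherent() */
static unsigned long phys_to_bus(unsigned long phys, unsigned long dma_pfn_offset)
{
	return phys - PFN_PHYS(dma_pfn_offset);
}

/* free direction, as in dma_generic_free_coherent() */
static unsigned long bus_to_pfn(unsigned long dma_addr, unsigned long dma_pfn_offset)
{
	return (dma_addr >> 12) + dma_pfn_offset;
}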
+ */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile index a6615d8864f7..dd0b5a92ffd0 100644 --- a/arch/sparc/vdso/Makefile +++ b/arch/sparc/vdso/Makefile @@ -29,9 +29,7 @@ vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c) vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg) obj-y += $(vdso_img_objs) targets += $(vdso_img_cfiles) -targets += $(vdso_img_sodbg) -.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \ - $(vdso_img-y:%=$(obj)/vdso%.so) +targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) export CPPFLAGS_vdso.lds += -P -C diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d234cca296db..c07f492b871a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -29,6 +29,7 @@ config X86_64 select HAVE_ARCH_SOFT_DIRTY select MODULES_USE_ELF_RELA select X86_DEV_DMA_OPS + select ARCH_HAS_SYSCALL_WRAPPER # # Arch settings @@ -51,6 +52,7 @@ config X86 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER + select ARCH_HAS_FILTER_PGPROT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 @@ -272,6 +274,9 @@ config ARCH_HAS_CPU_RELAX config ARCH_HAS_CACHE_LINE_SIZE def_bool y +config ARCH_HAS_FILTER_PGPROT + def_bool y + config HAVE_SETUP_PER_CPU_AREA def_bool y @@ -2008,6 +2013,9 @@ config KEXEC_FILE for kernel and initramfs as opposed to list of segments as accepted by previous system call. +config ARCH_HAS_KEXEC_PURGATORY + def_bool KEXEC_FILE + config KEXEC_VERIFY_SIG bool "Verify kernel signature during kexec_file_load() syscall" depends on KEXEC_FILE @@ -2760,11 +2768,9 @@ config OLPC_XO1_RTC config OLPC_XO1_SCI bool "OLPC XO-1 SCI extras" - depends on OLPC && OLPC_XO1_PM + depends on OLPC && OLPC_XO1_PM && GPIO_CS5535=y depends on INPUT=y select POWER_SUPPLY - select GPIO_CS5535 - select MFD_CORE ---help--- Add support for SCI-based features of the OLPC XO-1 laptop: - EC-driven system wakeups diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 66e42a098d70..a0a50b91ecef 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -54,6 +54,9 @@ unsigned int ptrs_per_p4d __ro_after_init = 1; extern unsigned long get_cmd_line_ptr(void); +/* Used by PAGE_KERN* macros: */ +pteval_t __default_kernel_pte_mask __read_mostly = ~0; + /* Simplified build-specific string for starting entropy. 
*/ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index be63330c5511..352e70cd33e8 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -114,7 +114,9 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ .endif pushq \rdx /* pt_regs->dx */ + xorl %edx, %edx /* nospec dx */ pushq %rcx /* pt_regs->cx */ + xorl %ecx, %ecx /* nospec cx */ pushq \rax /* pt_regs->ax */ pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 74f6eee15179..fbf6a6c3fd2d 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -266,14 +266,13 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs) } #ifdef CONFIG_X86_64 -__visible void do_syscall_64(struct pt_regs *regs) +__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs) { - struct thread_info *ti = current_thread_info(); - unsigned long nr = regs->orig_ax; + struct thread_info *ti; enter_from_user_mode(); local_irq_enable(); - + ti = current_thread_info(); if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) nr = syscall_trace_enter(regs); @@ -282,11 +281,10 @@ __visible void do_syscall_64(struct pt_regs *regs) * table. The only functional difference is the x32 bit in * regs->orig_ax, which changes the behavior of some syscalls. */ - if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) { - nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls); - regs->ax = sys_call_table[nr]( - regs->di, regs->si, regs->dx, - regs->r10, regs->r8, regs->r9); + nr &= __SYSCALL_MASK; + if (likely(nr < NR_syscalls)) { + nr = array_index_nospec(nr, NR_syscalls); + regs->ax = sys_call_table[nr](regs); } syscall_return_slowpath(regs); @@ -321,6 +319,9 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) if (likely(nr < IA32_NR_syscalls)) { nr = array_index_nospec(nr, IA32_NR_syscalls); +#ifdef CONFIG_IA32_EMULATION + regs->ax = ia32_sys_call_table[nr](regs); +#else /* * It's possible that a 32-bit syscall implementation * takes a 64-bit parameter but nonetheless assumes that @@ -331,6 +332,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs) (unsigned int)regs->bx, (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->si, (unsigned int)regs->di, (unsigned int)regs->bp); +#endif /* CONFIG_IA32_EMULATION */ } syscall_return_slowpath(regs); diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index b0a4649e55ce..3166b9674429 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -233,7 +233,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) TRACE_IRQS_OFF /* IRQs are off. */ - movq %rsp, %rdi + movq %rax, %rdi + movq %rsp, %rsi call do_syscall_64 /* returns with IRQs disabled */ TRACE_IRQS_IRETQ /* we're about to change IF */ @@ -913,7 +914,7 @@ ENTRY(\sym) pushq $-1 /* ORIG_RAX: no syscall to restart */ .endif - .if \paranoid < 2 + .if \paranoid == 1 testb $3, CS-ORIG_RAX(%rsp) /* If coming from userspace, switch stacks */ jnz .Lfrom_usermode_switch_stack_\@ .endif @@ -960,7 +961,7 @@ ENTRY(\sym) jmp error_exit .endif - .if \paranoid < 2 + .if \paranoid == 1 /* * Entry from userspace. Switch stacks and treat it * as a normal entry. 
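After this change do_syscall_64() (and, with IA32_EMULATION, do_syscall_32_irqs_on()) passes a single struct pt_regs pointer to the table entry, so every entry must be a stub that unpacks its own arguments from the saved registers; the stubs themselves are generated elsewhere in this series. Below is a userspace model of the convention only, with obviously hypothetical names (fake_pt_regs, stub_sys_write):

#include <stdio.h>

/* Model of the new convention: handlers receive one pointer to the saved
 * register frame and extract their own arguments (di, si, dx, r10, r8, r9
 * being the x86-64 syscall argument registers).
 */
struct fake_pt_regs {
	unsigned long di, si, dx, r10, r8, r9;
	unsigned long orig_ax, ax;
};

typedef long (*sys_call_ptr_t)(const struct fake_pt_regs *);

static long stub_sys_write(const struct fake_pt_regs *regs)
{
	/* in the kernel this would forward to the real implementation */
	printf("write(fd=%lu, buf=%#lx, count=%lu)\n", regs->di, regs->si, regs->dx);
	return (long)regs->dx;
}

static const sys_call_ptr_t table[] = { stub_sys_write };

int main(void)
{
	struct fake_pt_regs regs = { .di = 1, .si = 0x1000, .dx = 5 };

	/* mirrors: regs->ax = sys_call_table[nr](regs); */
	regs.ax = (unsigned long)table[0](&regs);
	return 0;
}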
This means that paranoid handlers diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 08425c42f8b7..9de7f1e1dede 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat) pushq %rdx /* pt_regs->dx */ pushq %rcx /* pt_regs->cx */ pushq $-ENOSYS /* pt_regs->ax */ - pushq $0 /* pt_regs->r8 = 0 */ + pushq %r8 /* pt_regs->r8 */ xorl %r8d, %r8d /* nospec r8 */ - pushq $0 /* pt_regs->r9 = 0 */ + pushq %r9 /* pt_regs->r9 */ xorl %r9d, %r9d /* nospec r9 */ - pushq $0 /* pt_regs->r10 = 0 */ + pushq %r10 /* pt_regs->r10 */ xorl %r10d, %r10d /* nospec r10 */ - pushq $0 /* pt_regs->r11 = 0 */ + pushq %r11 /* pt_regs->r11 */ xorl %r11d, %r11d /* nospec r11 */ pushq %rbx /* pt_regs->rbx */ xorl %ebx, %ebx /* nospec rbx */ @@ -220,8 +220,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe) pushq %rax /* pt_regs->orig_ax */ pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ + xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ + xorl %edx, %edx /* nospec dx */ pushq %rbp /* pt_regs->cx (stashed in bp) */ + xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ @@ -365,8 +368,11 @@ ENTRY(entry_INT80_compat) pushq (%rdi) /* pt_regs->di */ pushq %rsi /* pt_regs->si */ + xorl %esi, %esi /* nospec si */ pushq %rdx /* pt_regs->dx */ + xorl %edx, %edx /* nospec dx */ pushq %rcx /* pt_regs->cx */ + xorl %ecx, %ecx /* nospec cx */ pushq $-ENOSYS /* pt_regs->ax */ pushq $0 /* pt_regs->r8 = 0 */ xorl %r8d, %r8d /* nospec r8 */ diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index 95c294963612..aa3336a7cb15 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -7,14 +7,23 @@ #include <asm/asm-offsets.h> #include <asm/syscall.h> -#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ; +#ifdef CONFIG_IA32_EMULATION +/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */ +#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); + +/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */ +extern asmlinkage long sys_ni_syscall(const struct pt_regs *); + +#else /* CONFIG_IA32_EMULATION */ +#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#endif /* CONFIG_IA32_EMULATION */ + #include <asm/syscalls_32.h> #undef __SYSCALL_I386 #define __SYSCALL_I386(nr, sym, qual) [nr] = sym, -extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); - __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = { /* * Smells like a compiler bug -- it doesn't work diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index c176d2fab1da..d5252bc1e380 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -7,14 +7,14 @@ #include <asm/asm-offsets.h> #include <asm/syscall.h> -#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +/* this is a lie, but it does not hurt as 
sys_ni_syscall just returns -EINVAL */ +extern asmlinkage long sys_ni_syscall(const struct pt_regs *); +#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); #include <asm/syscalls_64.h> #undef __SYSCALL_64 #define __SYSCALL_64(nr, sym, qual) [nr] = sym, -extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); - asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { /* * Smells like a compiler bug -- it doesn't work diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index c58f75b088c5..d6b27dab1b30 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -4,390 +4,395 @@ # The format is: # <number> <abi> <name> <entry point> <compat entry point> # +# The __ia32_sys and __ia32_compat_sys stubs are created on-the-fly for +# sys_*() system calls and compat_sys_*() compat system calls if +# IA32_EMULATION is defined, and expect struct pt_regs *regs as their only +# parameter. +# # The abi is always "i386" for this file. # -0 i386 restart_syscall sys_restart_syscall -1 i386 exit sys_exit -2 i386 fork sys_fork -3 i386 read sys_read -4 i386 write sys_write -5 i386 open sys_open compat_sys_open -6 i386 close sys_close -7 i386 waitpid sys_waitpid -8 i386 creat sys_creat -9 i386 link sys_link -10 i386 unlink sys_unlink -11 i386 execve sys_execve compat_sys_execve -12 i386 chdir sys_chdir -13 i386 time sys_time compat_sys_time -14 i386 mknod sys_mknod -15 i386 chmod sys_chmod -16 i386 lchown sys_lchown16 +0 i386 restart_syscall sys_restart_syscall __ia32_sys_restart_syscall +1 i386 exit sys_exit __ia32_sys_exit +2 i386 fork sys_fork __ia32_sys_fork +3 i386 read sys_read __ia32_sys_read +4 i386 write sys_write __ia32_sys_write +5 i386 open sys_open __ia32_compat_sys_open +6 i386 close sys_close __ia32_sys_close +7 i386 waitpid sys_waitpid __ia32_sys_waitpid +8 i386 creat sys_creat __ia32_sys_creat +9 i386 link sys_link __ia32_sys_link +10 i386 unlink sys_unlink __ia32_sys_unlink +11 i386 execve sys_execve __ia32_compat_sys_execve +12 i386 chdir sys_chdir __ia32_sys_chdir +13 i386 time sys_time __ia32_compat_sys_time +14 i386 mknod sys_mknod __ia32_sys_mknod +15 i386 chmod sys_chmod __ia32_sys_chmod +16 i386 lchown sys_lchown16 __ia32_sys_lchown16 17 i386 break -18 i386 oldstat sys_stat -19 i386 lseek sys_lseek compat_sys_lseek -20 i386 getpid sys_getpid -21 i386 mount sys_mount compat_sys_mount -22 i386 umount sys_oldumount -23 i386 setuid sys_setuid16 -24 i386 getuid sys_getuid16 -25 i386 stime sys_stime compat_sys_stime -26 i386 ptrace sys_ptrace compat_sys_ptrace -27 i386 alarm sys_alarm -28 i386 oldfstat sys_fstat -29 i386 pause sys_pause -30 i386 utime sys_utime compat_sys_utime +18 i386 oldstat sys_stat __ia32_sys_stat +19 i386 lseek sys_lseek __ia32_compat_sys_lseek +20 i386 getpid sys_getpid __ia32_sys_getpid +21 i386 mount sys_mount __ia32_compat_sys_mount +22 i386 umount sys_oldumount __ia32_sys_oldumount +23 i386 setuid sys_setuid16 __ia32_sys_setuid16 +24 i386 getuid sys_getuid16 __ia32_sys_getuid16 +25 i386 stime sys_stime __ia32_compat_sys_stime +26 i386 ptrace sys_ptrace __ia32_compat_sys_ptrace +27 i386 alarm sys_alarm __ia32_sys_alarm +28 i386 oldfstat sys_fstat __ia32_sys_fstat +29 i386 pause sys_pause __ia32_sys_pause +30 i386 utime sys_utime __ia32_compat_sys_utime 31 i386 stty 32 i386 gtty -33 i386 access sys_access -34 i386 nice sys_nice +33 i386 access sys_access __ia32_sys_access +34 
i386 nice sys_nice __ia32_sys_nice 35 i386 ftime -36 i386 sync sys_sync -37 i386 kill sys_kill -38 i386 rename sys_rename -39 i386 mkdir sys_mkdir -40 i386 rmdir sys_rmdir -41 i386 dup sys_dup -42 i386 pipe sys_pipe -43 i386 times sys_times compat_sys_times +36 i386 sync sys_sync __ia32_sys_sync +37 i386 kill sys_kill __ia32_sys_kill +38 i386 rename sys_rename __ia32_sys_rename +39 i386 mkdir sys_mkdir __ia32_sys_mkdir +40 i386 rmdir sys_rmdir __ia32_sys_rmdir +41 i386 dup sys_dup __ia32_sys_dup +42 i386 pipe sys_pipe __ia32_sys_pipe +43 i386 times sys_times __ia32_compat_sys_times 44 i386 prof -45 i386 brk sys_brk -46 i386 setgid sys_setgid16 -47 i386 getgid sys_getgid16 -48 i386 signal sys_signal -49 i386 geteuid sys_geteuid16 -50 i386 getegid sys_getegid16 -51 i386 acct sys_acct -52 i386 umount2 sys_umount +45 i386 brk sys_brk __ia32_sys_brk +46 i386 setgid sys_setgid16 __ia32_sys_setgid16 +47 i386 getgid sys_getgid16 __ia32_sys_getgid16 +48 i386 signal sys_signal __ia32_sys_signal +49 i386 geteuid sys_geteuid16 __ia32_sys_geteuid16 +50 i386 getegid sys_getegid16 __ia32_sys_getegid16 +51 i386 acct sys_acct __ia32_sys_acct +52 i386 umount2 sys_umount __ia32_sys_umount 53 i386 lock -54 i386 ioctl sys_ioctl compat_sys_ioctl -55 i386 fcntl sys_fcntl compat_sys_fcntl64 +54 i386 ioctl sys_ioctl __ia32_compat_sys_ioctl +55 i386 fcntl sys_fcntl __ia32_compat_sys_fcntl64 56 i386 mpx -57 i386 setpgid sys_setpgid +57 i386 setpgid sys_setpgid __ia32_sys_setpgid 58 i386 ulimit -59 i386 oldolduname sys_olduname -60 i386 umask sys_umask -61 i386 chroot sys_chroot -62 i386 ustat sys_ustat compat_sys_ustat -63 i386 dup2 sys_dup2 -64 i386 getppid sys_getppid -65 i386 getpgrp sys_getpgrp -66 i386 setsid sys_setsid -67 i386 sigaction sys_sigaction compat_sys_sigaction -68 i386 sgetmask sys_sgetmask -69 i386 ssetmask sys_ssetmask -70 i386 setreuid sys_setreuid16 -71 i386 setregid sys_setregid16 -72 i386 sigsuspend sys_sigsuspend -73 i386 sigpending sys_sigpending compat_sys_sigpending -74 i386 sethostname sys_sethostname -75 i386 setrlimit sys_setrlimit compat_sys_setrlimit -76 i386 getrlimit sys_old_getrlimit compat_sys_old_getrlimit -77 i386 getrusage sys_getrusage compat_sys_getrusage -78 i386 gettimeofday sys_gettimeofday compat_sys_gettimeofday -79 i386 settimeofday sys_settimeofday compat_sys_settimeofday -80 i386 getgroups sys_getgroups16 -81 i386 setgroups sys_setgroups16 -82 i386 select sys_old_select compat_sys_old_select -83 i386 symlink sys_symlink -84 i386 oldlstat sys_lstat -85 i386 readlink sys_readlink -86 i386 uselib sys_uselib -87 i386 swapon sys_swapon -88 i386 reboot sys_reboot -89 i386 readdir sys_old_readdir compat_sys_old_readdir -90 i386 mmap sys_old_mmap compat_sys_x86_mmap -91 i386 munmap sys_munmap -92 i386 truncate sys_truncate compat_sys_truncate -93 i386 ftruncate sys_ftruncate compat_sys_ftruncate -94 i386 fchmod sys_fchmod -95 i386 fchown sys_fchown16 -96 i386 getpriority sys_getpriority -97 i386 setpriority sys_setpriority +59 i386 oldolduname sys_olduname __ia32_sys_olduname +60 i386 umask sys_umask __ia32_sys_umask +61 i386 chroot sys_chroot __ia32_sys_chroot +62 i386 ustat sys_ustat __ia32_compat_sys_ustat +63 i386 dup2 sys_dup2 __ia32_sys_dup2 +64 i386 getppid sys_getppid __ia32_sys_getppid +65 i386 getpgrp sys_getpgrp __ia32_sys_getpgrp +66 i386 setsid sys_setsid __ia32_sys_setsid +67 i386 sigaction sys_sigaction __ia32_compat_sys_sigaction +68 i386 sgetmask sys_sgetmask __ia32_sys_sgetmask +69 i386 ssetmask sys_ssetmask __ia32_sys_ssetmask +70 i386 setreuid 
sys_setreuid16 __ia32_sys_setreuid16 +71 i386 setregid sys_setregid16 __ia32_sys_setregid16 +72 i386 sigsuspend sys_sigsuspend __ia32_sys_sigsuspend +73 i386 sigpending sys_sigpending __ia32_compat_sys_sigpending +74 i386 sethostname sys_sethostname __ia32_sys_sethostname +75 i386 setrlimit sys_setrlimit __ia32_compat_sys_setrlimit +76 i386 getrlimit sys_old_getrlimit __ia32_compat_sys_old_getrlimit +77 i386 getrusage sys_getrusage __ia32_compat_sys_getrusage +78 i386 gettimeofday sys_gettimeofday __ia32_compat_sys_gettimeofday +79 i386 settimeofday sys_settimeofday __ia32_compat_sys_settimeofday +80 i386 getgroups sys_getgroups16 __ia32_sys_getgroups16 +81 i386 setgroups sys_setgroups16 __ia32_sys_setgroups16 +82 i386 select sys_old_select __ia32_compat_sys_old_select +83 i386 symlink sys_symlink __ia32_sys_symlink +84 i386 oldlstat sys_lstat __ia32_sys_lstat +85 i386 readlink sys_readlink __ia32_sys_readlink +86 i386 uselib sys_uselib __ia32_sys_uselib +87 i386 swapon sys_swapon __ia32_sys_swapon +88 i386 reboot sys_reboot __ia32_sys_reboot +89 i386 readdir sys_old_readdir __ia32_compat_sys_old_readdir +90 i386 mmap sys_old_mmap __ia32_compat_sys_x86_mmap +91 i386 munmap sys_munmap __ia32_sys_munmap +92 i386 truncate sys_truncate __ia32_compat_sys_truncate +93 i386 ftruncate sys_ftruncate __ia32_compat_sys_ftruncate +94 i386 fchmod sys_fchmod __ia32_sys_fchmod +95 i386 fchown sys_fchown16 __ia32_sys_fchown16 +96 i386 getpriority sys_getpriority __ia32_sys_getpriority +97 i386 setpriority sys_setpriority __ia32_sys_setpriority 98 i386 profil -99 i386 statfs sys_statfs compat_sys_statfs -100 i386 fstatfs sys_fstatfs compat_sys_fstatfs -101 i386 ioperm sys_ioperm -102 i386 socketcall sys_socketcall compat_sys_socketcall -103 i386 syslog sys_syslog -104 i386 setitimer sys_setitimer compat_sys_setitimer -105 i386 getitimer sys_getitimer compat_sys_getitimer -106 i386 stat sys_newstat compat_sys_newstat -107 i386 lstat sys_newlstat compat_sys_newlstat -108 i386 fstat sys_newfstat compat_sys_newfstat -109 i386 olduname sys_uname -110 i386 iopl sys_iopl -111 i386 vhangup sys_vhangup +99 i386 statfs sys_statfs __ia32_compat_sys_statfs +100 i386 fstatfs sys_fstatfs __ia32_compat_sys_fstatfs +101 i386 ioperm sys_ioperm __ia32_sys_ioperm +102 i386 socketcall sys_socketcall __ia32_compat_sys_socketcall +103 i386 syslog sys_syslog __ia32_sys_syslog +104 i386 setitimer sys_setitimer __ia32_compat_sys_setitimer +105 i386 getitimer sys_getitimer __ia32_compat_sys_getitimer +106 i386 stat sys_newstat __ia32_compat_sys_newstat +107 i386 lstat sys_newlstat __ia32_compat_sys_newlstat +108 i386 fstat sys_newfstat __ia32_compat_sys_newfstat +109 i386 olduname sys_uname __ia32_sys_uname +110 i386 iopl sys_iopl __ia32_sys_iopl +111 i386 vhangup sys_vhangup __ia32_sys_vhangup 112 i386 idle 113 i386 vm86old sys_vm86old sys_ni_syscall -114 i386 wait4 sys_wait4 compat_sys_wait4 -115 i386 swapoff sys_swapoff -116 i386 sysinfo sys_sysinfo compat_sys_sysinfo -117 i386 ipc sys_ipc compat_sys_ipc -118 i386 fsync sys_fsync +114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4 +115 i386 swapoff sys_swapoff __ia32_sys_swapoff +116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo +117 i386 ipc sys_ipc __ia32_compat_sys_ipc +118 i386 fsync sys_fsync __ia32_sys_fsync 119 i386 sigreturn sys_sigreturn sys32_sigreturn -120 i386 clone sys_clone compat_sys_x86_clone -121 i386 setdomainname sys_setdomainname -122 i386 uname sys_newuname -123 i386 modify_ldt sys_modify_ldt -124 i386 adjtimex sys_adjtimex compat_sys_adjtimex -125 i386 
mprotect sys_mprotect -126 i386 sigprocmask sys_sigprocmask compat_sys_sigprocmask +120 i386 clone sys_clone __ia32_compat_sys_x86_clone +121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname +122 i386 uname sys_newuname __ia32_sys_newuname +123 i386 modify_ldt sys_modify_ldt __ia32_sys_modify_ldt +124 i386 adjtimex sys_adjtimex __ia32_compat_sys_adjtimex +125 i386 mprotect sys_mprotect __ia32_sys_mprotect +126 i386 sigprocmask sys_sigprocmask __ia32_compat_sys_sigprocmask 127 i386 create_module -128 i386 init_module sys_init_module -129 i386 delete_module sys_delete_module +128 i386 init_module sys_init_module __ia32_sys_init_module +129 i386 delete_module sys_delete_module __ia32_sys_delete_module 130 i386 get_kernel_syms -131 i386 quotactl sys_quotactl compat_sys_quotactl32 -132 i386 getpgid sys_getpgid -133 i386 fchdir sys_fchdir -134 i386 bdflush sys_bdflush -135 i386 sysfs sys_sysfs -136 i386 personality sys_personality +131 i386 quotactl sys_quotactl __ia32_compat_sys_quotactl32 +132 i386 getpgid sys_getpgid __ia32_sys_getpgid +133 i386 fchdir sys_fchdir __ia32_sys_fchdir +134 i386 bdflush sys_bdflush __ia32_sys_bdflush +135 i386 sysfs sys_sysfs __ia32_sys_sysfs +136 i386 personality sys_personality __ia32_sys_personality 137 i386 afs_syscall -138 i386 setfsuid sys_setfsuid16 -139 i386 setfsgid sys_setfsgid16 -140 i386 _llseek sys_llseek -141 i386 getdents sys_getdents compat_sys_getdents -142 i386 _newselect sys_select compat_sys_select -143 i386 flock sys_flock -144 i386 msync sys_msync -145 i386 readv sys_readv compat_sys_readv -146 i386 writev sys_writev compat_sys_writev -147 i386 getsid sys_getsid -148 i386 fdatasync sys_fdatasync -149 i386 _sysctl sys_sysctl compat_sys_sysctl -150 i386 mlock sys_mlock -151 i386 munlock sys_munlock -152 i386 mlockall sys_mlockall -153 i386 munlockall sys_munlockall -154 i386 sched_setparam sys_sched_setparam -155 i386 sched_getparam sys_sched_getparam -156 i386 sched_setscheduler sys_sched_setscheduler -157 i386 sched_getscheduler sys_sched_getscheduler -158 i386 sched_yield sys_sched_yield -159 i386 sched_get_priority_max sys_sched_get_priority_max -160 i386 sched_get_priority_min sys_sched_get_priority_min -161 i386 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -162 i386 nanosleep sys_nanosleep compat_sys_nanosleep -163 i386 mremap sys_mremap -164 i386 setresuid sys_setresuid16 -165 i386 getresuid sys_getresuid16 +138 i386 setfsuid sys_setfsuid16 __ia32_sys_setfsuid16 +139 i386 setfsgid sys_setfsgid16 __ia32_sys_setfsgid16 +140 i386 _llseek sys_llseek __ia32_sys_llseek +141 i386 getdents sys_getdents __ia32_compat_sys_getdents +142 i386 _newselect sys_select __ia32_compat_sys_select +143 i386 flock sys_flock __ia32_sys_flock +144 i386 msync sys_msync __ia32_sys_msync +145 i386 readv sys_readv __ia32_compat_sys_readv +146 i386 writev sys_writev __ia32_compat_sys_writev +147 i386 getsid sys_getsid __ia32_sys_getsid +148 i386 fdatasync sys_fdatasync __ia32_sys_fdatasync +149 i386 _sysctl sys_sysctl __ia32_compat_sys_sysctl +150 i386 mlock sys_mlock __ia32_sys_mlock +151 i386 munlock sys_munlock __ia32_sys_munlock +152 i386 mlockall sys_mlockall __ia32_sys_mlockall +153 i386 munlockall sys_munlockall __ia32_sys_munlockall +154 i386 sched_setparam sys_sched_setparam __ia32_sys_sched_setparam +155 i386 sched_getparam sys_sched_getparam __ia32_sys_sched_getparam +156 i386 sched_setscheduler sys_sched_setscheduler __ia32_sys_sched_setscheduler +157 i386 sched_getscheduler sys_sched_getscheduler 
__ia32_sys_sched_getscheduler +158 i386 sched_yield sys_sched_yield __ia32_sys_sched_yield +159 i386 sched_get_priority_max sys_sched_get_priority_max __ia32_sys_sched_get_priority_max +160 i386 sched_get_priority_min sys_sched_get_priority_min __ia32_sys_sched_get_priority_min +161 i386 sched_rr_get_interval sys_sched_rr_get_interval __ia32_compat_sys_sched_rr_get_interval +162 i386 nanosleep sys_nanosleep __ia32_compat_sys_nanosleep +163 i386 mremap sys_mremap __ia32_sys_mremap +164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16 +165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16 166 i386 vm86 sys_vm86 sys_ni_syscall 167 i386 query_module -168 i386 poll sys_poll +168 i386 poll sys_poll __ia32_sys_poll 169 i386 nfsservctl -170 i386 setresgid sys_setresgid16 -171 i386 getresgid sys_getresgid16 -172 i386 prctl sys_prctl +170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16 +171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16 +172 i386 prctl sys_prctl __ia32_sys_prctl 173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn -174 i386 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction -175 i386 rt_sigprocmask sys_rt_sigprocmask -176 i386 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -177 i386 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait -178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo -179 i386 rt_sigsuspend sys_rt_sigsuspend -180 i386 pread64 sys_pread64 compat_sys_x86_pread -181 i386 pwrite64 sys_pwrite64 compat_sys_x86_pwrite -182 i386 chown sys_chown16 -183 i386 getcwd sys_getcwd -184 i386 capget sys_capget -185 i386 capset sys_capset -186 i386 sigaltstack sys_sigaltstack compat_sys_sigaltstack -187 i386 sendfile sys_sendfile compat_sys_sendfile +174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction +175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_sys_rt_sigprocmask +176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending +177 i386 rt_sigtimedwait sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait +178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo __ia32_compat_sys_rt_sigqueueinfo +179 i386 rt_sigsuspend sys_rt_sigsuspend __ia32_sys_rt_sigsuspend +180 i386 pread64 sys_pread64 __ia32_compat_sys_x86_pread +181 i386 pwrite64 sys_pwrite64 __ia32_compat_sys_x86_pwrite +182 i386 chown sys_chown16 __ia32_sys_chown16 +183 i386 getcwd sys_getcwd __ia32_sys_getcwd +184 i386 capget sys_capget __ia32_sys_capget +185 i386 capset sys_capset __ia32_sys_capset +186 i386 sigaltstack sys_sigaltstack __ia32_compat_sys_sigaltstack +187 i386 sendfile sys_sendfile __ia32_compat_sys_sendfile 188 i386 getpmsg 189 i386 putpmsg -190 i386 vfork sys_vfork -191 i386 ugetrlimit sys_getrlimit compat_sys_getrlimit -192 i386 mmap2 sys_mmap_pgoff -193 i386 truncate64 sys_truncate64 compat_sys_x86_truncate64 -194 i386 ftruncate64 sys_ftruncate64 compat_sys_x86_ftruncate64 -195 i386 stat64 sys_stat64 compat_sys_x86_stat64 -196 i386 lstat64 sys_lstat64 compat_sys_x86_lstat64 -197 i386 fstat64 sys_fstat64 compat_sys_x86_fstat64 -198 i386 lchown32 sys_lchown -199 i386 getuid32 sys_getuid -200 i386 getgid32 sys_getgid -201 i386 geteuid32 sys_geteuid -202 i386 getegid32 sys_getegid -203 i386 setreuid32 sys_setreuid -204 i386 setregid32 sys_setregid -205 i386 getgroups32 sys_getgroups -206 i386 setgroups32 sys_setgroups -207 i386 fchown32 sys_fchown -208 i386 setresuid32 sys_setresuid -209 i386 getresuid32 sys_getresuid -210 i386 setresgid32 sys_setresgid -211 i386 getresgid32 sys_getresgid -212 i386 chown32 
sys_chown -213 i386 setuid32 sys_setuid -214 i386 setgid32 sys_setgid -215 i386 setfsuid32 sys_setfsuid -216 i386 setfsgid32 sys_setfsgid -217 i386 pivot_root sys_pivot_root -218 i386 mincore sys_mincore -219 i386 madvise sys_madvise -220 i386 getdents64 sys_getdents64 -221 i386 fcntl64 sys_fcntl64 compat_sys_fcntl64 +190 i386 vfork sys_vfork __ia32_sys_vfork +191 i386 ugetrlimit sys_getrlimit __ia32_compat_sys_getrlimit +192 i386 mmap2 sys_mmap_pgoff __ia32_sys_mmap_pgoff +193 i386 truncate64 sys_truncate64 __ia32_compat_sys_x86_truncate64 +194 i386 ftruncate64 sys_ftruncate64 __ia32_compat_sys_x86_ftruncate64 +195 i386 stat64 sys_stat64 __ia32_compat_sys_x86_stat64 +196 i386 lstat64 sys_lstat64 __ia32_compat_sys_x86_lstat64 +197 i386 fstat64 sys_fstat64 __ia32_compat_sys_x86_fstat64 +198 i386 lchown32 sys_lchown __ia32_sys_lchown +199 i386 getuid32 sys_getuid __ia32_sys_getuid +200 i386 getgid32 sys_getgid __ia32_sys_getgid +201 i386 geteuid32 sys_geteuid __ia32_sys_geteuid +202 i386 getegid32 sys_getegid __ia32_sys_getegid +203 i386 setreuid32 sys_setreuid __ia32_sys_setreuid +204 i386 setregid32 sys_setregid __ia32_sys_setregid +205 i386 getgroups32 sys_getgroups __ia32_sys_getgroups +206 i386 setgroups32 sys_setgroups __ia32_sys_setgroups +207 i386 fchown32 sys_fchown __ia32_sys_fchown +208 i386 setresuid32 sys_setresuid __ia32_sys_setresuid +209 i386 getresuid32 sys_getresuid __ia32_sys_getresuid +210 i386 setresgid32 sys_setresgid __ia32_sys_setresgid +211 i386 getresgid32 sys_getresgid __ia32_sys_getresgid +212 i386 chown32 sys_chown __ia32_sys_chown +213 i386 setuid32 sys_setuid __ia32_sys_setuid +214 i386 setgid32 sys_setgid __ia32_sys_setgid +215 i386 setfsuid32 sys_setfsuid __ia32_sys_setfsuid +216 i386 setfsgid32 sys_setfsgid __ia32_sys_setfsgid +217 i386 pivot_root sys_pivot_root __ia32_sys_pivot_root +218 i386 mincore sys_mincore __ia32_sys_mincore +219 i386 madvise sys_madvise __ia32_sys_madvise +220 i386 getdents64 sys_getdents64 __ia32_sys_getdents64 +221 i386 fcntl64 sys_fcntl64 __ia32_compat_sys_fcntl64 # 222 is unused # 223 is unused -224 i386 gettid sys_gettid -225 i386 readahead sys_readahead compat_sys_x86_readahead -226 i386 setxattr sys_setxattr -227 i386 lsetxattr sys_lsetxattr -228 i386 fsetxattr sys_fsetxattr -229 i386 getxattr sys_getxattr -230 i386 lgetxattr sys_lgetxattr -231 i386 fgetxattr sys_fgetxattr -232 i386 listxattr sys_listxattr -233 i386 llistxattr sys_llistxattr -234 i386 flistxattr sys_flistxattr -235 i386 removexattr sys_removexattr -236 i386 lremovexattr sys_lremovexattr -237 i386 fremovexattr sys_fremovexattr -238 i386 tkill sys_tkill -239 i386 sendfile64 sys_sendfile64 -240 i386 futex sys_futex compat_sys_futex -241 i386 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity -242 i386 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity -243 i386 set_thread_area sys_set_thread_area -244 i386 get_thread_area sys_get_thread_area -245 i386 io_setup sys_io_setup compat_sys_io_setup -246 i386 io_destroy sys_io_destroy -247 i386 io_getevents sys_io_getevents compat_sys_io_getevents -248 i386 io_submit sys_io_submit compat_sys_io_submit -249 i386 io_cancel sys_io_cancel -250 i386 fadvise64 sys_fadvise64 compat_sys_x86_fadvise64 +224 i386 gettid sys_gettid __ia32_sys_gettid +225 i386 readahead sys_readahead __ia32_compat_sys_x86_readahead +226 i386 setxattr sys_setxattr __ia32_sys_setxattr +227 i386 lsetxattr sys_lsetxattr __ia32_sys_lsetxattr +228 i386 fsetxattr sys_fsetxattr __ia32_sys_fsetxattr +229 i386 getxattr 
sys_getxattr __ia32_sys_getxattr +230 i386 lgetxattr sys_lgetxattr __ia32_sys_lgetxattr +231 i386 fgetxattr sys_fgetxattr __ia32_sys_fgetxattr +232 i386 listxattr sys_listxattr __ia32_sys_listxattr +233 i386 llistxattr sys_llistxattr __ia32_sys_llistxattr +234 i386 flistxattr sys_flistxattr __ia32_sys_flistxattr +235 i386 removexattr sys_removexattr __ia32_sys_removexattr +236 i386 lremovexattr sys_lremovexattr __ia32_sys_lremovexattr +237 i386 fremovexattr sys_fremovexattr __ia32_sys_fremovexattr +238 i386 tkill sys_tkill __ia32_sys_tkill +239 i386 sendfile64 sys_sendfile64 __ia32_sys_sendfile64 +240 i386 futex sys_futex __ia32_compat_sys_futex +241 i386 sched_setaffinity sys_sched_setaffinity __ia32_compat_sys_sched_setaffinity +242 i386 sched_getaffinity sys_sched_getaffinity __ia32_compat_sys_sched_getaffinity +243 i386 set_thread_area sys_set_thread_area __ia32_sys_set_thread_area +244 i386 get_thread_area sys_get_thread_area __ia32_sys_get_thread_area +245 i386 io_setup sys_io_setup __ia32_compat_sys_io_setup +246 i386 io_destroy sys_io_destroy __ia32_sys_io_destroy +247 i386 io_getevents sys_io_getevents __ia32_compat_sys_io_getevents +248 i386 io_submit sys_io_submit __ia32_compat_sys_io_submit +249 i386 io_cancel sys_io_cancel __ia32_sys_io_cancel +250 i386 fadvise64 sys_fadvise64 __ia32_compat_sys_x86_fadvise64 # 251 is available for reuse (was briefly sys_set_zone_reclaim) -252 i386 exit_group sys_exit_group -253 i386 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie -254 i386 epoll_create sys_epoll_create -255 i386 epoll_ctl sys_epoll_ctl -256 i386 epoll_wait sys_epoll_wait -257 i386 remap_file_pages sys_remap_file_pages -258 i386 set_tid_address sys_set_tid_address -259 i386 timer_create sys_timer_create compat_sys_timer_create -260 i386 timer_settime sys_timer_settime compat_sys_timer_settime -261 i386 timer_gettime sys_timer_gettime compat_sys_timer_gettime -262 i386 timer_getoverrun sys_timer_getoverrun -263 i386 timer_delete sys_timer_delete -264 i386 clock_settime sys_clock_settime compat_sys_clock_settime -265 i386 clock_gettime sys_clock_gettime compat_sys_clock_gettime -266 i386 clock_getres sys_clock_getres compat_sys_clock_getres -267 i386 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep -268 i386 statfs64 sys_statfs64 compat_sys_statfs64 -269 i386 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -270 i386 tgkill sys_tgkill -271 i386 utimes sys_utimes compat_sys_utimes -272 i386 fadvise64_64 sys_fadvise64_64 compat_sys_x86_fadvise64_64 +252 i386 exit_group sys_exit_group __ia32_sys_exit_group +253 i386 lookup_dcookie sys_lookup_dcookie __ia32_compat_sys_lookup_dcookie +254 i386 epoll_create sys_epoll_create __ia32_sys_epoll_create +255 i386 epoll_ctl sys_epoll_ctl __ia32_sys_epoll_ctl +256 i386 epoll_wait sys_epoll_wait __ia32_sys_epoll_wait +257 i386 remap_file_pages sys_remap_file_pages __ia32_sys_remap_file_pages +258 i386 set_tid_address sys_set_tid_address __ia32_sys_set_tid_address +259 i386 timer_create sys_timer_create __ia32_compat_sys_timer_create +260 i386 timer_settime sys_timer_settime __ia32_compat_sys_timer_settime +261 i386 timer_gettime sys_timer_gettime __ia32_compat_sys_timer_gettime +262 i386 timer_getoverrun sys_timer_getoverrun __ia32_sys_timer_getoverrun +263 i386 timer_delete sys_timer_delete __ia32_sys_timer_delete +264 i386 clock_settime sys_clock_settime __ia32_compat_sys_clock_settime +265 i386 clock_gettime sys_clock_gettime __ia32_compat_sys_clock_gettime +266 i386 clock_getres sys_clock_getres 
__ia32_compat_sys_clock_getres +267 i386 clock_nanosleep sys_clock_nanosleep __ia32_compat_sys_clock_nanosleep +268 i386 statfs64 sys_statfs64 __ia32_compat_sys_statfs64 +269 i386 fstatfs64 sys_fstatfs64 __ia32_compat_sys_fstatfs64 +270 i386 tgkill sys_tgkill __ia32_sys_tgkill +271 i386 utimes sys_utimes __ia32_compat_sys_utimes +272 i386 fadvise64_64 sys_fadvise64_64 __ia32_compat_sys_x86_fadvise64_64 273 i386 vserver -274 i386 mbind sys_mbind -275 i386 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy -276 i386 set_mempolicy sys_set_mempolicy -277 i386 mq_open sys_mq_open compat_sys_mq_open -278 i386 mq_unlink sys_mq_unlink -279 i386 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -280 i386 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive -281 i386 mq_notify sys_mq_notify compat_sys_mq_notify -282 i386 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr -283 i386 kexec_load sys_kexec_load compat_sys_kexec_load -284 i386 waitid sys_waitid compat_sys_waitid +274 i386 mbind sys_mbind __ia32_sys_mbind +275 i386 get_mempolicy sys_get_mempolicy __ia32_compat_sys_get_mempolicy +276 i386 set_mempolicy sys_set_mempolicy __ia32_sys_set_mempolicy +277 i386 mq_open sys_mq_open __ia32_compat_sys_mq_open +278 i386 mq_unlink sys_mq_unlink __ia32_sys_mq_unlink +279 i386 mq_timedsend sys_mq_timedsend __ia32_compat_sys_mq_timedsend +280 i386 mq_timedreceive sys_mq_timedreceive __ia32_compat_sys_mq_timedreceive +281 i386 mq_notify sys_mq_notify __ia32_compat_sys_mq_notify +282 i386 mq_getsetattr sys_mq_getsetattr __ia32_compat_sys_mq_getsetattr +283 i386 kexec_load sys_kexec_load __ia32_compat_sys_kexec_load +284 i386 waitid sys_waitid __ia32_compat_sys_waitid # 285 sys_setaltroot -286 i386 add_key sys_add_key -287 i386 request_key sys_request_key -288 i386 keyctl sys_keyctl compat_sys_keyctl -289 i386 ioprio_set sys_ioprio_set -290 i386 ioprio_get sys_ioprio_get -291 i386 inotify_init sys_inotify_init -292 i386 inotify_add_watch sys_inotify_add_watch -293 i386 inotify_rm_watch sys_inotify_rm_watch -294 i386 migrate_pages sys_migrate_pages -295 i386 openat sys_openat compat_sys_openat -296 i386 mkdirat sys_mkdirat -297 i386 mknodat sys_mknodat -298 i386 fchownat sys_fchownat -299 i386 futimesat sys_futimesat compat_sys_futimesat -300 i386 fstatat64 sys_fstatat64 compat_sys_x86_fstatat -301 i386 unlinkat sys_unlinkat -302 i386 renameat sys_renameat -303 i386 linkat sys_linkat -304 i386 symlinkat sys_symlinkat -305 i386 readlinkat sys_readlinkat -306 i386 fchmodat sys_fchmodat -307 i386 faccessat sys_faccessat -308 i386 pselect6 sys_pselect6 compat_sys_pselect6 -309 i386 ppoll sys_ppoll compat_sys_ppoll -310 i386 unshare sys_unshare -311 i386 set_robust_list sys_set_robust_list compat_sys_set_robust_list -312 i386 get_robust_list sys_get_robust_list compat_sys_get_robust_list -313 i386 splice sys_splice -314 i386 sync_file_range sys_sync_file_range compat_sys_x86_sync_file_range -315 i386 tee sys_tee -316 i386 vmsplice sys_vmsplice compat_sys_vmsplice -317 i386 move_pages sys_move_pages compat_sys_move_pages -318 i386 getcpu sys_getcpu -319 i386 epoll_pwait sys_epoll_pwait -320 i386 utimensat sys_utimensat compat_sys_utimensat -321 i386 signalfd sys_signalfd compat_sys_signalfd -322 i386 timerfd_create sys_timerfd_create -323 i386 eventfd sys_eventfd -324 i386 fallocate sys_fallocate compat_sys_x86_fallocate -325 i386 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime -326 i386 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime -327 i386 signalfd4 
sys_signalfd4 compat_sys_signalfd4 -328 i386 eventfd2 sys_eventfd2 -329 i386 epoll_create1 sys_epoll_create1 -330 i386 dup3 sys_dup3 -331 i386 pipe2 sys_pipe2 -332 i386 inotify_init1 sys_inotify_init1 -333 i386 preadv sys_preadv compat_sys_preadv -334 i386 pwritev sys_pwritev compat_sys_pwritev -335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo -336 i386 perf_event_open sys_perf_event_open -337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg -338 i386 fanotify_init sys_fanotify_init -339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark -340 i386 prlimit64 sys_prlimit64 -341 i386 name_to_handle_at sys_name_to_handle_at -342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at -343 i386 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime -344 i386 syncfs sys_syncfs -345 i386 sendmmsg sys_sendmmsg compat_sys_sendmmsg -346 i386 setns sys_setns -347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv -348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev -349 i386 kcmp sys_kcmp -350 i386 finit_module sys_finit_module -351 i386 sched_setattr sys_sched_setattr -352 i386 sched_getattr sys_sched_getattr -353 i386 renameat2 sys_renameat2 -354 i386 seccomp sys_seccomp -355 i386 getrandom sys_getrandom -356 i386 memfd_create sys_memfd_create -357 i386 bpf sys_bpf -358 i386 execveat sys_execveat compat_sys_execveat -359 i386 socket sys_socket -360 i386 socketpair sys_socketpair -361 i386 bind sys_bind -362 i386 connect sys_connect -363 i386 listen sys_listen -364 i386 accept4 sys_accept4 -365 i386 getsockopt sys_getsockopt compat_sys_getsockopt -366 i386 setsockopt sys_setsockopt compat_sys_setsockopt -367 i386 getsockname sys_getsockname -368 i386 getpeername sys_getpeername -369 i386 sendto sys_sendto -370 i386 sendmsg sys_sendmsg compat_sys_sendmsg -371 i386 recvfrom sys_recvfrom compat_sys_recvfrom -372 i386 recvmsg sys_recvmsg compat_sys_recvmsg -373 i386 shutdown sys_shutdown -374 i386 userfaultfd sys_userfaultfd -375 i386 membarrier sys_membarrier -376 i386 mlock2 sys_mlock2 -377 i386 copy_file_range sys_copy_file_range -378 i386 preadv2 sys_preadv2 compat_sys_preadv2 -379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2 -380 i386 pkey_mprotect sys_pkey_mprotect -381 i386 pkey_alloc sys_pkey_alloc -382 i386 pkey_free sys_pkey_free -383 i386 statx sys_statx -384 i386 arch_prctl sys_arch_prctl compat_sys_arch_prctl +286 i386 add_key sys_add_key __ia32_sys_add_key +287 i386 request_key sys_request_key __ia32_sys_request_key +288 i386 keyctl sys_keyctl __ia32_compat_sys_keyctl +289 i386 ioprio_set sys_ioprio_set __ia32_sys_ioprio_set +290 i386 ioprio_get sys_ioprio_get __ia32_sys_ioprio_get +291 i386 inotify_init sys_inotify_init __ia32_sys_inotify_init +292 i386 inotify_add_watch sys_inotify_add_watch __ia32_sys_inotify_add_watch +293 i386 inotify_rm_watch sys_inotify_rm_watch __ia32_sys_inotify_rm_watch +294 i386 migrate_pages sys_migrate_pages __ia32_sys_migrate_pages +295 i386 openat sys_openat __ia32_compat_sys_openat +296 i386 mkdirat sys_mkdirat __ia32_sys_mkdirat +297 i386 mknodat sys_mknodat __ia32_sys_mknodat +298 i386 fchownat sys_fchownat __ia32_sys_fchownat +299 i386 futimesat sys_futimesat __ia32_compat_sys_futimesat +300 i386 fstatat64 sys_fstatat64 __ia32_compat_sys_x86_fstatat +301 i386 unlinkat sys_unlinkat __ia32_sys_unlinkat +302 i386 renameat sys_renameat __ia32_sys_renameat +303 i386 linkat sys_linkat __ia32_sys_linkat +304 i386 symlinkat sys_symlinkat __ia32_sys_symlinkat 
+305 i386 readlinkat sys_readlinkat __ia32_sys_readlinkat +306 i386 fchmodat sys_fchmodat __ia32_sys_fchmodat +307 i386 faccessat sys_faccessat __ia32_sys_faccessat +308 i386 pselect6 sys_pselect6 __ia32_compat_sys_pselect6 +309 i386 ppoll sys_ppoll __ia32_compat_sys_ppoll +310 i386 unshare sys_unshare __ia32_sys_unshare +311 i386 set_robust_list sys_set_robust_list __ia32_compat_sys_set_robust_list +312 i386 get_robust_list sys_get_robust_list __ia32_compat_sys_get_robust_list +313 i386 splice sys_splice __ia32_sys_splice +314 i386 sync_file_range sys_sync_file_range __ia32_compat_sys_x86_sync_file_range +315 i386 tee sys_tee __ia32_sys_tee +316 i386 vmsplice sys_vmsplice __ia32_compat_sys_vmsplice +317 i386 move_pages sys_move_pages __ia32_compat_sys_move_pages +318 i386 getcpu sys_getcpu __ia32_sys_getcpu +319 i386 epoll_pwait sys_epoll_pwait __ia32_sys_epoll_pwait +320 i386 utimensat sys_utimensat __ia32_compat_sys_utimensat +321 i386 signalfd sys_signalfd __ia32_compat_sys_signalfd +322 i386 timerfd_create sys_timerfd_create __ia32_sys_timerfd_create +323 i386 eventfd sys_eventfd __ia32_sys_eventfd +324 i386 fallocate sys_fallocate __ia32_compat_sys_x86_fallocate +325 i386 timerfd_settime sys_timerfd_settime __ia32_compat_sys_timerfd_settime +326 i386 timerfd_gettime sys_timerfd_gettime __ia32_compat_sys_timerfd_gettime +327 i386 signalfd4 sys_signalfd4 __ia32_compat_sys_signalfd4 +328 i386 eventfd2 sys_eventfd2 __ia32_sys_eventfd2 +329 i386 epoll_create1 sys_epoll_create1 __ia32_sys_epoll_create1 +330 i386 dup3 sys_dup3 __ia32_sys_dup3 +331 i386 pipe2 sys_pipe2 __ia32_sys_pipe2 +332 i386 inotify_init1 sys_inotify_init1 __ia32_sys_inotify_init1 +333 i386 preadv sys_preadv __ia32_compat_sys_preadv +334 i386 pwritev sys_pwritev __ia32_compat_sys_pwritev +335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo __ia32_compat_sys_rt_tgsigqueueinfo +336 i386 perf_event_open sys_perf_event_open __ia32_sys_perf_event_open +337 i386 recvmmsg sys_recvmmsg __ia32_compat_sys_recvmmsg +338 i386 fanotify_init sys_fanotify_init __ia32_sys_fanotify_init +339 i386 fanotify_mark sys_fanotify_mark __ia32_compat_sys_fanotify_mark +340 i386 prlimit64 sys_prlimit64 __ia32_sys_prlimit64 +341 i386 name_to_handle_at sys_name_to_handle_at __ia32_sys_name_to_handle_at +342 i386 open_by_handle_at sys_open_by_handle_at __ia32_compat_sys_open_by_handle_at +343 i386 clock_adjtime sys_clock_adjtime __ia32_compat_sys_clock_adjtime +344 i386 syncfs sys_syncfs __ia32_sys_syncfs +345 i386 sendmmsg sys_sendmmsg __ia32_compat_sys_sendmmsg +346 i386 setns sys_setns __ia32_sys_setns +347 i386 process_vm_readv sys_process_vm_readv __ia32_compat_sys_process_vm_readv +348 i386 process_vm_writev sys_process_vm_writev __ia32_compat_sys_process_vm_writev +349 i386 kcmp sys_kcmp __ia32_sys_kcmp +350 i386 finit_module sys_finit_module __ia32_sys_finit_module +351 i386 sched_setattr sys_sched_setattr __ia32_sys_sched_setattr +352 i386 sched_getattr sys_sched_getattr __ia32_sys_sched_getattr +353 i386 renameat2 sys_renameat2 __ia32_sys_renameat2 +354 i386 seccomp sys_seccomp __ia32_sys_seccomp +355 i386 getrandom sys_getrandom __ia32_sys_getrandom +356 i386 memfd_create sys_memfd_create __ia32_sys_memfd_create +357 i386 bpf sys_bpf __ia32_sys_bpf +358 i386 execveat sys_execveat __ia32_compat_sys_execveat +359 i386 socket sys_socket __ia32_sys_socket +360 i386 socketpair sys_socketpair __ia32_sys_socketpair +361 i386 bind sys_bind __ia32_sys_bind +362 i386 connect sys_connect __ia32_sys_connect +363 i386 listen sys_listen __ia32_sys_listen 
+364 i386 accept4 sys_accept4 __ia32_sys_accept4 +365 i386 getsockopt sys_getsockopt __ia32_compat_sys_getsockopt +366 i386 setsockopt sys_setsockopt __ia32_compat_sys_setsockopt +367 i386 getsockname sys_getsockname __ia32_sys_getsockname +368 i386 getpeername sys_getpeername __ia32_sys_getpeername +369 i386 sendto sys_sendto __ia32_sys_sendto +370 i386 sendmsg sys_sendmsg __ia32_compat_sys_sendmsg +371 i386 recvfrom sys_recvfrom __ia32_compat_sys_recvfrom +372 i386 recvmsg sys_recvmsg __ia32_compat_sys_recvmsg +373 i386 shutdown sys_shutdown __ia32_sys_shutdown +374 i386 userfaultfd sys_userfaultfd __ia32_sys_userfaultfd +375 i386 membarrier sys_membarrier __ia32_sys_membarrier +376 i386 mlock2 sys_mlock2 __ia32_sys_mlock2 +377 i386 copy_file_range sys_copy_file_range __ia32_sys_copy_file_range +378 i386 preadv2 sys_preadv2 __ia32_compat_sys_preadv2 +379 i386 pwritev2 sys_pwritev2 __ia32_compat_sys_pwritev2 +380 i386 pkey_mprotect sys_pkey_mprotect __ia32_sys_pkey_mprotect +381 i386 pkey_alloc sys_pkey_alloc __ia32_sys_pkey_alloc +382 i386 pkey_free sys_pkey_free __ia32_sys_pkey_free +383 i386 statx sys_statx __ia32_sys_statx +384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 5aef183e2f85..4dfe42666d0c 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -4,379 +4,383 @@ # The format is: # <number> <abi> <name> <entry point> # +# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls +# # The abi is "common", "64" or "x32" for this file. # -0 common read sys_read -1 common write sys_write -2 common open sys_open -3 common close sys_close -4 common stat sys_newstat -5 common fstat sys_newfstat -6 common lstat sys_newlstat -7 common poll sys_poll -8 common lseek sys_lseek -9 common mmap sys_mmap -10 common mprotect sys_mprotect -11 common munmap sys_munmap -12 common brk sys_brk -13 64 rt_sigaction sys_rt_sigaction -14 common rt_sigprocmask sys_rt_sigprocmask -15 64 rt_sigreturn sys_rt_sigreturn/ptregs -16 64 ioctl sys_ioctl -17 common pread64 sys_pread64 -18 common pwrite64 sys_pwrite64 -19 64 readv sys_readv -20 64 writev sys_writev -21 common access sys_access -22 common pipe sys_pipe -23 common select sys_select -24 common sched_yield sys_sched_yield -25 common mremap sys_mremap -26 common msync sys_msync -27 common mincore sys_mincore -28 common madvise sys_madvise -29 common shmget sys_shmget -30 common shmat sys_shmat -31 common shmctl sys_shmctl -32 common dup sys_dup -33 common dup2 sys_dup2 -34 common pause sys_pause -35 common nanosleep sys_nanosleep -36 common getitimer sys_getitimer -37 common alarm sys_alarm -38 common setitimer sys_setitimer -39 common getpid sys_getpid -40 common sendfile sys_sendfile64 -41 common socket sys_socket -42 common connect sys_connect -43 common accept sys_accept -44 common sendto sys_sendto -45 64 recvfrom sys_recvfrom -46 64 sendmsg sys_sendmsg -47 64 recvmsg sys_recvmsg -48 common shutdown sys_shutdown -49 common bind sys_bind -50 common listen sys_listen -51 common getsockname sys_getsockname -52 common getpeername sys_getpeername -53 common socketpair sys_socketpair -54 64 setsockopt sys_setsockopt -55 64 getsockopt sys_getsockopt -56 common clone sys_clone/ptregs -57 common fork sys_fork/ptregs -58 common vfork sys_vfork/ptregs -59 64 execve sys_execve/ptregs -60 common exit sys_exit -61 common wait4 sys_wait4 -62 common kill sys_kill -63 common uname sys_newuname 
-64 common semget sys_semget -65 common semop sys_semop -66 common semctl sys_semctl -67 common shmdt sys_shmdt -68 common msgget sys_msgget -69 common msgsnd sys_msgsnd -70 common msgrcv sys_msgrcv -71 common msgctl sys_msgctl -72 common fcntl sys_fcntl -73 common flock sys_flock -74 common fsync sys_fsync -75 common fdatasync sys_fdatasync -76 common truncate sys_truncate -77 common ftruncate sys_ftruncate -78 common getdents sys_getdents -79 common getcwd sys_getcwd -80 common chdir sys_chdir -81 common fchdir sys_fchdir -82 common rename sys_rename -83 common mkdir sys_mkdir -84 common rmdir sys_rmdir -85 common creat sys_creat -86 common link sys_link -87 common unlink sys_unlink -88 common symlink sys_symlink -89 common readlink sys_readlink -90 common chmod sys_chmod -91 common fchmod sys_fchmod -92 common chown sys_chown -93 common fchown sys_fchown -94 common lchown sys_lchown -95 common umask sys_umask -96 common gettimeofday sys_gettimeofday -97 common getrlimit sys_getrlimit -98 common getrusage sys_getrusage -99 common sysinfo sys_sysinfo -100 common times sys_times -101 64 ptrace sys_ptrace -102 common getuid sys_getuid -103 common syslog sys_syslog -104 common getgid sys_getgid -105 common setuid sys_setuid -106 common setgid sys_setgid -107 common geteuid sys_geteuid -108 common getegid sys_getegid -109 common setpgid sys_setpgid -110 common getppid sys_getppid -111 common getpgrp sys_getpgrp -112 common setsid sys_setsid -113 common setreuid sys_setreuid -114 common setregid sys_setregid -115 common getgroups sys_getgroups -116 common setgroups sys_setgroups -117 common setresuid sys_setresuid -118 common getresuid sys_getresuid -119 common setresgid sys_setresgid -120 common getresgid sys_getresgid -121 common getpgid sys_getpgid -122 common setfsuid sys_setfsuid -123 common setfsgid sys_setfsgid -124 common getsid sys_getsid -125 common capget sys_capget -126 common capset sys_capset -127 64 rt_sigpending sys_rt_sigpending -128 64 rt_sigtimedwait sys_rt_sigtimedwait -129 64 rt_sigqueueinfo sys_rt_sigqueueinfo -130 common rt_sigsuspend sys_rt_sigsuspend -131 64 sigaltstack sys_sigaltstack -132 common utime sys_utime -133 common mknod sys_mknod +0 common read __x64_sys_read +1 common write __x64_sys_write +2 common open __x64_sys_open +3 common close __x64_sys_close +4 common stat __x64_sys_newstat +5 common fstat __x64_sys_newfstat +6 common lstat __x64_sys_newlstat +7 common poll __x64_sys_poll +8 common lseek __x64_sys_lseek +9 common mmap __x64_sys_mmap +10 common mprotect __x64_sys_mprotect +11 common munmap __x64_sys_munmap +12 common brk __x64_sys_brk +13 64 rt_sigaction __x64_sys_rt_sigaction +14 common rt_sigprocmask __x64_sys_rt_sigprocmask +15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs +16 64 ioctl __x64_sys_ioctl +17 common pread64 __x64_sys_pread64 +18 common pwrite64 __x64_sys_pwrite64 +19 64 readv __x64_sys_readv +20 64 writev __x64_sys_writev +21 common access __x64_sys_access +22 common pipe __x64_sys_pipe +23 common select __x64_sys_select +24 common sched_yield __x64_sys_sched_yield +25 common mremap __x64_sys_mremap +26 common msync __x64_sys_msync +27 common mincore __x64_sys_mincore +28 common madvise __x64_sys_madvise +29 common shmget __x64_sys_shmget +30 common shmat __x64_sys_shmat +31 common shmctl __x64_sys_shmctl +32 common dup __x64_sys_dup +33 common dup2 __x64_sys_dup2 +34 common pause __x64_sys_pause +35 common nanosleep __x64_sys_nanosleep +36 common getitimer __x64_sys_getitimer +37 common alarm __x64_sys_alarm +38 common setitimer 
__x64_sys_setitimer +39 common getpid __x64_sys_getpid +40 common sendfile __x64_sys_sendfile64 +41 common socket __x64_sys_socket +42 common connect __x64_sys_connect +43 common accept __x64_sys_accept +44 common sendto __x64_sys_sendto +45 64 recvfrom __x64_sys_recvfrom +46 64 sendmsg __x64_sys_sendmsg +47 64 recvmsg __x64_sys_recvmsg +48 common shutdown __x64_sys_shutdown +49 common bind __x64_sys_bind +50 common listen __x64_sys_listen +51 common getsockname __x64_sys_getsockname +52 common getpeername __x64_sys_getpeername +53 common socketpair __x64_sys_socketpair +54 64 setsockopt __x64_sys_setsockopt +55 64 getsockopt __x64_sys_getsockopt +56 common clone __x64_sys_clone/ptregs +57 common fork __x64_sys_fork/ptregs +58 common vfork __x64_sys_vfork/ptregs +59 64 execve __x64_sys_execve/ptregs +60 common exit __x64_sys_exit +61 common wait4 __x64_sys_wait4 +62 common kill __x64_sys_kill +63 common uname __x64_sys_newuname +64 common semget __x64_sys_semget +65 common semop __x64_sys_semop +66 common semctl __x64_sys_semctl +67 common shmdt __x64_sys_shmdt +68 common msgget __x64_sys_msgget +69 common msgsnd __x64_sys_msgsnd +70 common msgrcv __x64_sys_msgrcv +71 common msgctl __x64_sys_msgctl +72 common fcntl __x64_sys_fcntl +73 common flock __x64_sys_flock +74 common fsync __x64_sys_fsync +75 common fdatasync __x64_sys_fdatasync +76 common truncate __x64_sys_truncate +77 common ftruncate __x64_sys_ftruncate +78 common getdents __x64_sys_getdents +79 common getcwd __x64_sys_getcwd +80 common chdir __x64_sys_chdir +81 common fchdir __x64_sys_fchdir +82 common rename __x64_sys_rename +83 common mkdir __x64_sys_mkdir +84 common rmdir __x64_sys_rmdir +85 common creat __x64_sys_creat +86 common link __x64_sys_link +87 common unlink __x64_sys_unlink +88 common symlink __x64_sys_symlink +89 common readlink __x64_sys_readlink +90 common chmod __x64_sys_chmod +91 common fchmod __x64_sys_fchmod +92 common chown __x64_sys_chown +93 common fchown __x64_sys_fchown +94 common lchown __x64_sys_lchown +95 common umask __x64_sys_umask +96 common gettimeofday __x64_sys_gettimeofday +97 common getrlimit __x64_sys_getrlimit +98 common getrusage __x64_sys_getrusage +99 common sysinfo __x64_sys_sysinfo +100 common times __x64_sys_times +101 64 ptrace __x64_sys_ptrace +102 common getuid __x64_sys_getuid +103 common syslog __x64_sys_syslog +104 common getgid __x64_sys_getgid +105 common setuid __x64_sys_setuid +106 common setgid __x64_sys_setgid +107 common geteuid __x64_sys_geteuid +108 common getegid __x64_sys_getegid +109 common setpgid __x64_sys_setpgid +110 common getppid __x64_sys_getppid +111 common getpgrp __x64_sys_getpgrp +112 common setsid __x64_sys_setsid +113 common setreuid __x64_sys_setreuid +114 common setregid __x64_sys_setregid +115 common getgroups __x64_sys_getgroups +116 common setgroups __x64_sys_setgroups +117 common setresuid __x64_sys_setresuid +118 common getresuid __x64_sys_getresuid +119 common setresgid __x64_sys_setresgid +120 common getresgid __x64_sys_getresgid +121 common getpgid __x64_sys_getpgid +122 common setfsuid __x64_sys_setfsuid +123 common setfsgid __x64_sys_setfsgid +124 common getsid __x64_sys_getsid +125 common capget __x64_sys_capget +126 common capset __x64_sys_capset +127 64 rt_sigpending __x64_sys_rt_sigpending +128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait +129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo +130 common rt_sigsuspend __x64_sys_rt_sigsuspend +131 64 sigaltstack __x64_sys_sigaltstack +132 common utime __x64_sys_utime +133 common mknod 
__x64_sys_mknod 134 64 uselib -135 common personality sys_personality -136 common ustat sys_ustat -137 common statfs sys_statfs -138 common fstatfs sys_fstatfs -139 common sysfs sys_sysfs -140 common getpriority sys_getpriority -141 common setpriority sys_setpriority -142 common sched_setparam sys_sched_setparam -143 common sched_getparam sys_sched_getparam -144 common sched_setscheduler sys_sched_setscheduler -145 common sched_getscheduler sys_sched_getscheduler -146 common sched_get_priority_max sys_sched_get_priority_max -147 common sched_get_priority_min sys_sched_get_priority_min -148 common sched_rr_get_interval sys_sched_rr_get_interval -149 common mlock sys_mlock -150 common munlock sys_munlock -151 common mlockall sys_mlockall -152 common munlockall sys_munlockall -153 common vhangup sys_vhangup -154 common modify_ldt sys_modify_ldt -155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl -157 common prctl sys_prctl -158 common arch_prctl sys_arch_prctl -159 common adjtimex sys_adjtimex -160 common setrlimit sys_setrlimit -161 common chroot sys_chroot -162 common sync sys_sync -163 common acct sys_acct -164 common settimeofday sys_settimeofday -165 common mount sys_mount -166 common umount2 sys_umount -167 common swapon sys_swapon -168 common swapoff sys_swapoff -169 common reboot sys_reboot -170 common sethostname sys_sethostname -171 common setdomainname sys_setdomainname -172 common iopl sys_iopl/ptregs -173 common ioperm sys_ioperm +135 common personality __x64_sys_personality +136 common ustat __x64_sys_ustat +137 common statfs __x64_sys_statfs +138 common fstatfs __x64_sys_fstatfs +139 common sysfs __x64_sys_sysfs +140 common getpriority __x64_sys_getpriority +141 common setpriority __x64_sys_setpriority +142 common sched_setparam __x64_sys_sched_setparam +143 common sched_getparam __x64_sys_sched_getparam +144 common sched_setscheduler __x64_sys_sched_setscheduler +145 common sched_getscheduler __x64_sys_sched_getscheduler +146 common sched_get_priority_max __x64_sys_sched_get_priority_max +147 common sched_get_priority_min __x64_sys_sched_get_priority_min +148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval +149 common mlock __x64_sys_mlock +150 common munlock __x64_sys_munlock +151 common mlockall __x64_sys_mlockall +152 common munlockall __x64_sys_munlockall +153 common vhangup __x64_sys_vhangup +154 common modify_ldt __x64_sys_modify_ldt +155 common pivot_root __x64_sys_pivot_root +156 64 _sysctl __x64_sys_sysctl +157 common prctl __x64_sys_prctl +158 common arch_prctl __x64_sys_arch_prctl +159 common adjtimex __x64_sys_adjtimex +160 common setrlimit __x64_sys_setrlimit +161 common chroot __x64_sys_chroot +162 common sync __x64_sys_sync +163 common acct __x64_sys_acct +164 common settimeofday __x64_sys_settimeofday +165 common mount __x64_sys_mount +166 common umount2 __x64_sys_umount +167 common swapon __x64_sys_swapon +168 common swapoff __x64_sys_swapoff +169 common reboot __x64_sys_reboot +170 common sethostname __x64_sys_sethostname +171 common setdomainname __x64_sys_setdomainname +172 common iopl __x64_sys_iopl/ptregs +173 common ioperm __x64_sys_ioperm 174 64 create_module -175 common init_module sys_init_module -176 common delete_module sys_delete_module +175 common init_module __x64_sys_init_module +176 common delete_module __x64_sys_delete_module 177 64 get_kernel_syms 178 64 query_module -179 common quotactl sys_quotactl +179 common quotactl __x64_sys_quotactl 180 64 nfsservctl 181 common getpmsg 182 common putpmsg 183 common afs_syscall 
184 common tuxcall 185 common security -186 common gettid sys_gettid -187 common readahead sys_readahead -188 common setxattr sys_setxattr -189 common lsetxattr sys_lsetxattr -190 common fsetxattr sys_fsetxattr -191 common getxattr sys_getxattr -192 common lgetxattr sys_lgetxattr -193 common fgetxattr sys_fgetxattr -194 common listxattr sys_listxattr -195 common llistxattr sys_llistxattr -196 common flistxattr sys_flistxattr -197 common removexattr sys_removexattr -198 common lremovexattr sys_lremovexattr -199 common fremovexattr sys_fremovexattr -200 common tkill sys_tkill -201 common time sys_time -202 common futex sys_futex -203 common sched_setaffinity sys_sched_setaffinity -204 common sched_getaffinity sys_sched_getaffinity +186 common gettid __x64_sys_gettid +187 common readahead __x64_sys_readahead +188 common setxattr __x64_sys_setxattr +189 common lsetxattr __x64_sys_lsetxattr +190 common fsetxattr __x64_sys_fsetxattr +191 common getxattr __x64_sys_getxattr +192 common lgetxattr __x64_sys_lgetxattr +193 common fgetxattr __x64_sys_fgetxattr +194 common listxattr __x64_sys_listxattr +195 common llistxattr __x64_sys_llistxattr +196 common flistxattr __x64_sys_flistxattr +197 common removexattr __x64_sys_removexattr +198 common lremovexattr __x64_sys_lremovexattr +199 common fremovexattr __x64_sys_fremovexattr +200 common tkill __x64_sys_tkill +201 common time __x64_sys_time +202 common futex __x64_sys_futex +203 common sched_setaffinity __x64_sys_sched_setaffinity +204 common sched_getaffinity __x64_sys_sched_getaffinity 205 64 set_thread_area -206 64 io_setup sys_io_setup -207 common io_destroy sys_io_destroy -208 common io_getevents sys_io_getevents -209 64 io_submit sys_io_submit -210 common io_cancel sys_io_cancel +206 64 io_setup __x64_sys_io_setup +207 common io_destroy __x64_sys_io_destroy +208 common io_getevents __x64_sys_io_getevents +209 64 io_submit __x64_sys_io_submit +210 common io_cancel __x64_sys_io_cancel 211 64 get_thread_area -212 common lookup_dcookie sys_lookup_dcookie -213 common epoll_create sys_epoll_create +212 common lookup_dcookie __x64_sys_lookup_dcookie +213 common epoll_create __x64_sys_epoll_create 214 64 epoll_ctl_old 215 64 epoll_wait_old -216 common remap_file_pages sys_remap_file_pages -217 common getdents64 sys_getdents64 -218 common set_tid_address sys_set_tid_address -219 common restart_syscall sys_restart_syscall -220 common semtimedop sys_semtimedop -221 common fadvise64 sys_fadvise64 -222 64 timer_create sys_timer_create -223 common timer_settime sys_timer_settime -224 common timer_gettime sys_timer_gettime -225 common timer_getoverrun sys_timer_getoverrun -226 common timer_delete sys_timer_delete -227 common clock_settime sys_clock_settime -228 common clock_gettime sys_clock_gettime -229 common clock_getres sys_clock_getres -230 common clock_nanosleep sys_clock_nanosleep -231 common exit_group sys_exit_group -232 common epoll_wait sys_epoll_wait -233 common epoll_ctl sys_epoll_ctl -234 common tgkill sys_tgkill -235 common utimes sys_utimes +216 common remap_file_pages __x64_sys_remap_file_pages +217 common getdents64 __x64_sys_getdents64 +218 common set_tid_address __x64_sys_set_tid_address +219 common restart_syscall __x64_sys_restart_syscall +220 common semtimedop __x64_sys_semtimedop +221 common fadvise64 __x64_sys_fadvise64 +222 64 timer_create __x64_sys_timer_create +223 common timer_settime __x64_sys_timer_settime +224 common timer_gettime __x64_sys_timer_gettime +225 common timer_getoverrun __x64_sys_timer_getoverrun +226 common 
timer_delete __x64_sys_timer_delete +227 common clock_settime __x64_sys_clock_settime +228 common clock_gettime __x64_sys_clock_gettime +229 common clock_getres __x64_sys_clock_getres +230 common clock_nanosleep __x64_sys_clock_nanosleep +231 common exit_group __x64_sys_exit_group +232 common epoll_wait __x64_sys_epoll_wait +233 common epoll_ctl __x64_sys_epoll_ctl +234 common tgkill __x64_sys_tgkill +235 common utimes __x64_sys_utimes 236 64 vserver -237 common mbind sys_mbind -238 common set_mempolicy sys_set_mempolicy -239 common get_mempolicy sys_get_mempolicy -240 common mq_open sys_mq_open -241 common mq_unlink sys_mq_unlink -242 common mq_timedsend sys_mq_timedsend -243 common mq_timedreceive sys_mq_timedreceive -244 64 mq_notify sys_mq_notify -245 common mq_getsetattr sys_mq_getsetattr -246 64 kexec_load sys_kexec_load -247 64 waitid sys_waitid -248 common add_key sys_add_key -249 common request_key sys_request_key -250 common keyctl sys_keyctl -251 common ioprio_set sys_ioprio_set -252 common ioprio_get sys_ioprio_get -253 common inotify_init sys_inotify_init -254 common inotify_add_watch sys_inotify_add_watch -255 common inotify_rm_watch sys_inotify_rm_watch -256 common migrate_pages sys_migrate_pages -257 common openat sys_openat -258 common mkdirat sys_mkdirat -259 common mknodat sys_mknodat -260 common fchownat sys_fchownat -261 common futimesat sys_futimesat -262 common newfstatat sys_newfstatat -263 common unlinkat sys_unlinkat -264 common renameat sys_renameat -265 common linkat sys_linkat -266 common symlinkat sys_symlinkat -267 common readlinkat sys_readlinkat -268 common fchmodat sys_fchmodat -269 common faccessat sys_faccessat -270 common pselect6 sys_pselect6 -271 common ppoll sys_ppoll -272 common unshare sys_unshare -273 64 set_robust_list sys_set_robust_list -274 64 get_robust_list sys_get_robust_list -275 common splice sys_splice -276 common tee sys_tee -277 common sync_file_range sys_sync_file_range -278 64 vmsplice sys_vmsplice -279 64 move_pages sys_move_pages -280 common utimensat sys_utimensat -281 common epoll_pwait sys_epoll_pwait -282 common signalfd sys_signalfd -283 common timerfd_create sys_timerfd_create -284 common eventfd sys_eventfd -285 common fallocate sys_fallocate -286 common timerfd_settime sys_timerfd_settime -287 common timerfd_gettime sys_timerfd_gettime -288 common accept4 sys_accept4 -289 common signalfd4 sys_signalfd4 -290 common eventfd2 sys_eventfd2 -291 common epoll_create1 sys_epoll_create1 -292 common dup3 sys_dup3 -293 common pipe2 sys_pipe2 -294 common inotify_init1 sys_inotify_init1 -295 64 preadv sys_preadv -296 64 pwritev sys_pwritev -297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo -298 common perf_event_open sys_perf_event_open -299 64 recvmmsg sys_recvmmsg -300 common fanotify_init sys_fanotify_init -301 common fanotify_mark sys_fanotify_mark -302 common prlimit64 sys_prlimit64 -303 common name_to_handle_at sys_name_to_handle_at -304 common open_by_handle_at sys_open_by_handle_at -305 common clock_adjtime sys_clock_adjtime -306 common syncfs sys_syncfs -307 64 sendmmsg sys_sendmmsg -308 common setns sys_setns -309 common getcpu sys_getcpu -310 64 process_vm_readv sys_process_vm_readv -311 64 process_vm_writev sys_process_vm_writev -312 common kcmp sys_kcmp -313 common finit_module sys_finit_module -314 common sched_setattr sys_sched_setattr -315 common sched_getattr sys_sched_getattr -316 common renameat2 sys_renameat2 -317 common seccomp sys_seccomp -318 common getrandom sys_getrandom -319 common memfd_create sys_memfd_create 
-320 common kexec_file_load sys_kexec_file_load -321 common bpf sys_bpf -322 64 execveat sys_execveat/ptregs -323 common userfaultfd sys_userfaultfd -324 common membarrier sys_membarrier -325 common mlock2 sys_mlock2 -326 common copy_file_range sys_copy_file_range -327 64 preadv2 sys_preadv2 -328 64 pwritev2 sys_pwritev2 -329 common pkey_mprotect sys_pkey_mprotect -330 common pkey_alloc sys_pkey_alloc -331 common pkey_free sys_pkey_free -332 common statx sys_statx +237 common mbind __x64_sys_mbind +238 common set_mempolicy __x64_sys_set_mempolicy +239 common get_mempolicy __x64_sys_get_mempolicy +240 common mq_open __x64_sys_mq_open +241 common mq_unlink __x64_sys_mq_unlink +242 common mq_timedsend __x64_sys_mq_timedsend +243 common mq_timedreceive __x64_sys_mq_timedreceive +244 64 mq_notify __x64_sys_mq_notify +245 common mq_getsetattr __x64_sys_mq_getsetattr +246 64 kexec_load __x64_sys_kexec_load +247 64 waitid __x64_sys_waitid +248 common add_key __x64_sys_add_key +249 common request_key __x64_sys_request_key +250 common keyctl __x64_sys_keyctl +251 common ioprio_set __x64_sys_ioprio_set +252 common ioprio_get __x64_sys_ioprio_get +253 common inotify_init __x64_sys_inotify_init +254 common inotify_add_watch __x64_sys_inotify_add_watch +255 common inotify_rm_watch __x64_sys_inotify_rm_watch +256 common migrate_pages __x64_sys_migrate_pages +257 common openat __x64_sys_openat +258 common mkdirat __x64_sys_mkdirat +259 common mknodat __x64_sys_mknodat +260 common fchownat __x64_sys_fchownat +261 common futimesat __x64_sys_futimesat +262 common newfstatat __x64_sys_newfstatat +263 common unlinkat __x64_sys_unlinkat +264 common renameat __x64_sys_renameat +265 common linkat __x64_sys_linkat +266 common symlinkat __x64_sys_symlinkat +267 common readlinkat __x64_sys_readlinkat +268 common fchmodat __x64_sys_fchmodat +269 common faccessat __x64_sys_faccessat +270 common pselect6 __x64_sys_pselect6 +271 common ppoll __x64_sys_ppoll +272 common unshare __x64_sys_unshare +273 64 set_robust_list __x64_sys_set_robust_list +274 64 get_robust_list __x64_sys_get_robust_list +275 common splice __x64_sys_splice +276 common tee __x64_sys_tee +277 common sync_file_range __x64_sys_sync_file_range +278 64 vmsplice __x64_sys_vmsplice +279 64 move_pages __x64_sys_move_pages +280 common utimensat __x64_sys_utimensat +281 common epoll_pwait __x64_sys_epoll_pwait +282 common signalfd __x64_sys_signalfd +283 common timerfd_create __x64_sys_timerfd_create +284 common eventfd __x64_sys_eventfd +285 common fallocate __x64_sys_fallocate +286 common timerfd_settime __x64_sys_timerfd_settime +287 common timerfd_gettime __x64_sys_timerfd_gettime +288 common accept4 __x64_sys_accept4 +289 common signalfd4 __x64_sys_signalfd4 +290 common eventfd2 __x64_sys_eventfd2 +291 common epoll_create1 __x64_sys_epoll_create1 +292 common dup3 __x64_sys_dup3 +293 common pipe2 __x64_sys_pipe2 +294 common inotify_init1 __x64_sys_inotify_init1 +295 64 preadv __x64_sys_preadv +296 64 pwritev __x64_sys_pwritev +297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo +298 common perf_event_open __x64_sys_perf_event_open +299 64 recvmmsg __x64_sys_recvmmsg +300 common fanotify_init __x64_sys_fanotify_init +301 common fanotify_mark __x64_sys_fanotify_mark +302 common prlimit64 __x64_sys_prlimit64 +303 common name_to_handle_at __x64_sys_name_to_handle_at +304 common open_by_handle_at __x64_sys_open_by_handle_at +305 common clock_adjtime __x64_sys_clock_adjtime +306 common syncfs __x64_sys_syncfs +307 64 sendmmsg __x64_sys_sendmmsg +308 common 
setns __x64_sys_setns +309 common getcpu __x64_sys_getcpu +310 64 process_vm_readv __x64_sys_process_vm_readv +311 64 process_vm_writev __x64_sys_process_vm_writev +312 common kcmp __x64_sys_kcmp +313 common finit_module __x64_sys_finit_module +314 common sched_setattr __x64_sys_sched_setattr +315 common sched_getattr __x64_sys_sched_getattr +316 common renameat2 __x64_sys_renameat2 +317 common seccomp __x64_sys_seccomp +318 common getrandom __x64_sys_getrandom +319 common memfd_create __x64_sys_memfd_create +320 common kexec_file_load __x64_sys_kexec_file_load +321 common bpf __x64_sys_bpf +322 64 execveat __x64_sys_execveat/ptregs +323 common userfaultfd __x64_sys_userfaultfd +324 common membarrier __x64_sys_membarrier +325 common mlock2 __x64_sys_mlock2 +326 common copy_file_range __x64_sys_copy_file_range +327 64 preadv2 __x64_sys_preadv2 +328 64 pwritev2 __x64_sys_pwritev2 +329 common pkey_mprotect __x64_sys_pkey_mprotect +330 common pkey_alloc __x64_sys_pkey_alloc +331 common pkey_free __x64_sys_pkey_free +332 common statx __x64_sys_statx # # x32-specific system call numbers start at 512 to avoid cache impact -# for native 64-bit operation. +# for native 64-bit operation. The __x32_compat_sys stubs are created +# on-the-fly for compat_sys_*() compatibility system calls if X86_X32 +# is defined. # -512 x32 rt_sigaction compat_sys_rt_sigaction +512 x32 rt_sigaction __x32_compat_sys_rt_sigaction 513 x32 rt_sigreturn sys32_x32_rt_sigreturn -514 x32 ioctl compat_sys_ioctl -515 x32 readv compat_sys_readv -516 x32 writev compat_sys_writev -517 x32 recvfrom compat_sys_recvfrom -518 x32 sendmsg compat_sys_sendmsg -519 x32 recvmsg compat_sys_recvmsg -520 x32 execve compat_sys_execve/ptregs -521 x32 ptrace compat_sys_ptrace -522 x32 rt_sigpending compat_sys_rt_sigpending -523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait -524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo -525 x32 sigaltstack compat_sys_sigaltstack -526 x32 timer_create compat_sys_timer_create -527 x32 mq_notify compat_sys_mq_notify -528 x32 kexec_load compat_sys_kexec_load -529 x32 waitid compat_sys_waitid -530 x32 set_robust_list compat_sys_set_robust_list -531 x32 get_robust_list compat_sys_get_robust_list -532 x32 vmsplice compat_sys_vmsplice -533 x32 move_pages compat_sys_move_pages -534 x32 preadv compat_sys_preadv64 -535 x32 pwritev compat_sys_pwritev64 -536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo -537 x32 recvmmsg compat_sys_recvmmsg -538 x32 sendmmsg compat_sys_sendmmsg -539 x32 process_vm_readv compat_sys_process_vm_readv -540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt -543 x32 io_setup compat_sys_io_setup -544 x32 io_submit compat_sys_io_submit -545 x32 execveat compat_sys_execveat/ptregs -546 x32 preadv2 compat_sys_preadv64v2 -547 x32 pwritev2 compat_sys_pwritev64v2 +514 x32 ioctl __x32_compat_sys_ioctl +515 x32 readv __x32_compat_sys_readv +516 x32 writev __x32_compat_sys_writev +517 x32 recvfrom __x32_compat_sys_recvfrom +518 x32 sendmsg __x32_compat_sys_sendmsg +519 x32 recvmsg __x32_compat_sys_recvmsg +520 x32 execve __x32_compat_sys_execve/ptregs +521 x32 ptrace __x32_compat_sys_ptrace +522 x32 rt_sigpending __x32_compat_sys_rt_sigpending +523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait +524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo +525 x32 sigaltstack __x32_compat_sys_sigaltstack +526 x32 timer_create __x32_compat_sys_timer_create +527 x32 mq_notify __x32_compat_sys_mq_notify +528 x32 
kexec_load __x32_compat_sys_kexec_load +529 x32 waitid __x32_compat_sys_waitid +530 x32 set_robust_list __x32_compat_sys_set_robust_list +531 x32 get_robust_list __x32_compat_sys_get_robust_list +532 x32 vmsplice __x32_compat_sys_vmsplice +533 x32 move_pages __x32_compat_sys_move_pages +534 x32 preadv __x32_compat_sys_preadv64 +535 x32 pwritev __x32_compat_sys_pwritev64 +536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo +537 x32 recvmmsg __x32_compat_sys_recvmmsg +538 x32 sendmmsg __x32_compat_sys_sendmmsg +539 x32 process_vm_readv __x32_compat_sys_process_vm_readv +540 x32 process_vm_writev __x32_compat_sys_process_vm_writev +541 x32 setsockopt __x32_compat_sys_setsockopt +542 x32 getsockopt __x32_compat_sys_getsockopt +543 x32 io_setup __x32_compat_sys_io_setup +544 x32 io_submit __x32_compat_sys_io_submit +545 x32 execveat __x32_compat_sys_execveat/ptregs +546 x32 preadv2 __x32_compat_sys_preadv64v2 +547 x32 pwritev2 __x32_compat_sys_pwritev64v2 diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh index d71ef4bd3615..94fcd1951aca 100644 --- a/arch/x86/entry/syscalls/syscalltbl.sh +++ b/arch/x86/entry/syscalls/syscalltbl.sh @@ -25,15 +25,27 @@ emit() { nr="$2" entry="$3" compat="$4" + umlentry="" if [ "$abi" = "64" -a -n "$compat" ]; then echo "a compat entry for a 64-bit syscall makes no sense" >&2 exit 1 fi + # For CONFIG_UML, we need to strip the __x64_sys prefix + if [ "$abi" = "64" -a "${entry}" != "${entry#__x64_sys}" ]; then + umlentry="sys${entry#__x64_sys}" + fi + if [ -z "$compat" ]; then - if [ -n "$entry" ]; then + if [ -n "$entry" -a -z "$umlentry" ]; then syscall_macro "$abi" "$nr" "$entry" + elif [ -n "$umlentry" ]; then # implies -n "$entry" + echo "#ifdef CONFIG_X86" + syscall_macro "$abi" "$nr" "$entry" + echo "#else /* CONFIG_UML */" + syscall_macro "$abi" "$nr" "$umlentry" + echo "#endif" fi else echo "#ifdef CONFIG_X86_32" diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile index 1943aebadede..d998a487c9b1 100644 --- a/arch/x86/entry/vdso/Makefile +++ b/arch/x86/entry/vdso/Makefile @@ -42,9 +42,7 @@ vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c) vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg) obj-y += $(vdso_img_objs) targets += $(vdso_img_cfiles) -targets += $(vdso_img_sodbg) -.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \ - $(vdso_img-y:%=$(obj)/vdso%.so) +targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so) export CPPFLAGS_vdso.lds += -P -C diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index 317be365bce3..70b7845434cb 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -127,6 +127,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) int vsyscall_nr, syscall_nr, tmp; int prev_sig_on_uaccess_err; long ret; + unsigned long orig_dx; /* * No point in checking CS -- the only way to get here is a user mode @@ -227,19 +228,22 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) ret = -EFAULT; switch (vsyscall_nr) { case 0: - ret = sys_gettimeofday( - (struct timeval __user *)regs->di, - (struct timezone __user *)regs->si); + /* this decodes regs->di and regs->si on its own */ + ret = __x64_sys_gettimeofday(regs); break; case 1: - ret = sys_time((time_t __user *)regs->di); + /* this decodes regs->di on its own */ + ret = __x64_sys_time(regs); break; case 2: - ret = sys_getcpu((unsigned __user *)regs->di, - (unsigned __user *)regs->si, - NULL); + /* while we could clobber 
regs->dx, we didn't in the past... */ + orig_dx = regs->dx; + regs->dx = 0; + /* this decodes regs->di, regs->si and regs->dx on its own */ + ret = __x64_sys_getcpu(regs); + regs->dx = orig_dx; break; } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 607bf565a90c..707b2a96e516 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu) cpuc->lbr_sel = NULL; - flip_smm_bit(&x86_pmu.attr_freeze_on_smi); + if (x86_pmu.version > 1) + flip_smm_bit(&x86_pmu.attr_freeze_on_smi); if (!cpuc->shared_regs) return; @@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_dying = intel_pmu_cpu_dying, }; +static struct attribute *intel_pmu_attrs[]; + static __initconst const struct x86_pmu intel_pmu = { .name = "Intel", .handle_irq = intel_pmu_handle_irq, @@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = { .format_attrs = intel_arch3_formats_attr, .events_sysfs_show = intel_event_sysfs_show, + .attrs = intel_pmu_attrs, + .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, @@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void) x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); - - x86_pmu.attrs = intel_pmu_attrs; /* * Quirk: v2 perfmon does not report fixed-purpose events, so * assume at least 3 events, when not running in a hypervisor: diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index da6780122786..8a10a045b57b 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1153,7 +1153,6 @@ static void setup_pebs_sample_data(struct perf_event *event, if (pebs == NULL) return; - regs->flags &= ~PERF_EFLAGS_EXACT; sample_type = event->attr.sample_type; dsrc = sample_type & PERF_SAMPLE_DATA_SRC; @@ -1197,7 +1196,13 @@ static void setup_pebs_sample_data(struct perf_event *event, * and PMI. */ *regs = *iregs; - regs->flags = pebs->flags; + + /* + * Initialize regs_>flags from PEBS, + * Clear exact bit (which uses x86 EFLAGS Reserved bit 3), + * i.e., do not rely on it being zero: + */ + regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT; if (sample_type & PERF_SAMPLE_REGS_INTR) { regs->ax = pebs->ax; @@ -1217,10 +1222,6 @@ static void setup_pebs_sample_data(struct perf_event *event, regs->sp = pebs->sp; } - /* - * Preserve PERF_EFLAGS_VM from set_linear_ip(). - */ - regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); #ifndef CONFIG_X86_32 regs->r8 = pebs->r8; regs->r9 = pebs->r9; @@ -1234,20 +1235,33 @@ static void setup_pebs_sample_data(struct perf_event *event, } if (event->attr.precise_ip > 1) { - /* Haswell and later have the eventing IP, so use it: */ + /* + * Haswell and later processors have an 'eventing IP' + * (real IP) which fixes the off-by-1 skid in hardware. + * Use it when precise_ip >= 2 : + */ if (x86_pmu.intel_cap.pebs_format >= 2) { set_linear_ip(regs, pebs->real_ip); regs->flags |= PERF_EFLAGS_EXACT; } else { - /* Otherwise use PEBS off-by-1 IP: */ + /* Otherwise, use PEBS off-by-1 IP: */ set_linear_ip(regs, pebs->ip); - /* ... and try to fix it up using the LBR entries: */ + /* + * With precise_ip >= 2, try to fix up the off-by-1 IP + * using the LBR. 
If successful, the fixup function + * corrects regs->ip and calls set_linear_ip() on regs: + */ if (intel_pmu_pebs_fixup_ip(regs)) regs->flags |= PERF_EFLAGS_EXACT; } - } else + } else { + /* + * When precise_ip == 1, return the PEBS off-by-1 IP, + * no fixup attempted: + */ set_linear_ip(regs, pebs->ip); + } if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) && diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c98b943e58b4..77076a102e34 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +static struct intel_uncore_type bdx_uncore_sbox = { + .name = "sbox", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .event_ctl = HSWEP_S0_MSR_PMON_CTL0, + .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_SBOX_MSR_OFFSET, + .ops = &hswep_uncore_sbox_msr_ops, + .format_group = &hswep_uncore_sbox_format_group, +}; + +#define BDX_MSR_UNCORE_SBOX 3 + static struct intel_uncore_type *bdx_msr_uncores[] = { &bdx_uncore_ubox, &bdx_uncore_cbox, &hswep_uncore_pcu, + &bdx_uncore_sbox, NULL, }; @@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { void bdx_uncore_cpu_init(void) { + int pkg = topology_phys_to_logical_pkg(0); + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; + /* BDX-DE doesn't have SBOX */ + if (boot_cpu_data.x86_model == 86) { + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + /* Detect systems with no SBOXes */ + } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { + struct pci_dev *pdev; + u32 capid4; + + pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; + pci_read_config_dword(pdev, 0x94, &capid4); + if (((capid4 >> 6) & 0x3) == 0) + bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + } hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } @@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), }, + { /* PCU.3 (for Capability registers) */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + HSWEP_PCI_PCU_3), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 40a3d3642f3a..08acd954f00e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -313,7 +313,7 @@ struct apic { /* Probe, setup and smpboot functions */ int (*probe)(void); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - int (*apic_id_valid)(int apicid); + int (*apic_id_valid)(u32 apicid); int (*apic_id_registered)(void); bool (*check_apicid_used)(physid_mask_t *map, int apicid); @@ -486,7 +486,7 @@ static inline unsigned int read_apic_id(void) return apic->get_apic_id(reg); } -extern int default_apic_id_valid(int apicid); +extern int default_apic_id_valid(u32 apicid); extern int default_acpi_madt_oem_check(char *, char *); extern void default_setup_apic_routing(void); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 386a6900e206..219faaec51df 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -136,7 +136,6 @@ #endif #ifndef 
__ASSEMBLY__ -#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. Otherwise the asm may be inserted before the frame pointer @@ -146,6 +145,5 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif -#endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index d554c11e01ff..578793e97431 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -320,6 +320,7 @@ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ +#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 09ad88572746..cc8f8fcf9b4a 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h @@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs); #endif /* CONFIG_FUNCTION_TRACER */ -#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS) +#ifndef __ASSEMBLY__ + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) +{ + /* + * Compare the symbol name with the system call name. Skip the + * "__x64_sys", "__ia32_sys" or simple "sys" prefix. + */ + return !strcmp(sym + 3, name + 3) || + (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) || + (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3)); +} + +#ifndef COMPILE_OFFSETS #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION) #include <asm/compat.h> @@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) return false; } #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */ -#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */ +#endif /* !COMPILE_OFFSETS */ +#endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FTRACE_H */ diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 404c5fdff859..548d90bbf919 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -34,11 +34,6 @@ * (0x80 is the syscall vector, 0x30-0x3f are for ISA) */ #define FIRST_EXTERNAL_VECTOR 0x20 -/* - * We start allocating at 0x21 to spread out vectors evenly between - * priority levels. (0x80 is the syscall vector) - */ -#define VECTOR_OFFSET_START 1 /* * Reserve the lowest usable vector (and hence lowest priority) 0x20 for @@ -119,8 +114,6 @@ #define FIRST_SYSTEM_VECTOR NR_VECTORS #endif -#define FPU_IRQ 13 - /* * Size the maximum number of interrupts. 
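
The arch_syscall_match_sym_name() helper added to asm/ftrace.h above is easiest to verify with concrete symbol names. The following stand-alone sketch mirrors its comparison logic in a plain user-space harness; the harness itself, including the name match_sym_name(), is made up purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Same comparison as the ftrace.h helper: skip the "__x64_sys",
 * "__ia32_sys" or plain "sys" prefix before comparing names. */
static bool match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym + 3, name + 3) ||
               (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
               (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
}

int main(void)
{
        /* plain "sys" prefix: both sides skip 3 characters */
        printf("%d\n", match_sym_name("sys_read", "sys_read"));        /* 1 */
        /* "__x64_sys_read" + 9 == "_read", "sys_read" + 3 == "_read" */
        printf("%d\n", match_sym_name("__x64_sys_read", "sys_read"));  /* 1 */
        /* "__ia32_sys_read" + 10 == "_read" */
        printf("%d\n", match_sym_name("__ia32_sys_read", "sys_read")); /* 1 */
        /* a different syscall never matches */
        printf("%d\n", match_sym_name("__x64_sys_write", "sys_read")); /* 0 */
        return 0;
}

This is why syscall tracing can keep matching names such as sys_read against the new prefixed stub symbols introduced by the pt_regs-based wrappers.
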
* diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h index b885a961a150..a34897aef2c2 100644 --- a/arch/x86/include/asm/jailhouse_para.h +++ b/arch/x86/include/asm/jailhouse_para.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL2.0 */ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Jailhouse paravirt detection diff --git a/arch/x86/include/asm/kexec-bzimage64.h b/arch/x86/include/asm/kexec-bzimage64.h index 9f07cff43705..df89ee7d3e9e 100644 --- a/arch/x86/include/asm/kexec-bzimage64.h +++ b/arch/x86/include/asm/kexec-bzimage64.h @@ -2,6 +2,6 @@ #ifndef _ASM_KEXEC_BZIMAGE64_H #define _ASM_KEXEC_BZIMAGE64_H -extern struct kexec_file_ops kexec_bzImage64_ops; +extern const struct kexec_file_ops kexec_bzImage64_ops; #endif /* _ASM_KEXE_BZIMAGE64_H */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 949c977bc4c9..c25775fad4ed 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1013,6 +1013,7 @@ struct kvm_x86_ops { bool (*has_wbinvd_exit)(void); + u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 89d5c8886c85..f1633de5a675 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -526,22 +526,39 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot) return protval; } +static inline pgprotval_t check_pgprot(pgprot_t pgprot) +{ + pgprotval_t massaged_val = massage_pgprot(pgprot); + + /* mmdebug.h can not be included here because of dependencies */ +#ifdef CONFIG_DEBUG_VM + WARN_ONCE(pgprot_val(pgprot) != massaged_val, + "attempted to set unsupported pgprot: %016llx " + "bits: %016llx supported: %016llx\n", + (u64)pgprot_val(pgprot), + (u64)pgprot_val(pgprot) ^ massaged_val, + (u64)__supported_pte_mask); +#endif + + return massaged_val; +} + static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) { return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + check_pgprot(pgprot)); } static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) { return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + check_pgprot(pgprot)); } static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot) { return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) | - massage_pgprot(pgprot)); + check_pgprot(pgprot)); } static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) @@ -553,7 +570,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * the newprot (if present): */ val &= _PAGE_CHG_MASK; - val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; + val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK; return __pte(val); } @@ -563,7 +580,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) pmdval_t val = pmd_val(pmd); val &= _HPAGE_CHG_MASK; - val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK; + val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK; return __pmd(val); } @@ -584,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) #define canon_pgprot(p) __pgprot(massage_pgprot(p)) +static inline pgprot_t arch_filter_pgprot(pgprot_t prot) +{ + return canon_pgprot(prot); +} + static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, enum page_cache_mode pcm, enum page_cache_mode new_pcm) diff --git a/arch/x86/include/asm/pgtable_64_types.h 
b/arch/x86/include/asm/pgtable_64_types.h index d5c21a382475..adb47552e6bb 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d; #define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) -#define __VMALLOC_BASE_L4 0xffffc90000000000 -#define __VMALLOC_BASE_L5 0xffa0000000000000 +#define __VMALLOC_BASE_L4 0xffffc90000000000UL +#define __VMALLOC_BASE_L5 0xffa0000000000000UL #define VMALLOC_SIZE_TB_L4 32UL #define VMALLOC_SIZE_TB_L5 12800UL -#define __VMEMMAP_BASE_L4 0xffffea0000000000 -#define __VMEMMAP_BASE_L5 0xffd4000000000000 +#define __VMEMMAP_BASE_L4 0xffffea0000000000UL +#define __VMEMMAP_BASE_L5 0xffd4000000000000UL #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index acfe755562a6..1e5a40673953 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -196,19 +196,21 @@ enum page_cache_mode { #define __PAGE_KERNEL_NOENC (__PAGE_KERNEL) #define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP) -#define PAGE_KERNEL __pgprot(__PAGE_KERNEL | _PAGE_ENC) -#define PAGE_KERNEL_NOENC __pgprot(__PAGE_KERNEL) -#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC) -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC) -#define PAGE_KERNEL_EXEC_NOENC __pgprot(__PAGE_KERNEL_EXEC) -#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC) -#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) -#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) -#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) -#define PAGE_KERNEL_VVAR __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) - -#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO) -#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE) +#define default_pgprot(x) __pgprot((x) & __default_kernel_pte_mask) + +#define PAGE_KERNEL default_pgprot(__PAGE_KERNEL | _PAGE_ENC) +#define PAGE_KERNEL_NOENC default_pgprot(__PAGE_KERNEL) +#define PAGE_KERNEL_RO default_pgprot(__PAGE_KERNEL_RO | _PAGE_ENC) +#define PAGE_KERNEL_EXEC default_pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC) +#define PAGE_KERNEL_EXEC_NOENC default_pgprot(__PAGE_KERNEL_EXEC) +#define PAGE_KERNEL_RX default_pgprot(__PAGE_KERNEL_RX | _PAGE_ENC) +#define PAGE_KERNEL_NOCACHE default_pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC) +#define PAGE_KERNEL_LARGE default_pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC) +#define PAGE_KERNEL_LARGE_EXEC default_pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC) +#define PAGE_KERNEL_VVAR default_pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC) + +#define PAGE_KERNEL_IO default_pgprot(__PAGE_KERNEL_IO) +#define PAGE_KERNEL_IO_NOCACHE default_pgprot(__PAGE_KERNEL_IO_NOCACHE) #endif /* __ASSEMBLY__ */ @@ -483,6 +485,7 @@ static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) typedef struct page *pgtable_t; extern pteval_t __supported_pte_mask; +extern pteval_t __default_kernel_pte_mask; extern void set_nx(void); extern int nx_enabled; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 4fa4206029e3..21a114914ba4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, extern void enable_sep_cpu(void); extern int sysenter_setup(void); -extern void 
early_trap_init(void); void early_trap_pf_init(void); /* Defined in head.S */ extern struct desc_ptr early_gdt_descr; -extern void cpu_set_gdt(int); extern void switch_to_new_gdt(int); extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h index 0b5ef05b2d2d..38a17f1d5c9d 100644 --- a/arch/x86/include/asm/pti.h +++ b/arch/x86/include/asm/pti.h @@ -6,8 +6,10 @@ #ifdef CONFIG_PAGE_TABLE_ISOLATION extern void pti_init(void); extern void pti_check_boottime_disable(void); +extern void pti_clone_kernel_text(void); #else static inline void pti_check_boottime_disable(void) { } +static inline void pti_clone_kernel_text(void) { } #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 03eedc21246d..d653139857af 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -20,9 +20,13 @@ #include <asm/thread_info.h> /* for TS_COMPAT */ #include <asm/unistd.h> +#ifdef CONFIG_X86_64 +typedef asmlinkage long (*sys_call_ptr_t)(const struct pt_regs *); +#else typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); +#endif /* CONFIG_X86_64 */ extern const sys_call_ptr_t sys_call_table[]; #if defined(CONFIG_X86_32) diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..e046a405743d --- /dev/null +++ b/arch/x86/include/asm/syscall_wrapper.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - x86 specific wrappers to syscall definitions + */ + +#ifndef _ASM_X86_SYSCALL_WRAPPER_H +#define _ASM_X86_SYSCALL_WRAPPER_H + +/* Mapping of registers to parameters for syscalls on x86-64 and x32 */ +#define SC_X86_64_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,regs->di,,regs->si,,regs->dx \ + ,,regs->r10,,regs->r8,,regs->r9) \ + +/* Mapping of registers to parameters for syscalls on i386 */ +#define SC_IA32_REGS_TO_ARGS(x, ...) \ + __MAP(x,__SC_ARGS \ + ,,(unsigned int)regs->bx,,(unsigned int)regs->cx \ + ,,(unsigned int)regs->dx,,(unsigned int)regs->si \ + ,,(unsigned int)regs->di,,(unsigned int)regs->bp) + +#ifdef CONFIG_IA32_EMULATION +/* + * For IA32 emulation, we need to handle "compat" syscalls *and* create + * additional wrappers (aptly named __ia32_sys_xyzzy) which decode the + * ia32 regs in the proper order for shared or "common" syscalls. As some + * syscalls may not be implemented, we need to expand COND_SYSCALL in + * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this + * case as well. + */ +#define __IA32_COMPAT_SYS_STUBx(x, name, ...) \ + asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\ + ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \ + asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\ + { \ + return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\ + } \ + +#define __IA32_SYS_STUBx(x, name, ...) 
\ + asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__ia32_sys##name, ERRNO); \ + asmlinkage long __ia32_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\ + } + +/* + * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias + * named __ia32_sys_*() + */ +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ + asmlinkage long __x64_sys_##sname(void) + +#define COND_SYSCALL(name) \ + cond_syscall(__x64_sys_##name); \ + cond_syscall(__ia32_sys_##name) + +#define SYS_NI(name) \ + SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \ + SYSCALL_ALIAS(__ia32_sys_##name, sys_ni_posix_timers) + +#else /* CONFIG_IA32_EMULATION */ +#define __IA32_COMPAT_SYS_STUBx(x, name, ...) +#define __IA32_SYS_STUBx(x, fullname, name, ...) +#endif /* CONFIG_IA32_EMULATION */ + + +#ifdef CONFIG_X86_X32 +/* + * For the x32 ABI, we need to create a stub for compat_sys_*() which is aware + * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common + * with x86_64 obviously do not need such care. + */ +#define __X32_COMPAT_SYS_STUBx(x, name, ...) \ + asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\ + ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \ + asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\ + { \ + return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\ + } \ + +#else /* CONFIG_X86_X32 */ +#define __X32_COMPAT_SYS_STUBx(x, name, ...) +#endif /* CONFIG_X86_X32 */ + + +#ifdef CONFIG_COMPAT +/* + * Compat means IA32_EMULATION and/or X86_X32. As they use a different + * mapping of registers to parameters, we need to generate stubs for each + * of them. + */ +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + __IA32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__) \ + __X32_COMPAT_SYS_STUBx(x, name, __VA_ARGS__) \ + static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + } \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +/* + * As some compat syscalls may not be implemented, we need to expand + * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in + * kernel/time/posix-stubs.c to cover this case as well. + */ +#define COND_SYSCALL_COMPAT(name) \ + cond_syscall(__ia32_compat_sys_##name); \ + cond_syscall(__x32_compat_sys_##name) + +#define COMPAT_SYS_NI(name) \ + SYSCALL_ALIAS(__ia32_compat_sys_##name, sys_ni_posix_timers); \ + SYSCALL_ALIAS(__x32_compat_sys_##name, sys_ni_posix_timers) + +#endif /* CONFIG_COMPAT */ + + +/* + * Instead of the generic __SYSCALL_DEFINEx() definition, this macro takes + * struct pt_regs *regs as the only argument of the syscall stub named + * __x64_sys_*(). It decodes just the registers it needs and passes them on to + * the __se_sys_*() wrapper performing sign extension and then to the + * __do_sys_*() function doing the actual job. 
These wrappers and functions + * are inlined (at least in very most cases), meaning that the assembly looks + * as follows (slightly re-ordered for better readability): + * + * <__x64_sys_recv>: <-- syscall with 4 parameters + * callq <__fentry__> + * + * mov 0x70(%rdi),%rdi <-- decode regs->di + * mov 0x68(%rdi),%rsi <-- decode regs->si + * mov 0x60(%rdi),%rdx <-- decode regs->dx + * mov 0x38(%rdi),%rcx <-- decode regs->r10 + * + * xor %r9d,%r9d <-- clear %r9 + * xor %r8d,%r8d <-- clear %r8 + * + * callq __sys_recvfrom <-- do the actual work in __sys_recvfrom() + * which takes 6 arguments + * + * cltq <-- extend return value to 64-bit + * retq <-- return + * + * This approach avoids leaking random user-provided register content down + * the call chain. + * + * If IA32_EMULATION is enabled, this macro generates an additional wrapper + * named __ia32_sys_*() which decodes the struct pt_regs *regs according + * to the i386 calling convention (bx, cx, dx, si, di, bp). + */ +#define __SYSCALL_DEFINEx(x, name, ...) \ + asmlinkage long __x64_sys##name(const struct pt_regs *regs); \ + ALLOW_ERROR_INJECTION(__x64_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __x64_sys##name(const struct pt_regs *regs) \ + { \ + return __se_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\ + } \ + __IA32_SYS_STUBx(x, name, __VA_ARGS__) \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \ + return ret; \ + } \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +/* + * As the generic SYSCALL_DEFINE0() macro does not decode any parameters for + * obvious reasons, and passing struct pt_regs *regs to it in %rdi does not + * hurt, we only need to re-define it here to keep the naming congruent to + * SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI() + * macros to work correctly. + */ +#ifndef SYSCALL_DEFINE0 +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __x64_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \ + asmlinkage long __x64_sys_##sname(void) +#endif + +#ifndef COND_SYSCALL +#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name) +#endif + +#ifndef SYS_NI +#define SYS_NI(name) SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); +#endif + + +/* + * For VSYSCALLS, we need to declare these three syscalls with the new + * pt_regs-based calling convention for in-kernel use. 
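
To make the __SYSCALL_DEFINEx() machinery above more concrete, here is a hand-expanded sketch of what a two-argument definition roughly turns into on x86-64. dup2 is used only as a familiar example; SYSCALL_METADATA, ALLOW_ERROR_INJECTION and the exact __SC_* type handling are deliberately left out, so this is an approximation rather than the literal preprocessor output:

/* SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
 * becomes, roughly: */

asmlinkage long __x64_sys_dup2(const struct pt_regs *regs)
{
        /* SC_X86_64_REGS_TO_ARGS(2, ...) selects regs->di and regs->si */
        return __se_sys_dup2(regs->di, regs->si);
}

static long __se_sys_dup2(long oldfd, long newfd)
{
        /* sign-extension/cast step before calling the real body */
        long ret = __do_sys_dup2((unsigned int)oldfd, (unsigned int)newfd);
        return ret;
}

static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd)
{
        /* the body written inside SYSCALL_DEFINE2(dup2, ...) goes here */
        return 0; /* placeholder for the original body */
}

With CONFIG_IA32_EMULATION enabled, __IA32_SYS_STUBx() additionally emits an __ia32_sys_dup2() stub that feeds __se_sys_dup2() from regs->bx and regs->cx instead, matching the i386 argument order.
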
+ */ +struct pt_regs; +asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs); +asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs); +asmlinkage long __x64_sys_time(const struct pt_regs *regs); + +#endif /* _ASM_X86_SYSCALL_WRAPPER_H */ diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h index ae6e05fdc24b..9fa979dd0d9d 100644 --- a/arch/x86/include/asm/syscalls.h +++ b/arch/x86/include/asm/syscalls.h @@ -18,6 +18,12 @@ /* Common in X86_32 and X86_64 */ /* kernel/ioport.c */ long ksys_ioperm(unsigned long from, unsigned long num, int turn_on); + +#ifdef CONFIG_X86_32 +/* + * These definitions are only valid on pure 32-bit systems; x86-64 uses a + * different syscall calling convention + */ asmlinkage long sys_ioperm(unsigned long, unsigned long, int); asmlinkage long sys_iopl(unsigned int); @@ -32,7 +38,6 @@ asmlinkage long sys_set_thread_area(struct user_desc __user *); asmlinkage long sys_get_thread_area(struct user_desc __user *); /* X86_32 only */ -#ifdef CONFIG_X86_32 /* kernel/signal.c */ asmlinkage long sys_sigreturn(void); @@ -42,15 +47,5 @@ struct vm86_struct; asmlinkage long sys_vm86old(struct vm86_struct __user *); asmlinkage long sys_vm86(unsigned long, unsigned long); -#else /* CONFIG_X86_32 */ - -/* X86_64 only */ -/* kernel/process_64.c */ -asmlinkage long sys_arch_prctl(int, unsigned long); - -/* kernel/sys_x86_64.c */ -asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long, - unsigned long, unsigned long, unsigned long); - #endif /* CONFIG_X86_32 */ #endif /* _ASM_X86_SYSCALLS_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 84137c22fdfa..6690cd3fc8b1 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -131,7 +131,12 @@ static inline unsigned long build_cr3(pgd_t *pgd, u16 asid) static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid) { VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE); - VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID)); + /* + * Use boot_cpu_has() instead of this_cpu_has() as this function + * might be called during early boot. This should work even after + * boot because all CPU's the have same capabilities: + */ + VM_WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_PCID)); return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH; } diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index aebf60357758..a06cbf019744 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -137,15 +137,15 @@ struct boot_e820_entry { * setup data structure. 
*/ struct jailhouse_setup_data { - u16 version; - u16 compatible_version; - u16 pm_timer_address; - u16 num_cpus; - u64 pci_mmconfig_base; - u32 tsc_khz; - u32 apic_khz; - u8 standard_ioapic; - u8 cpu_ids[255]; + __u16 version; + __u16 compatible_version; + __u16 pm_timer_address; + __u16 num_cpus; + __u64 pci_mmconfig_base; + __u32 tsc_khz; + __u32 apic_khz; + __u8 standard_ioapic; + __u8 cpu_ids[255]; } __attribute__((packed)); /* The so-called "zeropage" */ diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h index 809134c644a6..90ab9a795b49 100644 --- a/arch/x86/include/uapi/asm/msgbuf.h +++ b/arch/x86/include/uapi/asm/msgbuf.h @@ -1 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X64_MSGBUF_H +#define __ASM_X64_MSGBUF_H + +#if !defined(__x86_64__) || !defined(__ILP32__) #include <asm-generic/msgbuf.h> +#else +/* + * The msqid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is differnet + * from other 32-bit architectures. + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */ + __kernel_ulong_t msg_qnum; /* number of messages in queue */ + __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +#endif + +#endif /* __ASM_GENERIC_MSGBUF_H */ diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h index 83c05fc2de38..644421f3823b 100644 --- a/arch/x86/include/uapi/asm/shmbuf.h +++ b/arch/x86/include/uapi/asm/shmbuf.h @@ -1 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_X86_SHMBUF_H +#define __ASM_X86_SHMBUF_H + +#if !defined(__x86_64__) || !defined(__ILP32__) #include <asm-generic/shmbuf.h> +#else +/* + * The shmid64_ds structure for x86 architecture with x32 ABI. + * + * On x86-32 and x86-64 we can just use the generic definition, but + * x32 uses the same binary layout as x86_64, which is differnet + * from other 32-bit architectures. + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + __kernel_ulong_t shm_nattch; /* no. 
of current attaches */ + __kernel_ulong_t __unused4; + __kernel_ulong_t __unused5; +}; + +struct shminfo64 { + __kernel_ulong_t shmmax; + __kernel_ulong_t shmmin; + __kernel_ulong_t shmmni; + __kernel_ulong_t shmseg; + __kernel_ulong_t shmall; + __kernel_ulong_t __unused1; + __kernel_ulong_t __unused2; + __kernel_ulong_t __unused3; + __kernel_ulong_t __unused4; +}; + +#endif + +#endif /* __ASM_X86_SHMBUF_H */ diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 7a37d9357bc4..3b20607d581b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -200,7 +200,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) { struct acpi_madt_local_x2apic *processor = NULL; #ifdef CONFIG_X86_X2APIC - int apic_id; + u32 apic_id; u8 enabled; #endif @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; + /* Ignore invalid ID */ + if (apic_id == 0xffffffff) + return 0; + /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size @@ -222,10 +226,13 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) * to not preallocating memory for all NR_CPUS * when we use CPU hotplug. */ - if (!apic->apic_id_valid(apic_id) && enabled) - printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); - else - acpi_register_lapic(apic_id, processor->uid, enabled); + if (!apic->apic_id_valid(apic_id)) { + if (enabled) + pr_warn(PREFIX "x2apic entry ignored\n"); + return 0; + } + + acpi_register_lapic(apic_id, processor->uid, enabled); #else printk(KERN_WARNING PREFIX "x2apic entry ignored\n"); #endif diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c index a360801779ae..02b4839478b1 100644 --- a/arch/x86/kernel/apic/apic_common.c +++ b/arch/x86/kernel/apic/apic_common.c @@ -40,7 +40,7 @@ int default_check_phys_apicid_present(int phys_apicid) return physid_isset(phys_apicid, phys_cpu_present_map); } -int default_apic_id_valid(int apicid) +int default_apic_id_valid(u32 apicid) { return (apicid < 255); } diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 134e04506ab4..78778b54f904 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -56,7 +56,7 @@ static u32 numachip2_set_apic_id(unsigned int id) return id << 24; } -static int numachip_apic_id_valid(int apicid) +static int numachip_apic_id_valid(u32 apicid) { /* Trust what bootloader passes in MADT */ return 1; diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h index b107de381cb5..a49b3604027f 100644 --- a/arch/x86/kernel/apic/x2apic.h +++ b/arch/x86/kernel/apic/x2apic.h @@ -1,6 +1,6 @@ /* Common bits for X2APIC cluster/physical modes. 
*/ -int x2apic_apic_id_valid(int apicid); +int x2apic_apic_id_valid(u32 apicid); int x2apic_apic_id_registered(void); void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest); unsigned int x2apic_get_apic_id(unsigned long id); diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index e2829bf40e4a..b5cf9e7b3830 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -101,7 +101,7 @@ static int x2apic_phys_probe(void) } /* Common x2apic functions, also used by x2apic_cluster */ -int x2apic_apic_id_valid(int apicid) +int x2apic_apic_id_valid(u32 apicid) { return 1; } diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index f11910b44638..efaf2d4f9c3c 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -557,7 +557,7 @@ static void uv_send_IPI_all(int vector) uv_send_IPI_mask(cpu_online_mask, vector); } -static int uv_apic_id_valid(int apicid) +static int uv_apic_id_valid(u32 apicid) { return 1; } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4702fbd98f92..8a5b185735e1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -848,18 +848,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c) c->x86_power = edx; } - if (c->extended_cpuid_level >= 0x80000008) { - cpuid(0x80000008, &eax, &ebx, &ecx, &edx); - - c->x86_virt_bits = (eax >> 8) & 0xff; - c->x86_phys_bits = eax & 0xff; - c->x86_capability[CPUID_8000_0008_EBX] = ebx; - } -#ifdef CONFIG_X86_32 - else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) - c->x86_phys_bits = 36; -#endif - if (c->extended_cpuid_level >= 0x8000000a) c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); @@ -874,6 +862,23 @@ void get_cpu_cap(struct cpuinfo_x86 *c) apply_forced_caps(c); } +static void get_cpu_address_sizes(struct cpuinfo_x86 *c) +{ + u32 eax, ebx, ecx, edx; + + if (c->extended_cpuid_level >= 0x80000008) { + cpuid(0x80000008, &eax, &ebx, &ecx, &edx); + + c->x86_virt_bits = (eax >> 8) & 0xff; + c->x86_phys_bits = eax & 0xff; + c->x86_capability[CPUID_8000_0008_EBX] = ebx; + } +#ifdef CONFIG_X86_32 + else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) + c->x86_phys_bits = 36; +#endif +} + static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) { #ifdef CONFIG_X86_32 @@ -965,6 +970,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) cpu_detect(c); get_cpu_vendor(c); get_cpu_cap(c); + get_cpu_address_sizes(c); setup_force_cpu_cap(X86_FEATURE_CPUID); if (this_cpu->c_early_init) @@ -1097,6 +1103,8 @@ static void generic_identify(struct cpuinfo_x86 *c) get_cpu_cap(c); + get_cpu_address_sizes(c); + if (c->cpuid_level >= 0x00000001) { c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 904b0a3c4e53..2c0bd38a44ab 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -19,7 +19,7 @@ struct cpuid_dep { * called from cpu hotplug. It shouldn't do anything in this case, * but it's difficult to tell that to the init reference checker. 
*/ -const static struct cpuid_dep cpuid_deps[] = { +static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XSAVEOPT, X86_FEATURE_XSAVE }, { X86_FEATURE_XSAVEC, X86_FEATURE_XSAVE }, { X86_FEATURE_XSAVES, X86_FEATURE_XSAVE }, diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index b9693b80fc21..60d1897041da 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 10c4fc2c91f8..77e201301528 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -564,14 +564,12 @@ static int __reload_late(void *info) apply_microcode_local(&err); spin_unlock(&update_lock); + /* siblings return UCODE_OK because their engine got updated already */ if (err > UCODE_NFOUND) { pr_warn("Error reloading microcode on CPU %d\n", cpu); - return -1; - /* siblings return UCODE_OK because their engine got updated already */ + ret = -1; } else if (err == UCODE_UPDATED || err == UCODE_OK) { ret = 1; - } else { - return ret; } /* diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 32b8e5724f96..1c2cfa0644aa 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -485,7 +485,6 @@ static void show_saved_mc(void) */ static void save_mc_for_early(u8 *mc, unsigned int size) { -#ifdef CONFIG_HOTPLUG_CPU /* Synchronization during CPU hotplug. */ static DEFINE_MUTEX(x86_cpu_microcode_mutex); @@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size) show_saved_mc(); mutex_unlock(&x86_cpu_microcode_mutex); -#endif } static bool load_builtin_intel_microcode(struct cpio_data *cp) diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 1f6680427ff0..f631a3f15587 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -38,37 +38,6 @@ #include <asm/virtext.h> #include <asm/intel_pt.h> -/* Alignment required for elf header segment */ -#define ELF_CORE_HEADER_ALIGN 4096 - -/* This primarily represents number of split ranges due to exclusion */ -#define CRASH_MAX_RANGES 16 - -struct crash_mem_range { - u64 start, end; -}; - -struct crash_mem { - unsigned int nr_ranges; - struct crash_mem_range ranges[CRASH_MAX_RANGES]; -}; - -/* Misc data about ram ranges needed to prepare elf headers */ -struct crash_elf_data { - struct kimage *image; - /* - * Total number of ram ranges we have after various adjustments for - * crash reserved region, etc. 
- */ - unsigned int max_nr_ranges; - - /* Pointer to elf header */ - void *ehdr; - /* Pointer to next phdr */ - void *bufp; - struct crash_mem mem; -}; - /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { struct boot_params *params; @@ -218,124 +187,49 @@ static int get_nr_ram_ranges_callback(struct resource *res, void *arg) return 0; } - /* Gather all the required information to prepare elf headers for ram regions */ -static void fill_up_crash_elf_data(struct crash_elf_data *ced, - struct kimage *image) +static struct crash_mem *fill_up_crash_elf_data(void) { unsigned int nr_ranges = 0; - - ced->image = image; + struct crash_mem *cmem; walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); + if (!nr_ranges) + return NULL; - ced->max_nr_ranges = nr_ranges; - - /* Exclusion of crash region could split memory ranges */ - ced->max_nr_ranges++; - - /* If crashk_low_res is not 0, another range split possible */ - if (crashk_low_res.end) - ced->max_nr_ranges++; -} - -static int exclude_mem_range(struct crash_mem *mem, - unsigned long long mstart, unsigned long long mend) -{ - int i, j; - unsigned long long start, end; - struct crash_mem_range temp_range = {0, 0}; - - for (i = 0; i < mem->nr_ranges; i++) { - start = mem->ranges[i].start; - end = mem->ranges[i].end; - - if (mstart > end || mend < start) - continue; - - /* Truncate any area outside of range */ - if (mstart < start) - mstart = start; - if (mend > end) - mend = end; - - /* Found completely overlapping range */ - if (mstart == start && mend == end) { - mem->ranges[i].start = 0; - mem->ranges[i].end = 0; - if (i < mem->nr_ranges - 1) { - /* Shift rest of the ranges to left */ - for (j = i; j < mem->nr_ranges - 1; j++) { - mem->ranges[j].start = - mem->ranges[j+1].start; - mem->ranges[j].end = - mem->ranges[j+1].end; - } - } - mem->nr_ranges--; - return 0; - } - - if (mstart > start && mend < end) { - /* Split original range */ - mem->ranges[i].end = mstart - 1; - temp_range.start = mend + 1; - temp_range.end = end; - } else if (mstart != start) - mem->ranges[i].end = mstart - 1; - else - mem->ranges[i].start = mend + 1; - break; - } + /* + * Exclusion of crash region and/or crashk_low_res may cause + * another range split. So add extra two slots here. + */ + nr_ranges += 2; + cmem = vzalloc(sizeof(struct crash_mem) + + sizeof(struct crash_mem_range) * nr_ranges); + if (!cmem) + return NULL; - /* If a split happend, add the split to array */ - if (!temp_range.end) - return 0; + cmem->max_nr_ranges = nr_ranges; + cmem->nr_ranges = 0; - /* Split happened */ - if (i == CRASH_MAX_RANGES - 1) { - pr_err("Too many crash ranges after split\n"); - return -ENOMEM; - } - - /* Location where new range should go */ - j = i + 1; - if (j < mem->nr_ranges) { - /* Move over all ranges one slot towards the end */ - for (i = mem->nr_ranges - 1; i >= j; i--) - mem->ranges[i + 1] = mem->ranges[i]; - } - - mem->ranges[j].start = temp_range.start; - mem->ranges[j].end = temp_range.end; - mem->nr_ranges++; - return 0; + return cmem; } /* * Look for any unwanted ranges between mstart, mend and remove them. 
This - * might lead to split and split ranges are put in ced->mem.ranges[] array + * might lead to split and split ranges are put in cmem->ranges[] array */ -static int elf_header_exclude_ranges(struct crash_elf_data *ced, - unsigned long long mstart, unsigned long long mend) +static int elf_header_exclude_ranges(struct crash_mem *cmem) { - struct crash_mem *cmem = &ced->mem; int ret = 0; - memset(cmem->ranges, 0, sizeof(cmem->ranges)); - - cmem->ranges[0].start = mstart; - cmem->ranges[0].end = mend; - cmem->nr_ranges = 1; - /* Exclude crashkernel region */ - ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end); + ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); if (ret) return ret; if (crashk_low_res.end) { - ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end); + ret = crash_exclude_mem_range(cmem, crashk_low_res.start, + crashk_low_res.end); if (ret) return ret; } @@ -345,144 +239,12 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced, static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg) { - struct crash_elf_data *ced = arg; - Elf64_Ehdr *ehdr; - Elf64_Phdr *phdr; - unsigned long mstart, mend; - struct kimage *image = ced->image; - struct crash_mem *cmem; - int ret, i; - - ehdr = ced->ehdr; - - /* Exclude unwanted mem ranges */ - ret = elf_header_exclude_ranges(ced, res->start, res->end); - if (ret) - return ret; - - /* Go through all the ranges in ced->mem.ranges[] and prepare phdr */ - cmem = &ced->mem; - - for (i = 0; i < cmem->nr_ranges; i++) { - mstart = cmem->ranges[i].start; - mend = cmem->ranges[i].end; - - phdr = ced->bufp; - ced->bufp += sizeof(Elf64_Phdr); - - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R|PF_W|PF_X; - phdr->p_offset = mstart; - - /* - * If a range matches backup region, adjust offset to backup - * segment. - */ - if (mstart == image->arch.backup_src_start && - (mend - mstart + 1) == image->arch.backup_src_sz) - phdr->p_offset = image->arch.backup_load_addr; - - phdr->p_paddr = mstart; - phdr->p_vaddr = (unsigned long long) __va(mstart); - phdr->p_filesz = phdr->p_memsz = mend - mstart + 1; - phdr->p_align = 0; - ehdr->e_phnum++; - pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n", - phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz, - ehdr->e_phnum, phdr->p_offset); - } - - return ret; -} - -static int prepare_elf64_headers(struct crash_elf_data *ced, - void **addr, unsigned long *sz) -{ - Elf64_Ehdr *ehdr; - Elf64_Phdr *phdr; - unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz; - unsigned char *buf, *bufp; - unsigned int cpu; - unsigned long long notes_addr; - int ret; + struct crash_mem *cmem = arg; - /* extra phdr for vmcoreinfo elf note */ - nr_phdr = nr_cpus + 1; - nr_phdr += ced->max_nr_ranges; - - /* - * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping - * area on x86_64 (ffffffff80000000 - ffffffffa0000000). - * I think this is required by tools like gdb. So same physical - * memory will be mapped in two elf headers. One will contain kernel - * text virtual addresses and other will have __va(physical) addresses. 
- */ + cmem->ranges[cmem->nr_ranges].start = res->start; + cmem->ranges[cmem->nr_ranges].end = res->end; + cmem->nr_ranges++; - nr_phdr++; - elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr); - elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN); - - buf = vzalloc(elf_sz); - if (!buf) - return -ENOMEM; - - bufp = buf; - ehdr = (Elf64_Ehdr *)bufp; - bufp += sizeof(Elf64_Ehdr); - memcpy(ehdr->e_ident, ELFMAG, SELFMAG); - ehdr->e_ident[EI_CLASS] = ELFCLASS64; - ehdr->e_ident[EI_DATA] = ELFDATA2LSB; - ehdr->e_ident[EI_VERSION] = EV_CURRENT; - ehdr->e_ident[EI_OSABI] = ELF_OSABI; - memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); - ehdr->e_type = ET_CORE; - ehdr->e_machine = ELF_ARCH; - ehdr->e_version = EV_CURRENT; - ehdr->e_phoff = sizeof(Elf64_Ehdr); - ehdr->e_ehsize = sizeof(Elf64_Ehdr); - ehdr->e_phentsize = sizeof(Elf64_Phdr); - - /* Prepare one phdr of type PT_NOTE for each present cpu */ - for_each_present_cpu(cpu) { - phdr = (Elf64_Phdr *)bufp; - bufp += sizeof(Elf64_Phdr); - phdr->p_type = PT_NOTE; - notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); - phdr->p_offset = phdr->p_paddr = notes_addr; - phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t); - (ehdr->e_phnum)++; - } - - /* Prepare one PT_NOTE header for vmcoreinfo */ - phdr = (Elf64_Phdr *)bufp; - bufp += sizeof(Elf64_Phdr); - phdr->p_type = PT_NOTE; - phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note(); - phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE; - (ehdr->e_phnum)++; - -#ifdef CONFIG_X86_64 - /* Prepare PT_LOAD type program header for kernel text region */ - phdr = (Elf64_Phdr *)bufp; - bufp += sizeof(Elf64_Phdr); - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R|PF_W|PF_X; - phdr->p_vaddr = (Elf64_Addr)_text; - phdr->p_filesz = phdr->p_memsz = _end - _text; - phdr->p_offset = phdr->p_paddr = __pa_symbol(_text); - (ehdr->e_phnum)++; -#endif - - /* Prepare PT_LOAD headers for system ram chunks. */ - ced->ehdr = ehdr; - ced->bufp = bufp; - ret = walk_system_ram_res(0, -1, ced, - prepare_elf64_ram_headers_callback); - if (ret < 0) - return ret; - - *addr = buf; - *sz = elf_sz; return 0; } @@ -490,18 +252,46 @@ static int prepare_elf64_headers(struct crash_elf_data *ced, static int prepare_elf_headers(struct kimage *image, void **addr, unsigned long *sz) { - struct crash_elf_data *ced; - int ret; + struct crash_mem *cmem; + Elf64_Ehdr *ehdr; + Elf64_Phdr *phdr; + int ret, i; - ced = kzalloc(sizeof(*ced), GFP_KERNEL); - if (!ced) + cmem = fill_up_crash_elf_data(); + if (!cmem) return -ENOMEM; - fill_up_crash_elf_data(ced, image); + ret = walk_system_ram_res(0, -1, cmem, + prepare_elf64_ram_headers_callback); + if (ret) + goto out; + + /* Exclude unwanted mem ranges */ + ret = elf_header_exclude_ranges(cmem); + if (ret) + goto out; /* By default prepare 64bit headers */ - ret = prepare_elf64_headers(ced, addr, sz); - kfree(ced); + ret = crash_prepare_elf64_headers(cmem, + IS_ENABLED(CONFIG_X86_64), addr, sz); + if (ret) + goto out; + + /* + * If a range matches backup region, adjust offset to backup + * segment. 
+ */ + ehdr = (Elf64_Ehdr *)*addr; + phdr = (Elf64_Phdr *)(ehdr + 1); + for (i = 0; i < ehdr->e_phnum; phdr++, i++) + if (phdr->p_type == PT_LOAD && + phdr->p_paddr == image->arch.backup_src_start && + phdr->p_memsz == image->arch.backup_src_sz) { + phdr->p_offset = image->arch.backup_load_addr; + break; + } +out: + vfree(cmem); return ret; } @@ -547,14 +337,14 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem, /* Exclude Backup region */ start = image->arch.backup_load_addr; end = start + image->arch.backup_src_sz - 1; - ret = exclude_mem_range(cmem, start, end); + ret = crash_exclude_mem_range(cmem, start, end); if (ret) return ret; /* Exclude elf header region */ start = image->arch.elf_load_addr; end = start + image->arch.elf_headers_sz - 1; - return exclude_mem_range(cmem, start, end); + return crash_exclude_mem_range(cmem, start, end); } /* Prepare memory map for crash dump kernel */ diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index e5ec3cafa72e..aebd0d5bc086 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c @@ -195,6 +195,10 @@ void init_espfix_ap(int cpu) pte_p = pte_offset_kernel(&pmd, addr); stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); + /* + * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since + * this is mapped to userspace. + */ pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask)); for (n = 0; n < ESPFIX_PTE_CLONES; n++) set_pte(&pte_p[n*PTE_STRIDE], pte); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 0c855deee165..0c408f8c4ed4 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -195,6 +195,8 @@ unsigned long __head __startup_64(unsigned long physaddr, pud[i + 1] = (pudval_t)pmd + pgtable_flags; pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; + /* Filter out unsupported __PAGE_KERNEL_* bits: */ + pmd_entry &= __supported_pte_mask; pmd_entry += sme_get_me_mask(); pmd_entry += physaddr; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 48385c1074a5..8344dd2f310a 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -399,8 +399,13 @@ NEXT_PAGE(level3_ident_pgt) .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC .fill 511, 8, 0 NEXT_PAGE(level2_ident_pgt) - /* Since I easily can, map the first 1G. + /* + * Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. + * + * Note: This sets _PAGE_GLOBAL despite whether + * the CPU supports it or it is enabled. But, + * the CPU should ignore the bit. */ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) #else @@ -431,6 +436,10 @@ NEXT_PAGE(level2_kernel_pgt) * (NOTE: at +512MB starts the module area, see MODULES_VADDR. * If you want to increase this then increase MODULES_VADDR * too.) + * + * This table is eventually used by the kernel during normal + * runtime. Care must be taken to clear out undesired bits + * later, like _PAGE_RW or _PAGE_GLOBAL in some cases. 
*/ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index fa183a131edc..a15fe0e92cf9 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL2.0 +// SPDX-License-Identifier: GPL-2.0 /* * Jailhouse paravirt_ops implementation * diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index fb095ba0c02f..7326078eaa7a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -334,7 +334,6 @@ static void *bzImage64_load(struct kimage *image, char *kernel, unsigned long setup_header_size, params_cmdline_sz; struct boot_params *params; unsigned long bootparam_load_addr, kernel_load_addr, initrd_load_addr; - unsigned long purgatory_load_addr; struct bzimage64_data *ldata; struct kexec_entry64_regs regs64; void *stack; @@ -342,6 +341,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel, unsigned int efi_map_offset, efi_map_sz, efi_setup_data_offset; struct kexec_buf kbuf = { .image = image, .buf_max = ULONG_MAX, .top_down = true }; + struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR, + .buf_max = ULONG_MAX, .top_down = true }; header = (struct setup_header *)(kernel + setup_hdr_offset); setup_sects = header->setup_sects; @@ -379,14 +380,13 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * Load purgatory. For 64bit entry point, purgatory code can be * anywhere. */ - ret = kexec_load_purgatory(image, MIN_PURGATORY_ADDR, ULONG_MAX, 1, - &purgatory_load_addr); + ret = kexec_load_purgatory(image, &pbuf); if (ret) { pr_err("Loading purgatory failed\n"); return ERR_PTR(ret); } - pr_debug("Loaded purgatory at 0x%lx\n", purgatory_load_addr); + pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); /* @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); - efi_map_sz = ALIGN(efi_map_sz, 16); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - kbuf.bufsz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct setup_data) + sizeof(struct efi_setup_data); @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; - efi_setup_data_offset = efi_map_offset + efi_map_sz; + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. 
Documentation/x86/boot.txt */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; @@ -538,7 +537,7 @@ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) } #endif -struct kexec_file_ops kexec_bzImage64_ops = { +const struct kexec_file_ops kexec_bzImage64_ops = { .probe = bzImage64_probe, .load = bzImage64_load, .cleanup = bzImage64_cleanup, diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 26d713ecad34..c9b14020f4dd 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c @@ -145,6 +145,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) unsigned long offset = i << PAGE_SHIFT; const void *src = (char *)ldt->entries + offset; unsigned long pfn; + pgprot_t pte_prot; pte_t pte, *ptep; va = (unsigned long)ldt_slot_va(slot) + offset; @@ -163,7 +164,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) * target via some kernel interface which misses a * permission check. */ - pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)); + pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL); + /* Filter out unsuppored __PAGE_KERNEL* bits: */ + pgprot_val(pte_prot) &= __supported_pte_mask; + pte = pfn_pte(pfn, pte_prot); set_pte_at(mm, va, ptep, pte); pte_unmap_unlock(ptep, ptl); } diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 93bd4fb603d1..a5e55d832d0a 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -30,8 +30,9 @@ #include <asm/set_memory.h> #ifdef CONFIG_KEXEC_FILE -static struct kexec_file_ops *kexec_file_loaders[] = { +const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_bzImage64_ops, + NULL }; #endif @@ -364,27 +365,6 @@ void arch_crash_save_vmcoreinfo(void) /* arch-dependent functionality related to kexec file-based syscall */ #ifdef CONFIG_KEXEC_FILE -int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len) -{ - int i, ret = -ENOEXEC; - struct kexec_file_ops *fops; - - for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) { - fops = kexec_file_loaders[i]; - if (!fops || !fops->probe) - continue; - - ret = fops->probe(buf, buf_len); - if (!ret) { - image->fops = fops; - return ret; - } - } - - return ret; -} - void *arch_kexec_kernel_image_load(struct kimage *image) { vfree(image->arch.elf_headers); @@ -399,88 +379,53 @@ void *arch_kexec_kernel_image_load(struct kimage *image) image->cmdline_buf_len); } -int arch_kimage_file_post_load_cleanup(struct kimage *image) -{ - if (!image->fops || !image->fops->cleanup) - return 0; - - return image->fops->cleanup(image->image_loader_data); -} - -#ifdef CONFIG_KEXEC_VERIFY_SIG -int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel, - unsigned long kernel_len) -{ - if (!image->fops || !image->fops->verify_sig) { - pr_debug("kernel loader does not support signature verification."); - return -EKEYREJECTED; - } - - return image->fops->verify_sig(kernel, kernel_len); -} -#endif - /* * Apply purgatory relocations. * - * ehdr: Pointer to elf headers - * sechdrs: Pointer to section headers. - * relsec: section index of SHT_RELA section. + * @pi: Purgatory to be relocated. + * @section: Section relocations applying to. + * @relsec: Section containing RELAs. + * @symtabsec: Corresponding symtab. * * TODO: Some of the code belongs to generic code. Move that in kexec.c. 
*/ -int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, - Elf64_Shdr *sechdrs, unsigned int relsec) +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, const Elf_Shdr *relsec, + const Elf_Shdr *symtabsec) { unsigned int i; Elf64_Rela *rel; Elf64_Sym *sym; void *location; - Elf64_Shdr *section, *symtabsec; unsigned long address, sec_base, value; const char *strtab, *name, *shstrtab; + const Elf_Shdr *sechdrs; - /* - * ->sh_offset has been modified to keep the pointer to section - * contents in memory - */ - rel = (void *)sechdrs[relsec].sh_offset; - - /* Section to which relocations apply */ - section = &sechdrs[sechdrs[relsec].sh_info]; - - pr_debug("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - - /* Associated symbol table */ - symtabsec = &sechdrs[sechdrs[relsec].sh_link]; - - /* String table */ - if (symtabsec->sh_link >= ehdr->e_shnum) { - /* Invalid strtab section number */ - pr_err("Invalid string table section index %d\n", - symtabsec->sh_link); - return -ENOEXEC; - } + /* String & section header string table */ + sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; + strtab = (char *)pi->ehdr + sechdrs[symtabsec->sh_link].sh_offset; + shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset; - strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset; + rel = (void *)pi->ehdr + relsec->sh_offset; - /* section header string table */ - shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset; + pr_debug("Applying relocate section %s to %u\n", + shstrtab + relsec->sh_name, relsec->sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) { /* * rel[i].r_offset contains byte offset from beginning * of section to the storage unit affected. * - * This is location to update (->sh_offset). This is temporary - * buffer where section is currently loaded. This will finally - * be loaded to a different address later, pointed to by + * This is location to update. This is temporary buffer + * where section is currently loaded. This will finally be + * loaded to a different address later, pointed to by * ->sh_addr. kexec takes care of moving it * (kexec_load_segment()). */ - location = (void *)(section->sh_offset + rel[i].r_offset); + location = pi->purgatory_buf; + location += section->sh_offset; + location += rel[i].r_offset; /* Final address of the location */ address = section->sh_addr + rel[i].r_offset; @@ -491,8 +436,8 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get * these respectively. 
*/ - sym = (Elf64_Sym *)symtabsec->sh_offset + - ELF64_R_SYM(rel[i].r_info); + sym = (void *)pi->ehdr + symtabsec->sh_offset; + sym += ELF64_R_SYM(rel[i].r_info); if (sym->st_name) name = strtab + sym->st_name; @@ -515,12 +460,12 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, if (sym->st_shndx == SHN_ABS) sec_base = 0; - else if (sym->st_shndx >= ehdr->e_shnum) { + else if (sym->st_shndx >= pi->ehdr->e_shnum) { pr_err("Invalid section %d for symbol %s\n", sym->st_shndx, name); return -ENOEXEC; } else - sec_base = sechdrs[sym->st_shndx].sh_addr; + sec_base = pi->sechdrs[sym->st_shndx].sh_addr; value = sym->st_value; value += sec_base; diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c deleted file mode 100644 index ac7ea3a8242f..000000000000 --- a/arch/x86/kernel/pci-nommu.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Fallback functions when the main IOMMU code is not compiled in. This - code is roughly equivalent to i386. */ -#include <linux/dma-direct.h> -#include <linux/scatterlist.h> -#include <linux/string.h> -#include <linux/gfp.h> -#include <linux/pci.h> -#include <linux/mm.h> - -#include <asm/processor.h> -#include <asm/iommu.h> -#include <asm/dma.h> - -#define NOMMU_MAPPING_ERROR 0 - -static int -check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) -{ - if (hwdev && !dma_capable(hwdev, bus, size)) { - if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) - printk(KERN_ERR - "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", - name, (long long)bus, size, - (long long)*hwdev->dma_mask); - return 0; - } - return 1; -} - -static dma_addr_t nommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset; - WARN_ON(size == 0); - if (!check_addr("map_single", dev, bus, size)) - return NOMMU_MAPPING_ERROR; - return bus; -} - -/* Map a set of buffers described by scatterlist in streaming - * mode for DMA. This is the scatter-gather version of the - * above pci_map_single interface. Here the scatter gather list - * elements are each tagged with the appropriate dma address - * and length. They are obtained via sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for pci_map_single are - * the same here. 
- */ -static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *s; - int i; - - WARN_ON(nents == 0 || sg[0].length == 0); - - for_each_sg(sg, s, nents, i) { - BUG_ON(!sg_page(s)); - s->dma_address = sg_phys(s); - if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) - return 0; - s->dma_length = s->length; - } - return nents; -} - -static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == NOMMU_MAPPING_ERROR; -} - -const struct dma_map_ops nommu_dma_ops = { - .alloc = dma_generic_alloc_coherent, - .free = dma_generic_free_coherent, - .map_sg = nommu_map_sg, - .map_page = nommu_map_page, - .is_phys = 1, - .mapping_error = nommu_mapping_error, - .dma_supported = x86_dma_supported, -}; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 6285697b6e56..5c623dfe39d1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -50,6 +50,7 @@ #include <linux/init_ohci1394_dma.h> #include <linux/kvm_para.h> #include <linux/dma-contiguous.h> +#include <xen/xen.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void) high = true; } + if (xen_pv_domain()) { + pr_info("Ignoring crashkernel for a Xen PV domain\n"); + return; + } + /* 0 means: find the address automatically */ if (crash_base <= 0) { /* diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index df92605d8724..14c057f29979 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -26,7 +26,7 @@ static inline void signal_compat_build_tests(void) * new fields are handled in copy_siginfo_to_user32()! */ BUILD_BUG_ON(NSIGILL != 11); - BUILD_BUG_ON(NSIGFPE != 14); + BUILD_BUG_ON(NSIGFPE != 15); BUILD_BUG_ON(NSIGSEGV != 7); BUILD_BUG_ON(NSIGBUS != 5); BUILD_BUG_ON(NSIGTRAP != 4); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ff99e2b6fc54..0f1cbb042f49 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,6 +77,8 @@ #include <asm/i8259.h> #include <asm/misc.h> #include <asm/qspinlock.h> +#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) return false; } +/* + * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. + * + * These are Intel CPUs that enumerate an LLC that is shared by + * multiple NUMA nodes. The LLC on these systems is shared for + * off-package data access but private to the NUMA node (half + * of the package) for on-package access. + * + * CPUID (the source of the information about the LLC) can only + * enumerate the cache as being shared *or* unshared, but not + * this particular configuration. The CPU in this case enumerates + * the cache to be shared across the entire package (spanning both + * NUMA nodes). 
+ */ + +static const struct x86_cpu_id snc_cpu[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X }, + {} +}; + static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) { int cpu1 = c->cpu_index, cpu2 = o->cpu_index; - if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) - return topology_sane(c, o, "llc"); + /* Do not match if we do not have a valid APICID for cpu: */ + if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) + return false; - return false; + /* Do not match if LLC id does not match: */ + if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2)) + return false; + + /* + * Allow the SNC topology without warning. Return of false + * means 'c' does not share the LLC of 'o'. This will be + * reflected to userspace. + */ + if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) + return false; + + return topology_sane(c, o, "llc"); } /* @@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = { /* * Set if a package/die has multiple NUMA nodes inside. - * AMD Magny-Cours and Intel Cluster-on-Die have this. + * AMD Magny-Cours, Intel Cluster-on-Die, and Intel + * Sub-NUMA Clustering have this. */ static bool x86_has_numa_in_package; @@ -1536,6 +1571,8 @@ static inline void mwait_play_dead(void) void *mwait_ptr; int i; + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + return; if (!this_cpu_has(X86_FEATURE_MWAIT)) return; if (!this_cpu_has(X86_FEATURE_CLFLUSH)) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ef32297ff17e..91e6da48cbb6 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); - do_div(deltatsc, tmp); + deltatsc = div64_u64(deltatsc, tmp); return (unsigned long) deltatsc; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b58787daf9f8..1fc05e428aba 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type) seg->base = 0; } +static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + if (is_guest_mode(vcpu)) + return svm->nested.hsave->control.tsc_offset; + + return vcpu->arch.tsc_offset; +} + static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) { struct vcpu_svm *svm = to_svm(vcpu); u64 g_tsc_offset = 0; if (is_guest_mode(vcpu)) { + /* Write L1's TSC offset. 
*/ g_tsc_offset = svm->vmcb->control.tsc_offset - svm->nested.hsave->control.tsc_offset; svm->nested.hsave->control.tsc_offset = offset; @@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) /* Restore the original control entries */ copy_vmcb_control_area(vmcb, hsave); + svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; kvm_clear_exception_queue(&svm->vcpu); kvm_clear_interrupt_queue(&svm->vcpu); @@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, /* We don't want to see VMMCALLs from a nested guest */ clr_intercept(svm, INTERCEPT_VMMCALL); + svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; + svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; + svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; svm->vmcb->control.int_state = nested_vmcb->control.int_state; - svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; @@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) struct vcpu_svm *svm = to_svm(vcpu); switch (msr_info->index) { - case MSR_IA32_TSC: { - msr_info->data = svm->vmcb->control.tsc_offset + - kvm_scale_tsc(vcpu, rdtsc()); - - break; - } case MSR_STAR: msr_info->data = svm->vmcb->save.star; break; @@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->vmcb->save.g_pat = data; mark_dirty(svm->vmcb, VMCB_NPT); break; - case MSR_IA32_TSC: - kvm_write_tsc(vcpu, msr); - break; case MSR_IA32_SPEC_CTRL: if (!msr->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS)) @@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq, } if (!ret && svm) { - trace_kvm_pi_irte_update(svm->vcpu.vcpu_id, - host_irq, e->gsi, - vcpu_info.vector, + trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, + e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); } @@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .has_wbinvd_exit = svm_has_wbinvd_exit, + .read_l1_tsc_offset = svm_read_l1_tsc_offset, .write_tsc_offset = svm_write_tsc_offset, .set_tdp_cr3 = set_tdp_cr3, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index aafcc9881e88..c7668806163f 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx) vmx_update_msr_bitmap(&vmx->vcpu); } -/* - * reads and returns guest's timestamp counter "register" - * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset - * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3 - */ -static u64 guest_read_tsc(struct kvm_vcpu *vcpu) +static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) { - u64 host_tsc, tsc_offset; + struct vmcs12 *vmcs12 = get_vmcs12(vcpu); - host_tsc = rdtsc(); - tsc_offset = vmcs_read64(TSC_OFFSET); - return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset; + if (is_guest_mode(vcpu) && + (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)) + return vcpu->arch.tsc_offset - vmcs12->tsc_offset; + + return vcpu->arch.tsc_offset; } /* @@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) #endif case MSR_EFER: return kvm_get_msr_common(vcpu, msr_info); - case MSR_IA32_TSC: - msr_info->data = guest_read_tsc(vcpu); - break; case MSR_IA32_SPEC_CTRL: if 
(!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && @@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; - case MSR_IA32_TSC: - kvm_write_tsc(vcpu, msr_info); - break; case MSR_IA32_SPEC_CTRL: if (!msr_info->host_initiated && !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) && @@ -4553,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); } -static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) -{ - if (enable_ept) - vmx_flush_tlb(vcpu, true); -} - static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; @@ -9287,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); @@ -9315,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) !nested_cpu_has2(get_vmcs12(&vmx->vcpu), SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } } @@ -10608,6 +10593,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu, return true; } +static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu, + struct vmcs12 *vmcs12) +{ + if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && + !page_address_valid(vcpu, vmcs12->apic_access_addr)) + return -EINVAL; + else + return 0; +} + static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { @@ -11176,11 +11171,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); } - if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) - vmcs_write64(TSC_OFFSET, - vcpu->arch.tsc_offset + vmcs12->tsc_offset); - else - vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); + vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); + if (kvm_has_tsc_control) decache_tsc_multiplier(vmx); @@ -11222,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, } } else if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } /* @@ -11299,6 +11291,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_apic_access_controls(vcpu, vmcs12)) + return VMXERR_ENTRY_INVALID_CONTROL_FIELD; + if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12)) return VMXERR_ENTRY_INVALID_CONTROL_FIELD; @@ -11420,6 +11415,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) struct vmcs12 *vmcs12 = get_vmcs12(vcpu); u32 msr_entry_idx; u32 exit_qual; + int r; enter_guest_mode(vcpu); @@ -11429,26 +11425,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); vmx_segment_cache_clear(vmx); - if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) { - leave_guest_mode(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - nested_vmx_entry_failure(vcpu, vmcs12, - 
EXIT_REASON_INVALID_STATE, exit_qual); - return 1; - } + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset += vmcs12->tsc_offset; + + r = EXIT_REASON_INVALID_STATE; + if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) + goto fail; nested_get_vmcs12_pages(vcpu, vmcs12); + r = EXIT_REASON_MSR_LOAD_FAIL; msr_entry_idx = nested_vmx_load_msr(vcpu, vmcs12->vm_entry_msr_load_addr, vmcs12->vm_entry_msr_load_count); - if (msr_entry_idx) { - leave_guest_mode(vcpu); - vmx_switch_vmcs(vcpu, &vmx->vmcs01); - nested_vmx_entry_failure(vcpu, vmcs12, - EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx); - return 1; - } + if (msr_entry_idx) + goto fail; /* * Note no nested_vmx_succeed or nested_vmx_fail here. At this point @@ -11457,6 +11448,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry) * the success flag) when L2 exits (see nested_vmx_vmexit()). */ return 0; + +fail: + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + leave_guest_mode(vcpu); + vmx_switch_vmcs(vcpu, &vmx->vmcs01); + nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual); + return 1; } /* @@ -12028,6 +12027,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, leave_guest_mode(vcpu); + if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) + vcpu->arch.tsc_offset -= vmcs12->tsc_offset; + if (likely(!vmx->fail)) { if (exit_reason == -1) sync_vmcs12(vcpu, vmcs12); @@ -12065,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, } else if (!nested_cpu_has_ept(vmcs12) && nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { - vmx_flush_tlb_ept_only(vcpu); + vmx_flush_tlb(vcpu, true); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ @@ -12224,10 +12226,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift, static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - u64 tscl = rdtsc(); - u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl); - u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; + struct vcpu_vmx *vmx; + u64 tscl, guest_tscl, delta_tsc; + + if (kvm_mwait_in_guest(vcpu->kvm)) + return -EOPNOTSUPP; + + vmx = to_vmx(vcpu); + tscl = rdtsc(); + guest_tscl = kvm_read_l1_tsc(vcpu, tscl); + delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; /* Convert to host delta tsc if tsc scaling is enabled */ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && @@ -12533,7 +12541,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; - trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi, + trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); if (set) @@ -12712,6 +12720,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, + .read_l1_tsc_offset = vmx_read_l1_tsc_offset, .write_tsc_offset = vmx_write_tsc_offset, .set_tdp_cr3 = vmx_set_cr3, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b2ff74b12ec4..51ecd381793b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { - u64 curr_offset = vcpu->arch.tsc_offset; + u64 curr_offset = 
kvm_x86_ops->read_l1_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } @@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) { - return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc); + u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); + + return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); } EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); @@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) return 1; vcpu->arch.smbase = data; break; + case MSR_IA32_TSC: + kvm_write_tsc(vcpu, msr_info); + break; case MSR_SMI_COUNT: if (!msr_info->host_initiated) return 1; @@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UCODE_REV: msr_info->data = vcpu->arch.microcode_version; break; + case MSR_IA32_TSC: + msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; + break; case MSR_MTRRcap: case 0x200 ... 0x2ff: return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); @@ -2819,7 +2827,8 @@ out: static inline bool kvm_can_mwait_in_guest(void) { return boot_cpu_has(X86_FEATURE_MWAIT) && - !boot_cpu_has_bug(X86_BUG_MONITOR); + !boot_cpu_has_bug(X86_BUG_MONITOR) && + boot_cpu_has(X86_FEATURE_ARAT); } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 7d35ce672989..c9492f764902 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) __rem; \ }) -#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) -#define KVM_X86_DISABLE_EXITS_HTL (1 << 1) -#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) -#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ - KVM_X86_DISABLE_EXITS_HTL | \ - KVM_X86_DISABLE_EXITS_PAUSE) - static inline bool kvm_mwait_in_guest(struct kvm *kvm) { return kvm->arch.mwait_in_guest; diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 476d810639a8..b45f5aaefd74 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -27,8 +27,20 @@ EXPORT_SYMBOL(get_cpu_entry_area); void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) { unsigned long va = (unsigned long) cea_vaddr; + pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags); - set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); + /* + * The cpu_entry_area is shared between the user and kernel + * page tables. All of its ptes can safely be global. + * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for + * non-present PTEs, so be careful not to set it in that + * case to avoid confusion. 
+ */ + if (boot_cpu_has(X86_FEATURE_PGE) && + (pgprot_val(flags) & _PAGE_PRESENT)) + pte = pte_set_flags(pte, _PAGE_GLOBAL); + + set_pte_vaddr(va, pte); } static void __init diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 62a7e9f65dec..cc7ff5957194 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/seq_file.h> +#include <linux/highmem.h> #include <asm/pgtable.h> @@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, pgprotval_t eff_in, unsigned long P) { int i; - pte_t *start; + pte_t *pte; pgprotval_t prot, eff; - start = (pte_t *)pmd_page_vaddr(addr); for (i = 0; i < PTRS_PER_PTE; i++) { - prot = pte_flags(*start); - eff = effective_prot(eff_in, prot); st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); + pte = pte_offset_map(&addr, st->current_address); + prot = pte_flags(*pte); + eff = effective_prot(eff_in, prot); note_page(m, st, __pgprot(prot), eff, 5); - start++; + pte_unmap(pte); } } #ifdef CONFIG_KASAN diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index 9aa22be8331e..a2f0c7e20fb0 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -98,6 +98,9 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, if (!info->kernpg_flag) info->kernpg_flag = _KERNPG_TABLE; + /* Filter out unsupported __PAGE_KERNEL_* bits: */ + info->kernpg_flag &= __default_kernel_pte_mask; + for (; addr < end; addr = next) { pgd_t *pgd = pgd_page + pgd_index(addr); p4d_t *p4d; diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 82f5252c723a..fec82b577c18 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -161,12 +161,6 @@ struct map_range { static int page_size_mask; -static void enable_global_pages(void) -{ - if (!static_cpu_has(X86_FEATURE_PTI)) - __supported_pte_mask |= _PAGE_GLOBAL; -} - static void __init probe_page_size_mask(void) { /* @@ -187,9 +181,15 @@ static void __init probe_page_size_mask(void) __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); - enable_global_pages(); + __supported_pte_mask |= _PAGE_GLOBAL; } + /* By the default is everything supported: */ + __default_kernel_pte_mask = __supported_pte_mask; + /* Except when with PTI where the kernel is mostly non-Global: */ + if (cpu_feature_enabled(X86_FEATURE_PTI)) + __default_kernel_pte_mask &= ~_PAGE_GLOBAL; + /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { printk(KERN_INFO "Using GB pages for direct mapping\n"); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8008db2bddb3..c893c6a3d707 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -558,8 +558,14 @@ static void __init pagetable_init(void) permanent_kmaps_init(pgd_base); } -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL); +#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL) +/* Bits supported by the hardware: */ +pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK; +/* Bits allowed in normal kernel mappings: */ +pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK; EXPORT_SYMBOL_GPL(__supported_pte_mask); +/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ +EXPORT_SYMBOL(__default_kernel_pte_mask); /* user-defined highmem size */ static unsigned int highmem_pages = -1; diff --git 
a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 66de40e45f58..0a400606dea0 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -65,8 +65,13 @@ * around without checking the pgd every time. */ +/* Bits supported by the hardware: */ pteval_t __supported_pte_mask __read_mostly = ~0; +/* Bits allowed in normal kernel mappings: */ +pteval_t __default_kernel_pte_mask __read_mostly = ~0; EXPORT_SYMBOL_GPL(__supported_pte_mask); +/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */ +EXPORT_SYMBOL(__default_kernel_pte_mask); int force_personality32; @@ -1286,6 +1291,12 @@ void mark_rodata_ro(void) (unsigned long) __va(__pa_symbol(_sdata))); debug_checkwx(); + + /* + * Do this after all of the manipulation of the + * kernel text page tables are complete. + */ + pti_clone_kernel_text(); } int kern_addr_valid(unsigned long addr) diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index ada98b39b8ad..b3294d36769d 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -44,6 +44,9 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot) return ret; *prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm)); + /* Filter out unsupported __PAGE_KERNEL* bits: */ + pgprot_val(*prot) &= __default_kernel_pte_mask; + return 0; } EXPORT_SYMBOL_GPL(iomap_create_wc); @@ -88,6 +91,9 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) prot = __pgprot(__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); + /* Filter out unsupported __PAGE_KERNEL* bits: */ + pgprot_val(prot) &= __default_kernel_pte_mask; + return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot); } EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index e2db83bebc3b..c63a545ec199 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -816,6 +816,9 @@ void __init __early_set_fixmap(enum fixed_addresses idx, } pte = early_ioremap_pte(addr); + /* Sanitize 'prot' against any unsupported bits: */ + pgprot_val(flags) &= __default_kernel_pte_mask; + if (pgprot_val(flags)) set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags)); else diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index d8ff013ea9d0..980dbebd0ca7 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -269,6 +269,12 @@ void __init kasan_early_init(void) pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE; p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE; + /* Mask out unsupported __PAGE_KERNEL bits: */ + pte_val &= __default_kernel_pte_mask; + pmd_val &= __default_kernel_pte_mask; + pud_val &= __default_kernel_pte_mask; + p4d_val &= __default_kernel_pte_mask; + for (i = 0; i < PTRS_PER_PTE; i++) kasan_zero_pte[i] = __pte(pte_val); @@ -371,7 +377,13 @@ void __init kasan_init(void) */ memset(kasan_zero_page, 0, PAGE_SIZE); for (i = 0; i < PTRS_PER_PTE; i++) { - pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC); + pte_t pte; + pgprot_t prot; + + prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC); + pgprot_val(prot) &= __default_kernel_pte_mask; + + pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot)); set_pte(&kasan_zero_pte[i], pte); } /* Flush TLBs again to be sure that write protection applied. 
*/ diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 85cf12219dea..3bded76e8d5c 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m) static inline void split_page_count(int level) { } #endif +static inline int +within(unsigned long addr, unsigned long start, unsigned long end) +{ + return addr >= start && addr < end; +} + +static inline int +within_inclusive(unsigned long addr, unsigned long start, unsigned long end) +{ + return addr >= start && addr <= end; +} + #ifdef CONFIG_X86_64 static inline unsigned long highmap_start_pfn(void) @@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void) return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; } -#endif - -static inline int -within(unsigned long addr, unsigned long start, unsigned long end) +static bool __cpa_pfn_in_highmap(unsigned long pfn) { - return addr >= start && addr < end; + /* + * Kernel text has an alias mapping at a high address, known + * here as "highmap". + */ + return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn()); } -static inline int -within_inclusive(unsigned long addr, unsigned long start, unsigned long end) +#else + +static bool __cpa_pfn_in_highmap(unsigned long pfn) { - return addr >= start && addr <= end; + /* There is no highmap on 32-bit */ + return false; } +#endif + /* * Flushing functions */ @@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg) static void cpa_flush_all(unsigned long cache) { - BUG_ON(irqs_disabled()); + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); on_each_cpu(__cpa_flush_all, (void *) cache, 1); } @@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ #endif - BUG_ON(irqs_disabled()); + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); @@ -298,9 +315,11 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, /* * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. + * catches all aliases. This also includes __ro_after_init, + * so do not enforce until kernel_set_to_readonly is true. */ - if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, + if (kernel_set_to_readonly && + within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, __pa_symbol(__end_rodata) >> PAGE_SHIFT)) pgprot_val(forbidden) |= _PAGE_RW; @@ -512,6 +531,23 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) #endif } +static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot) +{ + /* + * _PAGE_GLOBAL means "global page" for present PTEs. + * But, it is also used to indicate _PAGE_PROTNONE + * for non-present PTEs. + * + * This ensures that a _PAGE_GLOBAL PTE going from + * present to non-present is not confused as + * _PAGE_PROTNONE. + */ + if (!(pgprot_val(prot) & _PAGE_PRESENT)) + pgprot_val(prot) &= ~_PAGE_GLOBAL; + + return prot; +} + static int try_preserve_large_page(pte_t *kpte, unsigned long address, struct cpa_data *cpa) @@ -566,6 +602,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * up accordingly. */ old_pte = *kpte; + /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */ req_prot = pgprot_large_2_4k(old_prot); pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); @@ -577,19 +614,9 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * different bit positions in the two formats. 
*/ req_prot = pgprot_4k_2_large(req_prot); - - /* - * Set the PSE and GLOBAL flags only if the PRESENT flag is - * set otherwise pmd_present/pmd_huge will return true even on - * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL - * for the ancient hardware that doesn't support it. - */ + req_prot = pgprot_clear_protnone_bits(req_prot); if (pgprot_val(req_prot) & _PAGE_PRESENT) - pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; - else - pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); - - req_prot = canon_pgprot(req_prot); + pgprot_val(req_prot) |= _PAGE_PSE; /* * old_pfn points to the large page base pfn. So we need @@ -674,8 +701,12 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, switch (level) { case PG_LEVEL_2M: ref_prot = pmd_pgprot(*(pmd_t *)kpte); - /* clear PSE and promote PAT bit to correct position */ + /* + * Clear PSE (aka _PAGE_PAT) and move + * PAT bit to correct position. + */ ref_prot = pgprot_large_2_4k(ref_prot); + ref_pfn = pmd_pfn(*(pmd_t *)kpte); break; @@ -698,23 +729,14 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, return 1; } - /* - * Set the GLOBAL flags only if the PRESENT flag is set - * otherwise pmd/pte_present will return true even on a non - * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL - * for the ancient hardware that doesn't support it. - */ - if (pgprot_val(ref_prot) & _PAGE_PRESENT) - pgprot_val(ref_prot) |= _PAGE_GLOBAL; - else - pgprot_val(ref_prot) &= ~_PAGE_GLOBAL; + ref_prot = pgprot_clear_protnone_bits(ref_prot); /* * Get the target pfn from the original entry: */ pfn = ref_pfn; for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) - set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot))); + set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); if (virt_addr_valid(address)) { unsigned long pfn = PFN_DOWN(__pa(address)); @@ -930,19 +952,7 @@ static void populate_pte(struct cpa_data *cpa, pte = pte_offset_kernel(pmd, start); - /* - * Set the GLOBAL flags only if the PRESENT flag is - * set otherwise pte_present will return true even on - * a non present pte. The canon_pgprot will clear - * _PAGE_GLOBAL for the ancient hardware that doesn't - * support it. - */ - if (pgprot_val(pgprot) & _PAGE_PRESENT) - pgprot_val(pgprot) |= _PAGE_GLOBAL; - else - pgprot_val(pgprot) &= ~_PAGE_GLOBAL; - - pgprot = canon_pgprot(pgprot); + pgprot = pgprot_clear_protnone_bits(pgprot); while (num_pages-- && start < end) { set_pte(pte, pfn_pte(cpa->pfn, pgprot)); @@ -1190,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, cpa->numpages = 1; cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; return 0; + + } else if (__cpa_pfn_in_highmap(cpa->pfn)) { + /* Faults in the highmap are OK, so do not warn: */ + return -EFAULT; } else { WARN(1, KERN_WARNING "CPA: called for zero pte. " "vaddr = %lx cpa->vaddr = %lx\n", vaddr, @@ -1234,24 +1248,14 @@ repeat: new_prot = static_protections(new_prot, address, pfn); - /* - * Set the GLOBAL flags only if the PRESENT flag is - * set otherwise pte_present will return true even on - * a non present pte. The canon_pgprot will clear - * _PAGE_GLOBAL for the ancient hardware that doesn't - * support it. 
- */ - if (pgprot_val(new_prot) & _PAGE_PRESENT) - pgprot_val(new_prot) |= _PAGE_GLOBAL; - else - pgprot_val(new_prot) &= ~_PAGE_GLOBAL; + new_prot = pgprot_clear_protnone_bits(new_prot); /* * We need to keep the pfn from the existing PTE, * after all we're only going to change it's attributes * not the memory it points to */ - new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); + new_pte = pfn_pte(pfn, new_prot); cpa->pfn = pfn; /* * Do we really change anything ? @@ -1352,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa) * to touch the high mapped kernel as well: */ if (!within(vaddr, (unsigned long)_text, _brk_end) && - within_inclusive(cpa->pfn, highmap_start_pfn(), - highmap_end_pfn())) { + __cpa_pfn_in_highmap(cpa->pfn)) { unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base; alias_cpa = *cpa; @@ -1428,11 +1431,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, memset(&cpa, 0, sizeof(cpa)); /* - * Check, if we are requested to change a not supported - * feature: + * Check, if we are requested to set a not supported + * feature. Clearing non-supported features is OK. */ mask_set = canon_pgprot(mask_set); - mask_clr = canon_pgprot(mask_clr); + if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) return 0; @@ -1775,6 +1778,12 @@ int set_memory_4k(unsigned long addr, int numpages) __pgprot(0), 1, 0, NULL); } +int set_memory_nonglobal(unsigned long addr, int numpages) +{ + return change_page_attr_clear(&addr, numpages, + __pgprot(_PAGE_GLOBAL), 0); +} + static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc) { struct cpa_data cpa; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 34cda7e0551b..ffc8c13c50e4 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/mm.h> #include <linux/gfp.h> +#include <linux/hugetlb.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/tlb.h> @@ -583,6 +584,9 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags) { + /* Sanitize 'prot' against any unsupported bits: */ + pgprot_val(flags) &= __default_kernel_pte_mask; + __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); } @@ -636,6 +640,10 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) (mtrr != MTRR_TYPE_WRBACK)) return 0; + /* Bail out if we are we on a populated non-leaf entry: */ + if (pud_present(*pud) && !pud_huge(*pud)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pud, pfn_pte( @@ -664,6 +672,10 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) return 0; } + /* Bail out if we are we on a populated non-leaf entry: */ + if (pmd_present(*pmd) && !pmd_huge(*pmd)) + return 0; + prot = pgprot_4k_2_large(prot); set_pte((pte_t *)pmd, pfn_pte( diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 631507f0c198..4d418e705878 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c @@ -66,12 +66,22 @@ static void __init pti_print_if_secure(const char *reason) pr_info("%s\n", reason); } +enum pti_mode { + PTI_AUTO = 0, + PTI_FORCE_OFF, + PTI_FORCE_ON +} pti_mode; + void __init pti_check_boottime_disable(void) { char arg[5]; int ret; + /* Assume mode is auto unless overridden. 
*/ + pti_mode = PTI_AUTO; + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { + pti_mode = PTI_FORCE_OFF; pti_print_if_insecure("disabled on XEN PV."); return; } @@ -79,18 +89,23 @@ void __init pti_check_boottime_disable(void) ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); if (ret > 0) { if (ret == 3 && !strncmp(arg, "off", 3)) { + pti_mode = PTI_FORCE_OFF; pti_print_if_insecure("disabled on command line."); return; } if (ret == 2 && !strncmp(arg, "on", 2)) { + pti_mode = PTI_FORCE_ON; pti_print_if_secure("force enabled on command line."); goto enable; } - if (ret == 4 && !strncmp(arg, "auto", 4)) + if (ret == 4 && !strncmp(arg, "auto", 4)) { + pti_mode = PTI_AUTO; goto autosel; + } } if (cmdline_find_option_bool(boot_command_line, "nopti")) { + pti_mode = PTI_FORCE_OFF; pti_print_if_insecure("disabled on command line."); return; } @@ -149,7 +164,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) * * Returns a pointer to a P4D on success, or NULL on failure. */ -static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) { pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); @@ -177,7 +192,7 @@ static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) * * Returns a pointer to a PMD on success, or NULL on failure. */ -static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) { gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); p4d_t *p4d = pti_user_pagetable_walk_p4d(address); @@ -267,7 +282,7 @@ static void __init pti_setup_vsyscall(void) static void __init pti_setup_vsyscall(void) { } #endif -static void __init +static void pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) { unsigned long addr; @@ -300,6 +315,27 @@ pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) return; /* + * Only clone present PMDs. This ensures only setting + * _PAGE_GLOBAL on present PMDs. This should only be + * called on well-known addresses anyway, so a non- + * present PMD would be a surprise. + */ + if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT))) + return; + + /* + * Setting 'target_pmd' below creates a mapping in both + * the user and kernel page tables. It is effectively + * global, so set it as global in both copies. Note: + * the X86_FEATURE_PGE check is not _required_ because + * the CPU ignores _PAGE_GLOBAL when PGE is not + * supported. The check keeps consistentency with + * code that only set this bit when supported. + */ + if (boot_cpu_has(X86_FEATURE_PGE)) + *pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL); + + /* * Copy the PMD. That is, the kernelmode and usermode * tables will share the last-level page tables of this * address range @@ -348,7 +384,103 @@ static void __init pti_clone_entry_text(void) { pti_clone_pmds((unsigned long) __entry_text_start, (unsigned long) __irqentry_text_end, - _PAGE_RW | _PAGE_GLOBAL); + _PAGE_RW); +} + +/* + * Global pages and PCIDs are both ways to make kernel TLB entries + * live longer, reduce TLB misses and improve kernel performance. + * But, leaving all kernel text Global makes it potentially accessible + * to Meltdown-style attacks which make it trivial to find gadgets or + * defeat KASLR. + * + * Only use global pages when it is really worth it. 
+ */ +static inline bool pti_kernel_image_global_ok(void) +{ + /* + * Systems with PCIDs get litlle benefit from global + * kernel text and are not worth the downsides. + */ + if (cpu_feature_enabled(X86_FEATURE_PCID)) + return false; + + /* + * Only do global kernel image for pti=auto. Do the most + * secure thing (not global) if pti=on specified. + */ + if (pti_mode != PTI_AUTO) + return false; + + /* + * K8 may not tolerate the cleared _PAGE_RW on the userspace + * global kernel image pages. Do the safe thing (disable + * global kernel image). This is unlikely to ever be + * noticed because PTI is disabled by default on AMD CPUs. + */ + if (boot_cpu_has(X86_FEATURE_K8)) + return false; + + /* + * RANDSTRUCT derives its hardening benefits from the + * attacker's lack of knowledge about the layout of kernel + * data structures. Keep the kernel image non-global in + * cases where RANDSTRUCT is in use to help keep the layout a + * secret. + */ + if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT)) + return false; + + return true; +} + +/* + * For some configurations, map all of kernel text into the user page + * tables. This reduces TLB misses, especially on non-PCID systems. + */ +void pti_clone_kernel_text(void) +{ + /* + * rodata is part of the kernel image and is normally + * readable on the filesystem or on the web. But, do not + * clone the areas past rodata, they might contain secrets. + */ + unsigned long start = PFN_ALIGN(_text); + unsigned long end = (unsigned long)__end_rodata_hpage_align; + + if (!pti_kernel_image_global_ok()) + return; + + pr_debug("mapping partial kernel image into user address space\n"); + + /* + * Note that this will undo _some_ of the work that + * pti_set_kernel_image_nonglobal() did to clear the + * global bit. + */ + pti_clone_pmds(start, end, _PAGE_RW); +} + +/* + * This is the only user for it and it is not arch-generic like + * the other set_memory.h functions. Just extern it. + */ +extern int set_memory_nonglobal(unsigned long addr, int numpages); +void pti_set_kernel_image_nonglobal(void) +{ + /* + * The identity map is created with PMDs, regardless of the + * actual length of the kernel. We need to clear + * _PAGE_GLOBAL up to a PMD boundary, not just to the end + * of the image. + */ + unsigned long start = PFN_ALIGN(_text); + unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); + + if (pti_kernel_image_global_ok()) + return; + + set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); } /* @@ -362,6 +494,10 @@ void __init pti_init(void) pr_info("enabled\n"); pti_clone_user_shared(); + + /* Undo all global bits from the init pagetables in head_64.S: */ + pti_set_kernel_image_nonglobal(); + /* Replace some of the global bits just for shared entry text: */ pti_clone_entry_text(); pti_setup_espfix64(); pti_setup_vsyscall(); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index b725154182cc..263c8453815e 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */ break; case BPF_JMP | BPF_JA: - jmp_offset = addrs[i + insn->off] - addrs[i]; + if (insn->off == -1) + /* -1 jmp instructions will always jump + * backwards two bytes. Explicitly handling + * this case avoids wasting too many passes + * when there are long sequences of replaced + * dead code. 
+ */ + jmp_offset = -2; + else + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (!jmp_offset) /* optimize out nop jumps */ break; @@ -1226,6 +1236,7 @@ skip_init_addrs: for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { +out_image: image = NULL; if (header) bpf_jit_binary_free(header); @@ -1236,8 +1247,7 @@ skip_init_addrs: if (proglen != oldproglen) { pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", proglen, oldproglen); - prog = orig_prog; - goto out_addrs; + goto out_image; } break; } @@ -1273,7 +1283,7 @@ skip_init_addrs: prog = orig_prog; } - if (!prog->is_func || extra_pass) { + if (!image || !prog->is_func || extra_pass) { out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 74a532989308..ccf4a49bb065 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -51,6 +51,12 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) pmd_t *pmd; pud_t *pud; p4d_t *p4d = NULL; + pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE); + pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC); + + /* Filter out unsupported __PAGE_KERNEL* bits: */ + pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask; + pgprot_val(pgtable_prot) &= __default_kernel_pte_mask; /* * The new mapping only has to cover the page containing the image @@ -81,15 +87,19 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) return -ENOMEM; set_pmd(pmd + pmd_index(restore_jump_address), - __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC)); + __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot))); set_pud(pud + pud_index(restore_jump_address), - __pud(__pa(pmd) | _KERNPG_TABLE)); + __pud(__pa(pmd) | pgprot_val(pgtable_prot))); if (p4d) { - set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE)); - set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE)); + p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot)); + pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); + + set_p4d(p4d + p4d_index(restore_jump_address), new_p4d); + set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } else { /* No p4d for 4-level paging: point the pgd to the pud page table */ - set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE)); + pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); + set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } return 0; diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index d70c15de417b..2e9ee023e6bc 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string targets += $(purgatory-y) PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) +$(obj)/sha256.o: $(srctree)/lib/sha256.c + $(call if_changed_rule,cc_o_c) + LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib targets += purgatory.ro diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 470edad96bb9..025c34ac0d84 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -11,9 +11,9 @@ */ #include <linux/bug.h> +#include <linux/sha256.h> #include <asm/purgatory.h> -#include "sha256.h" #include "../boot/string.h" unsigned long purgatory_backup_dest __section(.kexec-purgatory); diff --git a/arch/x86/purgatory/sha256.c b/arch/x86/purgatory/sha256.c 
deleted file mode 100644 index 548ca675a14a..000000000000 --- a/arch/x86/purgatory/sha256.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * SHA-256, as specified in - * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf - * - * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. - * - * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> - * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - * Copyright (c) 2014 Red Hat Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -#include <linux/bitops.h> -#include <asm/byteorder.h> -#include "sha256.h" -#include "../boot/string.h" - -static inline u32 Ch(u32 x, u32 y, u32 z) -{ - return z ^ (x & (y ^ z)); -} - -static inline u32 Maj(u32 x, u32 y, u32 z) -{ - return (x & y) | (z & (x | y)); -} - -#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) -#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) -#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) -#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) - -static inline void LOAD_OP(int I, u32 *W, const u8 *input) -{ - W[I] = __be32_to_cpu(((__be32 *)(input))[I]); -} - -static inline void BLEND_OP(int I, u32 *W) -{ - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; -} - -static void sha256_transform(u32 *state, const u8 *input) -{ - u32 a, b, c, d, e, f, g, h, t1, t2; - u32 W[64]; - int i; - - /* load the input */ - for (i = 0; i < 16; i++) - LOAD_OP(i, W, input); - - /* now blend */ - for (i = 16; i < 64; i++) - BLEND_OP(i, W); - - /* load the state into our registers */ - a = state[0]; b = state[1]; c = state[2]; d = state[3]; - e = state[4]; f = state[5]; g = state[6]; h = state[7]; - - /* now iterate */ - t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + 
W[15]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + 
Ch(c, d, e) + 0x2748774c + W[50]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56]; - t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2; - t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57]; - t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2; - t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58]; - t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2; - t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59]; - t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2; - t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60]; - t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2; - t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61]; - t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2; - t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62]; - t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2; - t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63]; - t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2; - - state[0] += a; state[1] += b; state[2] += c; state[3] += d; - state[4] += e; state[5] += f; state[6] += g; state[7] += h; - - /* clear any sensitive info... */ - a = b = c = d = e = f = g = h = t1 = t2 = 0; - memset(W, 0, 64 * sizeof(u32)); -} - -int sha256_init(struct sha256_state *sctx) -{ - sctx->state[0] = SHA256_H0; - sctx->state[1] = SHA256_H1; - sctx->state[2] = SHA256_H2; - sctx->state[3] = SHA256_H3; - sctx->state[4] = SHA256_H4; - sctx->state[5] = SHA256_H5; - sctx->state[6] = SHA256_H6; - sctx->state[7] = SHA256_H7; - sctx->count = 0; - - return 0; -} - -int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) -{ - unsigned int partial, done; - const u8 *src; - - partial = sctx->count & 0x3f; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) > 63) { - if (partial) { - done = -partial; - memcpy(sctx->buf + partial, data, done + 64); - src = sctx->buf; - } - - do { - sha256_transform(sctx->state, src); - done += 64; - src = data + done; - } while (done + 63 < len); - - partial = 0; - } - memcpy(sctx->buf + partial, src, len - done); - - return 0; -} - -int sha256_final(struct sha256_state *sctx, u8 *out) -{ - __be32 *dst = (__be32 *)out; - __be64 bits; - unsigned int index, pad_len; - int i; - static const u8 padding[64] = { 0x80, }; - - /* Save number of bits */ - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64. */ - index = sctx->count & 0x3f; - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); - sha256_update(sctx, padding, pad_len); - - /* Append length (before padding) */ - sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Zeroize sensitive information. */ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h deleted file mode 100644 index 2867d9825a57..000000000000 --- a/arch/x86/purgatory/sha256.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (C) 2014 Red Hat Inc. 
- * - * Author: Vivek Goyal <vgoyal@redhat.com> - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. - */ - -#ifndef SHA256_H -#define SHA256_H - -#include <linux/types.h> -#include <crypto/sha.h> - -extern int sha256_init(struct sha256_state *sctx); -extern int sha256_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha256_final(struct sha256_state *sctx, u8 *hash); - -#endif /* SHA256_H */ diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c index d886b1fa36f0..795ca4f2cb3c 100644 --- a/arch/x86/purgatory/string.c +++ b/arch/x86/purgatory/string.c @@ -10,4 +10,16 @@ * Version 2. See the file COPYING for more details. */ +#include <linux/types.h> + #include "../boot/string.c" + +void *memcpy(void *dst, const void *src, size_t len) +{ + return __builtin_memcpy(dst, src, len); +} + +void *memset(void *dst, int c, size_t len) +{ + return __builtin_memset(dst, c, len); +} diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index 2163888497d3..5e53bfbe5823 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -112,7 +112,7 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id) return xen_pv_domain(); } -static int xen_id_always_valid(int apicid) +static int xen_id_always_valid(u32 apicid) { return 1; }
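
The sha256.h header removed above exposed a three-call digest interface (sha256_init/sha256_update/sha256_final over struct sha256_state). Below is a minimal sketch of how such an interface is driven by a caller that hashes several memory regions and checks the result against an expected digest. It is illustrative only: the region structure, helper name and digest buffer are not purgatory symbols, and the sketch assumes a normal kernel-build context where an implementation of the three functions, memcmp() and the SHA256_* definitions from <crypto/sha.h> are all available.

#include <linux/string.h>	/* memcmp() */
#include <linux/types.h>
#include <crypto/sha.h>		/* struct sha256_state, SHA256_DIGEST_SIZE */

extern int sha256_init(struct sha256_state *sctx);
extern int sha256_update(struct sha256_state *sctx, const u8 *input,
			 unsigned int length);
extern int sha256_final(struct sha256_state *sctx, u8 *hash);

/* Illustrative region descriptor; not a purgatory structure. */
struct hash_region {
	const u8 *start;
	unsigned long len;
};

/* Hash each region in order and compare against the expected digest. */
static int verify_regions(const struct hash_region *regions, int nr,
			  const u8 expected[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;
	u8 digest[SHA256_DIGEST_SIZE];
	int i;

	sha256_init(&sctx);
	for (i = 0; i < nr; i++)
		sha256_update(&sctx, regions[i].start, regions[i].len);
	sha256_final(&sctx, digest);

	return memcmp(digest, expected, SHA256_DIGEST_SIZE) ? -1 : 0;
}

A quick sanity check for any implementation behind this interface is the FIPS 180 test vector: sha256("abc") should produce ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad.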
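
On the string.c hunk: ../boot/string.c supplies memcmp() and the other string helpers but not memcpy() or memset() (in the boot code those come from assembly), so the purgatory now defines them out of line on top of the compiler builtins. Even code that never calls these functions by name can end up referencing the symbols, because compilers may lower aggregate copies and large zero-initializations into library calls even in freestanding builds. The fragment below is illustrative only and is not from the patch; it just shows two such cases.

/* Illustrative only: this function mentions neither memcpy() nor memset(),
 * yet a compiler may emit calls to both, which is why a standalone binary
 * such as the purgatory has to provide the symbols itself.
 */
struct frame {
	unsigned char payload[256];
};

void frame_clear(struct frame *f)
{
	struct frame tmp = { 0 };	/* often lowered to a memset() call */

	*f = tmp;			/* often lowered to a memcpy() call */
}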