Diffstat (limited to 'arch')
139 files changed, 3561 insertions, 1163 deletions
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index 4e547296831d..52312cb5dbe2 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -47,9 +47,6 @@ struct thread_struct { /* Forward declaration, a strange C thing */ struct task_struct; -/* Return saved PC of a blocked thread */ -unsigned long thread_saved_pc(struct task_struct *t); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1) @@ -72,18 +69,21 @@ unsigned long thread_saved_pc(struct task_struct *t); #define release_segments(mm) do { } while (0) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) /* * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. * Look in process.c for details of kernel stack layout */ -#define KSTK_ESP(tsk) (tsk->thread.ksp) +#define TSK_K_ESP(tsk) (tsk->thread.ksp) -#define KSTK_REG(tsk, off) (*((unsigned int *)(KSTK_ESP(tsk) + \ +#define TSK_K_REG(tsk, off) (*((unsigned int *)(TSK_K_ESP(tsk) + \ sizeof(struct callee_regs) + off))) -#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4) -#define KSTK_FP(tsk) KSTK_REG(tsk, 0) +#define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) +#define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) + +#define thread_saved_pc(tsk) TSK_K_BLINK(tsk) extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp); diff --git a/arch/arc/include/asm/stacktrace.h b/arch/arc/include/asm/stacktrace.h new file mode 100644 index 000000000000..b29b6064ea14 --- /dev/null +++ b/arch/arc/include/asm/stacktrace.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com) + * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_STACKTRACE_H +#define __ASM_STACKTRACE_H + +#include <linux/sched.h> + +/** + * arc_unwind_core - Unwind the kernel mode stack for an execution context + * @tsk: NULL for current task, specific task otherwise + * @regs: pt_regs used to seed the unwinder {SP, FP, BLINK, PC} + * If NULL, use pt_regs of @tsk (if !NULL) otherwise + * use the current values of {SP, FP, BLINK, PC} + * @consumer_fn: Callback invoked for each frame unwound + * Returns 0 to continue unwinding, -1 to stop + * @arg: Arg to callback + * + * Returns the address of first function in stack + * + * Semantics: + * - synchronous unwinding (e.g. dump_stack): @tsk NULL, @regs NULL + * - Asynchronous unwinding of sleeping task: @tsk !NULL, @regs NULL + * - Asynchronous unwinding of intr/excp etc: @tsk !NULL, @regs !NULL + */ +notrace noinline unsigned int arc_unwind_core( + struct task_struct *tsk, struct pt_regs *regs, + int (*consumer_fn) (unsigned int, void *), + void *arg); + +#endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c index fdd89715d2d3..98c00a2d4dd9 100644 --- a/arch/arc/kernel/process.c +++ b/arch/arc/kernel/process.c @@ -192,29 +192,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) return 0; } -/* - * API: expected by schedular Code: If thread is sleeping where is that. - * What is this good for? it will be always the scheduler or ret_from_fork. - * So we hard code that anyways. 
- */ -unsigned long thread_saved_pc(struct task_struct *t) -{ - struct pt_regs *regs = task_pt_regs(t); - unsigned long blink = 0; - - /* - * If the thread being queried for in not itself calling this, then it - * implies it is not executing, which in turn implies it is sleeping, - * which in turn implies it got switched OUT by the schedular. - * In that case, it's kernel mode blink can reliably retrieved as per - * the picture above (right above pt_regs). - */ - if (t != current && t->state != TASK_RUNNING) - blink = *((unsigned int *)regs - 1); - - return blink; -} - int elf_check_arch(const struct elf32_hdr *x) { unsigned int eflags; diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 9ce47cfe2303..92320d6f737c 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -43,6 +43,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs, struct unwind_frame_info *frame_info) { + /* + * synchronous unwinding (e.g. dump_stack) + * - uses current values of SP and friends + */ if (tsk == NULL && regs == NULL) { unsigned long fp, sp, blink, ret; frame_info->task = current; @@ -61,12 +65,17 @@ static void seed_unwind_frame_info(struct task_struct *tsk, frame_info->regs.r63 = ret; frame_info->call_frame = 0; } else if (regs == NULL) { + /* + * Asynchronous unwinding of sleeping task + * - Gets SP etc from task's pt_regs (saved bottom of kernel + * mode stack of task) + */ frame_info->task = tsk; - frame_info->regs.r27 = KSTK_FP(tsk); - frame_info->regs.r28 = KSTK_ESP(tsk); - frame_info->regs.r31 = KSTK_BLINK(tsk); + frame_info->regs.r27 = TSK_K_FP(tsk); + frame_info->regs.r28 = TSK_K_ESP(tsk); + frame_info->regs.r31 = TSK_K_BLINK(tsk); frame_info->regs.r63 = (unsigned int)__switch_to; /* In the prologue of __switch_to, first FP is saved on stack @@ -83,6 +92,10 @@ static void seed_unwind_frame_info(struct task_struct *tsk, frame_info->call_frame = 0; } else { + /* + * Asynchronous unwinding of intr/exception + * - Just uses the pt_regs passed + */ frame_info->task = tsk; frame_info->regs.r27 = regs->fp; @@ -95,7 +108,7 @@ static void seed_unwind_frame_info(struct task_struct *tsk, #endif -static noinline unsigned int +notrace noinline unsigned int arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, int (*consumer_fn) (unsigned int, void *), void *arg) { diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index 7ff5b5c183bb..74db59b6f392 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -12,6 +12,7 @@ */ #include <linux/types.h> +#include <linux/perf_event.h> #include <linux/ptrace.h> #include <linux/uaccess.h> #include <asm/disasm.h> @@ -253,6 +254,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, } } + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address); return 0; fault: diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 563cb27e37f5..6a2e006cbcce 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -14,6 +14,7 @@ #include <linux/ptrace.h> #include <linux/uaccess.h> #include <linux/kdebug.h> +#include <linux/perf_event.h> #include <asm/pgalloc.h> #include <asm/mmu.h> @@ -139,13 +140,20 @@ good_area: return; } + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + if (likely(!(fault & VM_FAULT_ERROR))) { if (flags & FAULT_FLAG_ALLOW_RETRY) { /* To avoid updating stats twice for retry case */ - if (fault & VM_FAULT_MAJOR) + if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - else + 
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); + } else { tsk->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + } if (fault & VM_FAULT_RETRY) { flags &= ~FAULT_FLAG_ALLOW_RETRY; diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 6cc25ed912ee..2c6248d9a9ef 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -195,6 +195,7 @@ &usb0 { status = "okay"; + dr_mode = "peripheral"; }; &usb1 { diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f9a17e2ca8cb..0198f5a62b96 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -133,20 +133,6 @@ >; }; - i2c1_pins_default: i2c1_pins_default { - pinctrl-single,pins = < - 0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */ - >; - }; - - i2c1_pins_sleep: i2c1_pins_sleep { - pinctrl-single,pins = < - 0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */ - >; - }; - mmc1_pins_default: pinmux_mmc1_pins_default { pinctrl-single,pins = < 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ @@ -254,7 +240,7 @@ status = "okay"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&i2c0_pins_default>; - pinctrl-1 = <&i2c0_pins_default>; + pinctrl-1 = <&i2c0_pins_sleep>; clock-frequency = <400000>; at24@50 { @@ -262,17 +248,10 @@ pagesize = <64>; reg = <0x50>; }; -}; - -&i2c1 { - status = "okay"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&i2c1_pins_default>; - pinctrl-1 = <&i2c1_pins_default>; - clock-frequency = <400000>; tps: tps62362@60 { compatible = "ti,tps62362"; + reg = <0x60>; regulator-name = "VDD_MPU"; regulator-min-microvolt = <950000>; regulator-max-microvolt = <1330000>; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 03750af3b49a..6463f9ef2b54 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -549,14 +549,6 @@ pinctrl-0 = <&usb1_pins>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb2 { dr_mode = "peripheral"; }; diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index 857d0289ad4d..d3a29c1b8417 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts @@ -35,6 +35,18 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ >; }; + + usb0_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ + >; + }; + + usb1_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */ + >; + }; }; &i2c1 { @@ -127,3 +139,16 @@ &mmc1 { vmmc-supply = <&vmmcsd_fixed>; }; + +/* At least dm8168-evm rev c won't support multipoint, later may */ +&usb0 { + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins>; + mentor,multipoint = <0>; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins>; + mentor,multipoint = <0>; +}; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index d98d0f7de380..3c97b5f2addc 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -97,10 +97,31 @@ /* Device Configuration Registers */ scm_conf: syscon@600 { - compatible = "syscon"; + compatible = "syscon", 
"simple-bus"; reg = <0x600 0x110>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x600 0x110>; + + usb_phy0: usb-phy@20 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x20 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; + + usb_phy1: usb-phy@28 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x28 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; }; scrm_clocks: clocks { @@ -357,7 +378,10 @@ reg-names = "mc", "control"; interrupts = <18>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy0>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; @@ -366,13 +390,15 @@ usb1: usb@47401800 { compatible = "ti,musb-am33xx"; - status = "disabled"; reg = <0x47401c00 0x400 0x47401800 0x200>; reg-names = "mc", "control"; interrupts = <19>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy1>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 746cddb1b8f5..3290a96ba586 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -543,14 +543,6 @@ }; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5827fedafd43..127608d79033 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -249,8 +249,8 @@ <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { @@ -1090,8 +1090,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin1>; - clock-names = "sysclk"; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4d8711713610..e0264d0bf7b9 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -380,14 +380,6 @@ phy-supply = <&ldo4_reg>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index 59d1c297bb30..578fa2a54dce 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -87,8 +87,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <64>; + dma-channels = <32>; + dma-requests = <64>; }; i2c1: i2c@48070000 { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 60403273f83e..db80f9d376fa 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -16,6 +16,13 @@ model = "Nokia N900"; compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; + aliases { + i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + }; + cpus { cpu@0 { cpu0-supply = <&vcc>; @@ -704,7 +711,7 @@ compatible = "smsc,lan91c94"; interrupt-parent = <&gpio2>; interrupts = <22 
IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ - reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ + reg = <1 0 0xf>; /* 16 byte IO range */ bank-width = <2>; pinctrl-names = "default"; pinctrl-0 = <&ethernet_pins>; diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 01b71111bd55..f4f78c40b564 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -155,8 +155,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <96>; + dma-channels = <32>; + dma-requests = <96>; }; omap3_pmx_core: pinmux@48002030 { diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 074147cebae4..87401d9f4d8b 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -223,8 +223,8 @@ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4a310000 { diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index b321fdf42c9f..ddff674bd05e 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -238,8 +238,8 @@ <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { @@ -929,8 +929,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin>; - clock-names = "sysclk"; + clocks = <&sys_clkin>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; }; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index e8a4c955241b..b7e6b6fba5e0 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y CONFIG_ARCH_STI=y CONFIG_ARCH_EXYNOS=y CONFIG_EXYNOS5420_MCPM=y +CONFIG_ARCH_SHMOBILE_MULTI=y +CONFIG_ARCH_EMEV2=y +CONFIG_ARCH_R7S72100=y +CONFIG_ARCH_R8A73A4=y +CONFIG_ARCH_R8A7740=y +CONFIG_ARCH_R8A7779=y +CONFIG_ARCH_R8A7790=y +CONFIG_ARCH_R8A7791=y +CONFIG_ARCH_R8A7794=y +CONFIG_ARCH_SH73A0=y +CONFIG_MACH_MARZEN=y CONFIG_ARCH_SUNXI=y CONFIG_ARCH_SIRF=y CONFIG_ARCH_TEGRA=y @@ -84,6 +95,8 @@ CONFIG_PCI_KEYSTONE=y CONFIG_PCI_MSI=y CONFIG_PCI_MVEBU=y CONFIG_PCI_TEGRA=y +CONFIG_PCI_RCAR_GEN2=y +CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCIEPORTBUS=y CONFIG_SMP=y CONFIG_NR_CPUS=8 @@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_OMAP_OCP2SCP=y +CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y CONFIG_AHCI_TEGRA=y CONFIG_SATA_HIGHBANK=y CONFIG_SATA_MV=y +CONFIG_SATA_RCAR=y CONFIG_NETDEVICES=y CONFIG_HIX5HD2_GMAC=y CONFIG_SUN4I_EMAC=y @@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y CONFIG_KS8851=y CONFIG_R8169=y +CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y CONFIG_MARVELL_PHY=y +CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y CONFIG_ICPLUS_PHY=y +CONFIG_MICREL_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y @@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y +CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMPE=y CONFIG_TOUCHSCREEN_SUN4I=y CONFIG_INPUT_MISC=y CONFIG_INPUT_MPU3050=y CONFIG_INPUT_AXP20X_PEK=y
+CONFIG_INPUT_ADXL34X=m CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y @@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_IMX=y CONFIG_SERIAL_IMX_CONSOLE=y +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=20 +CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y @@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_MUX_PINCTRL=y CONFIG_I2C_CADENCE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=m CONFIG_I2C_EXYNOS5=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_RIIC=y CONFIG_I2C_S3C2410=y +CONFIG_I2C_SH_MOBILE=y CONFIG_I2C_SIRF=y -CONFIG_I2C_TEGRA=y CONFIG_I2C_ST=y -CONFIG_SPI=y +CONFIG_I2C_TEGRA=y CONFIG_I2C_XILINX=y -CONFIG_SPI_DAVINCI=y +CONFIG_I2C_RCAR=y +CONFIG_SPI=y CONFIG_SPI_CADENCE=y +CONFIG_SPI_DAVINCI=y CONFIG_SPI_OMAP24XX=y CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y +CONFIG_SPI_RSPI=y +CONFIG_SPI_SH_MSIOF=m +CONFIG_SPI_SH_HSPI=y CONFIG_SPI_SIRF=y CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y @@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y CONFIG_PINCTRL_APQ8084=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_GENERIC_PLATFORM=y -CONFIG_GPIO_DWAPB=y CONFIG_GPIO_DAVINCI=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_EM=y +CONFIG_GPIO_RCAR=y CONFIG_GPIO_XILINX=y CONFIG_GPIO_ZYNQ=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y CONFIG_GPIO_SYSCON=y @@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_KEYSTONE=y CONFIG_POWER_RESET_SUN6I=y +CONFIG_POWER_RESET_RMOBILE=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y +CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_DAVINCI_WATCHDOG CONFIG_ST_THERMAL_SYSCFG=y @@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y CONFIG_ORION_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MESON_WATCHDOG=y +CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y CONFIG_MFD_BCM590XX=y CONFIG_MFD_AXP20X=y @@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y CONFIG_MFD_TPS6586X=y CONFIG_MFD_TPS65910=y CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_AS3711=y CONFIG_REGULATOR_AS3722=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_BCM590XX=y +CONFIG_REGULATOR_DA9210=y CONFIG_REGULATOR_GPIO=y CONFIG_MFD_SYSCON=y CONFIG_POWER_RESET_SYSCON=y CONFIG_REGULATOR_MAX8907=y +CONFIG_REGULATOR_MAX8973=y CONFIG_REGULATOR_MAX77686=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_S2MPS11=y @@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y CONFIG_REGULATOR_VEXPRESS=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=y CONFIG_USB_GSPCA=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_RENESAS_VSP1=m +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7180=m CONFIG_DRM=y +CONFIG_DRM_RCAR_DU=m CONFIG_DRM_TEGRA=y CONFIG_DRM_PANEL_SIMPLE=y CONFIG_FB_ARMCLCD=y CONFIG_FB_WM8505=y +CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SIMPLE=y +CONFIG_FB_SH_MOBILE_MERAM=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y +CONFIG_BACKLIGHT_AS3711=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_SOUND=y @@ -343,6 +397,8 @@ CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_SOC=y 
+CONFIG_SND_SOC_SH4_FSI=m +CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_TEGRA=y CONFIG_SND_SOC_TEGRA_RT5640=y CONFIG_SND_SOC_TEGRA_WM8753=y @@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y CONFIG_SND_SOC_TEGRA_TRIMSLICE=y CONFIG_SND_SOC_TEGRA_ALC5632=y CONFIG_SND_SOC_TEGRA_MAX98090=y +CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_WM8978=m CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y @@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_STORAGE=y CONFIG_USB_DWC3=y CONFIG_USB_CHIPIDEA=y @@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y CONFIG_USB_MXS_PHY=y +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RCAR_GEN2_PHY=m +CONFIG_USB_GADGET=y +CONFIG_USB_RENESAS_USBHS_UDC=m CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_ARMMMCI=y @@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_MMC_MVSDIO=y -CONFIG_MMC_SUNXI=y +CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_IDMAC=y CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y +CONFIG_MMC_SH_MMCIF=y +CONFIG_MMC_SUNXI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_MAX8907=y CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_PALMAS=y CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_TPS6586X=y CONFIG_RTC_DRV_TPS65910=y +CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_EM3027=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_VT8500=y @@ -436,6 +504,9 @@ CONFIG_DMADEVICES=y CONFIG_DW_DMAC=y CONFIG_MV_XOR=y CONFIG_TEGRA20_APB_DMA=y +CONFIG_SH_DMAE=y +CONFIG_RCAR_AUDMAC_PP=m +CONFIG_RCAR_DMAC=y CONFIG_STE_DMA40=y CONFIG_SIRF_DMA=y CONFIG_TI_EDMA=y @@ -468,6 +539,7 @@ CONFIG_IIO=y CONFIG_XILINX_XADC=y CONFIG_AK8975=y CONFIG_PWM=y +CONFIG_PWM_RENESAS_TPU=y CONFIG_PWM_TEGRA=y CONFIG_PWM_VT8500=y CONFIG_PHY_HIX5HD2_SATA=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b7386524c356..a097cffa1231 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ECC_BCH=y CONFIG_MTD_NAND_OMAP2=y +CONFIG_MTD_NAND_OMAP_BCH=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_OMAP2=y @@ -248,6 +249,7 @@ CONFIG_TWL6040_CORE=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_TI_ABB=y +CONFIG_REGULATOR_TPS62360=m CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y CONFIG_REGULATOR_TPS65217=y @@ -374,7 +376,7 @@ CONFIG_PWM_TIEHRPWM=m CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m CONFIG_OMAP_USB2=m -CONFIG_TI_PIPE3=m +CONFIG_TI_PIPE3=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index bf0fe99e8ca9..4cf48c3aca13 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -149,29 +149,28 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) (__boundary - 1 < (end) - 1)? 
__boundary: (end); \ }) +#define kvm_pgd_index(addr) pgd_index(addr) + static inline bool kvm_page_empty(void *ptr) { struct page *ptr_page = virt_to_page(ptr); return page_count(ptr_page) == 1; } - #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp) #define kvm_pud_table_empty(kvm, pudp) (0) #define KVM_PREALLOC_LEVEL 0 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) +static inline void *kvm_get_hwpgd(struct kvm *kvm) { - return 0; + return kvm->arch.pgd; } -static inline void kvm_free_hwpgd(struct kvm *kvm) { } - -static inline void *kvm_get_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - return kvm->arch.pgd; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } struct kvm; diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 56c8b03c0ca1..15b050d46fc9 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, phys_addr_t addr = start, end = start + size; phys_addr_t next; - pgd = pgdp + pgd_index(addr); + pgd = pgdp + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); if (!pgd_none(*pgd)) @@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t next; pgd_t *pgd; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { next = kvm_pgd_addr_end(addr, end); stage2_flush_puds(kvm, pgd, addr, next); @@ -632,6 +632,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); } +/* Free the HW pgd, one page at a time */ +static void kvm_free_hwpgd(void *hwpgd) +{ + free_pages_exact(hwpgd, kvm_get_hwpgd_size()); +} + +/* Allocate the HW PGD, making sure that each page gets its own refcount */ +static void *kvm_alloc_hwpgd(void) +{ + unsigned int size = kvm_get_hwpgd_size(); + + return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); +} + /** * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. * @kvm: The KVM struct pointer for the VM. @@ -645,15 +659,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) */ int kvm_alloc_stage2_pgd(struct kvm *kvm) { - int ret; pgd_t *pgd; + void *hwpgd; if (kvm->arch.pgd != NULL) { kvm_err("kvm_arch already initialized?\n"); return -EINVAL; } + hwpgd = kvm_alloc_hwpgd(); + if (!hwpgd) + return -ENOMEM; + + /* When the kernel uses more levels of page tables than the + * guest, we allocate a fake PGD and pre-populate it to point + * to the next-level page table, which will be the real + * initial page table pointed to by the VTTBR. + * + * When KVM_PREALLOC_LEVEL==2, we allocate a single page for + * the PMD and the kernel will use folded pud. + * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD + * pages. + */ if (KVM_PREALLOC_LEVEL > 0) { + int i; + /* * Allocate fake pgd for the page table manipulation macros to * work. This is not used by the hardware and we have no @@ -661,30 +691,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) */ pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO); + + if (!pgd) { + kvm_free_hwpgd(hwpgd); + return -ENOMEM; + } + + /* Plug the HW PGD into the fake one. 
*/ + for (i = 0; i < PTRS_PER_S2_PGD; i++) { + if (KVM_PREALLOC_LEVEL == 1) + pgd_populate(NULL, pgd + i, + (pud_t *)hwpgd + i * PTRS_PER_PUD); + else if (KVM_PREALLOC_LEVEL == 2) + pud_populate(NULL, pud_offset(pgd, 0) + i, + (pmd_t *)hwpgd + i * PTRS_PER_PMD); + } } else { /* * Allocate actual first-level Stage-2 page table used by the * hardware for Stage-2 page table walks. */ - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); + pgd = (pgd_t *)hwpgd; } - if (!pgd) - return -ENOMEM; - - ret = kvm_prealloc_hwpgd(kvm, pgd); - if (ret) - goto out_err; - kvm_clean_pgd(pgd); kvm->arch.pgd = pgd; return 0; -out_err: - if (KVM_PREALLOC_LEVEL > 0) - kfree(pgd); - else - free_pages((unsigned long)pgd, S2_PGD_ORDER); - return ret; } /** @@ -785,11 +817,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm) return; unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - kvm_free_hwpgd(kvm); + kvm_free_hwpgd(kvm_get_hwpgd(kvm)); if (KVM_PREALLOC_LEVEL > 0) kfree(kvm->arch.pgd); - else - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); + kvm->arch.pgd = NULL; } @@ -799,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache pgd_t *pgd; pud_t *pud; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); if (WARN_ON(pgd_none(*pgd))) { if (!cache) return NULL; @@ -1089,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) pgd_t *pgd; phys_addr_t next; - pgd = kvm->arch.pgd + pgd_index(addr); + pgd = kvm->arch.pgd + kvm_pgd_index(addr); do { /* * Release kvm_mmu_lock periodically if the memory region is diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig index 8423be76080e..52241207a82a 100644 --- a/arch/arm/mach-asm9260/Kconfig +++ b/arch/arm/mach-asm9260/Kconfig @@ -2,5 +2,7 @@ config MACH_ASM9260 bool "Alphascale ASM9260" depends on ARCH_MULTI_V5 select CPU_ARM926T + select ASM9260_TIMER + select GENERIC_CLOCKEVENTS help Support for Alphascale ASM9260 based platform. 
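(Illustrative aside, not part of the patch.) The arch/arm/kvm/mmu.c hunk above replaces the old high-order __get_free_pages() HW PGD allocation with alloc_pages_exact(), so that every backing page keeps its own reference count. A minimal sketch of that pattern follows; the function names are made up for illustration, and in the real code the size comes from the per-arch kvm_get_hwpgd_size() helper added in this series.

/* Sketch: allocate/free a multi-page HW stage-2 PGD with per-page refcounts */
#include <linux/gfp.h>

static void *sketch_alloc_hwpgd(size_t size)
{
	/*
	 * alloc_pages_exact() returns 'size' bytes backed by individually
	 * refcounted pages (unlike one high-order __get_free_pages() block),
	 * so kvm_page_empty()'s page_count(page) == 1 test stays valid for
	 * every page of the table.
	 */
	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}

static void sketch_free_hwpgd(void *hwpgd, size_t size)
{
	free_pages_exact(hwpgd, size);
}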
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 61bfe584a9d7..fc832040c6e9 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -20,6 +20,7 @@ #include <linux/input.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = MSM_GPIO_TO_INT(49), .end = MSM_GPIO_TO_INT(49), - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct platform_device *devices[] __initdata = { diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 4c748616ef47..10016a3bc698 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c @@ -22,6 +22,7 @@ #include <linux/usb/msm_hsusb.h> #include <linux/err.h> #include <linux/clkdev.h> +#include <linux/smc91x.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> @@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static int __init msm_init_smc91x(void) diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 343c4e3a7c5d..f6d02e4cbcda 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -36,6 +36,7 @@ #include <linux/platform_data/video-pxafb.h> #include <mach/bitfield.h> #include <linux/platform_data/mmc-pxamci.h> +#include <linux/smc91x.h> #include "generic.h" #include "devices.h" @@ -81,11 +82,16 @@ static struct resource smc91x_resources[] = { } }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static void idp_backlight_power(int on) diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index ad777b353bd5..eaee2c20b189 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -24,6 +24,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/pwm_backlight.h> +#include <linux/smc91x.h> #include <asm/types.h> #include <asm/setup.h> @@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = LPD270_ETHERNET_IRQ, .end = LPD270_ETHERNET_IRQ, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; +struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct resource 
lpd270_flash_resources[] = { diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 850e506926df..c309593abdb2 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -28,6 +28,7 @@ #include <linux/platform_data/video-clcd-versatile.h> #include <linux/io.h> #include <linux/smsc911x.h> +#include <linux/smc91x.h> #include <linux/ata_platform.h> #include <linux/amba/mmci.h> #include <linux/gfp.h> @@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, +}; + static struct platform_device realview_eth_device = { .name = "smsc911x", .id = 0, @@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res) realview_eth_device.resource = res; if (strcmp(realview_eth_device.name, "smsc911x") == 0) realview_eth_device.dev.platform_data = &smsc911x_config; + else + realview_eth_device.dev.platform_data = &smc91x_platdata; return platform_device_register(&realview_eth_device); } diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 64c88d657f9e..b3869cbbcc68 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = { [1] = { .start = IRQ_EB_ETH, .end = IRQ_EB_ETH, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 169262e3040d..af868d258e66 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -12,6 +12,7 @@ #include <linux/pm.h> #include <linux/serial_core.h> #include <linux/slab.h> +#include <linux/smc91x.h> #include <asm/mach-types.h> #include <asm/mach/map.h> @@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev) 0x02000000, "smc91x-attrib"), { .flags = IORESOURCE_IRQ }, }; + struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT, + }; struct platform_device_info smc91x_devinfo = { .parent = &dev->dev, .name = "smc91x", .id = 0, .res = smc91x_resources, .num_res = ARRAY_SIZE(smc91x_resources), + .data = &smc91x_platdata, + .size_data = sizeof(smc91x_platdata), }; int ret, irq; diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 091261878eff..1525d7b5f1b7 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c @@ -11,6 +11,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/mtd/partitions.h> +#include <linux/smc91x.h> #include <mach/hardware.h> #include <asm/setup.h> @@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = { #endif }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev = { + .platform_data = &smc91x_platdata, + }, }; static struct platform_device *devices[] __initdata = { diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index f1ad9c2ab2e9..a857794432d6 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -622,7 +622,7 @@ }; sgenet0: ethernet@1f210000 { - compatible = "apm,xgene-enet"; + compatible = "apm,xgene1-sgenet"; status = 
"disabled"; reg = <0x0 0x1f210000 0x0 0xd100>, <0x0 0x1f200000 0x0 0Xc300>, @@ -636,7 +636,7 @@ }; xgenet: ethernet@1f610000 { - compatible = "apm,xgene-enet"; + compatible = "apm,xgene1-xgenet"; status = "disabled"; reg = <0x0 0x1f610000 0x0 0xd100>, <0x0 0x1f600000 0x0 0Xc300>, diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts index 27f32962e55c..4eac8dcea423 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dts +++ b/arch/arm64/boot/dts/arm/foundation-v8.dts @@ -34,6 +34,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ -41,6 +42,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -48,6 +50,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -55,6 +58,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index d429129ecb3d..133ee59de2d7 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -39,6 +39,7 @@ reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A57_1: cpu@1 { @@ -46,6 +47,7 @@ reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A53_0: cpu@100 { @@ -53,6 +55,7 @@ reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_1: cpu@101 { @@ -60,6 +63,7 @@ reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_2: cpu@102 { @@ -67,6 +71,7 @@ reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_3: cpu@103 { @@ -74,6 +79,15 @@ reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A57_L2: l2-cache0 { + compatible = "cache"; + }; + + A53_L2: l2-cache1 { + compatible = "cache"; }; }; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index efc59b3baf63..20addabbd127 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -37,6 +37,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ -44,6 +45,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -51,6 +53,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -58,6 +61,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 5720608c50b1..abb79b3cfcfe 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o aes-neon-blk-y := aes-glue-neon.o aes-neon.o 
-AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE +AFLAGS_aes-ce.o := -DINTERLEAVE=4 AFLAGS_aes-neon.o := -DINTERLEAVE=4 CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5901480bfdca..750bac4e637e 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -20,6 +20,9 @@ #error "Only include this from assembly code" #endif +#ifndef __ASM_ASSEMBLER_H +#define __ASM_ASSEMBLER_H + #include <asm/ptrace.h> #include <asm/thread_info.h> @@ -155,3 +158,5 @@ lr .req x30 // link register #endif orr \rd, \lbits, \hbits, lsl #32 .endm + +#endif /* __ASM_ASSEMBLER_H */ diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 0710654631e7..c60643f14cda 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -1,6 +1,8 @@ #ifndef __ASM_CPUIDLE_H #define __ASM_CPUIDLE_H +#include <asm/proc-fns.h> + #ifdef CONFIG_CPU_IDLE extern int cpu_init_idle(unsigned int cpu); extern int cpu_suspend(unsigned long arg); diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index e2ff32a93b5c..d2f49423c5dc 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) -__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) -__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) +__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) +__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) +__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) +__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 9e5543e08955..ac6fafb95fe7 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -129,6 +129,9 @@ * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are * not known to exist and will break with this configuration. * + * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time + * (see hyp-init.S). + * * Note that when using 4K pages, we concatenate two first level page tables * together. 
* @@ -138,7 +141,6 @@ #ifdef CONFIG_ARM64_64K_PAGES /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 64kB pages (TG0 = 1) * 2 level page tables (SL = 1) @@ -150,7 +152,6 @@ #else /* * Stage2 translation configuration: - * 40bits output (PS = 2) * 40bits input (T0SZ = 24) * 4kB pages (TG0 = 0) * 3 level page tables (SL = 1) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 6458b5373142..bbfb600fa822 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1)) + /* * If we are concatenating first level stage-2 page tables, we would have less * than or equal to 16 pointers in the fake PGD, because that's what the @@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd) #define KVM_PREALLOC_LEVEL (0) #endif -/** - * kvm_prealloc_hwpgd - allocate inital table for VTTBR - * @kvm: The KVM struct pointer for the VM. - * @pgd: The kernel pseudo pgd - * - * When the kernel uses more levels of page tables than the guest, we allocate - * a fake PGD and pre-populate it to point to the next-level page table, which - * will be the real initial page table pointed to by the VTTBR. - * - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we - * allocate 2 consecutive PUD pages. - */ -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) -{ - unsigned int i; - unsigned long hwpgd; - - if (KVM_PREALLOC_LEVEL == 0) - return 0; - - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); - if (!hwpgd) - return -ENOMEM; - - for (i = 0; i < PTRS_PER_S2_PGD; i++) { - if (KVM_PREALLOC_LEVEL == 1) - pgd_populate(NULL, pgd + i, - (pud_t *)hwpgd + i * PTRS_PER_PUD); - else if (KVM_PREALLOC_LEVEL == 2) - pud_populate(NULL, pud_offset(pgd, 0) + i, - (pmd_t *)hwpgd + i * PTRS_PER_PMD); - } - - return 0; -} - static inline void *kvm_get_hwpgd(struct kvm *kvm) { pgd_t *pgd = kvm->arch.pgd; @@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm) return pmd_offset(pud, 0); } -static inline void kvm_free_hwpgd(struct kvm *kvm) +static inline unsigned int kvm_get_hwpgd_size(void) { - if (KVM_PREALLOC_LEVEL > 0) { - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); - } + if (KVM_PREALLOC_LEVEL > 0) + return PTRS_PER_S2_PGD * PAGE_SIZE; + return PTRS_PER_S2_PGD * sizeof(pgd_t); } static inline bool kvm_page_empty(void *ptr) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 16449c535e50..800ec0e87ed9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_VALID | PTE_WRITE; + PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f9be30ea1cbd..20e9591a60cf 100644 --- a/arch/arm64/include/asm/processor.h +++ 
b/arch/arm64/include/asm/processor.h @@ -45,7 +45,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ -#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK +extern phys_addr_t arm64_dma_phys_limit; +#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) #endif /* __KERNEL__ */ struct debug_info { diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 73f0ce570fb3..4abe9b945f77 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -24,11 +24,6 @@ #include <linux/sched.h> #include <asm/cputype.h> -extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); -extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); - -extern struct cpu_tlb_fns cpu_tlb; - /* * TLB Management * ============== diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bef04afd6031..5ee07eee80c2 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ - cpuinfo.o cpu_errata.o alternative.o cacheinfo.o + hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + return_address.o cpuinfo.o cpu_errata.o \ + alternative.o cacheinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o entry32.o \ diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index cf8556ae09d0..c851be795080 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable) branch = aarch64_insn_gen_branch_imm(pc, (unsigned long)ftrace_graph_caller, - AARCH64_INSN_BRANCH_LINK); + AARCH64_INSN_BRANCH_NOLINK); nop = aarch64_insn_gen_nop(); if (enable) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 27d4864577e5..c8eca88f12e6 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); - else + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) page = virt_to_page(addr); + else + return addr; BUG_ON(!page); set_fixmap(fixmap, page_to_phys(page)); diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S new file mode 100644 index 000000000000..cf83e61cd3b5 --- /dev/null +++ b/arch/arm64/kernel/psci-call.S @@ -0,0 +1,28 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2015 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/linkage.h> + +/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_hvc) + hvc #0 + ret +ENDPROC(__invoke_psci_fn_hvc) + +/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_smc) + smc #0 + ret +ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f311c49e..9b8a70ae64a1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -57,6 +57,9 @@ static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); typedef int (*psci_initcall_t)(const struct device_node *); +asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); +asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); + enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, @@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state, PSCI_0_2_POWER_STATE_AFFL_SHIFT; } -/* - * The following two functions are invoked via the invoke_psci_fn pointer - * and will not be inlined, allowing us to piggyback on the AAPCS. - */ -static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "hvc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - -static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "smc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - static int psci_get_version(void) { int err; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c20a300e2213..d26fcd4cd6e6 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, - &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); @@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_SYS: err |= __put_user((compat_uptr_t)(unsigned long) diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index fe652ffd34c2..efa79e8d4196 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime) /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ ENTRY(__kernel_clock_getres) .cfi_startproc - cbz w1, 3f - cmp w0, #CLOCK_REALTIME ccmp w0, #CLOCK_MONOTONIC, #0x4, ne b.ne 1f @@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: + cbz w1, 3f stp xzr, x2, [x1] 3: /* res == NULL. 
*/ diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0a24b9b8c698..58e0c2bdde04 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, }; -extern int swiotlb_late_init_with_default_size(size_t default_size); - static int __init atomic_pool_init(void) { pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -411,21 +409,13 @@ out: return -ENOMEM; } -static int __init swiotlb_late_init(void) +static int __init arm64_dma_init(void) { - size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); + int ret; dma_ops = &swiotlb_dma_ops; - return swiotlb_late_init_with_default_size(swiotlb_size); -} - -static int __init arm64_dma_init(void) -{ - int ret = 0; - - ret |= swiotlb_late_init(); - ret |= atomic_pool_init(); + ret = atomic_pool_init(); return ret; } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 71145f952070..ae85da6307bb 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -33,6 +33,7 @@ #include <linux/dma-mapping.h> #include <linux/dma-contiguous.h> #include <linux/efi.h> +#include <linux/swiotlb.h> #include <asm/fixmap.h> #include <asm/memory.h> @@ -45,6 +46,7 @@ #include "mm.h" phys_addr_t memstart_addr __read_mostly = 0; +phys_addr_t arm64_dma_phys_limit __read_mostly; #ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) @@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) { - max_dma = PFN_DOWN(max_zone_dma_phys()); + max_dma = PFN_DOWN(arm64_dma_phys_limit); zone_size[ZONE_DMA] = max_dma - min; } zone_size[ZONE_NORMAL] = max - max_dma; @@ -156,8 +158,6 @@ early_param("mem", early_mem); void __init arm64_memblock_init(void) { - phys_addr_t dma_phys_limit = 0; - memblock_enforce_memory_limit(memory_limit); /* @@ -174,8 +174,10 @@ void __init arm64_memblock_init(void) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) - dma_phys_limit = max_zone_dma_phys(); - dma_contiguous_reserve(dma_phys_limit); + arm64_dma_phys_limit = max_zone_dma_phys(); + else + arm64_dma_phys_limit = PHYS_MASK + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); memblock_dump_all(); @@ -276,6 +278,8 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { + swiotlb_init(1); + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); #ifndef CONFIG_SPARSEMEM_VMEMMAP diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index bb0ea94c4ba1..1d3ec3ddd84b 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } - if (!is_module_address(start) || !is_module_address(end - 1)) + if (start < MODULES_VADDR || start >= MODULES_END) + return -EINVAL; + + if (end < MODULES_VADDR || end >= MODULES_END) return -EINVAL; data.set_mask = set_mask; diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 93bcf2abd1a1..07d7a7ef8bd5 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -123,12 +123,14 @@ extern unsigned long empty_zero_page; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PTRS_PER_PGD 64 +#define __PAGETABLE_PUD_FOLDED #define PUD_SHIFT 26 #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE - 1)) #define PUE_SIZE 256 
+#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 26 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h index 8fd8ee70266a..421e6ba3a173 100644 --- a/arch/m32r/include/asm/pgtable-2level.h +++ b/arch/m32r/include/asm/pgtable-2level.h @@ -13,6 +13,7 @@ * the M32R is two-level, so we don't really have any * PMD directory physically. */ +#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 22 #define PTRS_PER_PMD 1 diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 28a145bfbb71..35ed4a9981ae 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -54,10 +54,12 @@ */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 881071c07942..13272fd5a5ba 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -149,8 +149,8 @@ extern void exit_thread(void); unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) -#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h index cdac7b3eeaf7..80386470d3a4 100644 --- a/arch/mips/include/asm/asmmacro-32.h +++ b/arch/mips/include/asm/asmmacro-32.h @@ -16,38 +16,38 @@ .set push SET_HARDFLOAT cfc1 \tmp, fcr31 - swc1 $f0, THREAD_FPR0_LS64(\thread) - swc1 $f1, THREAD_FPR1_LS64(\thread) - swc1 $f2, THREAD_FPR2_LS64(\thread) - swc1 $f3, THREAD_FPR3_LS64(\thread) - swc1 $f4, THREAD_FPR4_LS64(\thread) - swc1 $f5, THREAD_FPR5_LS64(\thread) - swc1 $f6, THREAD_FPR6_LS64(\thread) - swc1 $f7, THREAD_FPR7_LS64(\thread) - swc1 $f8, THREAD_FPR8_LS64(\thread) - swc1 $f9, THREAD_FPR9_LS64(\thread) - swc1 $f10, THREAD_FPR10_LS64(\thread) - swc1 $f11, THREAD_FPR11_LS64(\thread) - swc1 $f12, THREAD_FPR12_LS64(\thread) - swc1 $f13, THREAD_FPR13_LS64(\thread) - swc1 $f14, THREAD_FPR14_LS64(\thread) - swc1 $f15, THREAD_FPR15_LS64(\thread) - swc1 $f16, THREAD_FPR16_LS64(\thread) - swc1 $f17, THREAD_FPR17_LS64(\thread) - swc1 $f18, THREAD_FPR18_LS64(\thread) - swc1 $f19, THREAD_FPR19_LS64(\thread) - swc1 $f20, THREAD_FPR20_LS64(\thread) - swc1 $f21, THREAD_FPR21_LS64(\thread) - swc1 $f22, THREAD_FPR22_LS64(\thread) - swc1 $f23, THREAD_FPR23_LS64(\thread) - swc1 $f24, THREAD_FPR24_LS64(\thread) - swc1 $f25, THREAD_FPR25_LS64(\thread) - swc1 $f26, THREAD_FPR26_LS64(\thread) - swc1 $f27, THREAD_FPR27_LS64(\thread) - swc1 $f28, THREAD_FPR28_LS64(\thread) - swc1 $f29, THREAD_FPR29_LS64(\thread) - swc1 $f30, THREAD_FPR30_LS64(\thread) - swc1 $f31, THREAD_FPR31_LS64(\thread) + swc1 $f0, THREAD_FPR0(\thread) + swc1 $f1, THREAD_FPR1(\thread) + swc1 $f2, THREAD_FPR2(\thread) + swc1 $f3, THREAD_FPR3(\thread) + swc1 $f4, THREAD_FPR4(\thread) + swc1 $f5, THREAD_FPR5(\thread) + swc1 $f6, THREAD_FPR6(\thread) + swc1 $f7, THREAD_FPR7(\thread) + swc1 $f8, THREAD_FPR8(\thread) + swc1 $f9, THREAD_FPR9(\thread) + swc1 $f10, THREAD_FPR10(\thread) + swc1 $f11, THREAD_FPR11(\thread) 
+ swc1 $f12, THREAD_FPR12(\thread) + swc1 $f13, THREAD_FPR13(\thread) + swc1 $f14, THREAD_FPR14(\thread) + swc1 $f15, THREAD_FPR15(\thread) + swc1 $f16, THREAD_FPR16(\thread) + swc1 $f17, THREAD_FPR17(\thread) + swc1 $f18, THREAD_FPR18(\thread) + swc1 $f19, THREAD_FPR19(\thread) + swc1 $f20, THREAD_FPR20(\thread) + swc1 $f21, THREAD_FPR21(\thread) + swc1 $f22, THREAD_FPR22(\thread) + swc1 $f23, THREAD_FPR23(\thread) + swc1 $f24, THREAD_FPR24(\thread) + swc1 $f25, THREAD_FPR25(\thread) + swc1 $f26, THREAD_FPR26(\thread) + swc1 $f27, THREAD_FPR27(\thread) + swc1 $f28, THREAD_FPR28(\thread) + swc1 $f29, THREAD_FPR29(\thread) + swc1 $f30, THREAD_FPR30(\thread) + swc1 $f31, THREAD_FPR31(\thread) sw \tmp, THREAD_FCR31(\thread) .set pop .endm @@ -56,38 +56,38 @@ .set push SET_HARDFLOAT lw \tmp, THREAD_FCR31(\thread) - lwc1 $f0, THREAD_FPR0_LS64(\thread) - lwc1 $f1, THREAD_FPR1_LS64(\thread) - lwc1 $f2, THREAD_FPR2_LS64(\thread) - lwc1 $f3, THREAD_FPR3_LS64(\thread) - lwc1 $f4, THREAD_FPR4_LS64(\thread) - lwc1 $f5, THREAD_FPR5_LS64(\thread) - lwc1 $f6, THREAD_FPR6_LS64(\thread) - lwc1 $f7, THREAD_FPR7_LS64(\thread) - lwc1 $f8, THREAD_FPR8_LS64(\thread) - lwc1 $f9, THREAD_FPR9_LS64(\thread) - lwc1 $f10, THREAD_FPR10_LS64(\thread) - lwc1 $f11, THREAD_FPR11_LS64(\thread) - lwc1 $f12, THREAD_FPR12_LS64(\thread) - lwc1 $f13, THREAD_FPR13_LS64(\thread) - lwc1 $f14, THREAD_FPR14_LS64(\thread) - lwc1 $f15, THREAD_FPR15_LS64(\thread) - lwc1 $f16, THREAD_FPR16_LS64(\thread) - lwc1 $f17, THREAD_FPR17_LS64(\thread) - lwc1 $f18, THREAD_FPR18_LS64(\thread) - lwc1 $f19, THREAD_FPR19_LS64(\thread) - lwc1 $f20, THREAD_FPR20_LS64(\thread) - lwc1 $f21, THREAD_FPR21_LS64(\thread) - lwc1 $f22, THREAD_FPR22_LS64(\thread) - lwc1 $f23, THREAD_FPR23_LS64(\thread) - lwc1 $f24, THREAD_FPR24_LS64(\thread) - lwc1 $f25, THREAD_FPR25_LS64(\thread) - lwc1 $f26, THREAD_FPR26_LS64(\thread) - lwc1 $f27, THREAD_FPR27_LS64(\thread) - lwc1 $f28, THREAD_FPR28_LS64(\thread) - lwc1 $f29, THREAD_FPR29_LS64(\thread) - lwc1 $f30, THREAD_FPR30_LS64(\thread) - lwc1 $f31, THREAD_FPR31_LS64(\thread) + lwc1 $f0, THREAD_FPR0(\thread) + lwc1 $f1, THREAD_FPR1(\thread) + lwc1 $f2, THREAD_FPR2(\thread) + lwc1 $f3, THREAD_FPR3(\thread) + lwc1 $f4, THREAD_FPR4(\thread) + lwc1 $f5, THREAD_FPR5(\thread) + lwc1 $f6, THREAD_FPR6(\thread) + lwc1 $f7, THREAD_FPR7(\thread) + lwc1 $f8, THREAD_FPR8(\thread) + lwc1 $f9, THREAD_FPR9(\thread) + lwc1 $f10, THREAD_FPR10(\thread) + lwc1 $f11, THREAD_FPR11(\thread) + lwc1 $f12, THREAD_FPR12(\thread) + lwc1 $f13, THREAD_FPR13(\thread) + lwc1 $f14, THREAD_FPR14(\thread) + lwc1 $f15, THREAD_FPR15(\thread) + lwc1 $f16, THREAD_FPR16(\thread) + lwc1 $f17, THREAD_FPR17(\thread) + lwc1 $f18, THREAD_FPR18(\thread) + lwc1 $f19, THREAD_FPR19(\thread) + lwc1 $f20, THREAD_FPR20(\thread) + lwc1 $f21, THREAD_FPR21(\thread) + lwc1 $f22, THREAD_FPR22(\thread) + lwc1 $f23, THREAD_FPR23(\thread) + lwc1 $f24, THREAD_FPR24(\thread) + lwc1 $f25, THREAD_FPR25(\thread) + lwc1 $f26, THREAD_FPR26(\thread) + lwc1 $f27, THREAD_FPR27(\thread) + lwc1 $f28, THREAD_FPR28(\thread) + lwc1 $f29, THREAD_FPR29(\thread) + lwc1 $f30, THREAD_FPR30(\thread) + lwc1 $f31, THREAD_FPR31(\thread) ctc1 \tmp, fcr31 .set pop .endm diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 0cae4595e985..6156ac8c4cfb 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -60,22 +60,22 @@ .set push SET_HARDFLOAT cfc1 \tmp, fcr31 - sdc1 $f0, THREAD_FPR0_LS64(\thread) - sdc1 $f2, THREAD_FPR2_LS64(\thread) - 
sdc1 $f4, THREAD_FPR4_LS64(\thread) - sdc1 $f6, THREAD_FPR6_LS64(\thread) - sdc1 $f8, THREAD_FPR8_LS64(\thread) - sdc1 $f10, THREAD_FPR10_LS64(\thread) - sdc1 $f12, THREAD_FPR12_LS64(\thread) - sdc1 $f14, THREAD_FPR14_LS64(\thread) - sdc1 $f16, THREAD_FPR16_LS64(\thread) - sdc1 $f18, THREAD_FPR18_LS64(\thread) - sdc1 $f20, THREAD_FPR20_LS64(\thread) - sdc1 $f22, THREAD_FPR22_LS64(\thread) - sdc1 $f24, THREAD_FPR24_LS64(\thread) - sdc1 $f26, THREAD_FPR26_LS64(\thread) - sdc1 $f28, THREAD_FPR28_LS64(\thread) - sdc1 $f30, THREAD_FPR30_LS64(\thread) + sdc1 $f0, THREAD_FPR0(\thread) + sdc1 $f2, THREAD_FPR2(\thread) + sdc1 $f4, THREAD_FPR4(\thread) + sdc1 $f6, THREAD_FPR6(\thread) + sdc1 $f8, THREAD_FPR8(\thread) + sdc1 $f10, THREAD_FPR10(\thread) + sdc1 $f12, THREAD_FPR12(\thread) + sdc1 $f14, THREAD_FPR14(\thread) + sdc1 $f16, THREAD_FPR16(\thread) + sdc1 $f18, THREAD_FPR18(\thread) + sdc1 $f20, THREAD_FPR20(\thread) + sdc1 $f22, THREAD_FPR22(\thread) + sdc1 $f24, THREAD_FPR24(\thread) + sdc1 $f26, THREAD_FPR26(\thread) + sdc1 $f28, THREAD_FPR28(\thread) + sdc1 $f30, THREAD_FPR30(\thread) sw \tmp, THREAD_FCR31(\thread) .set pop .endm @@ -84,22 +84,22 @@ .set push .set mips64r2 SET_HARDFLOAT - sdc1 $f1, THREAD_FPR1_LS64(\thread) - sdc1 $f3, THREAD_FPR3_LS64(\thread) - sdc1 $f5, THREAD_FPR5_LS64(\thread) - sdc1 $f7, THREAD_FPR7_LS64(\thread) - sdc1 $f9, THREAD_FPR9_LS64(\thread) - sdc1 $f11, THREAD_FPR11_LS64(\thread) - sdc1 $f13, THREAD_FPR13_LS64(\thread) - sdc1 $f15, THREAD_FPR15_LS64(\thread) - sdc1 $f17, THREAD_FPR17_LS64(\thread) - sdc1 $f19, THREAD_FPR19_LS64(\thread) - sdc1 $f21, THREAD_FPR21_LS64(\thread) - sdc1 $f23, THREAD_FPR23_LS64(\thread) - sdc1 $f25, THREAD_FPR25_LS64(\thread) - sdc1 $f27, THREAD_FPR27_LS64(\thread) - sdc1 $f29, THREAD_FPR29_LS64(\thread) - sdc1 $f31, THREAD_FPR31_LS64(\thread) + sdc1 $f1, THREAD_FPR1(\thread) + sdc1 $f3, THREAD_FPR3(\thread) + sdc1 $f5, THREAD_FPR5(\thread) + sdc1 $f7, THREAD_FPR7(\thread) + sdc1 $f9, THREAD_FPR9(\thread) + sdc1 $f11, THREAD_FPR11(\thread) + sdc1 $f13, THREAD_FPR13(\thread) + sdc1 $f15, THREAD_FPR15(\thread) + sdc1 $f17, THREAD_FPR17(\thread) + sdc1 $f19, THREAD_FPR19(\thread) + sdc1 $f21, THREAD_FPR21(\thread) + sdc1 $f23, THREAD_FPR23(\thread) + sdc1 $f25, THREAD_FPR25(\thread) + sdc1 $f27, THREAD_FPR27(\thread) + sdc1 $f29, THREAD_FPR29(\thread) + sdc1 $f31, THREAD_FPR31(\thread) .set pop .endm @@ -118,22 +118,22 @@ .set push SET_HARDFLOAT lw \tmp, THREAD_FCR31(\thread) - ldc1 $f0, THREAD_FPR0_LS64(\thread) - ldc1 $f2, THREAD_FPR2_LS64(\thread) - ldc1 $f4, THREAD_FPR4_LS64(\thread) - ldc1 $f6, THREAD_FPR6_LS64(\thread) - ldc1 $f8, THREAD_FPR8_LS64(\thread) - ldc1 $f10, THREAD_FPR10_LS64(\thread) - ldc1 $f12, THREAD_FPR12_LS64(\thread) - ldc1 $f14, THREAD_FPR14_LS64(\thread) - ldc1 $f16, THREAD_FPR16_LS64(\thread) - ldc1 $f18, THREAD_FPR18_LS64(\thread) - ldc1 $f20, THREAD_FPR20_LS64(\thread) - ldc1 $f22, THREAD_FPR22_LS64(\thread) - ldc1 $f24, THREAD_FPR24_LS64(\thread) - ldc1 $f26, THREAD_FPR26_LS64(\thread) - ldc1 $f28, THREAD_FPR28_LS64(\thread) - ldc1 $f30, THREAD_FPR30_LS64(\thread) + ldc1 $f0, THREAD_FPR0(\thread) + ldc1 $f2, THREAD_FPR2(\thread) + ldc1 $f4, THREAD_FPR4(\thread) + ldc1 $f6, THREAD_FPR6(\thread) + ldc1 $f8, THREAD_FPR8(\thread) + ldc1 $f10, THREAD_FPR10(\thread) + ldc1 $f12, THREAD_FPR12(\thread) + ldc1 $f14, THREAD_FPR14(\thread) + ldc1 $f16, THREAD_FPR16(\thread) + ldc1 $f18, THREAD_FPR18(\thread) + ldc1 $f20, THREAD_FPR20(\thread) + ldc1 $f22, THREAD_FPR22(\thread) + ldc1 $f24, THREAD_FPR24(\thread) + 
ldc1 $f26, THREAD_FPR26(\thread) + ldc1 $f28, THREAD_FPR28(\thread) + ldc1 $f30, THREAD_FPR30(\thread) ctc1 \tmp, fcr31 .endm @@ -141,22 +141,22 @@ .set push .set mips64r2 SET_HARDFLOAT - ldc1 $f1, THREAD_FPR1_LS64(\thread) - ldc1 $f3, THREAD_FPR3_LS64(\thread) - ldc1 $f5, THREAD_FPR5_LS64(\thread) - ldc1 $f7, THREAD_FPR7_LS64(\thread) - ldc1 $f9, THREAD_FPR9_LS64(\thread) - ldc1 $f11, THREAD_FPR11_LS64(\thread) - ldc1 $f13, THREAD_FPR13_LS64(\thread) - ldc1 $f15, THREAD_FPR15_LS64(\thread) - ldc1 $f17, THREAD_FPR17_LS64(\thread) - ldc1 $f19, THREAD_FPR19_LS64(\thread) - ldc1 $f21, THREAD_FPR21_LS64(\thread) - ldc1 $f23, THREAD_FPR23_LS64(\thread) - ldc1 $f25, THREAD_FPR25_LS64(\thread) - ldc1 $f27, THREAD_FPR27_LS64(\thread) - ldc1 $f29, THREAD_FPR29_LS64(\thread) - ldc1 $f31, THREAD_FPR31_LS64(\thread) + ldc1 $f1, THREAD_FPR1(\thread) + ldc1 $f3, THREAD_FPR3(\thread) + ldc1 $f5, THREAD_FPR5(\thread) + ldc1 $f7, THREAD_FPR7(\thread) + ldc1 $f9, THREAD_FPR9(\thread) + ldc1 $f11, THREAD_FPR11(\thread) + ldc1 $f13, THREAD_FPR13(\thread) + ldc1 $f15, THREAD_FPR15(\thread) + ldc1 $f17, THREAD_FPR17(\thread) + ldc1 $f19, THREAD_FPR19(\thread) + ldc1 $f21, THREAD_FPR21(\thread) + ldc1 $f23, THREAD_FPR23(\thread) + ldc1 $f25, THREAD_FPR25(\thread) + ldc1 $f27, THREAD_FPR27(\thread) + ldc1 $f29, THREAD_FPR29(\thread) + ldc1 $f31, THREAD_FPR31(\thread) .set pop .endm @@ -211,6 +211,22 @@ .endm #ifdef TOOLCHAIN_SUPPORTS_MSA + .macro _cfcmsa rd, cs + .set push + .set mips32r2 + .set msa + cfcmsa \rd, $\cs + .set pop + .endm + + .macro _ctcmsa cd, rs + .set push + .set mips32r2 + .set msa + ctcmsa $\cd, \rs + .set pop + .endm + .macro ld_d wd, off, base .set push .set mips32r2 @@ -227,35 +243,35 @@ .set pop .endm - .macro copy_u_w rd, ws, n + .macro copy_u_w ws, n .set push .set mips32r2 .set msa - copy_u.w \rd, $w\ws[\n] + copy_u.w $1, $w\ws[\n] .set pop .endm - .macro copy_u_d rd, ws, n + .macro copy_u_d ws, n .set push .set mips64r2 .set msa - copy_u.d \rd, $w\ws[\n] + copy_u.d $1, $w\ws[\n] .set pop .endm - .macro insert_w wd, n, rs + .macro insert_w wd, n .set push .set mips32r2 .set msa - insert.w $w\wd[\n], \rs + insert.w $w\wd[\n], $1 .set pop .endm - .macro insert_d wd, n, rs + .macro insert_d wd, n .set push .set mips64r2 .set msa - insert.d $w\wd[\n], \rs + insert.d $w\wd[\n], $1 .set pop .endm #else @@ -283,7 +299,7 @@ /* * Temporary until all toolchains in use include MSA support. */ - .macro cfcmsa rd, cs + .macro _cfcmsa rd, cs .set push .set noat SET_HARDFLOAT @@ -293,7 +309,7 @@ .set pop .endm - .macro ctcmsa cd, rs + .macro _ctcmsa cd, rs .set push .set noat SET_HARDFLOAT @@ -320,44 +336,36 @@ .set pop .endm - .macro copy_u_w rd, ws, n + .macro copy_u_w ws, n .set push .set noat SET_HARDFLOAT .insn .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) - /* move triggers an assembler bug... */ - or \rd, $1, zero .set pop .endm - .macro copy_u_d rd, ws, n + .macro copy_u_d ws, n .set push .set noat SET_HARDFLOAT .insn .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) - /* move triggers an assembler bug... */ - or \rd, $1, zero .set pop .endm - .macro insert_w wd, n, rs + .macro insert_w wd, n .set push .set noat SET_HARDFLOAT - /* move triggers an assembler bug... */ - or $1, \rs, zero .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) .set pop .endm - .macro insert_d wd, n, rs + .macro insert_d wd, n .set push .set noat SET_HARDFLOAT - /* move triggers an assembler bug... 
*/ - or $1, \rs, zero .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) .set pop .endm @@ -399,7 +407,7 @@ .set push .set noat SET_HARDFLOAT - cfcmsa $1, MSA_CSR + _cfcmsa $1, MSA_CSR sw $1, THREAD_MSA_CSR(\thread) .set pop .endm @@ -409,7 +417,7 @@ .set noat SET_HARDFLOAT lw $1, THREAD_MSA_CSR(\thread) - ctcmsa MSA_CSR, $1 + _ctcmsa MSA_CSR, $1 .set pop ld_d 0, THREAD_FPR0, \thread ld_d 1, THREAD_FPR1, \thread @@ -452,9 +460,6 @@ insert_w \wd, 2 insert_w \wd, 3 #endif - .if 31-\wd - msa_init_upper (\wd+1) - .endif .endm .macro msa_init_all_upper @@ -463,6 +468,37 @@ SET_HARDFLOAT not $1, zero msa_init_upper 0 + msa_init_upper 1 + msa_init_upper 2 + msa_init_upper 3 + msa_init_upper 4 + msa_init_upper 5 + msa_init_upper 6 + msa_init_upper 7 + msa_init_upper 8 + msa_init_upper 9 + msa_init_upper 10 + msa_init_upper 11 + msa_init_upper 12 + msa_init_upper 13 + msa_init_upper 14 + msa_init_upper 15 + msa_init_upper 16 + msa_init_upper 17 + msa_init_upper 18 + msa_init_upper 19 + msa_init_upper 20 + msa_init_upper 21 + msa_init_upper 22 + msa_init_upper 23 + msa_init_upper 24 + msa_init_upper 25 + msa_init_upper 26 + msa_init_upper 27 + msa_init_upper 28 + msa_init_upper 29 + msa_init_upper 30 + msa_init_upper 31 .set pop .endm diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index dd083e999b08..b104ad9d655f 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -48,6 +48,12 @@ enum fpu_mode { #define FPU_FR_MASK 0x1 }; +#define __disable_fpu() \ +do { \ + clear_c0_status(ST0_CU1); \ + disable_fpu_hazard(); \ +} while (0) + static inline int __enable_fpu(enum fpu_mode mode) { int fr; @@ -86,7 +92,12 @@ fr_common: enable_fpu_hazard(); /* check FR has the desired value */ - return (!!(read_c0_status() & ST0_FR) == !!fr) ? 
0 : SIGFPE; + if (!!(read_c0_status() & ST0_FR) == !!fr) + return 0; + + /* unsupported FR value */ + __disable_fpu(); + return SIGFPE; default: BUG(); @@ -95,12 +106,6 @@ fr_common: return SIGFPE; } -#define __disable_fpu() \ -do { \ - clear_c0_status(ST0_CU1); \ - disable_fpu_hazard(); \ -} while (0) - #define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU) static inline int __is_fpu_owner(void) @@ -170,6 +175,7 @@ static inline void lose_fpu(int save) } disable_msa(); clear_thread_flag(TIF_USEDMSA); + __disable_fpu(); } else if (is_fpu_owner()) { if (save) _save_fp(current); diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h index 6a9af5fcb5d7..cba22ab7ad4d 100644 --- a/arch/mips/include/asm/kdebug.h +++ b/arch/mips/include/asm/kdebug.h @@ -10,7 +10,8 @@ enum die_val { DIE_RI, DIE_PAGE_FAULT, DIE_BREAK, - DIE_SSTEPBP + DIE_SSTEPBP, + DIE_MSAFP }; #endif /* _ASM_MIPS_KDEBUG_H */ diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index ac4fc716062b..4c25823563fe 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -21,10 +21,10 @@ /* MIPS KVM register ids */ #define MIPS_CP0_32(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) + (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S))) #define MIPS_CP0_64(_R, _S) \ - (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) + (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S))) #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) @@ -42,11 +42,14 @@ #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0) +#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0) #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) +#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4) +#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5) #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) @@ -119,6 +122,10 @@ struct kvm_vcpu_stat { u32 syscall_exits; u32 resvd_inst_exits; u32 break_inst_exits; + u32 trap_inst_exits; + u32 msa_fpe_exits; + u32 fpe_exits; + u32 msa_disabled_exits; u32 flush_dcache_exits; u32 halt_successful_poll; u32 halt_wakeup; @@ -138,6 +145,10 @@ enum kvm_mips_exit_types { SYSCALL_EXITS, RESVD_INST_EXITS, BREAK_INST_EXITS, + TRAP_INST_EXITS, + MSA_FPE_EXITS, + FPE_EXITS, + MSA_DISABLED_EXITS, FLUSH_DCACHE_EXITS, MAX_KVM_MIPS_EXIT_TYPES }; @@ -206,6 +217,8 @@ struct mips_coproc { #define MIPS_CP0_CONFIG1_SEL 1 #define MIPS_CP0_CONFIG2_SEL 2 #define MIPS_CP0_CONFIG3_SEL 3 +#define MIPS_CP0_CONFIG4_SEL 4 +#define MIPS_CP0_CONFIG5_SEL 5 /* Config0 register bits */ #define CP0C0_M 31 @@ -262,31 +275,6 @@ struct mips_coproc { #define CP0C3_SM 1 #define CP0C3_TL 0 -/* Have config1, Cacheable, noncoherent, write-back, write allocate*/ -#define MIPS_CONFIG0 \ - ((1 << CP0C0_M) | (0x3 << CP0C0_K0)) - -/* Have config2, no coprocessor2 attached, no MDMX support attached, - no performance counters, watch registers present, - no code compression, EJTAG present, no FPU, no watch registers */ -#define MIPS_CONFIG1 \ -((1 << CP0C1_M) | \ - (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ - 
(0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ - (0 << CP0C1_FP)) - -/* Have config3, no tertiary/secondary caches implemented */ -#define MIPS_CONFIG2 \ -((1 << CP0C2_M)) - -/* No config4, no DSP ASE, no large physaddr (PABITS), - no external interrupt controller, no vectored interrupts, - no 1kb pages, no SmartMIPS ASE, no trace logic */ -#define MIPS_CONFIG3 \ -((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ - (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ - (0 << CP0C3_SM) | (0 << CP0C3_TL)) - /* MMU types, the first four entries have the same layout as the CP0C0_MT field. */ enum mips_mmu_types { @@ -321,7 +309,9 @@ enum mips_mmu_types { */ #define T_TRAP 13 /* Trap instruction */ #define T_VCEI 14 /* Virtual coherency exception */ +#define T_MSAFPE 14 /* MSA floating point exception */ #define T_FPE 15 /* Floating point exception */ +#define T_MSADIS 21 /* MSA disabled exception */ #define T_WATCH 23 /* Watch address reference */ #define T_VCED 31 /* Virtual coherency data */ @@ -374,6 +364,9 @@ struct kvm_mips_tlb { long tlb_lo1; }; +#define KVM_MIPS_FPU_FPU 0x1 +#define KVM_MIPS_FPU_MSA 0x2 + #define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { void *host_ebase, *guest_ebase; @@ -395,6 +388,8 @@ struct kvm_vcpu_arch { /* FPU State */ struct mips_fpu_struct fpu; + /* Which FPU state is loaded (KVM_MIPS_FPU_*) */ + unsigned int fpu_inuse; /* COP0 State */ struct mips_coproc *cop0; @@ -441,6 +436,9 @@ struct kvm_vcpu_arch { /* WAIT executed */ int wait; + + u8 fpu_enabled; + u8 msa_enabled; }; @@ -482,11 +480,15 @@ struct kvm_vcpu_arch { #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) +#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4]) +#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5]) #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) +#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val)) +#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val)) #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) @@ -567,6 +569,31 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg, kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ } +/* Helpers */ + +static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu) +{ + return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) && + vcpu->fpu_enabled; +} + +static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu) +{ + return kvm_mips_guest_can_have_fpu(vcpu) && + kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; +} + +static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) +{ + return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) && + vcpu->msa_enabled; +} + +static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch 
*vcpu) +{ + return kvm_mips_guest_can_have_msa(vcpu) && + kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA; +} struct kvm_mips_callbacks { int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); @@ -578,6 +605,10 @@ struct kvm_mips_callbacks { int (*handle_syscall)(struct kvm_vcpu *vcpu); int (*handle_res_inst)(struct kvm_vcpu *vcpu); int (*handle_break)(struct kvm_vcpu *vcpu); + int (*handle_trap)(struct kvm_vcpu *vcpu); + int (*handle_msa_fpe)(struct kvm_vcpu *vcpu); + int (*handle_fpe)(struct kvm_vcpu *vcpu); + int (*handle_msa_disabled)(struct kvm_vcpu *vcpu); int (*vm_init)(struct kvm *kvm); int (*vcpu_init)(struct kvm_vcpu *vcpu); int (*vcpu_setup)(struct kvm_vcpu *vcpu); @@ -596,6 +627,8 @@ struct kvm_mips_callbacks { const struct kvm_one_reg *reg, s64 *v); int (*set_one_reg)(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, s64 v); + int (*vcpu_get_regs)(struct kvm_vcpu *vcpu); + int (*vcpu_set_regs)(struct kvm_vcpu *vcpu); }; extern struct kvm_mips_callbacks *kvm_mips_callbacks; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); @@ -606,6 +639,19 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); /* Trampoline ASM routine to start running in "Guest" context */ extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); +/* FPU/MSA context management */ +void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu); +void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu); +void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu); +void __kvm_save_msa(struct kvm_vcpu_arch *vcpu); +void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu); +void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu); +void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu); +void kvm_own_fpu(struct kvm_vcpu *vcpu); +void kvm_own_msa(struct kvm_vcpu *vcpu); +void kvm_drop_fpu(struct kvm_vcpu *vcpu); +void kvm_lose_fpu(struct kvm_vcpu *vcpu); + /* TLB handling */ uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); @@ -711,6 +757,26 @@ extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, struct kvm_run *run, struct kvm_vcpu *vcpu); +extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run); @@ -749,6 +815,11 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, struct kvm_run *run, struct kvm_vcpu *vcpu); +unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); +unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); +unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); +unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); + /* Dynamic binary translation */ extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu); diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index b5dcbee01fd7..9b3b48e21c22 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -105,7 +105,7 @@ union fpureg { #ifdef CONFIG_CPU_LITTLE_ENDIAN # define 
FPR_IDX(width, idx) (idx) #else -# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx)) +# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1)) #endif #define BUILD_FPR_ACCESS(width) \ diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 2c04b6d9ff85..6985eb59b085 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h @@ -36,77 +36,85 @@ struct kvm_regs { /* * for KVM_GET_FPU and KVM_SET_FPU - * - * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs - * are zero filled. */ struct kvm_fpu { - __u64 fpr[32]; - __u32 fir; - __u32 fccr; - __u32 fexr; - __u32 fenr; - __u32 fcsr; - __u32 pad; }; /* - * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 + * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various * registers. The id field is broken down as follows: * - * bits[2..0] - Register 'sel' index. - * bits[7..3] - Register 'rd' index. - * bits[15..8] - Must be zero. - * bits[31..16] - 1 -> CP0 registers. - * bits[51..32] - Must be zero. * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CP0 registers. + * bits[15..8] - Must be zero. + * bits[7..3] - Register 'rd' index. + * bits[2..0] - Register 'sel' index. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / MSA registers (see definitions below). * * Other sets registers may be added in the future. Each set would * have its own identifier in bits[31..16]. - * - * The registers defined in struct kvm_regs are also accessible, the - * id values for these are below. */ -#define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) -#define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) -#define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) -#define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) -#define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) -#define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) -#define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) -#define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) -#define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) -#define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) -#define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) -#define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) -#define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) -#define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) -#define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) -#define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) -#define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) -#define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) -#define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) -#define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) -#define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) -#define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) -#define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) -#define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) -#define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) -#define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) -#define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) -#define KVM_REG_MIPS_R27 (KVM_REG_MIPS | 
KVM_REG_SIZE_U64 | 27) -#define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) -#define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) -#define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) -#define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) - -#define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) -#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) -#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) - -/* KVM specific control registers */ +#define KVM_REG_MIPS_GP (KVM_REG_MIPS | 0x0000000000000000ULL) +#define KVM_REG_MIPS_CP0 (KVM_REG_MIPS | 0x0000000000010000ULL) +#define KVM_REG_MIPS_KVM (KVM_REG_MIPS | 0x0000000000020000ULL) +#define KVM_REG_MIPS_FPU (KVM_REG_MIPS | 0x0000000000030000ULL) + + +/* + * KVM_REG_MIPS_GP - General purpose registers from kvm_regs. + */ + +#define KVM_REG_MIPS_R0 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_MIPS_R1 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_MIPS_R2 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_MIPS_R3 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_MIPS_R4 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_MIPS_R5 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_MIPS_R6 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 6) +#define KVM_REG_MIPS_R7 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 7) +#define KVM_REG_MIPS_R8 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 8) +#define KVM_REG_MIPS_R9 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 9) +#define KVM_REG_MIPS_R10 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10) +#define KVM_REG_MIPS_R11 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11) +#define KVM_REG_MIPS_R12 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12) +#define KVM_REG_MIPS_R13 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13) +#define KVM_REG_MIPS_R14 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14) +#define KVM_REG_MIPS_R15 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15) +#define KVM_REG_MIPS_R16 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16) +#define KVM_REG_MIPS_R17 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17) +#define KVM_REG_MIPS_R18 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18) +#define KVM_REG_MIPS_R19 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19) +#define KVM_REG_MIPS_R20 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20) +#define KVM_REG_MIPS_R21 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21) +#define KVM_REG_MIPS_R22 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22) +#define KVM_REG_MIPS_R23 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23) +#define KVM_REG_MIPS_R24 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24) +#define KVM_REG_MIPS_R25 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25) +#define KVM_REG_MIPS_R26 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26) +#define KVM_REG_MIPS_R27 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27) +#define KVM_REG_MIPS_R28 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28) +#define KVM_REG_MIPS_R29 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29) +#define KVM_REG_MIPS_R30 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30) +#define KVM_REG_MIPS_R31 (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31) + +#define KVM_REG_MIPS_HI (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32) +#define KVM_REG_MIPS_LO (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33) +#define KVM_REG_MIPS_PC (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34) + + +/* + * KVM_REG_MIPS_KVM - KVM specific control registers. + */ /* * CP0_Count control @@ -118,8 +126,7 @@ struct kvm_fpu { * safely without losing time or guest timer interrupts. * Other: Reserved, do not change. 
*/ -#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 0) +#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0) #define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001 /* @@ -131,15 +138,46 @@ struct kvm_fpu { * emulated. * Modifications to times in the future are rejected. */ -#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 1) +#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1) /* * CP0_Count rate in Hz * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without * discontinuities in CP0_Count. */ -#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \ - 0x20000 | 2) +#define KVM_REG_MIPS_COUNT_HZ (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2) + + +/* + * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers. + * + * bits[15..8] - Register subset (see definitions below). + * bits[7..5] - Must be zero. + * bits[4..0] - Register number within register subset. + */ + +#define KVM_REG_MIPS_FPR (KVM_REG_MIPS_FPU | 0x0000000000000000ULL) +#define KVM_REG_MIPS_FCR (KVM_REG_MIPS_FPU | 0x0000000000000100ULL) +#define KVM_REG_MIPS_MSACR (KVM_REG_MIPS_FPU | 0x0000000000000200ULL) + +/* + * KVM_REG_MIPS_FPR - Floating point / Vector registers. + */ +#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32 | (n)) +#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64 | (n)) +#define KVM_REG_MIPS_VEC_128(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n)) + +/* + * KVM_REG_MIPS_FCR - Floating point control registers. + */ +#define KVM_REG_MIPS_FCR_IR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 0) +#define KVM_REG_MIPS_FCR_CSR (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31) + +/* + * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers. 
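
For readers following the reworked one-reg id layout above, here is a minimal user-space sketch of how an id composes and decodes. The absolute values of KVM_REG_MIPS and KVM_REG_SIZE_U32 come from include/uapi/linux/kvm.h and are assumptions here; only the field arithmetic spelled out in the comment block matters.

#include <stdint.h>
#include <stdio.h>

/* Assumed generic one-reg constants from include/uapi/linux/kvm.h */
#define KVM_REG_MIPS       0x7000000000000000ULL   /* arch id, bits[63..52] area */
#define KVM_REG_SIZE_U32   0x0020000000000000ULL

/* Register set and CP0 id composition as described above */
#define KVM_REG_MIPS_CP0   (KVM_REG_MIPS | 0x0000000000010000ULL)
#define MIPS_CP0_32(r, s)  (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (r) + (s)))

int main(void)
{
	uint64_t id = MIPS_CP0_32(12, 0);	/* CP0_Status: rd=12, sel=0 */

	printf("set=%u rd=%u sel=%u\n",
	       (unsigned)((id >> 16) & 0xffff),	/* 1 -> CP0 register set */
	       (unsigned)((id & 0xff) >> 3),	/* rd index */
	       (unsigned)(id & 0x7));		/* sel index */
	return 0;
}

Note that the old scheme built the same CP0 ids with a bare 0x10000 (and the KVM control registers with 0x20000), so the numeric id values are unchanged by the move to the named register sets.
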
+ */ +#define KVM_REG_MIPS_MSA_IR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0) +#define KVM_REG_MIPS_MSA_CSR (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1) + /* * KVM MIPS specific structures and definitions diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 750d67ac41e9..e59fd7cfac9e 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -167,72 +167,6 @@ void output_thread_fpu_defines(void) OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); - /* the least significant 64 bits of each FP register */ - OFFSET(THREAD_FPR0_LS64, task_struct, - thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR1_LS64, task_struct, - thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR2_LS64, task_struct, - thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR3_LS64, task_struct, - thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR4_LS64, task_struct, - thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR5_LS64, task_struct, - thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR6_LS64, task_struct, - thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR7_LS64, task_struct, - thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR8_LS64, task_struct, - thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR9_LS64, task_struct, - thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR10_LS64, task_struct, - thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR11_LS64, task_struct, - thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR12_LS64, task_struct, - thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR13_LS64, task_struct, - thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR14_LS64, task_struct, - thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR15_LS64, task_struct, - thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR16_LS64, task_struct, - thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR17_LS64, task_struct, - thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR18_LS64, task_struct, - thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR19_LS64, task_struct, - thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR20_LS64, task_struct, - thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR21_LS64, task_struct, - thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR22_LS64, task_struct, - thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR23_LS64, task_struct, - thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR24_LS64, task_struct, - thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR25_LS64, task_struct, - thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR26_LS64, task_struct, - thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR27_LS64, task_struct, - thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR28_LS64, task_struct, - thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR29_LS64, task_struct, - thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR30_LS64, task_struct, - thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FPR31_LS64, task_struct, - thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]); - OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); BLANK(); @@ -470,6 +404,45 @@ void output_kvm_defines(void) OFFSET(VCPU_LO, kvm_vcpu_arch, 
lo); OFFSET(VCPU_HI, kvm_vcpu_arch, hi); OFFSET(VCPU_PC, kvm_vcpu_arch, pc); + BLANK(); + + OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]); + OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]); + OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]); + OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]); + OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]); + OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]); + OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]); + OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]); + OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]); + OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]); + OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]); + OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]); + OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]); + OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]); + OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]); + OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]); + OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]); + OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]); + OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]); + OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]); + OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]); + OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]); + OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]); + OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]); + OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]); + OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]); + OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]); + OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]); + OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]); + OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]); + OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]); + OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]); + + OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31); + OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr); + BLANK(); + OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index 2ebaabe3af15..af42e7003f12 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -360,12 +360,15 @@ NESTED(nmi_handler, PT_SIZE, sp) .set mips1 SET_HARDFLOAT cfc1 a1, fcr31 - li a2, ~(0x3f << 12) - and a2, a1 - ctc1 a2, fcr31 .set pop - TRACE_IRQS_ON - STI + CLI + TRACE_IRQS_OFF + .endm + + .macro __build_clear_msa_fpe + _cfcmsa a1, MSA_CSR + CLI + TRACE_IRQS_OFF .endm .macro __build_clear_ade @@ -426,7 +429,7 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER cpu cpu sti silent /* #11 */ BUILD_HANDLER ov ov sti silent /* #12 */ BUILD_HANDLER tr tr sti silent /* #13 */ - BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */ + BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ BUILD_HANDLER fpe fpe fpe silent /* #15 */ BUILD_HANDLER ftlb ftlb none silent /* #16 */ BUILD_HANDLER msa msa sti silent /* #21 */ diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 510452812594..7da6e324dd35 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -46,6 +46,26 @@ #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> +static void init_fp_ctx(struct task_struct *target) +{ + /* If FP has been used then the target already has context */ + if (tsk_used_math(target)) + return; + + /* Begin with data registers set to all 1s... 
*/ + memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr)); + + /* ...and FCSR zeroed */ + target->thread.fpu.fcr31 = 0; + + /* + * Record that the target has "used" math, such that the context + * just initialised, and any modifications made by the caller, + * aren't discarded. + */ + set_stopped_child_used_math(target); +} + /* * Called by kernel/ptrace.c when detaching.. * @@ -142,6 +162,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) if (!access_ok(VERIFY_READ, data, 33 * 8)) return -EIO; + init_fp_ctx(child); fregs = get_fpu_regs(child); for (i = 0; i < 32; i++) { @@ -439,6 +460,8 @@ static int fpr_set(struct task_struct *target, /* XXX fcr31 */ + init_fp_ctx(target); + if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t)) return user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.fpu, @@ -660,12 +683,7 @@ long arch_ptrace(struct task_struct *child, long request, case FPR_BASE ... FPR_BASE + 31: { union fpureg *fregs = get_fpu_regs(child); - if (!tsk_used_math(child)) { - /* FP not yet used */ - memset(&child->thread.fpu, ~0, - sizeof(child->thread.fpu)); - child->thread.fpu.fcr31 = 0; - } + init_fp_ctx(child); #ifdef CONFIG_32BIT if (test_thread_flag(TIF_32BIT_FPREGS)) { /* diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 676c5030a953..1d88af26ba82 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -34,7 +34,6 @@ .endm .set noreorder - .set MIPS_ISA_ARCH_LEVEL_RAW LEAF(_save_fp_context) .set push @@ -103,6 +102,7 @@ LEAF(_save_fp_context) /* Save 32-bit process floating point context */ LEAF(_save_fp_context32) .set push + .set MIPS_ISA_ARCH_LEVEL_RAW SET_HARDFLOAT cfc1 t1, fcr31 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 33984c04b60b..5b4d711f878d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -701,6 +701,13 @@ asmlinkage void do_ov(struct pt_regs *regs) int process_fpemu_return(int sig, void __user *fault_addr) { + /* + * We can't allow the emulated instruction to leave any of the cause + * bits set in FCSR. If they were then the kernel would take an FP + * exception when restoring FP context. + */ + current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + if (sig == SIGSEGV || sig == SIGBUS) { struct siginfo si = {0}; si.si_addr = fault_addr; @@ -781,6 +788,11 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) goto out; + + /* Clear FCSR.Cause before enabling interrupts */ + write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X); + local_irq_enable(); + die_if_kernel("FP exception in kernel code", regs); if (fcr31 & FPU_CSR_UNI_X) { @@ -804,18 +816,12 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, &fault_addr); - /* - * We can't allow the emulated instruction to leave any of - * the cause bit set in $fcr31. - */ - current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; + /* If something went wrong, signal */ + process_fpemu_return(sig, fault_addr); /* Restore the hardware register state */ own_fpu(1); /* Using the FPU again. 
*/ - /* If something went wrong, signal */ - process_fpemu_return(sig, fault_addr); - goto out; } else if (fcr31 & FPU_CSR_INV_X) info.si_code = FPE_FLTINV; @@ -1392,13 +1398,22 @@ out: exception_exit(prev_state); } -asmlinkage void do_msa_fpe(struct pt_regs *regs) +asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) { enum ctx_state prev_state; prev_state = exception_enter(); + if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, + regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP) + goto out; + + /* Clear MSACSR.Cause before enabling interrupts */ + write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); + local_irq_enable(); + die_if_kernel("do_msa_fpe invoked from kernel context!", regs); force_sig(SIGFPE, current); +out: exception_exit(prev_state); } diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 401fe027c261..637ebbebd549 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -1,13 +1,15 @@ # Makefile for KVM support for MIPS # -common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) +common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm -kvm-objs := $(common-objs) mips.o emulate.o locore.o \ +common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o + +kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ interrupt.o stats.o commpage.o \ - dyntrans.o trap_emul.o + dyntrans.o trap_emul.o fpu.o obj-$(CONFIG_KVM) += kvm.o obj-y += callback.o tlb.o diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index fb3e8dfd1ff6..6230f376a44e 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -884,6 +884,84 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) return EMULATE_DONE; } +/** + * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config1 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) +{ + unsigned int mask = 0; + + /* Permit FPU to be present if FPU is supported */ + if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) + mask |= MIPS_CONF1_FP; + + return mask; +} + +/** + * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config3 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) +{ + /* Config4 is optional */ + unsigned int mask = MIPS_CONF_M; + + /* Permit MSA to be present if MSA is supported */ + if (kvm_mips_guest_can_have_msa(&vcpu->arch)) + mask |= MIPS_CONF3_MSA; + + return mask; +} + +/** + * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config4 CP0 + * register, by userland (currently read-only to the guest). + */ +unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) +{ + /* Config5 is optional */ + return MIPS_CONF_M; +} + +/** + * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 + * @vcpu: Virtual CPU. + * + * Finds the mask of bits which are writable in the guest's Config5 CP0 + * register, by the guest itself. 
+ */ +unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) +{ + unsigned int mask = 0; + + /* Permit MSAEn changes if MSA supported and enabled */ + if (kvm_mips_guest_has_msa(&vcpu->arch)) + mask |= MIPS_CONF5_MSAEN; + + /* + * Permit guest FPU mode changes if FPU is enabled and the relevant + * feature exists according to FIR register. + */ + if (kvm_mips_guest_has_fpu(&vcpu->arch)) { + if (cpu_has_fre) + mask |= MIPS_CONF5_FRE; + /* We don't support UFR or UFE */ + } + + return mask; +} + enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, struct kvm_run *run, struct kvm_vcpu *vcpu) @@ -1021,18 +1099,114 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, kvm_mips_write_compare(vcpu, vcpu->arch.gprs[rt]); } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { - kvm_write_c0_guest_status(cop0, - vcpu->arch.gprs[rt]); + unsigned int old_val, val, change; + + old_val = kvm_read_c0_guest_status(cop0); + val = vcpu->arch.gprs[rt]; + change = val ^ old_val; + + /* Make sure that the NMI bit is never set */ + val &= ~ST0_NMI; + + /* + * Don't allow CU1 or FR to be set unless FPU + * capability enabled and exists in guest + * configuration. + */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + val &= ~(ST0_CU1 | ST0_FR); + + /* + * Also don't allow FR to be set if host doesn't + * support it. + */ + if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + val &= ~ST0_FR; + + + /* Handle changes in FPU mode */ + preempt_disable(); + + /* + * FPU and Vector register state is made + * UNPREDICTABLE by a change of FR, so don't + * even bother saving it. + */ + if (change & ST0_FR) + kvm_drop_fpu(vcpu); + + /* + * If MSA state is already live, it is undefined + * how it interacts with FR=0 FPU state, and we + * don't want to hit reserved instruction + * exceptions trying to save the MSA state later + * when CU=1 && FR=1, so play it safe and save + * it first. + */ + if (change & ST0_CU1 && !(val & ST0_FR) && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + kvm_lose_fpu(vcpu); + /* - * Make sure that CU1 and NMI bits are - * never set + * Propagate CU1 (FPU enable) changes + * immediately if the FPU context is already + * loaded. When disabling we leave the context + * loaded so it can be quickly enabled again in + * the near future. */ - kvm_clear_c0_guest_status(cop0, - (ST0_CU1 | ST0_NMI)); + if (change & ST0_CU1 && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + change_c0_status(ST0_CU1, val); + + preempt_enable(); + + kvm_write_c0_guest_status(cop0, val); #ifdef CONFIG_KVM_MIPS_DYN_TRANS - kvm_mips_trans_mtc0(inst, opc, vcpu); + /* + * If FPU present, we need CU1/FR bits to take + * effect fairly soon. + */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + kvm_mips_trans_mtc0(inst, opc, vcpu); #endif + } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { + unsigned int old_val, val, change, wrmask; + + old_val = kvm_read_c0_guest_config5(cop0); + val = vcpu->arch.gprs[rt]; + + /* Only a few bits are writable in Config5 */ + wrmask = kvm_mips_config5_wrmask(vcpu); + change = (val ^ old_val) & wrmask; + val = old_val ^ change; + + + /* Handle changes in FPU/MSA modes */ + preempt_disable(); + + /* + * Propagate FRE changes immediately if the FPU + * context is already loaded. + */ + if (change & MIPS_CONF5_FRE && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + change_c0_config5(MIPS_CONF5_FRE, val); + + /* + * Propagate MSAEn changes immediately if the + * MSA context is already loaded. 
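
The Config5 write emulation above relies on a simple masked read-modify-write: only bits that kvm_mips_config5_wrmask() reports as writable may change, everything else keeps its old value. A small stand-alone sketch of that arithmetic follows; the MSAEn and FRE bit positions are assumptions taken from the usual mipsregs.h definitions.

#include <stdint.h>
#include <stdio.h>

#define MIPS_CONF5_FRE    (1u << 8)	/* Config5.FRE (assumed position) */
#define MIPS_CONF5_MSAEN  (1u << 27)	/* Config5.MSAEn (assumed position) */

/* Apply a guest write, honouring the writable-bit mask */
static uint32_t masked_write(uint32_t old_val, uint32_t val, uint32_t wrmask)
{
	uint32_t change = (val ^ old_val) & wrmask;

	return old_val ^ change;
}

int main(void)
{
	uint32_t wrmask = MIPS_CONF5_MSAEN;		/* e.g. guest has MSA but not FRE */
	uint32_t old_val = 0;
	uint32_t val = MIPS_CONF5_MSAEN | MIPS_CONF5_FRE;	/* guest asks for both */

	/* Only MSAEn takes effect; the FRE bit is silently preserved as 0 */
	printf("config5 = %#x\n", masked_write(old_val, val, wrmask));
	return 0;
}
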
When disabling + * we leave the context loaded so it can be + * quickly enabled again in the near future. + */ + if (change & MIPS_CONF5_MSAEN && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + change_c0_config5(MIPS_CONF5_MSAEN, + val); + + preempt_enable(); + + kvm_write_c0_guest_config5(cop0, val); } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { uint32_t old_cause, new_cause; @@ -1970,6 +2144,146 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, return er; } +enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TRAP << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver TRAP when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_MSAFPE << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_FPE << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver FPE when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, 
arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_MSADIS << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver MSADIS when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + /* ll/sc, rdhwr, sync emulation */ #define OPCODE 0xfc000000 @@ -2176,6 +2490,10 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, case T_SYSCALL: case T_BREAK: case T_RES_INST: + case T_TRAP: + case T_MSAFPE: + case T_FPE: + case T_MSADIS: break; case T_COP_UNUSABLE: diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S new file mode 100644 index 000000000000..531fbf5131c0 --- /dev/null +++ b/arch/mips/kvm/fpu.S @@ -0,0 +1,122 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * FPU context handling code for KVM. + * + * Copyright (C) 2015 Imagination Technologies Ltd. + */ + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/fpregdef.h> +#include <asm/mipsregs.h> +#include <asm/regdef.h> + + .set noreorder + .set noat + +LEAF(__kvm_save_fpu) + .set push + .set mips64r2 + SET_HARDFLOAT + mfc0 t0, CP0_STATUS + sll t0, t0, 5 # is Status.FR set? + bgez t0, 1f # no: skip odd doubles + nop + sdc1 $f1, VCPU_FPR1(a0) + sdc1 $f3, VCPU_FPR3(a0) + sdc1 $f5, VCPU_FPR5(a0) + sdc1 $f7, VCPU_FPR7(a0) + sdc1 $f9, VCPU_FPR9(a0) + sdc1 $f11, VCPU_FPR11(a0) + sdc1 $f13, VCPU_FPR13(a0) + sdc1 $f15, VCPU_FPR15(a0) + sdc1 $f17, VCPU_FPR17(a0) + sdc1 $f19, VCPU_FPR19(a0) + sdc1 $f21, VCPU_FPR21(a0) + sdc1 $f23, VCPU_FPR23(a0) + sdc1 $f25, VCPU_FPR25(a0) + sdc1 $f27, VCPU_FPR27(a0) + sdc1 $f29, VCPU_FPR29(a0) + sdc1 $f31, VCPU_FPR31(a0) +1: sdc1 $f0, VCPU_FPR0(a0) + sdc1 $f2, VCPU_FPR2(a0) + sdc1 $f4, VCPU_FPR4(a0) + sdc1 $f6, VCPU_FPR6(a0) + sdc1 $f8, VCPU_FPR8(a0) + sdc1 $f10, VCPU_FPR10(a0) + sdc1 $f12, VCPU_FPR12(a0) + sdc1 $f14, VCPU_FPR14(a0) + sdc1 $f16, VCPU_FPR16(a0) + sdc1 $f18, VCPU_FPR18(a0) + sdc1 $f20, VCPU_FPR20(a0) + sdc1 $f22, VCPU_FPR22(a0) + sdc1 $f24, VCPU_FPR24(a0) + sdc1 $f26, VCPU_FPR26(a0) + sdc1 $f28, VCPU_FPR28(a0) + jr ra + sdc1 $f30, VCPU_FPR30(a0) + .set pop + END(__kvm_save_fpu) + +LEAF(__kvm_restore_fpu) + .set push + .set mips64r2 + SET_HARDFLOAT + mfc0 t0, CP0_STATUS + sll t0, t0, 5 # is Status.FR set? 
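
A note on the FR test used in __kvm_save_fpu and __kvm_restore_fpu above: shifting CP0_Status left by 5 moves the FR bit (bit 26, per the usual ST0_FR definition, assumed here) into the sign bit, so a single bgez can skip the odd-numbered doubles when FR=0. A small C sketch of the same arithmetic:

#include <assert.h>
#include <stdint.h>

#define ST0_FR (1u << 26)		/* Status.FR bit (assumed position) */

int main(void)
{
	uint32_t fr0 = 0;		/* FR=0: only even-numbered doubles exist */
	uint32_t fr1 = ST0_FR;		/* FR=1: all 32 doubles exist */

	/* "sll t0, status, 5" puts FR into bit 31; bgez tests the sign */
	assert((int32_t)(fr0 << 5) >= 0);	/* branch taken: odd doubles skipped */
	assert((int32_t)(fr1 << 5) < 0);	/* branch not taken: odd doubles saved */
	return 0;
}
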
+ bgez t0, 1f # no: skip odd doubles + nop + ldc1 $f1, VCPU_FPR1(a0) + ldc1 $f3, VCPU_FPR3(a0) + ldc1 $f5, VCPU_FPR5(a0) + ldc1 $f7, VCPU_FPR7(a0) + ldc1 $f9, VCPU_FPR9(a0) + ldc1 $f11, VCPU_FPR11(a0) + ldc1 $f13, VCPU_FPR13(a0) + ldc1 $f15, VCPU_FPR15(a0) + ldc1 $f17, VCPU_FPR17(a0) + ldc1 $f19, VCPU_FPR19(a0) + ldc1 $f21, VCPU_FPR21(a0) + ldc1 $f23, VCPU_FPR23(a0) + ldc1 $f25, VCPU_FPR25(a0) + ldc1 $f27, VCPU_FPR27(a0) + ldc1 $f29, VCPU_FPR29(a0) + ldc1 $f31, VCPU_FPR31(a0) +1: ldc1 $f0, VCPU_FPR0(a0) + ldc1 $f2, VCPU_FPR2(a0) + ldc1 $f4, VCPU_FPR4(a0) + ldc1 $f6, VCPU_FPR6(a0) + ldc1 $f8, VCPU_FPR8(a0) + ldc1 $f10, VCPU_FPR10(a0) + ldc1 $f12, VCPU_FPR12(a0) + ldc1 $f14, VCPU_FPR14(a0) + ldc1 $f16, VCPU_FPR16(a0) + ldc1 $f18, VCPU_FPR18(a0) + ldc1 $f20, VCPU_FPR20(a0) + ldc1 $f22, VCPU_FPR22(a0) + ldc1 $f24, VCPU_FPR24(a0) + ldc1 $f26, VCPU_FPR26(a0) + ldc1 $f28, VCPU_FPR28(a0) + jr ra + ldc1 $f30, VCPU_FPR30(a0) + .set pop + END(__kvm_restore_fpu) + +LEAF(__kvm_restore_fcsr) + .set push + SET_HARDFLOAT + lw t0, VCPU_FCR31(a0) + /* + * The ctc1 must stay at this offset in __kvm_restore_fcsr. + * See kvm_mips_csr_die_notify() which handles t0 containing a value + * which triggers an FP Exception, which must be stepped over and + * ignored since the set cause bits must remain there for the guest. + */ + ctc1 t0, fcr31 + jr ra + nop + .set pop + END(__kvm_restore_fcsr) diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 4a68b176d6e4..c567240386a0 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -36,6 +36,8 @@ #define PT_HOST_USERLOCAL PT_EPC #define CP0_DDATA_LO $28,3 +#define CP0_CONFIG3 $16,3 +#define CP0_CONFIG5 $16,5 #define CP0_EBASE $15,1 #define CP0_INTCTL $12,1 @@ -353,6 +355,42 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) LONG_L k0, VCPU_HOST_EBASE(k1) mtc0 k0,CP0_EBASE + /* + * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't + * trigger FPE for pending exceptions. + */ + .set at + and v1, v0, ST0_CU1 + beqz v1, 1f + nop + .set push + SET_HARDFLOAT + cfc1 t0, fcr31 + sw t0, VCPU_FCR31(k1) + ctc1 zero,fcr31 + .set pop + .set noat +1: + +#ifdef CONFIG_CPU_HAS_MSA + /* + * If MSA is enabled, save MSACSR and clear it so that later + * instructions don't trigger MSAFPE for pending exceptions. 
+ */ + mfc0 t0, CP0_CONFIG3 + ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */ + beqz t0, 1f + nop + mfc0 t0, CP0_CONFIG5 + ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */ + beqz t0, 1f + nop + _cfcmsa t0, MSA_CSR + sw t0, VCPU_MSA_CSR(k1) + _ctcmsa MSA_CSR, zero +1: +#endif + /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ .set at and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index c9eccf5df912..bb68e8d520e8 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/err.h> +#include <linux/kdebug.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> @@ -48,6 +49,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, + { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, + { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, + { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, + { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, @@ -504,10 +509,13 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_CP0_STATUS, KVM_REG_MIPS_CP0_CAUSE, KVM_REG_MIPS_CP0_EPC, + KVM_REG_MIPS_CP0_PRID, KVM_REG_MIPS_CP0_CONFIG, KVM_REG_MIPS_CP0_CONFIG1, KVM_REG_MIPS_CP0_CONFIG2, KVM_REG_MIPS_CP0_CONFIG3, + KVM_REG_MIPS_CP0_CONFIG4, + KVM_REG_MIPS_CP0_CONFIG5, KVM_REG_MIPS_CP0_CONFIG7, KVM_REG_MIPS_CP0_ERROREPC, @@ -520,10 +528,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; + struct mips_fpu_struct *fpu = &vcpu->arch.fpu; int ret; s64 v; + s64 vs[2]; + unsigned int idx; switch (reg->id) { + /* General purpose registers */ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; break; @@ -537,6 +549,67 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, v = (long)vcpu->arch.pc; break; + /* Floating point registers */ + case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_32(0); + /* Odd singles in top of even double when FR=0 */ + if (kvm_read_c0_guest_status(cop0) & ST0_FR) + v = get_fpr32(&fpu->fpr[idx], 0); + else + v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); + break; + case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_64(0); + /* Can't access odd doubles in FR=0 mode */ + if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + v = get_fpr64(&fpu->fpr[idx], 0); + break; + case KVM_REG_MIPS_FCR_IR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + v = boot_cpu_data.fpu_id; + break; + case KVM_REG_MIPS_FCR_CSR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + v = fpu->fcr31; + break; + + /* MIPS SIMD Architecture (MSA) registers */ + case KVM_REG_MIPS_VEC_128(0) ... 
KVM_REG_MIPS_VEC_128(31): + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + /* Can't access MSA registers in FR=0 mode */ + if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_VEC_128(0); +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* least significant byte first */ + vs[0] = get_fpr64(&fpu->fpr[idx], 0); + vs[1] = get_fpr64(&fpu->fpr[idx], 1); +#else + /* most significant byte first */ + vs[0] = get_fpr64(&fpu->fpr[idx], 1); + vs[1] = get_fpr64(&fpu->fpr[idx], 0); +#endif + break; + case KVM_REG_MIPS_MSA_IR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + v = boot_cpu_data.msa_id; + break; + case KVM_REG_MIPS_MSA_CSR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + v = fpu->msacsr; + break; + + /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: v = (long)kvm_read_c0_guest_index(cop0); break; @@ -573,8 +646,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_EPC: v = (long)kvm_read_c0_guest_epc(cop0); break; - case KVM_REG_MIPS_CP0_ERROREPC: - v = (long)kvm_read_c0_guest_errorepc(cop0); + case KVM_REG_MIPS_CP0_PRID: + v = (long)kvm_read_c0_guest_prid(cop0); break; case KVM_REG_MIPS_CP0_CONFIG: v = (long)kvm_read_c0_guest_config(cop0); @@ -588,9 +661,18 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_CONFIG3: v = (long)kvm_read_c0_guest_config3(cop0); break; + case KVM_REG_MIPS_CP0_CONFIG4: + v = (long)kvm_read_c0_guest_config4(cop0); + break; + case KVM_REG_MIPS_CP0_CONFIG5: + v = (long)kvm_read_c0_guest_config5(cop0); + break; case KVM_REG_MIPS_CP0_CONFIG7: v = (long)kvm_read_c0_guest_config7(cop0); break; + case KVM_REG_MIPS_CP0_ERROREPC: + v = (long)kvm_read_c0_guest_errorepc(cop0); + break; /* registers to be handled specially */ case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_COUNT_CTL: @@ -612,6 +694,10 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, u32 v32 = (u32)v; return put_user(v32, uaddr32); + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + + return copy_to_user(uaddr, vs, 16); } else { return -EINVAL; } @@ -621,7 +707,10 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; - u64 v; + struct mips_fpu_struct *fpu = &vcpu->arch.fpu; + s64 v; + s64 vs[2]; + unsigned int idx; if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; @@ -635,11 +724,16 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, if (get_user(v32, uaddr32) != 0) return -EFAULT; v = (s64)v32; + } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { + void __user *uaddr = (void __user *)(long)reg->addr; + + return copy_from_user(vs, uaddr, 16); } else { return -EINVAL; } switch (reg->id) { + /* General purpose registers */ case KVM_REG_MIPS_R0: /* Silently ignore requests to set $0 */ break; @@ -656,6 +750,64 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, vcpu->arch.pc = v; break; + /* Floating point registers */ + case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_32(0); + /* Odd singles in top of even double when FR=0 */ + if (kvm_read_c0_guest_status(cop0) & ST0_FR) + set_fpr32(&fpu->fpr[idx], 0, v); + else + set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); + break; + case KVM_REG_MIPS_FPR_64(0) ... 
KVM_REG_MIPS_FPR_64(31): + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_FPR_64(0); + /* Can't access odd doubles in FR=0 mode */ + if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) + return -EINVAL; + set_fpr64(&fpu->fpr[idx], 0, v); + break; + case KVM_REG_MIPS_FCR_IR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + /* Read-only */ + break; + case KVM_REG_MIPS_FCR_CSR: + if (!kvm_mips_guest_has_fpu(&vcpu->arch)) + return -EINVAL; + fpu->fcr31 = v; + break; + + /* MIPS SIMD Architecture (MSA) registers */ + case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + idx = reg->id - KVM_REG_MIPS_VEC_128(0); +#ifdef CONFIG_CPU_LITTLE_ENDIAN + /* least significant byte first */ + set_fpr64(&fpu->fpr[idx], 0, vs[0]); + set_fpr64(&fpu->fpr[idx], 1, vs[1]); +#else + /* most significant byte first */ + set_fpr64(&fpu->fpr[idx], 1, vs[0]); + set_fpr64(&fpu->fpr[idx], 0, vs[1]); +#endif + break; + case KVM_REG_MIPS_MSA_IR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + /* Read-only */ + break; + case KVM_REG_MIPS_MSA_CSR: + if (!kvm_mips_guest_has_msa(&vcpu->arch)) + return -EINVAL; + fpu->msacsr = v; + break; + + /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: kvm_write_c0_guest_index(cop0, v); break; @@ -686,6 +838,9 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_EPC: kvm_write_c0_guest_epc(cop0, v); break; + case KVM_REG_MIPS_CP0_PRID: + kvm_write_c0_guest_prid(cop0, v); + break; case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; @@ -693,6 +848,12 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_CP0_COMPARE: case KVM_REG_MIPS_CP0_CAUSE: + case KVM_REG_MIPS_CP0_CONFIG: + case KVM_REG_MIPS_CP0_CONFIG1: + case KVM_REG_MIPS_CP0_CONFIG2: + case KVM_REG_MIPS_CP0_CONFIG3: + case KVM_REG_MIPS_CP0_CONFIG4: + case KVM_REG_MIPS_CP0_CONFIG5: case KVM_REG_MIPS_COUNT_CTL: case KVM_REG_MIPS_COUNT_RESUME: case KVM_REG_MIPS_COUNT_HZ: @@ -703,6 +864,33 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, return 0; } +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + int r = 0; + + if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) + return -EINVAL; + if (cap->flags) + return -EINVAL; + if (cap->args[0]) + return -EINVAL; + + switch (cap->cap) { + case KVM_CAP_MIPS_FPU: + vcpu->arch.fpu_enabled = true; + break; + case KVM_CAP_MIPS_MSA: + vcpu->arch.msa_enabled = true; + break; + default: + r = -EINVAL; + break; + } + + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -760,6 +948,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + goto out; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } default: r = -ENOIOCTLCMD; } @@ -868,11 +1065,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) switch (ext) { case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; + case KVM_CAP_MIPS_FPU: + r = !!cpu_has_fpu; + break; + case KVM_CAP_MIPS_MSA: + /* + * We don't support MSA vector partitioning yet: + * 1) It would require explicit support which can't be tested + * yet due 
to lack of support in current hardware. + * 2) It extends the state that would need to be saved/restored + * by e.g. QEMU for migration. + * + * When vector partitioning hardware becomes available, support + * could be added by requiring a flag when enabling + * KVM_CAP_MIPS_MSA capability to indicate that userland knows + * to save/restore the appropriate extra state. + */ + r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); + break; default: r = 0; break; @@ -1119,6 +1335,30 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) ret = kvm_mips_callbacks->handle_break(vcpu); break; + case T_TRAP: + ++vcpu->stat.trap_inst_exits; + trace_kvm_exit(vcpu, TRAP_INST_EXITS); + ret = kvm_mips_callbacks->handle_trap(vcpu); + break; + + case T_MSAFPE: + ++vcpu->stat.msa_fpe_exits; + trace_kvm_exit(vcpu, MSA_FPE_EXITS); + ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); + break; + + case T_FPE: + ++vcpu->stat.fpe_exits; + trace_kvm_exit(vcpu, FPE_EXITS); + ret = kvm_mips_callbacks->handle_fpe(vcpu); + break; + + case T_MSADIS: + ++vcpu->stat.msa_disabled_exits; + trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); + ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); + break; + default: kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, @@ -1146,12 +1386,233 @@ skip_emul: } } + if (ret == RESUME_GUEST) { + /* + * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context + * is live), restore FCR31 / MSACSR. + * + * This should be before returning to the guest exception + * vector, as it may well cause an [MSA] FP exception if there + * are pending exception bits unmasked. (see + * kvm_mips_csr_die_notifier() for how that is handled). + */ + if (kvm_mips_guest_has_fpu(&vcpu->arch) && + read_c0_status() & ST0_CU1) + __kvm_restore_fcsr(&vcpu->arch); + + if (kvm_mips_guest_has_msa(&vcpu->arch) && + read_c0_config5() & MIPS_CONF5_MSAEN) + __kvm_restore_msacsr(&vcpu->arch); + } + /* Disable HTW before returning to guest or host */ htw_stop(); return ret; } +/* Enable FPU for guest and restore context */ +void kvm_own_fpu(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned int sr, cfg5; + + preempt_disable(); + + sr = kvm_read_c0_guest_status(cop0); + + /* + * If MSA state is already live, it is undefined how it interacts with + * FR=0 FPU state, and we don't want to hit reserved instruction + * exceptions trying to save the MSA state later when CU=1 && FR=1, so + * play it safe and save it first. + * + * In theory we shouldn't ever hit this case since kvm_lose_fpu() should + * get called when guest CU1 is set, however we can't trust the guest + * not to clobber the status register directly via the commpage. 
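Stepping back to the KVM_ENABLE_CAP plumbing added earlier in this file: the two new capabilities are per-vcpu and take no flags or arguments. A hedged userspace sketch (the capability numbers come from the uapi additions in this series; vcpu_fd is assumed to come from KVM_CREATE_VCPU, error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_mips_fpu_msa(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
	int ret;

	ret = ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);	/* guest FPU */
	if (ret)
		return ret;

	cap.cap = KVM_CAP_MIPS_MSA;			/* guest MSA, may be unsupported */
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}

KVM_CHECK_EXTENSION should be consulted first, since kvm_vcpu_ioctl_enable_cap() rejects any capability the VM does not report.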
+ */ + if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + kvm_lose_fpu(vcpu); + + /* + * Enable FPU for guest + * We set FR and FRE according to guest context + */ + change_c0_status(ST0_CU1 | ST0_FR, sr); + if (cpu_has_fre) { + cfg5 = kvm_read_c0_guest_config5(cop0); + change_c0_config5(MIPS_CONF5_FRE, cfg5); + } + enable_fpu_hazard(); + + /* If guest FPU state not active, restore it now */ + if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { + __kvm_restore_fpu(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + } + + preempt_enable(); +} + +#ifdef CONFIG_CPU_HAS_MSA +/* Enable MSA for guest and restore context */ +void kvm_own_msa(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned int sr, cfg5; + + preempt_disable(); + + /* + * Enable FPU if enabled in guest, since we're restoring FPU context + * anyway. We set FR and FRE according to guest context. + */ + if (kvm_mips_guest_has_fpu(&vcpu->arch)) { + sr = kvm_read_c0_guest_status(cop0); + + /* + * If FR=0 FPU state is already live, it is undefined how it + * interacts with MSA state, so play it safe and save it first. + */ + if (!(sr & ST0_FR) && + (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | + KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) + kvm_lose_fpu(vcpu); + + change_c0_status(ST0_CU1 | ST0_FR, sr); + if (sr & ST0_CU1 && cpu_has_fre) { + cfg5 = kvm_read_c0_guest_config5(cop0); + change_c0_config5(MIPS_CONF5_FRE, cfg5); + } + } + + /* Enable MSA for guest */ + set_c0_config5(MIPS_CONF5_MSAEN); + enable_fpu_hazard(); + + switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { + case KVM_MIPS_FPU_FPU: + /* + * Guest FPU state already loaded, only restore upper MSA state + */ + __kvm_restore_msa_upper(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + break; + case 0: + /* Neither FPU or MSA already active, restore full MSA state */ + __kvm_restore_msa(&vcpu->arch); + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + if (kvm_mips_guest_has_fpu(&vcpu->arch)) + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + break; + default: + break; + } + + preempt_enable(); +} +#endif + +/* Drop FPU & MSA without saving it */ +void kvm_drop_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + disable_msa(); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; + } + if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + clear_c0_status(ST0_CU1 | ST0_FR); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + } + preempt_enable(); +} + +/* Save and disable FPU & MSA */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + /* + * FPU & MSA get disabled in root context (hardware) when it is disabled + * in guest context (software), but the register state in the hardware + * may still be in use. This is why we explicitly re-enable the hardware + * before saving. 
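On the userspace side of the KVM_REG_SIZE_U128 handling added earlier to kvm_mips_get_reg()/kvm_mips_set_reg(), a 128-bit MSA vector register is transferred through a 16-byte buffer pointed to by kvm_one_reg.addr. A hedged sketch (KVM_REG_MIPS_VEC_128() is the register-id macro referenced by the code above, from the MIPS uapi headers this series extends; byte order within the buffer follows the host, as the CONFIG_CPU_LITTLE_ENDIAN blocks show):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Write vector register $w2 of a vcpu; illustration only. */
static int set_msa_w2(int vcpu_fd, const unsigned char val[16])
{
	__u64 buf[2];
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_VEC_128(2),
		.addr = (__u64)(unsigned long)buf,
	};

	memcpy(buf, val, 16);
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* copies 16 bytes in */
}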
+ */ + + preempt_disable(); + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + set_c0_config5(MIPS_CONF5_MSAEN); + enable_fpu_hazard(); + + __kvm_save_msa(&vcpu->arch); + + /* Disable MSA & FPU */ + disable_msa(); + if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + clear_c0_status(ST0_CU1 | ST0_FR); + vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); + } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + set_c0_status(ST0_CU1); + enable_fpu_hazard(); + + __kvm_save_fpu(&vcpu->arch); + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + + /* Disable FPU */ + clear_c0_status(ST0_CU1 | ST0_FR); + } + preempt_enable(); +} + +/* + * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are + * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP + * exception if cause bits are set in the value being written. + */ +static int kvm_mips_csr_die_notify(struct notifier_block *self, + unsigned long cmd, void *ptr) +{ + struct die_args *args = (struct die_args *)ptr; + struct pt_regs *regs = args->regs; + unsigned long pc; + + /* Only interested in FPE and MSAFPE */ + if (cmd != DIE_FP && cmd != DIE_MSAFP) + return NOTIFY_DONE; + + /* Return immediately if guest context isn't active */ + if (!(current->flags & PF_VCPU)) + return NOTIFY_DONE; + + /* Should never get here from user mode */ + BUG_ON(user_mode(regs)); + + pc = instruction_pointer(regs); + switch (cmd) { + case DIE_FP: + /* match 2nd instruction in __kvm_restore_fcsr */ + if (pc != (unsigned long)&__kvm_restore_fcsr + 4) + return NOTIFY_DONE; + break; + case DIE_MSAFP: + /* match 2nd/3rd instruction in __kvm_restore_msacsr */ + if (!cpu_has_msa || + pc < (unsigned long)&__kvm_restore_msacsr + 4 || + pc > (unsigned long)&__kvm_restore_msacsr + 8) + return NOTIFY_DONE; + break; + } + + /* Move PC forward a little and continue executing */ + instruction_pointer(regs) += 4; + + return NOTIFY_STOP; +} + +static struct notifier_block kvm_mips_csr_die_notifier = { + .notifier_call = kvm_mips_csr_die_notify, +}; + int __init kvm_mips_init(void) { int ret; @@ -1161,6 +1622,8 @@ int __init kvm_mips_init(void) if (ret) return ret; + register_die_notifier(&kvm_mips_csr_die_notifier); + /* * On MIPS, kernel modules are executed from "mapped space", which * requires TLBs. The TLB handling code is statically linked with @@ -1173,7 +1636,6 @@ int __init kvm_mips_init(void) kvm_mips_release_pfn_clean = kvm_release_pfn_clean; kvm_mips_is_error_pfn = is_error_pfn; - pr_info("KVM/MIPS Initialized\n"); return 0; } @@ -1185,7 +1647,7 @@ void __exit kvm_mips_exit(void) kvm_mips_release_pfn_clean = NULL; kvm_mips_is_error_pfn = NULL; - pr_info("KVM/MIPS unloaded\n"); + unregister_die_notifier(&kvm_mips_csr_die_notifier); } module_init(kvm_mips_init); diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S new file mode 100644 index 000000000000..d02f0c6cc2cc --- /dev/null +++ b/arch/mips/kvm/msa.S @@ -0,0 +1,161 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * MIPS SIMD Architecture (MSA) context handling code for KVM. + * + * Copyright (C) 2015 Imagination Technologies Ltd. 
+ */ + +#include <asm/asm.h> +#include <asm/asm-offsets.h> +#include <asm/asmmacro.h> +#include <asm/regdef.h> + + .set noreorder + .set noat + +LEAF(__kvm_save_msa) + st_d 0, VCPU_FPR0, a0 + st_d 1, VCPU_FPR1, a0 + st_d 2, VCPU_FPR2, a0 + st_d 3, VCPU_FPR3, a0 + st_d 4, VCPU_FPR4, a0 + st_d 5, VCPU_FPR5, a0 + st_d 6, VCPU_FPR6, a0 + st_d 7, VCPU_FPR7, a0 + st_d 8, VCPU_FPR8, a0 + st_d 9, VCPU_FPR9, a0 + st_d 10, VCPU_FPR10, a0 + st_d 11, VCPU_FPR11, a0 + st_d 12, VCPU_FPR12, a0 + st_d 13, VCPU_FPR13, a0 + st_d 14, VCPU_FPR14, a0 + st_d 15, VCPU_FPR15, a0 + st_d 16, VCPU_FPR16, a0 + st_d 17, VCPU_FPR17, a0 + st_d 18, VCPU_FPR18, a0 + st_d 19, VCPU_FPR19, a0 + st_d 20, VCPU_FPR20, a0 + st_d 21, VCPU_FPR21, a0 + st_d 22, VCPU_FPR22, a0 + st_d 23, VCPU_FPR23, a0 + st_d 24, VCPU_FPR24, a0 + st_d 25, VCPU_FPR25, a0 + st_d 26, VCPU_FPR26, a0 + st_d 27, VCPU_FPR27, a0 + st_d 28, VCPU_FPR28, a0 + st_d 29, VCPU_FPR29, a0 + st_d 30, VCPU_FPR30, a0 + st_d 31, VCPU_FPR31, a0 + jr ra + nop + END(__kvm_save_msa) + +LEAF(__kvm_restore_msa) + ld_d 0, VCPU_FPR0, a0 + ld_d 1, VCPU_FPR1, a0 + ld_d 2, VCPU_FPR2, a0 + ld_d 3, VCPU_FPR3, a0 + ld_d 4, VCPU_FPR4, a0 + ld_d 5, VCPU_FPR5, a0 + ld_d 6, VCPU_FPR6, a0 + ld_d 7, VCPU_FPR7, a0 + ld_d 8, VCPU_FPR8, a0 + ld_d 9, VCPU_FPR9, a0 + ld_d 10, VCPU_FPR10, a0 + ld_d 11, VCPU_FPR11, a0 + ld_d 12, VCPU_FPR12, a0 + ld_d 13, VCPU_FPR13, a0 + ld_d 14, VCPU_FPR14, a0 + ld_d 15, VCPU_FPR15, a0 + ld_d 16, VCPU_FPR16, a0 + ld_d 17, VCPU_FPR17, a0 + ld_d 18, VCPU_FPR18, a0 + ld_d 19, VCPU_FPR19, a0 + ld_d 20, VCPU_FPR20, a0 + ld_d 21, VCPU_FPR21, a0 + ld_d 22, VCPU_FPR22, a0 + ld_d 23, VCPU_FPR23, a0 + ld_d 24, VCPU_FPR24, a0 + ld_d 25, VCPU_FPR25, a0 + ld_d 26, VCPU_FPR26, a0 + ld_d 27, VCPU_FPR27, a0 + ld_d 28, VCPU_FPR28, a0 + ld_d 29, VCPU_FPR29, a0 + ld_d 30, VCPU_FPR30, a0 + ld_d 31, VCPU_FPR31, a0 + jr ra + nop + END(__kvm_restore_msa) + + .macro kvm_restore_msa_upper wr, off, base + .set push + .set noat +#ifdef CONFIG_64BIT + ld $1, \off(\base) + insert_d \wr, 1 +#elif defined(CONFIG_CPU_LITTLE_ENDIAN) + lw $1, \off(\base) + insert_w \wr, 2 + lw $1, (\off+4)(\base) + insert_w \wr, 3 +#else /* CONFIG_CPU_BIG_ENDIAN */ + lw $1, (\off+4)(\base) + insert_w \wr, 2 + lw $1, \off(\base) + insert_w \wr, 3 +#endif + .set pop + .endm + +LEAF(__kvm_restore_msa_upper) + kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 + kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 + kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 + kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 + kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 + kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 + kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 + kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 + kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 + kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 + kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 + kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 + kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 + kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 + kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 + kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 + kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 + kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 + kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 + kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 + kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 + kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 + kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 + kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 + kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 + kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 + kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 + kvm_restore_msa_upper 
27, VCPU_FPR27+8, a0 + kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 + kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 + kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 + kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 + jr ra + nop + END(__kvm_restore_msa_upper) + +LEAF(__kvm_restore_msacsr) + lw t0, VCPU_MSA_CSR(a0) + /* + * The ctcmsa must stay at this offset in __kvm_restore_msacsr. + * See kvm_mips_csr_die_notify() which handles t0 containing a value + * which triggers an MSA FP Exception, which must be stepped over and + * ignored since the set cause bits must remain there for the guest. + */ + _ctcmsa MSA_CSR, t0 + jr ra + nop + END(__kvm_restore_msacsr) diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c index a74d6024c5ad..888bb67070ac 100644 --- a/arch/mips/kvm/stats.c +++ b/arch/mips/kvm/stats.c @@ -25,6 +25,10 @@ char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { "System Call", "Reserved Inst", "Break Inst", + "Trap Inst", + "MSA FPE", + "FPE", + "MSA Disabled", "D-Cache Flushes", }; diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index bbcd82242059..aed0ac2a4972 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -216,6 +216,7 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, if (idx > current_cpu_data.tlbsize) { kvm_err("%s: Invalid Index: %d\n", __func__, idx); kvm_mips_dump_host_tlbs(); + local_irq_restore(flags); return -1; } @@ -732,6 +733,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) } } + /* restore guest state to registers */ + kvm_mips_callbacks->vcpu_set_regs(vcpu); + local_irq_restore(flags); } @@ -750,6 +754,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->arch.preempt_entryhi = read_c0_entryhi(); vcpu->arch.last_sched_cpu = cpu; + /* save guest state in registers */ + kvm_mips_callbacks->vcpu_get_regs(vcpu); + if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index c1388d40663b..bd6437f67dc0 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -24,18 +24,18 @@ TRACE_EVENT(kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), TP_ARGS(vcpu, reason), TP_STRUCT__entry( - __field(struct kvm_vcpu *, vcpu) + __field(unsigned long, pc) __field(unsigned int, reason) ), TP_fast_assign( - __entry->vcpu = vcpu; + __entry->pc = vcpu->arch.pc; __entry->reason = reason; ), TP_printk("[%s]PC: 0x%08lx", kvm_mips_exit_types_str[__entry->reason], - __entry->vcpu->arch.pc) + __entry->pc) ); #endif /* _TRACE_KVM_H */ diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index fd7257b70e65..d836ed5b0bc7 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -39,16 +39,30 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) { + struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_run *run = vcpu->run; uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; - if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) - er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); - else + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { + /* FPU Unusable */ + if (!kvm_mips_guest_has_fpu(&vcpu->arch) || + (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { + /* + * Unusable/no FPU in guest: + * deliver guest COP1 Unusable Exception + */ + er = 
kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); + } else { + /* Restore FPU state */ + kvm_own_fpu(vcpu); + er = EMULATE_DONE; + } + } else { er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + } switch (er) { case EMULATE_DONE: @@ -330,6 +344,107 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) return ret; } +static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +/** + * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use MSA when it is disabled. 
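A reduced sketch of the three-way decision made by the MSA-disabled handler that follows: no guest MSA (or FPU usable but FR=0) yields a guest reserved-instruction exception, MSA disabled by the guest itself yields a guest MSA-disabled exception, and only the remaining case restores the lazily saved state. Bit values below follow the Status/Config5 fields used elsewhere in this series (Config5.MSAEn bit 27 as in the locore.S hunk above); guest_has_msa stands in for kvm_mips_guest_has_msa():

#define ST0_CU1			0x20000000u	/* Status.CU1, bit 29 */
#define ST0_FR			0x04000000u	/* Status.FR,  bit 26 */
#define MIPS_CONF5_MSAEN	0x08000000u	/* Config5.MSAEn, bit 27 */

enum msa_dis_action { DELIVER_RI, DELIVER_MSADIS, RESTORE_MSA };

static enum msa_dis_action classify_msa_disabled(int guest_has_msa,
						 unsigned int status,
						 unsigned int config5)
{
	if (!guest_has_msa || (status & (ST0_CU1 | ST0_FR)) == ST0_CU1)
		return DELIVER_RI;	/* no MSA, or FPU on but FR=0 */
	if (!(config5 & MIPS_CONF5_MSAEN))
		return DELIVER_MSADIS;	/* guest disabled MSA itself */
	return RESTORE_MSA;		/* disabled only in root: kvm_own_msa() */
}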
+ */ +static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (!kvm_mips_guest_has_msa(&vcpu->arch) || + (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { + /* + * No MSA in guest, or FPU enabled and not in FR=1 mode, + * guest reserved instruction exception + */ + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); + } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { + /* MSA disabled by guest, guest MSA disabled exception */ + er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); + } else { + /* Restore MSA/FPU state */ + kvm_own_msa(vcpu); + er = EMULATE_DONE; + } + + switch (er) { + case EMULATE_DONE: + ret = RESUME_GUEST; + break; + + case EMULATE_FAIL: + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + default: + BUG(); + } + return ret; +} + static int kvm_trap_emul_vm_init(struct kvm *kvm) { return 0; @@ -351,8 +466,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) * guest will come up as expected, for now we simulate a MIPS 24kc */ kvm_write_c0_guest_prid(cop0, 0x00019300); - kvm_write_c0_guest_config(cop0, - MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + /* Have config1, Cacheable, noncoherent, write-back, write allocate */ + kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | + (0x1 << CP0C0_AR) | (MMU_TYPE_R4000 << CP0C0_MT)); /* Read the cache characteristics from the host Config1 Register */ @@ -368,10 +484,18 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) (1 << CP0C1_WR) | (1 << CP0C1_CA)); kvm_write_c0_guest_config1(cop0, config1); - kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); - /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ - kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | - (1 << CP0C3_ULRI)); + /* Have config3, no tertiary/secondary caches implemented */ + kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); + /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ + + /* Have config4, UserLocal */ + kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); + + /* Have config5 */ + kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); + + /* No config6 */ + kvm_write_c0_guest_config5(cop0, 0); /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); @@ -416,6 +540,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, { struct mips_coproc *cop0 = vcpu->arch.cop0; int ret = 0; + unsigned int cur, change; switch (reg->id) { case KVM_REG_MIPS_CP0_COUNT: @@ -444,6 +569,44 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, kvm_write_c0_guest_cause(cop0, v); } break; + case KVM_REG_MIPS_CP0_CONFIG: + /* read-only for now */ + break; + case KVM_REG_MIPS_CP0_CONFIG1: + cur = kvm_read_c0_guest_config1(cop0); + change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config1(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG2: + /* read-only for now */ + break; + case KVM_REG_MIPS_CP0_CONFIG3: + cur = kvm_read_c0_guest_config3(cop0); + change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config3(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG4: + cur = kvm_read_c0_guest_config4(cop0); + change = (cur 
^ v) & kvm_mips_config4_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config4(cop0, v); + } + break; + case KVM_REG_MIPS_CP0_CONFIG5: + cur = kvm_read_c0_guest_config5(cop0); + change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); + if (change) { + v = cur ^ change; + kvm_write_c0_guest_config5(cop0, v); + } + break; case KVM_REG_MIPS_COUNT_CTL: ret = kvm_mips_set_count_ctl(vcpu, v); break; @@ -459,6 +622,18 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, return ret; } +static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) +{ + kvm_lose_fpu(vcpu); + + return 0; +} + +static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) +{ + return 0; +} + static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { /* exit handlers */ .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, @@ -470,6 +645,10 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .handle_syscall = kvm_trap_emul_handle_syscall, .handle_res_inst = kvm_trap_emul_handle_res_inst, .handle_break = kvm_trap_emul_handle_break, + .handle_trap = kvm_trap_emul_handle_trap, + .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, + .handle_fpe = kvm_trap_emul_handle_fpe, + .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, .vm_init = kvm_trap_emul_vm_init, .vcpu_init = kvm_trap_emul_vcpu_init, @@ -483,6 +662,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .irq_clear = kvm_mips_irq_clear_cb, .get_one_reg = kvm_trap_emul_get_one_reg, .set_one_reg = kvm_trap_emul_set_one_reg, + .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, + .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, }; int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index afab728ab65e..96d3f9deb59c 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h @@ -56,7 +56,9 @@ extern void paging_init(void); #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 #define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PTE 1024 #define PGD_SIZE PAGE_SIZE diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 8c966b2270aa..15207b9362bf 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #if PT_NLEVELS == 3 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) #else +#define __PAGETABLE_PMD_FOLDED #define BITS_PER_PMD 0 #endif #define PTRS_PER_PMD (1UL << BITS_PER_PMD) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 9cfa3706a1b8..f1ea5972f6ec 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -113,6 +113,7 @@ extern void iommu_register_group(struct iommu_table *tbl, int pci_domain_number, unsigned long pe_num); extern int iommu_add_device(struct device *dev); extern void iommu_del_device(struct device *dev); +extern int __init tce_iommu_bus_notifier_init(void); #else static inline void iommu_register_group(struct iommu_table *tbl, int pci_domain_number, @@ -128,6 +129,11 @@ static inline int iommu_add_device(struct device *dev) static inline void iommu_del_device(struct device *dev) { } + +static inline int __init tce_iommu_bus_notifier_init(void) +{ + return 0; +} #endif /* 
!CONFIG_IOMMU_API */ static inline void set_iommu_table_base_and_group(struct device *dev, diff --git a/arch/powerpc/include/asm/irq_work.h b/arch/powerpc/include/asm/irq_work.h new file mode 100644 index 000000000000..744fd54de374 --- /dev/null +++ b/arch/powerpc/include/asm/irq_work.h @@ -0,0 +1,9 @@ +#ifndef _ASM_POWERPC_IRQ_WORK_H +#define _ASM_POWERPC_IRQ_WORK_H + +static inline bool arch_irq_work_has_interrupt(void) +{ + return true; +} + +#endif /* _ASM_POWERPC_IRQ_WORK_H */ diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5d3968c4d799..b054f33ab1fb 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -1175,4 +1175,30 @@ void iommu_del_device(struct device *dev) } EXPORT_SYMBOL_GPL(iommu_del_device); +static int tce_iommu_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + return iommu_add_device(dev); + case BUS_NOTIFY_DEL_DEVICE: + if (dev->iommu_group) + iommu_del_device(dev); + return 0; + default: + return 0; + } +} + +static struct notifier_block tce_iommu_bus_nb = { + .notifier_call = tce_iommu_bus_notifier, +}; + +int __init tce_iommu_bus_notifier_init(void) +{ + bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); + return 0; +} #endif /* CONFIG_IOMMU_API */ diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 6e19afa35a15..ec9ec2058d2d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -541,8 +541,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) if (smp_ops->give_timebase) smp_ops->give_timebase(); - /* Wait until cpu puts itself in the online map */ - while (!cpu_online(cpu)) + /* Wait until cpu puts itself in the online & active maps */ + while (!cpu_online(cpu) || !cpu_active(cpu)) cpu_relax(); return 0; diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index e69142f4af08..54323d6b5166 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -836,30 +836,4 @@ void __init pnv_pci_init(void) #endif } -static int tce_iommu_bus_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - return iommu_add_device(dev); - case BUS_NOTIFY_DEL_DEVICE: - if (dev->iommu_group) - iommu_del_device(dev); - return 0; - default: - return 0; - } -} - -static struct notifier_block tce_iommu_bus_nb = { - .notifier_call = tce_iommu_bus_notifier, -}; - -static int __init tce_iommu_bus_notifier_init(void) -{ - bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb); - return 0; -} machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init); diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 1d3d52dc3ff3..7803a19adb31 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -1340,3 +1340,5 @@ static int __init disable_multitce(char *str) } __setup("multitce=", disable_multitce); + +machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d84559e31f32..b8d1e97fb201 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -172,7 +172,9 @@ struct kvm_s390_sie_block { __u32 fac; /* 0x01a0 */ __u8 reserved1a4[20]; /* 0x01a4 */ __u64 cbrlo; /* 0x01b8 */ - __u8 reserved1c0[30]; /* 
0x01c0 */ + __u8 reserved1c0[8]; /* 0x01c0 */ + __u32 ecd; /* 0x01c8 */ + __u8 reserved1cc[18]; /* 0x01cc */ __u64 pp; /* 0x01de */ __u8 reserved1e6[2]; /* 0x01e6 */ __u64 itdba; /* 0x01e8 */ @@ -183,11 +185,17 @@ struct kvm_s390_itdb { __u8 data[256]; } __packed; +struct kvm_s390_vregs { + __vector128 vrs[32]; + __u8 reserved200[512]; /* for future vector expansion */ +} __packed; + struct sie_page { struct kvm_s390_sie_block sie_block; __u8 reserved200[1024]; /* 0x0200 */ struct kvm_s390_itdb itdb; /* 0x0600 */ - __u8 reserved700[2304]; /* 0x0700 */ + __u8 reserved700[1280]; /* 0x0700 */ + struct kvm_s390_vregs vregs; /* 0x0c00 */ } __packed; struct kvm_vcpu_stat { @@ -238,6 +246,7 @@ struct kvm_vcpu_stat { u32 instruction_sigp_stop; u32 instruction_sigp_stop_store_status; u32 instruction_sigp_store_status; + u32 instruction_sigp_store_adtl_status; u32 instruction_sigp_arch; u32 instruction_sigp_prefix; u32 instruction_sigp_restart; @@ -270,6 +279,7 @@ struct kvm_vcpu_stat { #define PGM_SPECIAL_OPERATION 0x13 #define PGM_OPERAND 0x15 #define PGM_TRACE_TABEL 0x16 +#define PGM_VECTOR_PROCESSING 0x1b #define PGM_SPACE_SWITCH 0x1c #define PGM_HFP_SQUARE_ROOT 0x1d #define PGM_PC_TRANSLATION_SPEC 0x1f @@ -465,6 +475,7 @@ struct kvm_vcpu_arch { s390_fp_regs host_fpregs; unsigned int host_acrs[NUM_ACRS]; s390_fp_regs guest_fpregs; + struct kvm_s390_vregs *host_vregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; @@ -515,15 +526,15 @@ struct s390_io_adapter { #define S390_ARCH_FAC_MASK_SIZE_U64 \ (S390_ARCH_FAC_MASK_SIZE_BYTE / sizeof(u64)) -struct s390_model_fac { - /* facilities used in SIE context */ - __u64 sie[S390_ARCH_FAC_LIST_SIZE_U64]; - /* subset enabled by kvm */ - __u64 kvm[S390_ARCH_FAC_LIST_SIZE_U64]; +struct kvm_s390_fac { + /* facility list requested by guest */ + __u64 list[S390_ARCH_FAC_LIST_SIZE_U64]; + /* facility mask supported by kvm & hosting machine */ + __u64 mask[S390_ARCH_FAC_LIST_SIZE_U64]; }; struct kvm_s390_cpu_model { - struct s390_model_fac *fac; + struct kvm_s390_fac *fac; struct cpuid cpu_id; unsigned short ibc; }; @@ -553,6 +564,7 @@ struct kvm_arch{ int use_cmma; int user_cpu_state_ctrl; int user_sigp; + int user_stsi; struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; wait_queue_head_t ipte_wq; int ipte_lock_count; diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index f49b71954654..8fb3802f8fad 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -62,6 +62,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, { int cpu = smp_processor_id(); + S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); if (prev == next) return; if (MACHINE_HAS_TLB_LC) @@ -73,7 +74,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, atomic_dec(&prev->context.attach_count); if (MACHINE_HAS_TLB_LC) cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); - S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); } #define finish_arch_post_lock_switch finish_arch_post_lock_switch diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 7b2ac6e44166..53eacbd4f09b 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -37,16 +37,7 @@ static inline void storage_key_init_range(unsigned long start, unsigned long end #endif } -static inline void clear_page(void *page) -{ - register unsigned long reg1 asm ("1") = 0; - register void 
*reg2 asm ("2") = page; - register unsigned long reg3 asm ("3") = 4096; - asm volatile( - " mvcl 2,0" - : "+d" (reg2), "+d" (reg3) : "d" (reg1) - : "memory", "cc"); -} +#define clear_page(page) memset((page), 0, PAGE_SIZE) /* * copy_page uses the mvcl instruction with 0xb0 padding byte in order to diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index fbb5ee3ae57c..e08ec38f8c6e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -91,7 +91,9 @@ extern unsigned long zero_page_mask; */ #define PTRS_PER_PTE 256 #ifndef CONFIG_64BIT +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PUD 1 #else /* CONFIG_64BIT */ #define PTRS_PER_PMD 2048 diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index 9c77e60b9a26..ef1a5fcc6c66 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -150,6 +150,7 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_CRS (1UL << 3) #define KVM_SYNC_ARCH0 (1UL << 4) #define KVM_SYNC_PFAULT (1UL << 5) +#define KVM_SYNC_VRS (1UL << 6) /* definition of registers in kvm_run */ struct kvm_sync_regs { __u64 prefix; /* prefix register */ @@ -164,6 +165,9 @@ struct kvm_sync_regs { __u64 pft; /* pfault token [PFAULT] */ __u64 pfs; /* pfault select [PFAULT] */ __u64 pfc; /* pfault compare [PFAULT] */ + __u64 vrs[32][2]; /* vector registers */ + __u8 reserved[512]; /* for future vector expansion */ + __u32 fpc; /* only valid with vector registers */ }; #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index d4096fdfc6ab..ee69c0854c88 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h @@ -230,7 +230,7 @@ * and returns a key, which can be used to find a mnemonic name * of the instruction in the icpt_insn_codes table. 
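The sie.h hunk below only wraps the body of icpt_insn_decoder() in outer parentheses. A reduced, hypothetical illustration of why that matters for expression-style macros (the two macros here are made up for the example and are not the real INSN_DECODE chain):

#include <stdio.h>

#define DECODE_NO_PARENS(x)	(x) & 0xf0 ? (x) >> 4 : (x)
#define DECODE_WITH_PARENS(x)	((x) & 0xf0 ? (x) >> 4 : (x))

int main(void)
{
	unsigned int insn = 0xb2;

	/* Prints "11 12": without the outer parentheses the caller's '+'
	 * binds into the condition and silently changes the result. */
	printf("%u %u\n", 1 + DECODE_NO_PARENS(insn),
			  1 + DECODE_WITH_PARENS(insn));
	return 0;
}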
*/ -#define icpt_insn_decoder(insn) \ +#define icpt_insn_decoder(insn) ( \ INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ @@ -239,6 +239,6 @@ INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ - INSN_DECODE(insn) + INSN_DECODE(insn)) #endif /* _UAPI_ASM_S390_SIE_H */ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index e07e91605353..8dc4db10d160 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -171,6 +171,7 @@ int main(void) #else /* CONFIG_32BIT */ DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); + DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c index cb2d51e779df..830066f936c8 100644 --- a/arch/s390/kernel/jump_label.c +++ b/arch/s390/kernel/jump_label.c @@ -36,16 +36,20 @@ static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn) insn->offset = (entry->target - entry->code) >> 1; } -static void jump_label_bug(struct jump_entry *entry, struct insn *insn) +static void jump_label_bug(struct jump_entry *entry, struct insn *expected, + struct insn *new) { unsigned char *ipc = (unsigned char *)entry->code; - unsigned char *ipe = (unsigned char *)insn; + unsigned char *ipe = (unsigned char *)expected; + unsigned char *ipn = (unsigned char *)new; pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc); pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n", ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]); pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n", ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]); + pr_emerg("New: %02x %02x %02x %02x %02x %02x\n", + ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]); panic("Corrupted kernel text"); } @@ -69,10 +73,10 @@ static void __jump_label_transform(struct jump_entry *entry, } if (init) { if (memcmp((void *)entry->code, &orignop, sizeof(orignop))) - jump_label_bug(entry, &old); + jump_label_bug(entry, &orignop, &new); } else { if (memcmp((void *)entry->code, &old, sizeof(old))) - jump_label_bug(entry, &old); + jump_label_bug(entry, &old, &new); } probe_kernel_write((void *)entry->code, &new, sizeof(new)); } diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 36154a2f1814..2ca95862e336 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -436,6 +436,7 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { + jump_label_apply_nops(me); vfree(me->arch.syminfo); me->arch.syminfo = NULL; return 0; diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 26108232fcaa..dc488e13b7e3 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id); -void cpu_relax(void) +void notrace cpu_relax(void) { if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) asm volatile("diag 0,0,0x44"); diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 329ec75e9214..fc7ec95848c3 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c @@ -77,7 +77,7 @@ static int 
__diag_page_ref_service(struct kvm_vcpu *vcpu) if (vcpu->run->s.regs.gprs[rx] & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); + rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) @@ -230,7 +230,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu) int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) { - int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; + int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 267523cac6de..a7559f7207df 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -10,6 +10,7 @@ #include <asm/pgtable.h> #include "kvm-s390.h" #include "gaccess.h" +#include <asm/switch_to.h> union asce { unsigned long val; @@ -207,6 +208,54 @@ union raddress { unsigned long pfra : 52; /* Page-Frame Real Address */ }; +union alet { + u32 val; + struct { + u32 reserved : 7; + u32 p : 1; + u32 alesn : 8; + u32 alen : 16; + }; +}; + +union ald { + u32 val; + struct { + u32 : 1; + u32 alo : 24; + u32 all : 7; + }; +}; + +struct ale { + unsigned long i : 1; /* ALEN-Invalid Bit */ + unsigned long : 5; + unsigned long fo : 1; /* Fetch-Only Bit */ + unsigned long p : 1; /* Private Bit */ + unsigned long alesn : 8; /* Access-List-Entry Sequence Number */ + unsigned long aleax : 16; /* Access-List-Entry Authorization Index */ + unsigned long : 32; + unsigned long : 1; + unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */ + unsigned long : 6; + unsigned long astesn : 32; /* ASTE Sequence Number */ +} __packed; + +struct aste { + unsigned long i : 1; /* ASX-Invalid Bit */ + unsigned long ato : 29; /* Authority-Table Origin */ + unsigned long : 1; + unsigned long b : 1; /* Base-Space Bit */ + unsigned long ax : 16; /* Authorization Index */ + unsigned long atl : 12; /* Authority-Table Length */ + unsigned long : 2; + unsigned long ca : 1; /* Controlled-ASN Bit */ + unsigned long ra : 1; /* Reusable-ASN Bit */ + unsigned long asce : 64; /* Address-Space-Control Element */ + unsigned long ald : 32; + unsigned long astesn : 32; + /* .. 
more fields there */ +} __packed; int ipte_lock_held(struct kvm_vcpu *vcpu) { @@ -307,15 +356,157 @@ void ipte_unlock(struct kvm_vcpu *vcpu) ipte_unlock_simple(vcpu); } -static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) +static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, + int write) +{ + union alet alet; + struct ale ale; + struct aste aste; + unsigned long ald_addr, authority_table_addr; + union ald ald; + int eax, rc; + u8 authority_table; + + if (ar >= NUM_ACRS) + return -EINVAL; + + save_access_regs(vcpu->run->s.regs.acrs); + alet.val = vcpu->run->s.regs.acrs[ar]; + + if (ar == 0 || alet.val == 0) { + asce->val = vcpu->arch.sie_block->gcr[1]; + return 0; + } else if (alet.val == 1) { + asce->val = vcpu->arch.sie_block->gcr[7]; + return 0; + } + + if (alet.reserved) + return PGM_ALET_SPECIFICATION; + + if (alet.p) + ald_addr = vcpu->arch.sie_block->gcr[5]; + else + ald_addr = vcpu->arch.sie_block->gcr[2]; + ald_addr &= 0x7fffffc0; + + rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald)); + if (rc) + return rc; + + if (alet.alen / 8 > ald.all) + return PGM_ALEN_TRANSLATION; + + if (0x7fffffff - ald.alo * 128 < alet.alen * 16) + return PGM_ADDRESSING; + + rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale, + sizeof(struct ale)); + if (rc) + return rc; + + if (ale.i == 1) + return PGM_ALEN_TRANSLATION; + if (ale.alesn != alet.alesn) + return PGM_ALE_SEQUENCE; + + rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste)); + if (rc) + return rc; + + if (aste.i) + return PGM_ASTE_VALIDITY; + if (aste.astesn != ale.astesn) + return PGM_ASTE_SEQUENCE; + + if (ale.p == 1) { + eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; + if (ale.aleax != eax) { + if (eax / 16 > aste.atl) + return PGM_EXTENDED_AUTHORITY; + + authority_table_addr = aste.ato * 4 + eax / 4; + + rc = read_guest_real(vcpu, authority_table_addr, + &authority_table, + sizeof(u8)); + if (rc) + return rc; + + if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0) + return PGM_EXTENDED_AUTHORITY; + } + } + + if (ale.fo == 1 && write) + return PGM_PROTECTION; + + asce->val = aste.asce; + return 0; +} + +struct trans_exc_code_bits { + unsigned long addr : 52; /* Translation-exception Address */ + unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ + unsigned long : 6; + unsigned long b60 : 1; + unsigned long b61 : 1; + unsigned long as : 2; /* ASCE Identifier */ +}; + +enum { + FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ + FSI_STORE = 1, /* Exception was due to store operation */ + FSI_FETCH = 2 /* Exception was due to fetch operation */ +}; + +static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, + ar_t ar, int write) { + int rc; + psw_t *psw = &vcpu->arch.sie_block->gpsw; + struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; + struct trans_exc_code_bits *tec_bits; + + memset(pgm, 0, sizeof(*pgm)); + tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; + tec_bits->fsi = write ? 
FSI_STORE : FSI_FETCH; + tec_bits->as = psw_bits(*psw).as; + + if (!psw_bits(*psw).t) { + asce->val = 0; + asce->r = 1; + return 0; + } + switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { case PSW_AS_PRIMARY: - return vcpu->arch.sie_block->gcr[1]; + asce->val = vcpu->arch.sie_block->gcr[1]; + return 0; case PSW_AS_SECONDARY: - return vcpu->arch.sie_block->gcr[7]; + asce->val = vcpu->arch.sie_block->gcr[7]; + return 0; case PSW_AS_HOME: - return vcpu->arch.sie_block->gcr[13]; + asce->val = vcpu->arch.sie_block->gcr[13]; + return 0; + case PSW_AS_ACCREG: + rc = ar_translation(vcpu, asce, ar, write); + switch (rc) { + case PGM_ALEN_TRANSLATION: + case PGM_ALE_SEQUENCE: + case PGM_ASTE_VALIDITY: + case PGM_ASTE_SEQUENCE: + case PGM_EXTENDED_AUTHORITY: + vcpu->arch.pgm.exc_access_id = ar; + break; + case PGM_PROTECTION: + tec_bits->b60 = 1; + tec_bits->b61 = 1; + break; + } + if (rc > 0) + pgm->code = rc; + return rc; } return 0; } @@ -330,10 +521,11 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * @vcpu: virtual cpu * @gva: guest virtual address * @gpa: points to where guest physical (absolute) address should be stored + * @asce: effective asce * @write: indicates if access is a write access * * Translate a guest virtual address into a guest absolute address by means - * of dynamic address translation as specified by the architecuture. + * of dynamic address translation as specified by the architecture. * If the resulting absolute address is not available in the configuration * an addressing exception is indicated and @gpa will not be changed. * @@ -345,7 +537,8 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val) * by the architecture */ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, - unsigned long *gpa, int write) + unsigned long *gpa, const union asce asce, + int write) { union vaddress vaddr = {.addr = gva}; union raddress raddr = {.addr = gva}; @@ -354,12 +547,10 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, union ctlreg0 ctlreg0; unsigned long ptr; int edat1, edat2; - union asce asce; ctlreg0.val = vcpu->arch.sie_block->gcr[0]; edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); - asce.val = get_vcpu_asce(vcpu); if (asce.r) goto real_address; ptr = asce.origin * 4096; @@ -506,48 +697,30 @@ static inline int is_low_address(unsigned long ga) return (ga & ~0x11fful) == 0; } -static int low_address_protection_enabled(struct kvm_vcpu *vcpu) +static int low_address_protection_enabled(struct kvm_vcpu *vcpu, + const union asce asce) { union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; psw_t *psw = &vcpu->arch.sie_block->gpsw; - union asce asce; if (!ctlreg0.lap) return 0; - asce.val = get_vcpu_asce(vcpu); if (psw_bits(*psw).t && asce.p) return 0; return 1; } -struct trans_exc_code_bits { - unsigned long addr : 52; /* Translation-exception Address */ - unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ - unsigned long : 7; - unsigned long b61 : 1; - unsigned long as : 2; /* ASCE Identifier */ -}; - -enum { - FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ - FSI_STORE = 1, /* Exception was due to store operation */ - FSI_FETCH = 2 /* Exception was due to fetch operation */ -}; - static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, unsigned long *pages, unsigned long nr_pages, - int write) + const union asce asce, int write) { struct kvm_s390_pgm_info *pgm = 
&vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; struct trans_exc_code_bits *tec_bits; int lap_enabled, rc; - memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - tec_bits->fsi = write ? FSI_STORE : FSI_FETCH; - tec_bits->as = psw_bits(*psw).as; - lap_enabled = low_address_protection_enabled(vcpu); + lap_enabled = low_address_protection_enabled(vcpu, asce); while (nr_pages) { ga = kvm_s390_logical_to_effective(vcpu, ga); tec_bits->addr = ga >> PAGE_SHIFT; @@ -557,7 +730,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, } ga &= PAGE_MASK; if (psw_bits(*psw).t) { - rc = guest_translate(vcpu, ga, pages, write); + rc = guest_translate(vcpu, ga, pages, asce, write); if (rc < 0) return rc; if (rc == PGM_PROTECTION) @@ -578,7 +751,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, return 0; } -int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write) { psw_t *psw = &vcpu->arch.sie_block->gpsw; @@ -591,20 +764,19 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, if (!len) return 0; - /* Access register mode is not supported yet. */ - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) - return -EOPNOTSUPP; + rc = get_vcpu_asce(vcpu, &asce, ar, write); + if (rc) + return rc; nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; pages = pages_array; if (nr_pages > ARRAY_SIZE(pages_array)) pages = vmalloc(nr_pages * sizeof(unsigned long)); if (!pages) return -ENOMEM; - asce.val = get_vcpu_asce(vcpu); need_ipte_lock = psw_bits(*psw).t && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); - rc = guest_page_range(vcpu, ga, pages, nr_pages, write); + rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); for (idx = 0; idx < nr_pages && !rc; idx++) { gpa = *(pages + idx) + (ga & ~PAGE_MASK); _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); @@ -652,7 +824,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * Note: The IPTE lock is not taken during this function, so the caller * has to take care of this. */ -int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, +int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, unsigned long *gpa, int write) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; @@ -661,26 +833,21 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, union asce asce; int rc; - /* Access register mode is not supported yet. */ - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) - return -EOPNOTSUPP; - gva = kvm_s390_logical_to_effective(vcpu, gva); - memset(pgm, 0, sizeof(*pgm)); tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; - tec->as = psw_bits(*psw).as; - tec->fsi = write ? FSI_STORE : FSI_FETCH; + rc = get_vcpu_asce(vcpu, &asce, ar, write); tec->addr = gva >> PAGE_SHIFT; - if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { + if (rc) + return rc; + if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { if (write) { rc = pgm->code = PGM_PROTECTION; return rc; } } - asce.val = get_vcpu_asce(vcpu); if (psw_bits(*psw).t && !asce.r) { /* Use DAT? 
*/ - rc = guest_translate(vcpu, gva, gpa, write); + rc = guest_translate(vcpu, gva, gpa, asce, write); if (rc > 0) { if (rc == PGM_PROTECTION) tec->b61 = 1; @@ -697,28 +864,51 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, } /** - * kvm_s390_check_low_addr_protection - check for low-address protection - * @ga: Guest address + * check_gva_range - test a range of guest virtual addresses for accessibility + */ +int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, + unsigned long length, int is_write) +{ + unsigned long gpa; + unsigned long currlen; + int rc = 0; + + ipte_lock(vcpu); + while (length > 0 && !rc) { + currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); + rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); + gva += currlen; + length -= currlen; + } + ipte_unlock(vcpu); + + return rc; +} + +/** + * kvm_s390_check_low_addr_prot_real - check for low-address protection + * @gra: Guest real address * * Checks whether an address is subject to low-address protection and set * up vcpu->arch.pgm accordingly if necessary. * * Return: 0 if no protection exception, or PGM_PROTECTION if protected. */ -int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) +int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra) { struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; psw_t *psw = &vcpu->arch.sie_block->gpsw; struct trans_exc_code_bits *tec_bits; + union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; - if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) + if (!ctlreg0.lap || !is_low_address(gra)) return 0; memset(pgm, 0, sizeof(*pgm)); tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; tec_bits->fsi = FSI_STORE; tec_bits->as = psw_bits(*psw).as; - tec_bits->addr = ga >> PAGE_SHIFT; + tec_bits->addr = gra >> PAGE_SHIFT; pgm->code = PGM_PROTECTION; return pgm->code; diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 0149cf15058a..ef03726cc661 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -156,9 +156,11 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data, } int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, - unsigned long *gpa, int write); + ar_t ar, unsigned long *gpa, int write); +int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, + unsigned long length, int is_write); -int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len, int write); int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, @@ -168,6 +170,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * write_guest - copy data from kernel space to guest space * @vcpu: virtual cpu * @ga: guest address + * @ar: access register * @data: source address in kernel space * @len: number of bytes to copy * @@ -176,8 +179,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * If DAT is off data will be copied to guest real or absolute memory. * If DAT is on data will be copied to the address space as specified by * the address space bits of the PSW: - * Primary, secondory or home space (access register mode is currently not - * implemented). + * Primary, secondary, home space or access register mode. 
* The addressing mode of the PSW is also inspected, so that address wrap * around is taken into account for 24-, 31- and 64-bit addressing mode, * if the to be copied data crosses page boundaries in guest address space. @@ -210,16 +212,17 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, * if data has been changed in guest space in case of an exception. */ static inline __must_check -int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, data, len, 1); + return access_guest(vcpu, ga, ar, data, len, 1); } /** * read_guest - copy data from guest space to kernel space * @vcpu: virtual cpu * @ga: guest address + * @ar: access register * @data: destination address in kernel space * @len: number of bytes to copy * @@ -229,10 +232,10 @@ int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, * data will be copied from guest space to kernel space. */ static inline __must_check -int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, +int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, unsigned long len) { - return access_guest(vcpu, ga, data, len, 0); + return access_guest(vcpu, ga, ar, data, len, 0); } /** @@ -330,6 +333,6 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data, void ipte_lock(struct kvm_vcpu *vcpu); void ipte_unlock(struct kvm_vcpu *vcpu); int ipte_lock_held(struct kvm_vcpu *vcpu); -int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); +int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); #endif /* __KVM_S390_GACCESS_H */ diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 3e8d4092ce30..e97b3455d7e6 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu, if (!wp_info->old_data) return -ENOMEM; /* try to backup the original value */ - ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data, - wp_info->len); + ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data, + wp_info->len); if (ret) { kfree(wp_info->old_data); wp_info->old_data = NULL; @@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu) continue; /* refetch the wp data and compare it to the old value */ - if (!read_guest(vcpu, wp_info->phys_addr, temp, - wp_info->len)) { + if (!read_guest_abs(vcpu, wp_info->phys_addr, temp, + wp_info->len)) { if (memcmp(temp, wp_info->old_data, wp_info->len)) { kfree(temp); return wp_info; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index bebd2157edd0..9e3779e3e496 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu, pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn; pgm_info->mon_code = vcpu->arch.sie_block->tecmc; break; + case PGM_VECTOR_PROCESSING: case PGM_DATA: pgm_info->data_exc_code = vcpu->arch.sie_block->dxc; break; @@ -319,7 +320,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the source is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], - &srcaddr, 0); + reg2, &srcaddr, 0); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); @@ -328,7 +329,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) /* Make sure that the destination 
is paged-in */ rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], - &dstaddr, 1); + reg1, &dstaddr, 1); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1); diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 073b5f387d1d..2afec6006def 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -1,7 +1,7 @@ /* * handling kvm guest interrupts * - * Copyright IBM Corp. 2008,2014 + * Copyright IBM Corp. 2008, 2015 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/bitmap.h> #include <asm/asm-offsets.h> +#include <asm/dis.h> #include <asm/uaccess.h> #include <asm/sclp.h> #include "kvm-s390.h" @@ -265,8 +266,6 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu, static u16 get_ilc(struct kvm_vcpu *vcpu) { - const unsigned short table[] = { 2, 4, 4, 6 }; - switch (vcpu->arch.sie_block->icptcode) { case ICPT_INST: case ICPT_INSTPROGI: @@ -274,7 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu) case ICPT_PARTEXEC: case ICPT_IOINST: /* last instruction only stored for these icptcodes */ - return table[vcpu->arch.sie_block->ipa >> 14]; + return insn_length(vcpu->arch.sie_block->ipa >> 8); case ICPT_PROGI: return vcpu->arch.sie_block->pgmilc; default: @@ -352,6 +351,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_mchk_info mchk; + unsigned long adtl_status_addr; int rc; spin_lock(&li->lock); @@ -372,6 +372,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu) mchk.cr14, mchk.mcic); rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED); + rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, + &adtl_status_addr, sizeof(unsigned long)); + rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr); rc |= put_guest_lc(vcpu, mchk.mcic, (u64 __user *) __LC_MCCK_CODE); rc |= put_guest_lc(vcpu, mchk.failing_storage_address, @@ -484,7 +487,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; struct kvm_s390_pgm_info pgm_info; - int rc = 0; + int rc = 0, nullifying = false; u16 ilc = get_ilc(vcpu); spin_lock(&li->lock); @@ -509,6 +512,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) case PGM_LX_TRANSLATION: case PGM_PRIMARY_AUTHORITY: case PGM_SECONDARY_AUTHORITY: + nullifying = true; + /* fall through */ case PGM_SPACE_SWITCH: rc = put_guest_lc(vcpu, pgm_info.trans_exc_code, (u64 *)__LC_TRANS_EXC_CODE); @@ -521,6 +526,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) case PGM_EXTENDED_AUTHORITY: rc = put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); + nullifying = true; break; case PGM_ASCE_TYPE: case PGM_PAGE_TRANSLATION: @@ -534,6 +540,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) (u8 *)__LC_EXC_ACCESS_ID); rc |= put_guest_lc(vcpu, pgm_info.op_access_id, (u8 *)__LC_OP_ACCESS_ID); + nullifying = true; break; case PGM_MONITOR: rc = put_guest_lc(vcpu, pgm_info.mon_class_nr, @@ -541,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) rc |= put_guest_lc(vcpu, pgm_info.mon_code, (u64 *)__LC_MON_CODE); break; + case PGM_VECTOR_PROCESSING: case PGM_DATA: rc = put_guest_lc(vcpu, pgm_info.data_exc_code, (u32 *)__LC_DATA_EXC_CODE); @@ -551,6 +559,15 
@@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) rc |= put_guest_lc(vcpu, pgm_info.exc_access_id, (u8 *)__LC_EXC_ACCESS_ID); break; + case PGM_STACK_FULL: + case PGM_STACK_EMPTY: + case PGM_STACK_SPECIFICATION: + case PGM_STACK_TYPE: + case PGM_STACK_OPERATION: + case PGM_TRACE_TABEL: + case PGM_CRYPTO_OPERATION: + nullifying = true; + break; } if (pgm_info.code & PGM_PER) { @@ -564,6 +581,9 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu) (u8 *) __LC_PER_ACCESS_ID); } + if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST) + kvm_s390_rewind_psw(vcpu, ilc); + rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC); rc |= put_guest_lc(vcpu, pgm_info.code, (u16 *)__LC_PGM_INT_CODE); @@ -1332,10 +1352,10 @@ int kvm_s390_inject_vm(struct kvm *kvm, return rc; } -void kvm_s390_reinject_io_int(struct kvm *kvm, +int kvm_s390_reinject_io_int(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { - __inject_vm(kvm, inti); + return __inject_vm(kvm, inti); } int s390int_to_s390irq(struct kvm_s390_interrupt *s390int, diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0c3623927563..9072127bd51b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -25,6 +25,7 @@ #include <linux/random.h> #include <linux/slab.h> #include <linux/timer.h> +#include <linux/vmalloc.h> #include <asm/asm-offsets.h> #include <asm/lowcore.h> #include <asm/pgtable.h> @@ -38,6 +39,8 @@ #include "trace.h" #include "trace-s390.h" +#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ + #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU struct kvm_stats_debugfs_item debugfs_entries[] = { @@ -87,6 +90,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, + { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) }, { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, @@ -173,8 +177,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_VM_ATTRIBUTES: case KVM_CAP_MP_STATE: case KVM_CAP_S390_USER_SIGP: + case KVM_CAP_S390_USER_STSI: + case KVM_CAP_S390_SKEYS: r = 1; break; + case KVM_CAP_S390_MEM_OP: + r = MEM_OP_MAX_SIZE; + break; case KVM_CAP_NR_VCPUS: case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; @@ -185,6 +194,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_S390_COW: r = MACHINE_HAS_ESOP; break; + case KVM_CAP_S390_VECTOR_REGISTERS: + r = MACHINE_HAS_VX; + break; default: r = 0; } @@ -265,6 +277,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) kvm->arch.user_sigp = 1; r = 0; break; + case KVM_CAP_S390_VECTOR_REGISTERS: + if (MACHINE_HAS_VX) { + set_kvm_facility(kvm->arch.model.fac->mask, 129); + set_kvm_facility(kvm->arch.model.fac->list, 129); + r = 0; + } else + r = -EINVAL; + break; + case KVM_CAP_S390_USER_STSI: + kvm->arch.user_stsi = 1; + r = 0; + break; default: r = -EINVAL; break; @@ -522,7 +546,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, sizeof(struct cpuid)); kvm->arch.model.ibc = proc->ibc; - 
memcpy(kvm->arch.model.fac->kvm, proc->fac_list, + memcpy(kvm->arch.model.fac->list, proc->fac_list, S390_ARCH_FAC_LIST_SIZE_BYTE); } else ret = -EFAULT; @@ -556,7 +580,7 @@ static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) } memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); proc->ibc = kvm->arch.model.ibc; - memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE); + memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) ret = -EFAULT; kfree(proc); @@ -576,10 +600,10 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) } get_cpu_id((struct cpuid *) &mach->cpuid); mach->ibc = sclp_get_ibc(); - memcpy(&mach->fac_mask, kvm_s390_fac_list_mask, - kvm_s390_fac_list_mask_size() * sizeof(u64)); + memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, + S390_ARCH_FAC_LIST_SIZE_BYTE); memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_U64); + S390_ARCH_FAC_LIST_SIZE_BYTE); if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) ret = -EFAULT; kfree(mach); @@ -709,6 +733,108 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) return ret; } +static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +{ + uint8_t *keys; + uint64_t hva; + unsigned long curkey; + int i, r = 0; + + if (args->flags != 0) + return -EINVAL; + + /* Is this guest using storage keys? */ + if (!mm_use_skey(current->mm)) + return KVM_S390_GET_SKEYS_NONE; + + /* Enforce sane limit on memory allocation */ + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) + return -EINVAL; + + keys = kmalloc_array(args->count, sizeof(uint8_t), + GFP_KERNEL | __GFP_NOWARN); + if (!keys) + keys = vmalloc(sizeof(uint8_t) * args->count); + if (!keys) + return -ENOMEM; + + for (i = 0; i < args->count; i++) { + hva = gfn_to_hva(kvm, args->start_gfn + i); + if (kvm_is_error_hva(hva)) { + r = -EFAULT; + goto out; + } + + curkey = get_guest_storage_key(current->mm, hva); + if (IS_ERR_VALUE(curkey)) { + r = curkey; + goto out; + } + keys[i] = curkey; + } + + r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, + sizeof(uint8_t) * args->count); + if (r) + r = -EFAULT; +out: + kvfree(keys); + return r; +} + +static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) +{ + uint8_t *keys; + uint64_t hva; + int i, r = 0; + + if (args->flags != 0) + return -EINVAL; + + /* Enforce sane limit on memory allocation */ + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) + return -EINVAL; + + keys = kmalloc_array(args->count, sizeof(uint8_t), + GFP_KERNEL | __GFP_NOWARN); + if (!keys) + keys = vmalloc(sizeof(uint8_t) * args->count); + if (!keys) + return -ENOMEM; + + r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, + sizeof(uint8_t) * args->count); + if (r) { + r = -EFAULT; + goto out; + } + + /* Enable storage key handling for the guest */ + s390_enable_skey(); + + for (i = 0; i < args->count; i++) { + hva = gfn_to_hva(kvm, args->start_gfn + i); + if (kvm_is_error_hva(hva)) { + r = -EFAULT; + goto out; + } + + /* Lowest order bit is reserved */ + if (keys[i] & 0x01) { + r = -EINVAL; + goto out; + } + + r = set_guest_storage_key(current->mm, hva, + (unsigned long)keys[i], 0); + if (r) + goto out; + } +out: + kvfree(keys); + return r; +} + long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ 
-768,6 +894,26 @@ long kvm_arch_vm_ioctl(struct file *filp, r = kvm_s390_vm_has_attr(kvm, &attr); break; } + case KVM_S390_GET_SKEYS: { + struct kvm_s390_skeys args; + + r = -EFAULT; + if (copy_from_user(&args, argp, + sizeof(struct kvm_s390_skeys))) + break; + r = kvm_s390_get_skeys(kvm, &args); + break; + } + case KVM_S390_SET_SKEYS: { + struct kvm_s390_skeys args; + + r = -EFAULT; + if (copy_from_user(&args, argp, + sizeof(struct kvm_s390_skeys))) + break; + r = kvm_s390_set_skeys(kvm, &args); + break; + } default: r = -ENOTTY; } @@ -778,15 +924,18 @@ long kvm_arch_vm_ioctl(struct file *filp, static int kvm_s390_query_ap_config(u8 *config) { u32 fcn_code = 0x04000000UL; - u32 cc; + u32 cc = 0; + memset(config, 0, 128); asm volatile( "lgr 0,%1\n" "lgr 2,%2\n" ".long 0xb2af0000\n" /* PQAP(QCI) */ - "ipm %0\n" + "0: ipm %0\n" "srl %0,28\n" - : "=r" (cc) + "1:\n" + EX_TABLE(0b, 1b) + : "+r" (cc) : "r" (fcn_code), "r" (config) : "cc", "0", "2", "memory" ); @@ -839,9 +988,13 @@ static int kvm_s390_crypto_init(struct kvm *kvm) kvm_s390_set_crycb_format(kvm); - /* Disable AES/DEA protected key functions by default */ - kvm->arch.crypto.aes_kw = 0; - kvm->arch.crypto.dea_kw = 0; + /* Enable AES/DEA protected key functions by default */ + kvm->arch.crypto.aes_kw = 1; + kvm->arch.crypto.dea_kw = 1; + get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); + get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, + sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); return 0; } @@ -881,50 +1034,39 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); if (!kvm->arch.dbf) - goto out_nodbf; + goto out_err; /* * The architectural maximum amount of facilities is 16 kbit. To store * this amount, 2 kbyte of memory is required. Thus we need a full - * page to hold the active copy (arch.model.fac->sie) and the current - * facilities set (arch.model.fac->kvm). Its address size has to be + * page to hold the guest facility list (arch.model.fac->list) and the + * facility mask (arch.model.fac->mask). Its address size has to be * 31 bits and word aligned. */ kvm->arch.model.fac = - (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (!kvm->arch.model.fac) - goto out_nofac; - - memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, - S390_ARCH_FAC_LIST_SIZE_U64); - - /* - * If this KVM host runs *not* in a LPAR, relax the facility bits - * of the kvm facility mask by all missing facilities. This will allow - * to determine the right CPU model by means of the remaining facilities. - * Live guest migration must prohibit the migration of KVMs running in - * a LPAR to non LPAR hosts. - */ - if (!MACHINE_IS_LPAR) - for (i = 0; i < kvm_s390_fac_list_mask_size(); i++) - kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i]; + goto out_err; - /* - * Apply the kvm facility mask to limit the kvm supported/tolerated - * facility list. - */ + /* Populate the facility mask initially. 
*/ + memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, + S390_ARCH_FAC_LIST_SIZE_BYTE); for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { if (i < kvm_s390_fac_list_mask_size()) - kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; + kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; else - kvm->arch.model.fac->kvm[i] = 0UL; + kvm->arch.model.fac->mask[i] = 0UL; } + /* Populate the facility list initially. */ + memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, + S390_ARCH_FAC_LIST_SIZE_BYTE); + kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; if (kvm_s390_crypto_init(kvm) < 0) - goto out_crypto; + goto out_err; spin_lock_init(&kvm->arch.float_int.lock); INIT_LIST_HEAD(&kvm->arch.float_int.list); @@ -939,7 +1081,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) } else { kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); if (!kvm->arch.gmap) - goto out_nogmap; + goto out_err; kvm->arch.gmap->private = kvm; kvm->arch.gmap->pfault_enabled = 0; } @@ -951,15 +1093,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) spin_lock_init(&kvm->arch.start_stop_lock); return 0; -out_nogmap: +out_err: kfree(kvm->arch.crypto.crycb); -out_crypto: free_page((unsigned long)kvm->arch.model.fac); -out_nofac: debug_unregister(kvm->arch.dbf); -out_nodbf: free_page((unsigned long)(kvm->arch.sca)); -out_err: return rc; } @@ -1039,6 +1177,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) KVM_SYNC_CRS | KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT; + if (test_kvm_facility(vcpu->kvm, 129)) + vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; if (kvm_is_ucontrol(vcpu->kvm)) return __kvm_ucontrol_vcpu_init(vcpu); @@ -1049,10 +1189,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { save_fp_ctl(&vcpu->arch.host_fpregs.fpc); - save_fp_regs(vcpu->arch.host_fpregs.fprs); + if (test_kvm_facility(vcpu->kvm, 129)) + save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + else + save_fp_regs(vcpu->arch.host_fpregs.fprs); save_access_regs(vcpu->arch.host_acrs); - restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + if (test_kvm_facility(vcpu->kvm, 129)) { + restore_fp_ctl(&vcpu->run->s.regs.fpc); + restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + } else { + restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); + restore_fp_regs(vcpu->arch.guest_fpregs.fprs); + } restore_access_regs(vcpu->run->s.regs.acrs); gmap_enable(vcpu->arch.gmap); atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); @@ -1062,11 +1210,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); - save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); - save_fp_regs(vcpu->arch.guest_fpregs.fprs); + if (test_kvm_facility(vcpu->kvm, 129)) { + save_fp_ctl(&vcpu->run->s.regs.fpc); + save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + } else { + save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); + save_fp_regs(vcpu->arch.guest_fpregs.fprs); + } save_access_regs(vcpu->run->s.regs.acrs); restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); - restore_fp_regs(vcpu->arch.host_fpregs.fprs); + if (test_kvm_facility(vcpu->kvm, 129)) + restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); + else + restore_fp_regs(vcpu->arch.host_fpregs.fprs); restore_access_regs(vcpu->arch.host_acrs); } @@ -1134,6 +1290,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) return 0; } +static void 
kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) +{ + struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; + + vcpu->arch.cpu_id = model->cpu_id; + vcpu->arch.sie_block->ibc = model->ibc; + vcpu->arch.sie_block->fac = (int) (long) model->fac->list; +} + int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int rc = 0; @@ -1142,6 +1307,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_SM | CPUSTAT_STOPPED | CPUSTAT_GED); + kvm_s390_vcpu_setup_model(vcpu); + vcpu->arch.sie_block->ecb = 6; if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) vcpu->arch.sie_block->ecb |= 0x10; @@ -1152,8 +1319,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->eca |= 1; if (sclp_has_sigpif()) vcpu->arch.sie_block->eca |= 0x10000000U; - vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | - ICTL_TPROT; + if (test_kvm_facility(vcpu->kvm, 129)) { + vcpu->arch.sie_block->eca |= 0x00020000; + vcpu->arch.sie_block->ecd |= 0x20000000; + } + vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; if (kvm_s390_cmma_enabled(vcpu->kvm)) { rc = kvm_s390_vcpu_setup_cmma(vcpu); @@ -1163,13 +1333,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; - mutex_lock(&vcpu->kvm->lock); - vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; - memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, - S390_ARCH_FAC_LIST_SIZE_BYTE); - vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc; - mutex_unlock(&vcpu->kvm->lock); - kvm_s390_vcpu_crypto_setup(vcpu); return rc; @@ -1197,6 +1360,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block = &sie_page->sie_block; vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; + vcpu->arch.host_vregs = &sie_page->vregs; vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { @@ -1212,7 +1376,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); } - vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie; spin_lock_init(&vcpu->arch.local_int.lock); vcpu->arch.local_int.float_int = &kvm->arch.float_int; @@ -1732,6 +1895,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu) return 0; } +static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) +{ + psw_t *psw = &vcpu->arch.sie_block->gpsw; + u8 opcode; + int rc; + + VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); + trace_kvm_s390_sie_fault(vcpu); + + /* + * We want to inject an addressing exception, which is defined as a + * suppressing or terminating exception. However, since we came here + * by a DAT access exception, the PSW still points to the faulting + * instruction since DAT exceptions are nullifying. So we've got + * to look up the current opcode to get the length of the instruction + * to be able to forward the PSW. 
+ */ + rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); + if (rc) + return kvm_s390_inject_prog_cond(vcpu, rc); + psw->addr = __rewind_psw(*psw, -insn_length(opcode)); + + return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); +} + static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) { int rc = -1; @@ -1763,11 +1951,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) } } - if (rc == -1) { - VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); - trace_kvm_s390_sie_fault(vcpu); - rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); - } + if (rc == -1) + rc = vcpu_post_run_fault_in_sie(vcpu); memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); @@ -1983,6 +2168,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) return kvm_s390_store_status_unloaded(vcpu, addr); } +/* + * store additional status at address + */ +int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, + unsigned long gpa) +{ + /* Only bits 0-53 are used for address formation */ + if (!(gpa & ~0x3ff)) + return 0; + + return write_guest_abs(vcpu, gpa & ~0x3ff, + (void *)&vcpu->run->s.regs.vrs, 512); +} + +int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) +{ + if (!test_kvm_facility(vcpu->kvm, 129)) + return 0; + + /* + * The guest VXRS are in the host VXRs due to the lazy + * copying in vcpu load/put. Let's update our copies before we save + * it into the save area. + */ + save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); + + return kvm_s390_store_adtl_status_unloaded(vcpu, addr); +} + static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) { kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); @@ -2107,6 +2321,65 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return r; } +static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, + struct kvm_s390_mem_op *mop) +{ + void __user *uaddr = (void __user *)mop->buf; + void *tmpbuf = NULL; + int r, srcu_idx; + const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION + | KVM_S390_MEMOP_F_CHECK_ONLY; + + if (mop->flags & ~supported_flags) + return -EINVAL; + + if (mop->size > MEM_OP_MAX_SIZE) + return -E2BIG; + + if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { + tmpbuf = vmalloc(mop->size); + if (!tmpbuf) + return -ENOMEM; + } + + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); + + switch (mop->op) { + case KVM_S390_MEMOP_LOGICAL_READ: + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); + break; + } + r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + if (r == 0) { + if (copy_to_user(uaddr, tmpbuf, mop->size)) + r = -EFAULT; + } + break; + case KVM_S390_MEMOP_LOGICAL_WRITE: + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); + break; + } + if (copy_from_user(tmpbuf, uaddr, mop->size)) { + r = -EFAULT; + break; + } + r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); + break; + default: + r = -EINVAL; + } + + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); + + if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) + kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); + + vfree(tmpbuf); + return r; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -2206,6 +2479,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } + case KVM_S390_MEM_OP: { + struct kvm_s390_mem_op mem_op; + + if (copy_from_user(&mem_op, 
argp, sizeof(mem_op)) == 0) + r = kvm_s390_guest_mem_op(vcpu, &mem_op); + else + r = -EFAULT; + break; + } default: r = -ENOTTY; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 985c2114d7ef..c5aefef158e5 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -70,16 +70,22 @@ static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix) kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); } -static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) +typedef u8 __bitwise ar_t; + +static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + if (ar) + *ar = base2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; } static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, - u64 *address1, u64 *address2) + u64 *address1, u64 *address2, + ar_t *ar_b1, ar_t *ar_b2) { u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; @@ -88,6 +94,11 @@ static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; + + if (ar_b1) + *ar_b1 = base1; + if (ar_b2) + *ar_b2 = base2; } static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) @@ -98,7 +109,7 @@ static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; } -static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) +static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + @@ -107,14 +118,20 @@ static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) if (disp2 & 0x80000) disp2+=0xfff00000; + if (ar) + *ar = base2; + return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; } -static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) +static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) { u32 base2 = vcpu->arch.sie_block->ipb >> 28; u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); + if (ar) + *ar = base2; + return (base2 ? 
vcpu->run->s.regs.gprs[base2] : 0) + disp2; } @@ -125,10 +142,22 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) vcpu->arch.sie_block->gpsw.mask |= cc << 44; } -/* test availability of facility in a kvm intance */ +/* test availability of facility in a kvm instance */ static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) { - return __test_facility(nr, kvm->arch.model.fac->kvm); + return __test_facility(nr, kvm->arch.model.fac->mask) && + __test_facility(nr, kvm->arch.model.fac->list); +} + +static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) +{ + unsigned char *ptr; + + if (nr >= MAX_FACILITY_BIT) + return -EINVAL; + ptr = (unsigned char *) fac_list + (nr >> 3); + *ptr |= (0x80UL >> (nr & 7)); + return 0; } /* are cpu states controlled by user space */ @@ -150,8 +179,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 cr6, u64 schid); -void kvm_s390_reinject_io_int(struct kvm *kvm, - struct kvm_s390_interrupt_info *inti); +int kvm_s390_reinject_io_int(struct kvm *kvm, + struct kvm_s390_interrupt_info *inti); int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked); /* implemented in intercept.c */ @@ -176,7 +205,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); +int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, + unsigned long addr); int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr); +int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr); void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu); void s390_vcpu_block(struct kvm_vcpu *vcpu); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index bdd9b5b17e03..5e4658d20c77 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -36,15 +36,16 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) struct kvm_vcpu *cpup; s64 hostclk, val; int i, rc; + ar_t ar; u64 op2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - op2 = kvm_s390_get_base_disp_s(vcpu); + op2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (op2 & 7) /* Operand must be on a doubleword boundary */ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, op2, &val, sizeof(val)); + rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -68,20 +69,21 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu) u64 operand2; u32 address; int rc; + ar_t ar; vcpu->stat.instruction_spx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* get the value */ - rc = read_guest(vcpu, operand2, &address, sizeof(address)); + rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -107,13 +109,14 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) u64 operand2; u32 address; int 
rc; + ar_t ar; vcpu->stat.instruction_stpx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); /* must be word boundary */ if (operand2 & 3) @@ -122,7 +125,7 @@ static int handle_store_prefix(struct kvm_vcpu *vcpu) address = kvm_s390_get_prefix(vcpu); /* get the value */ - rc = write_guest(vcpu, operand2, &address, sizeof(address)); + rc = write_guest(vcpu, operand2, ar, &address, sizeof(address)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -136,18 +139,19 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu) u16 vcpu_id = vcpu->vcpu_id; u64 ga; int rc; + ar_t ar; vcpu->stat.instruction_stap++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_s(vcpu); + ga = kvm_s390_get_base_disp_s(vcpu, &ar); if (ga & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); + rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -207,7 +211,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, NULL, ®2); addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; addr = kvm_s390_logical_to_effective(vcpu, addr); - if (kvm_s390_check_low_addr_protection(vcpu, addr)) + if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); addr = kvm_s390_real_to_abs(vcpu, addr); @@ -229,18 +233,20 @@ static int handle_tpi(struct kvm_vcpu *vcpu) struct kvm_s390_interrupt_info *inti; unsigned long len; u32 tpi_data[3]; - int cc, rc; + int rc; u64 addr; + ar_t ar; - rc = 0; - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - cc = 0; + inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0); - if (!inti) - goto no_interrupt; - cc = 1; + if (!inti) { + kvm_s390_set_psw_cc(vcpu, 0); + return 0; + } + tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr; tpi_data[1] = inti->io.io_int_parm; tpi_data[2] = inti->io.io_int_word; @@ -250,31 +256,39 @@ static int handle_tpi(struct kvm_vcpu *vcpu) * provided area. */ len = sizeof(tpi_data) - 4; - rc = write_guest(vcpu, addr, &tpi_data, len); - if (rc) - return kvm_s390_inject_prog_cond(vcpu, rc); + rc = write_guest(vcpu, addr, ar, &tpi_data, len); + if (rc) { + rc = kvm_s390_inject_prog_cond(vcpu, rc); + goto reinject_interrupt; + } } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ len = sizeof(tpi_data); - if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) + if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) { + /* failed writes to the low core are not recoverable */ rc = -EFAULT; + goto reinject_interrupt; + } } + + /* irq was successfully handed to the guest */ + kfree(inti); + kvm_s390_set_psw_cc(vcpu, 1); + return 0; +reinject_interrupt: /* * If we encounter a problem storing the interruption code, the * instruction is suppressed from the guest's view: reinject the * interrupt. */ - if (!rc) + if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) { kfree(inti); - else - kvm_s390_reinject_io_int(vcpu->kvm, inti); -no_interrupt: - /* Set condition code and we're done. 
*/ - if (!rc) - kvm_s390_set_psw_cc(vcpu, cc); + rc = -EFAULT; + } + /* don't set the cc, a pgm irq was injected or we drop to user space */ return rc ? -EFAULT : 0; } @@ -348,7 +362,7 @@ static int handle_stfl(struct kvm_vcpu *vcpu) * We need to shift the lower 32 facility bits (bit 0-31) from a u64 * into a u32 memory representation. They will remain bits 0-31. */ - fac = *vcpu->kvm->arch.model.fac->sie >> 32; + fac = *vcpu->kvm->arch.model.fac->list >> 32; rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list), &fac, sizeof(fac)); if (rc) @@ -386,15 +400,16 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) psw_compat_t new_psw; u64 addr; int rc; + ar_t ar; if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); if (!(new_psw.mask & PSW32_MASK_BASE)) @@ -412,14 +427,15 @@ static int handle_lpswe(struct kvm_vcpu *vcpu) psw_t new_psw; u64 addr; int rc; + ar_t ar; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - addr = kvm_s390_get_base_disp_s(vcpu); + addr = kvm_s390_get_base_disp_s(vcpu, &ar); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); vcpu->arch.sie_block->gpsw = new_psw; @@ -433,18 +449,19 @@ static int handle_stidp(struct kvm_vcpu *vcpu) u64 stidp_data = vcpu->arch.stidp_data; u64 operand2; int rc; + ar_t ar; vcpu->stat.instruction_stidp++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (operand2 & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); - rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); + rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); @@ -467,6 +484,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); + memset(&mem->vm[0], 0, sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; @@ -478,6 +496,17 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) ASCEBC(mem->vm[0].cpi, 16); } +static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, + u8 fc, u8 sel1, u16 sel2) +{ + vcpu->run->exit_reason = KVM_EXIT_S390_STSI; + vcpu->run->s390_stsi.addr = addr; + vcpu->run->s390_stsi.ar = ar; + vcpu->run->s390_stsi.fc = fc; + vcpu->run->s390_stsi.sel1 = sel1; + vcpu->run->s390_stsi.sel2 = sel2; +} + static int handle_stsi(struct kvm_vcpu *vcpu) { int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; @@ -486,6 +515,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) unsigned long mem = 0; u64 operand2; int rc = 0; + ar_t ar; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); @@ -508,7 
+538,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu) return 0; } - operand2 = kvm_s390_get_base_disp_s(vcpu); + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); if (operand2 & 0xfff) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -532,16 +562,20 @@ static int handle_stsi(struct kvm_vcpu *vcpu) break; } - rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); + rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); if (rc) { rc = kvm_s390_inject_prog_cond(vcpu, rc); goto out; } + if (vcpu->kvm->arch.user_stsi) { + insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); + rc = -EREMOTE; + } trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); free_page(mem); kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; - return 0; + return rc; out_no_data: kvm_s390_set_psw_cc(vcpu, 3); out: @@ -670,7 +704,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) } if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { - if (kvm_s390_check_low_addr_protection(vcpu, start)) + if (kvm_s390_check_low_addr_prot_real(vcpu, start)) return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); } @@ -776,13 +810,14 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u32 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_lctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rs(vcpu); + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -791,7 +826,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; @@ -814,13 +849,14 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u32 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_stctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rs(vcpu); + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); if (ga & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -836,7 +872,7 @@ int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; } @@ -847,13 +883,14 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u64 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_lctlg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rsy(vcpu); + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -862,7 +899,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); nr_regs = ((reg3 - reg1) & 0xf) + 1; - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); reg = reg1; @@ -884,13 +921,14 @@ static int handle_stctg(struct kvm_vcpu *vcpu) int reg, rc, nr_regs; u64 ctl_array[16]; u64 ga; + ar_t ar; vcpu->stat.instruction_stctg++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - ga = kvm_s390_get_base_disp_rsy(vcpu); + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); if (ga & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); @@ -906,7 +944,7 @@ static int handle_stctg(struct kvm_vcpu *vcpu) break; reg = (reg + 1) % 16; } while (1); - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0; } @@ -931,13 +969,14 @@ static int handle_tprot(struct kvm_vcpu *vcpu) unsigned long hva, gpa; int ret = 0, cc = 0; bool writable; + ar_t ar; vcpu->stat.instruction_tprot++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); + kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); /* we only handle the Linux memory detection case: * access key == 0 @@ -946,11 +985,11 @@ static int handle_tprot(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) ipte_lock(vcpu); - ret = guest_translate_address(vcpu, address1, &gpa, 1); + ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); if (ret == PGM_PROTECTION) { /* Write protected? Try again with read-only... 
*/ cc = 1; - ret = guest_translate_address(vcpu, address1, &gpa, 0); + ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); } if (ret) { if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) { diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 23b1e86b2122..72e58bd2bee7 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c @@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code) case SIGP_STORE_STATUS_AT_ADDRESS: vcpu->stat.instruction_sigp_store_status++; break; + case SIGP_STORE_ADDITIONAL_STATUS: + vcpu->stat.instruction_sigp_store_adtl_status++; + break; case SIGP_SET_PREFIX: vcpu->stat.instruction_sigp_prefix++; break; @@ -431,7 +434,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu) if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); - order_code = kvm_s390_get_base_disp_rs(vcpu); + order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); if (handle_sigp_order_in_user_space(vcpu, order_code)) return -EOPNOTSUPP; @@ -473,7 +476,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) int r3 = vcpu->arch.sie_block->ipa & 0x000f; u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; struct kvm_vcpu *dest_vcpu; - u8 order_code = kvm_s390_get_base_disp_rs(vcpu); + u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 753a56731951..f0b85443e060 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -287,7 +287,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); return (void __iomem *) addr + offset; } -EXPORT_SYMBOL_GPL(pci_iomap_range); +EXPORT_SYMBOL(pci_iomap_range); void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { @@ -309,7 +309,7 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) } spin_unlock(&zpci_iomap_lock); } -EXPORT_SYMBOL_GPL(pci_iounmap); +EXPORT_SYMBOL(pci_iounmap); static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) @@ -483,9 +483,8 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); } -static void zpci_map_resources(struct zpci_dev *zdev) +static void zpci_map_resources(struct pci_dev *pdev) { - struct pci_dev *pdev = zdev->pdev; resource_size_t len; int i; @@ -499,9 +498,8 @@ static void zpci_map_resources(struct zpci_dev *zdev) } } -static void zpci_unmap_resources(struct zpci_dev *zdev) +static void zpci_unmap_resources(struct pci_dev *pdev) { - struct pci_dev *pdev = zdev->pdev; resource_size_t len; int i; @@ -651,7 +649,7 @@ int pcibios_add_device(struct pci_dev *pdev) zdev->pdev = pdev; pdev->dev.groups = zpci_attr_groups; - zpci_map_resources(zdev); + zpci_map_resources(pdev); for (i = 0; i < PCI_BAR_COUNT; i++) { res = &pdev->resource[i]; @@ -663,6 +661,11 @@ int pcibios_add_device(struct pci_dev *pdev) return 0; } +void pcibios_release_device(struct pci_dev *pdev) +{ + zpci_unmap_resources(pdev); +} + int pcibios_enable_device(struct pci_dev *pdev, int mask) { struct zpci_dev *zdev = get_zdev(pdev); @@ -670,7 +673,6 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) zdev->pdev = pdev; zpci_debug_init_device(zdev); zpci_fmb_enable_device(zdev); - zpci_map_resources(zdev); return pci_enable_resources(pdev, mask); } @@ -679,7 +681,6 @@ void pcibios_disable_device(struct pci_dev *pdev) { struct zpci_dev *zdev = get_zdev(pdev); - 
zpci_unmap_resources(zdev); zpci_fmb_disable_device(zdev); zpci_debug_exit_device(zdev); zdev->pdev = NULL; @@ -688,7 +689,8 @@ void pcibios_disable_device(struct pci_dev *pdev) #ifdef CONFIG_HIBERNATE_CALLBACKS static int zpci_restore(struct device *dev) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct zpci_dev *zdev = get_zdev(pdev); int ret = 0; if (zdev->state != ZPCI_FN_STATE_ONLINE) @@ -698,7 +700,7 @@ static int zpci_restore(struct device *dev) if (ret) goto out; - zpci_map_resources(zdev); + zpci_map_resources(pdev); zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET, zdev->start_dma + zdev->iommu_size - 1, (u64) zdev->dma_table); @@ -709,12 +711,14 @@ out: static int zpci_freeze(struct device *dev) { - struct zpci_dev *zdev = get_zdev(to_pci_dev(dev)); + struct pci_dev *pdev = to_pci_dev(dev); + struct zpci_dev *zdev = get_zdev(pdev); if (zdev->state != ZPCI_FN_STATE_ONLINE) return 0; zpci_unregister_ioat(zdev, 0); + zpci_unmap_resources(pdev); return clp_disable_fh(zdev); } diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 8aa271b3d1ad..b1bb2b72302c 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -64,8 +64,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr, if (copy_from_user(buf, user_buffer, length)) goto out; - memcpy_toio(io_addr, buf, length); - ret = 0; + ret = zpci_memcpy_toio(io_addr, buf, length); out: if (buf != local_buf) kfree(buf); @@ -98,16 +97,16 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr, goto out; io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK)); - ret = -EFAULT; - if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) + if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) { + ret = -EFAULT; goto out; - - memcpy_fromio(buf, io_addr, length); - - if (copy_to_user(user_buffer, buf, length)) + } + ret = zpci_memcpy_fromio(buf, io_addr, length); + if (ret) goto out; + if (copy_to_user(user_buffer, buf, length)) + ret = -EFAULT; - ret = 0; out: if (buf != local_buf) kfree(buf); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c2fb8a87dccb..b7d31ca55187 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -499,6 +499,7 @@ config X86_INTEL_QUARK depends on X86_IO_APIC select IOSF_MBI select INTEL_IMR + select COMMON_CLK ---help--- Select to include support for Quark X1000 SoC. 
Say Y here if you have a Quark based system such as the Arduino diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a236e39cc385..30b28dc76411 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -81,11 +81,6 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); } -#define SELECTOR_TI_MASK (1 << 2) -#define SELECTOR_RPL_MASK 0x03 - -#define IOPL_SHIFT 12 - #define KVM_PERMILLE_MMU_PAGES 20 #define KVM_MIN_ALLOC_MMU_PAGES 64 #define KVM_MMU_HASH_SHIFT 10 @@ -933,6 +928,7 @@ struct x86_emulate_ctxt; int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port); void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); int kvm_emulate_halt(struct kvm_vcpu *vcpu); +int kvm_vcpu_halt(struct kvm_vcpu *vcpu); int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu); void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index e62cf897f781..c1adf33fdd0d 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -115,7 +115,7 @@ static inline void kvm_spinlock_init(void) static inline bool kvm_para_available(void) { - return 0; + return false; } static inline unsigned int kvm_arch_para_features(void) diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index d6b078e9fa28..25b1cc07d496 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, struct pvclock_vsyscall_time_info { struct pvclock_vcpu_time_info pvti; + u32 migrate_count; } __attribute__((__aligned__(SMP_CACHE_BYTES))); #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 5fa9770035dc..c9a6d68b8d62 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask) if (boot_cpu_has(X86_FEATURE_XSAVES)) asm volatile("1:"XSAVES"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); else asm volatile("1:"XSAVE"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); - - asm volatile(xstate_fault - : "0" (0) - : "memory"); - return err; } @@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask) if (boot_cpu_has(X86_FEATURE_XSAVES)) asm volatile("1:"XRSTORS"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); else asm volatile("1:"XRSTOR"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); - - asm volatile(xstate_fault - : "0" (0) - : "memory"); - return err; } @@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask) */ alternative_input_2( "1:"XSAVE, - "1:"XSAVEOPT, + XSAVEOPT, X86_FEATURE_XSAVEOPT, - "1:"XSAVES, + XSAVES, X86_FEATURE_XSAVES, [fx] "D" (fx), "a" (lmask), "d" (hmask) : "memory"); @@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask) */ alternative_input( "1: " XRSTOR, - "1: " XRSTORS, + XRSTORS, 
X86_FEATURE_XSAVES, "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h index c5f1a1deb91a..1fe92181ee9e 100644 --- a/arch/x86/include/uapi/asm/vmx.h +++ b/arch/x86/include/uapi/asm/vmx.h @@ -67,6 +67,7 @@ #define EXIT_REASON_EPT_VIOLATION 48 #define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_INVEPT 50 +#define EXIT_REASON_RDTSCP 51 #define EXIT_REASON_PREEMPTION_TIMER 52 #define EXIT_REASON_INVVPID 53 #define EXIT_REASON_WBINVD 54 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b5c8ff5e9dfc..2346c95c6ab1 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1396,6 +1396,12 @@ void cpu_init(void) wait_for_master_cpu(cpu); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. + */ + cr4_init_shadow(); + show_ucode_info_early(); printk(KERN_INFO "Initializing CPU#%d\n", cpu); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 94d7dcb12145..50163fa9034f 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, - { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, - { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, + { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, + { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 000d4199b03e..31e2d5bf3e38 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback) ENTRY(xen_do_upcall) 1: mov %esp, %eax call xen_evtchn_do_upcall +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp ret_from_intr CFI_ENDPROC ENDPROC(xen_hypervisor_callback) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index db13655c3a2a..1d74d161687c 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -269,11 +269,14 @@ ENTRY(ret_from_fork) testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? jz 1f - testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET - jnz int_ret_from_sys_call - - RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET - jmp ret_from_sys_call # go to the SYSRET fastpath + /* + * By the time we get here, we have no idea whether our pt_regs, + * ti flags, and ti status came from the 64-bit SYSCALL fast path, + * the slow path, or one of the ia32entry paths. + * Use int_ret_from_sys_call to return, since it can safely handle + * all of the above. 
+ */ + jmp int_ret_from_sys_call 1: subq $REST_SKIP, %rsp # leave space for volatiles @@ -1208,6 +1211,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) popq %rsp CFI_DEF_CFA_REGISTER rsp decl PER_CPU_VAR(irq_count) +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp error_exit CFI_ENDPROC END(xen_do_hypervisor_callback) diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6a1146ea4d4d..4e3d5a9621fe 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -223,27 +223,48 @@ static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct kprobe *kp; + unsigned long faddr; kp = get_kprobe((void *)addr); - /* There is no probe, return original address */ - if (!kp) + faddr = ftrace_location(addr); + /* + * Addresses inside the ftrace location are refused by + * arch_check_ftrace_location(). Something went terribly wrong + * if such an address is checked here. + */ + if (WARN_ON(faddr && faddr != addr)) + return 0UL; + /* + * Use the current code if it is not modified by Kprobe + * and it cannot be modified by ftrace. + */ + if (!kp && !faddr) return addr; /* - * Basically, kp->ainsn.insn has an original instruction. - * However, RIP-relative instruction can not do single-stepping - * at different place, __copy_instruction() tweaks the displacement of - * that instruction. In that case, we can't recover the instruction - * from the kp->ainsn.insn. + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, __copy_instruction() tweaks the displacement of + * that instruction. In that case, we can't recover the instruction + * from the kp->ainsn.insn. * - * On the other hand, kp->opcode has a copy of the first byte of - * the probed instruction, which is overwritten by int3. And - * the instruction at kp->addr is not modified by kprobes except - * for the first byte, we can recover the original instruction - * from it and kp->opcode. + * On the other hand, in case on normal Kprobe, kp->opcode has a copy + * of the first byte of the probed instruction, which is overwritten + * by int3. And the instruction at kp->addr is not modified by kprobes + * except for the first byte, we can recover the original instruction + * from it and kp->opcode. + * + * In case of Kprobes using ftrace, we do not have a copy of + * the original instruction. In fact, the ftrace location might + * be modified at anytime and even could be in an inconsistent state. + * Fortunately, we know that the original code is the ideal 5-byte + * long NOP. */ - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - buf[0] = kp->opcode; + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (faddr) + memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); + else + buf[0] = kp->opcode; return (unsigned long)buf; } @@ -251,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) * Recover the probed instruction at addr for further analysis. * Caller must lock kprobes by kprobe_mutex, or disable preemption * for preventing to release referencing kprobes. + * Returns zero if the instruction can not get recovered. */ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) { @@ -285,6 +307,8 @@ static int can_probe(unsigned long paddr) * normally used, we just go through if there is no kprobe. 
*/ __addr = recover_probed_instruction(buf, addr); + if (!__addr) + return 0; kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); insn_get_length(&insn); @@ -333,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src) unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint, failed to recover */ diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 0dd8d089c315..7b3b9d15c47a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr) */ return 0; recovered_insn = recover_probed_instruction(buf, addr); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 2f355d229a58..e5ecd20e72dd 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); } +static struct pvclock_vsyscall_time_info *pvclock_vdso_info; + +static struct pvclock_vsyscall_time_info * +pvclock_get_vsyscall_user_time_info(int cpu) +{ + if (!pvclock_vdso_info) { + BUG(); + return NULL; + } + + return &pvclock_vdso_info[cpu]; +} + +struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) +{ + return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; +} + #ifdef CONFIG_X86_64 +static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, + void *v) +{ + struct task_migration_notifier *mn = v; + struct pvclock_vsyscall_time_info *pvti; + + pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); + + /* this is NULL when pvclock vsyscall is not initialized */ + if (unlikely(pvti == NULL)) + return NOTIFY_DONE; + + pvti->migrate_count++; + + return NOTIFY_DONE; +} + +static struct notifier_block pvclock_migrate = { + .notifier_call = pvclock_task_migrate, +}; + /* * Initialize the generic pvclock vsyscall state. This will allocate * a/some page(s) for the per-vcpu pvclock information, set up a @@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); + pvclock_vdso_info = i; + for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, __pa(i) + (idx*PAGE_SIZE), PAGE_KERNEL_VVAR); } + + register_task_migration_notifier(&pvclock_migrate); + return 0; } #endif diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 4452eedfaedd..26228466f3f8 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -26,7 +26,7 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu) struct kvm_cpuid_entry2 *best; if (!static_cpu_has(X86_FEATURE_XSAVE)) - return 0; + return false; best = kvm_find_cpuid_entry(vcpu, 1, 0); return best && (best->ecx & bit(X86_FEATURE_XSAVE)); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 106c01557f2b..b304728aabe3 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -248,27 +248,7 @@ struct mode_dual { struct opcode mode64; }; -/* EFLAGS bit definitions. 
*/ -#define EFLG_ID (1<<21) -#define EFLG_VIP (1<<20) -#define EFLG_VIF (1<<19) -#define EFLG_AC (1<<18) -#define EFLG_VM (1<<17) -#define EFLG_RF (1<<16) -#define EFLG_IOPL (3<<12) -#define EFLG_NT (1<<14) -#define EFLG_OF (1<<11) -#define EFLG_DF (1<<10) -#define EFLG_IF (1<<9) -#define EFLG_TF (1<<8) -#define EFLG_SF (1<<7) -#define EFLG_ZF (1<<6) -#define EFLG_AF (1<<4) -#define EFLG_PF (1<<2) -#define EFLG_CF (1<<0) - #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a -#define EFLG_RESERVED_ONE_MASK 2 enum x86_transfer_type { X86_TRANSFER_NONE, @@ -317,7 +297,8 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt) * These EFLAGS bits are restored from saved value during emulation, and * any changes are written back to the saved value after emulation. */ -#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF) +#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\ + X86_EFLAGS_PF|X86_EFLAGS_CF) #ifdef CONFIG_X86_64 #define ON64(x) x @@ -478,6 +459,25 @@ static void assign_masked(ulong *dest, ulong src, ulong mask) *dest = (*dest & ~mask) | (src & mask); } +static void assign_register(unsigned long *reg, u64 val, int bytes) +{ + /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ + switch (bytes) { + case 1: + *(u8 *)reg = (u8)val; + break; + case 2: + *(u16 *)reg = (u16)val; + break; + case 4: + *reg = (u32)val; + break; /* 64b: zero-extend */ + case 8: + *reg = val; + break; + } +} + static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt) { return (1UL << (ctxt->ad_bytes << 3)) - 1; @@ -943,6 +943,22 @@ FASTOP2(xadd); FASTOP2R(cmp, cmp_r); +static int em_bsf_c(struct x86_emulate_ctxt *ctxt) +{ + /* If src is zero, do not writeback, but update flags */ + if (ctxt->src.val == 0) + ctxt->dst.type = OP_NONE; + return fastop(ctxt, em_bsf); +} + +static int em_bsr_c(struct x86_emulate_ctxt *ctxt) +{ + /* If src is zero, do not writeback, but update flags */ + if (ctxt->src.val == 0) + ctxt->dst.type = OP_NONE; + return fastop(ctxt, em_bsr); +} + static u8 test_cc(unsigned int condition, unsigned long flags) { u8 rc; @@ -1399,7 +1415,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, unsigned int in_page, n; unsigned int count = ctxt->rep_prefix ? address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1; - in_page = (ctxt->eflags & EFLG_DF) ? + in_page = (ctxt->eflags & X86_EFLAGS_DF) ? offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); @@ -1412,7 +1428,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, } if (ctxt->rep_prefix && (ctxt->d & String) && - !(ctxt->eflags & EFLG_DF)) { + !(ctxt->eflags & X86_EFLAGS_DF)) { ctxt->dst.data = rc->data + rc->pos; ctxt->dst.type = OP_MEM_STR; ctxt->dst.count = (rc->end - rc->pos) / size; @@ -1691,21 +1707,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, static void write_register_operand(struct operand *op) { - /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. 
*/ - switch (op->bytes) { - case 1: - *(u8 *)op->addr.reg = (u8)op->val; - break; - case 2: - *(u16 *)op->addr.reg = (u16)op->val; - break; - case 4: - *op->addr.reg = (u32)op->val; - break; /* 64b: zero-extend */ - case 8: - *op->addr.reg = op->val; - break; - } + return assign_register(op->addr.reg, op->val, op->bytes); } static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) @@ -1792,32 +1794,34 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, { int rc; unsigned long val, change_mask; - int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; + int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &val, len); if (rc != X86EMUL_CONTINUE) return rc; - change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF - | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; + change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF | + X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT | + X86_EFLAGS_AC | X86_EFLAGS_ID; switch(ctxt->mode) { case X86EMUL_MODE_PROT64: case X86EMUL_MODE_PROT32: case X86EMUL_MODE_PROT16: if (cpl == 0) - change_mask |= EFLG_IOPL; + change_mask |= X86_EFLAGS_IOPL; if (cpl <= iopl) - change_mask |= EFLG_IF; + change_mask |= X86_EFLAGS_IF; break; case X86EMUL_MODE_VM86: if (iopl < 3) return emulate_gp(ctxt, 0); - change_mask |= EFLG_IF; + change_mask |= X86_EFLAGS_IF; break; default: /* real mode */ - change_mask |= (EFLG_IOPL | EFLG_IF); + change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF); break; } @@ -1918,7 +1922,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt) static int em_pushf(struct x86_emulate_ctxt *ctxt) { - ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM; + ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM; return em_push(ctxt); } @@ -1926,6 +1930,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) { int rc = X86EMUL_CONTINUE; int reg = VCPU_REGS_RDI; + u32 val; while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { @@ -1933,9 +1938,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) --reg; } - rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes); + rc = emulate_pop(ctxt, &val, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) break; + assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes); --reg; } return rc; @@ -1956,7 +1962,7 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq) if (rc != X86EMUL_CONTINUE) return rc; - ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); + ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC); ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); rc = em_push(ctxt); @@ -2022,10 +2028,14 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) unsigned long temp_eip = 0; unsigned long temp_eflags = 0; unsigned long cs = 0; - unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | - EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | - EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ - unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; + unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | + X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF | + X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF | + X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF | + X86_EFLAGS_AC | X86_EFLAGS_ID | + X86_EFLAGS_FIXED_BIT; + unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF | + X86_EFLAGS_VIP; /* TODO: Add stack limit check */ @@ -2054,7 +2064,6 @@ 
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) ctxt->_eip = temp_eip; - if (ctxt->op_bytes == 4) ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); else if (ctxt->op_bytes == 2) { @@ -2063,7 +2072,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt) } ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ - ctxt->eflags |= EFLG_RESERVED_ONE_MASK; + ctxt->eflags |= X86_EFLAGS_FIXED_BIT; ctxt->ops->set_nmi_mask(ctxt, false); return rc; @@ -2145,12 +2154,12 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32); - ctxt->eflags &= ~EFLG_ZF; + ctxt->eflags &= ~X86_EFLAGS_ZF; } else { ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) | (u32) reg_read(ctxt, VCPU_REGS_RBX); - ctxt->eflags |= EFLG_ZF; + ctxt->eflags |= X86_EFLAGS_ZF; } return X86EMUL_CONTINUE; } @@ -2222,7 +2231,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) ctxt->src.val = ctxt->dst.orig_val; fastop(ctxt, em_cmp); - if (ctxt->eflags & EFLG_ZF) { + if (ctxt->eflags & X86_EFLAGS_ZF) { /* Success: write back to memory; no update of EAX */ ctxt->src.type = OP_NONE; ctxt->dst.val = ctxt->src.orig_val; @@ -2381,14 +2390,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~msr_data; - ctxt->eflags |= EFLG_RESERVED_ONE_MASK; + ctxt->eflags |= X86_EFLAGS_FIXED_BIT; #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; - ctxt->eflags &= ~(EFLG_VM | EFLG_IF); + ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); } return X86EMUL_CONTINUE; @@ -2425,8 +2434,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); - ctxt->eflags &= ~(EFLG_VM | EFLG_IF); - cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK; + ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); + cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK; ss_sel = cs_sel + 8; if (efer & EFER_LMA) { cs.d = 0; @@ -2493,8 +2502,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) return emulate_gp(ctxt, 0); break; } - cs_sel |= SELECTOR_RPL_MASK; - ss_sel |= SELECTOR_RPL_MASK; + cs_sel |= SEGMENT_RPL_MASK; + ss_sel |= SEGMENT_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); @@ -2512,7 +2521,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt) return false; if (ctxt->mode == X86EMUL_MODE_VM86) return true; - iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; + iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT; return ctxt->ops->cpl(ctxt) > iopl; } @@ -2782,10 +2791,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, X86_TRANSFER_TASK_SWITCH, NULL); - if (ret != X86EMUL_CONTINUE) - return ret; - return X86EMUL_CONTINUE; + return ret; } static int task_switch_32(struct x86_emulate_ctxt *ctxt, @@ -2954,7 +2961,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op) { - int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count; + int df = (ctxt->eflags & X86_EFLAGS_DF) ? 
-op->count : op->count; register_address_increment(ctxt, reg, df * op->bytes); op->addr.mem.ea = register_address(ctxt, reg); @@ -3323,7 +3330,7 @@ static int em_clts(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } -static int em_vmcall(struct x86_emulate_ctxt *ctxt) +static int em_hypercall(struct x86_emulate_ctxt *ctxt) { int rc = ctxt->ops->fix_hypercall(ctxt); @@ -3395,17 +3402,6 @@ static int em_lgdt(struct x86_emulate_ctxt *ctxt) return em_lgdt_lidt(ctxt, true); } -static int em_vmmcall(struct x86_emulate_ctxt *ctxt) -{ - int rc; - - rc = ctxt->ops->fix_hypercall(ctxt); - - /* Disable writeback. */ - ctxt->dst.type = OP_NONE; - return rc; -} - static int em_lidt(struct x86_emulate_ctxt *ctxt) { return em_lgdt_lidt(ctxt, false); @@ -3504,7 +3500,8 @@ static int em_sahf(struct x86_emulate_ctxt *ctxt) { u32 flags; - flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF; + flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF | + X86_EFLAGS_SF; flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8; ctxt->eflags &= ~0xffUL; @@ -3769,7 +3766,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) static const struct opcode group7_rm0[] = { N, - I(SrcNone | Priv | EmulateOnUD, em_vmcall), + I(SrcNone | Priv | EmulateOnUD, em_hypercall), N, N, N, N, N, N, }; @@ -3781,7 +3778,7 @@ static const struct opcode group7_rm1[] = { static const struct opcode group7_rm3[] = { DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), - II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall), + II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall), DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa), DIP(SrcNone | Prot | Priv, stgi, check_svme), @@ -4192,7 +4189,8 @@ static const struct opcode twobyte_table[256] = { N, N, G(BitOp, group8), F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc), - F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr), + I(DstReg | SrcMem | ModRM, em_bsf_c), + I(DstReg | SrcMem | ModRM, em_bsr_c), D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), /* 0xC0 - 0xC7 */ F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd), @@ -4759,9 +4757,9 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) || (ctxt->b == 0xae) || (ctxt->b == 0xaf)) && (((ctxt->rep_prefix == REPE_PREFIX) && - ((ctxt->eflags & EFLG_ZF) == 0)) + ((ctxt->eflags & X86_EFLAGS_ZF) == 0)) || ((ctxt->rep_prefix == REPNE_PREFIX) && - ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) + ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF)))) return true; return false; @@ -4913,7 +4911,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; - ctxt->eflags &= ~EFLG_RF; + ctxt->eflags &= ~X86_EFLAGS_RF; goto done; } } @@ -4963,9 +4961,9 @@ special_insn: } if (ctxt->rep_prefix && (ctxt->d & String)) - ctxt->eflags |= EFLG_RF; + ctxt->eflags |= X86_EFLAGS_RF; else - ctxt->eflags &= ~EFLG_RF; + ctxt->eflags &= ~X86_EFLAGS_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { @@ -5014,7 +5012,7 @@ special_insn: rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ - if (ctxt->eflags & EFLG_OF) + if (ctxt->eflags & X86_EFLAGS_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ @@ -5027,19 +5025,19 @@ special_insn: break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ - ctxt->eflags 
^= EFLG_CF; + ctxt->eflags ^= X86_EFLAGS_CF; break; case 0xf8: /* clc */ - ctxt->eflags &= ~EFLG_CF; + ctxt->eflags &= ~X86_EFLAGS_CF; break; case 0xf9: /* stc */ - ctxt->eflags |= EFLG_CF; + ctxt->eflags |= X86_EFLAGS_CF; break; case 0xfc: /* cld */ - ctxt->eflags &= ~EFLG_DF; + ctxt->eflags &= ~X86_EFLAGS_DF; break; case 0xfd: /* std */ - ctxt->eflags |= EFLG_DF; + ctxt->eflags |= X86_EFLAGS_DF; break; default: goto cannot_emulate; @@ -5100,7 +5098,7 @@ writeback: } goto done; /* skip rip writeback */ } - ctxt->eflags &= ~EFLG_RF; + ctxt->eflags &= ~X86_EFLAGS_RF; } ctxt->eip = ctxt->_eip; @@ -5137,8 +5135,7 @@ twobyte_insn: case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; - else if (ctxt->mode != X86EMUL_MODE_PROT64 || - ctxt->op_bytes != 4) + else if (ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 0x8f: /* jnz rel, etc*/ diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 8ff4eaa2bf19..fef922ff2635 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -507,6 +507,7 @@ static int picdev_read(struct kvm_pic *s, return -EOPNOTSUPP; if (len != 1) { + memset(val, 0, len); pr_pic_unimpl("non byte read\n"); return 0; } diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 8bf2e49708e3..51889ec847b0 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -206,6 +206,8 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq, old_irr = ioapic->irr; ioapic->irr |= mask; + if (edge) + ioapic->irr_delivered &= ~mask; if ((edge && old_irr == ioapic->irr) || (!edge && entry.fields.remote_irr)) { ret = 0; @@ -349,7 +351,7 @@ static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status) irqe.shorthand = 0; if (irqe.trig_mode == IOAPIC_EDGE_TRIG) - ioapic->irr &= ~(1 << irq); + ioapic->irr_delivered |= 1 << irq; if (irq == RTC_GSI && line_status) { /* @@ -471,13 +473,6 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, } } -bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector) -{ - struct kvm_ioapic *ioapic = kvm->arch.vioapic; - smp_rmb(); - return test_bit(vector, ioapic->handled_vectors); -} - void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode) { struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic; @@ -597,6 +592,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic) ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS; ioapic->ioregsel = 0; ioapic->irr = 0; + ioapic->irr_delivered = 0; ioapic->id = 0; memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS); rtc_irq_eoi_tracking_reset(ioapic); @@ -654,6 +650,7 @@ int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) spin_lock(&ioapic->lock); memcpy(state, ioapic, sizeof(struct kvm_ioapic_state)); + state->irr &= ~ioapic->irr_delivered; spin_unlock(&ioapic->lock); return 0; } @@ -667,6 +664,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state) spin_lock(&ioapic->lock); memcpy(ioapic, state, sizeof(struct kvm_ioapic_state)); ioapic->irr = 0; + ioapic->irr_delivered = 0; update_handled_vectors(ioapic); kvm_vcpu_request_scan_ioapic(kvm); kvm_ioapic_inject_all(ioapic, state->irr); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index d9e02cab7b96..ca0b0b4e6256 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -77,6 +77,7 @@ struct kvm_ioapic { struct rtc_status rtc_status; struct delayed_work eoi_inject; u32 irq_eoi[IOAPIC_NUM_PINS]; + u32 irr_delivered; }; #ifdef DEBUG @@ -97,13 +98,19 @@ 
static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm) return kvm->arch.vioapic; } +static inline bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector) +{ + struct kvm_ioapic *ioapic = kvm->arch.vioapic; + smp_rmb(); + return test_bit(vector, ioapic->handled_vectors); +} + void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu); bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, int short_hand, unsigned int dest, int dest_mode); int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode); -bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector); int kvm_ioapic_init(struct kvm *kvm); void kvm_ioapic_destroy(struct kvm *kvm); int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id, diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index ba57bb79e795..44f7b9afbedb 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1572,7 +1572,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); } apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); - apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); + apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0; apic->highest_isr_cache = -1; update_divide_count(apic); atomic_set(&apic->lapic_timer.pending, 0); @@ -1782,7 +1782,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, update_divide_count(apic); start_apic_timer(apic); apic->irr_pending = true; - apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? + apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : count_vectors(apic->regs + APIC_ISR); apic->highest_isr_cache = -1; if (kvm_x86_ops->hwapic_irr_update) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 8e6b7d869d2f..29fbf9dfdc54 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -38,7 +38,7 @@ static struct kvm_arch_event_perf_mapping { }; /* mapping between fixed pmc index and arch_events array */ -int fixed_pmc_events[] = {1, 0, 7}; +static int fixed_pmc_events[] = {1, 0, 7}; static bool pmc_is_gp(struct kvm_pmc *pmc) { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d319e0c24758..155534c0f5e8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm) static int halt_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 1; - skip_emulated_instruction(&svm->vcpu); return kvm_emulate_halt(&svm->vcpu); } static int vmmcall_interception(struct vcpu_svm *svm) { svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; - skip_emulated_instruction(&svm->vcpu); kvm_emulate_hypercall(&svm->vcpu); return 1; } @@ -2757,11 +2755,11 @@ static int invlpga_interception(struct vcpu_svm *svm) { struct kvm_vcpu *vcpu = &svm->vcpu; - trace_kvm_invlpga(svm->vmcb->save.rip, vcpu->arch.regs[VCPU_REGS_RCX], - vcpu->arch.regs[VCPU_REGS_RAX]); + trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX), + kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); /* Let's treat INVLPGA the same as INVLPG (can be optimized!) 
*/ - kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]); + kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; skip_emulated_instruction(&svm->vcpu); @@ -2770,12 +2768,18 @@ static int invlpga_interception(struct vcpu_svm *svm) static int skinit_interception(struct vcpu_svm *svm) { - trace_kvm_skinit(svm->vmcb->save.rip, svm->vcpu.arch.regs[VCPU_REGS_RAX]); + trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX)); kvm_queue_exception(&svm->vcpu, UD_VECTOR); return 1; } +static int wbinvd_interception(struct vcpu_svm *svm) +{ + kvm_emulate_wbinvd(&svm->vcpu); + return 1; +} + static int xsetbv_interception(struct vcpu_svm *svm) { u64 new_bv = kvm_read_edx_eax(&svm->vcpu); @@ -2902,7 +2906,8 @@ static int rdpmc_interception(struct vcpu_svm *svm) return 1; } -bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val) +static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, + unsigned long val) { unsigned long cr0 = svm->vcpu.arch.cr0; bool ret = false; @@ -2940,7 +2945,10 @@ static int cr_interception(struct vcpu_svm *svm) return emulate_on_interception(svm); reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; - cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; + if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) + cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0; + else + cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; err = 0; if (cr >= 16) { /* mov to cr */ @@ -3133,7 +3141,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) static int rdmsr_interception(struct vcpu_svm *svm) { - u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; + u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); u64 data; if (svm_get_msr(&svm->vcpu, ecx, &data)) { @@ -3142,8 +3150,8 @@ static int rdmsr_interception(struct vcpu_svm *svm) } else { trace_kvm_msr_read(ecx, data); - svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff; - svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32; + kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff); + kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32); svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; skip_emulated_instruction(&svm->vcpu); } @@ -3246,9 +3254,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) static int wrmsr_interception(struct vcpu_svm *svm) { struct msr_data msr; - u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; - u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) - | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); + u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX); + u64 data = kvm_read_edx_eax(&svm->vcpu); msr.data = data; msr.index = ecx; @@ -3325,7 +3332,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_READ_CR3] = cr_interception, [SVM_EXIT_READ_CR4] = cr_interception, [SVM_EXIT_READ_CR8] = cr_interception, - [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, + [SVM_EXIT_CR0_SEL_WRITE] = cr_interception, [SVM_EXIT_WRITE_CR0] = cr_interception, [SVM_EXIT_WRITE_CR3] = cr_interception, [SVM_EXIT_WRITE_CR4] = cr_interception, @@ -3376,7 +3383,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_STGI] = stgi_interception, [SVM_EXIT_CLGI] = clgi_interception, [SVM_EXIT_SKINIT] = skinit_interception, - [SVM_EXIT_WBINVD] = emulate_on_interception, + [SVM_EXIT_WBINVD] = wbinvd_interception, [SVM_EXIT_MONITOR] = monitor_interception, [SVM_EXIT_MWAIT] = mwait_interception, 
[SVM_EXIT_XSETBV] = xsetbv_interception, @@ -3555,7 +3562,7 @@ static int handle_exit(struct kvm_vcpu *vcpu) if (exit_code >= ARRAY_SIZE(svm_exit_handlers) || !svm_exit_handlers[exit_code]) { - WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code); + WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } @@ -3649,11 +3656,6 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) return; } -static void svm_hwapic_isr_update(struct kvm *kvm, int isr) -{ - return; -} - static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu) { return; @@ -4403,7 +4405,6 @@ static struct kvm_x86_ops svm_x86_ops = { .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode, .vm_has_apicv = svm_vm_has_apicv, .load_eoi_exitmap = svm_load_eoi_exitmap, - .hwapic_isr_update = svm_hwapic_isr_update, .sync_pir_to_irr = svm_sync_pir_to_irr, .set_tss_addr = svm_set_tss_addr, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 317da9bde728..b5a6425d8d97 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2467,6 +2467,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) vmx->nested.nested_vmx_secondary_ctls_low = 0; vmx->nested.nested_vmx_secondary_ctls_high &= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | @@ -3262,8 +3263,8 @@ static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg, * default value. */ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS) - save->selector &= ~SELECTOR_RPL_MASK; - save->dpl = save->selector & SELECTOR_RPL_MASK; + save->selector &= ~SEGMENT_RPL_MASK; + save->dpl = save->selector & SEGMENT_RPL_MASK; save->s = 1; } vmx_set_segment(vcpu, save, seg); @@ -3836,7 +3837,7 @@ static bool code_segment_valid(struct kvm_vcpu *vcpu) unsigned int cs_rpl; vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); - cs_rpl = cs.selector & SELECTOR_RPL_MASK; + cs_rpl = cs.selector & SEGMENT_RPL_MASK; if (cs.unusable) return false; @@ -3864,7 +3865,7 @@ static bool stack_segment_valid(struct kvm_vcpu *vcpu) unsigned int ss_rpl; vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); - ss_rpl = ss.selector & SELECTOR_RPL_MASK; + ss_rpl = ss.selector & SEGMENT_RPL_MASK; if (ss.unusable) return true; @@ -3886,7 +3887,7 @@ static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) unsigned int rpl; vmx_get_segment(vcpu, &var, seg); - rpl = var.selector & SELECTOR_RPL_MASK; + rpl = var.selector & SEGMENT_RPL_MASK; if (var.unusable) return true; @@ -3913,7 +3914,7 @@ static bool tr_valid(struct kvm_vcpu *vcpu) if (tr.unusable) return false; - if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ + if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ return false; @@ -3931,7 +3932,7 @@ static bool ldtr_valid(struct kvm_vcpu *vcpu) if (ldtr.unusable) return true; - if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ + if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */ return false; if (ldtr.type != 2) return false; @@ -3948,8 +3949,8 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); - return ((cs.selector & SELECTOR_RPL_MASK) == - (ss.selector & SELECTOR_RPL_MASK)); + return ((cs.selector & SEGMENT_RPL_MASK) == + (ss.selector & SEGMENT_RPL_MASK)); } /* @@ -5000,7 +5001,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu, if 
(emulate_instruction(vcpu, 0) == EMULATE_DONE) { if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; - return kvm_emulate_halt(vcpu); + return kvm_vcpu_halt(vcpu); } return 1; } @@ -5065,6 +5066,10 @@ static int handle_exception(struct kvm_vcpu *vcpu) } if (is_invalid_opcode(intr_info)) { + if (is_guest_mode(vcpu)) { + kvm_queue_exception(vcpu, UD_VECTOR); + return 1; + } er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); if (er != EMULATE_DONE) kvm_queue_exception(vcpu, UD_VECTOR); @@ -5527,13 +5532,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu) static int handle_halt(struct kvm_vcpu *vcpu) { - skip_emulated_instruction(vcpu); return kvm_emulate_halt(vcpu); } static int handle_vmcall(struct kvm_vcpu *vcpu) { - skip_emulated_instruction(vcpu); kvm_emulate_hypercall(vcpu); return 1; } @@ -5564,7 +5567,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu) static int handle_wbinvd(struct kvm_vcpu *vcpu) { - skip_emulated_instruction(vcpu); kvm_emulate_wbinvd(vcpu); return 1; } @@ -5903,7 +5905,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (vcpu->arch.halt_request) { vcpu->arch.halt_request = 0; - ret = kvm_emulate_halt(vcpu); + ret = kvm_vcpu_halt(vcpu); goto out; } @@ -7312,21 +7314,21 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, else if (port < 0x10000) bitmap = vmcs12->io_bitmap_b; else - return 1; + return true; bitmap += (port & 0x7fff) / 8; if (last_bitmap != bitmap) if (kvm_read_guest(vcpu->kvm, bitmap, &b, 1)) - return 1; + return true; if (b & (1 << (port & 7))) - return 1; + return true; port++; size--; last_bitmap = bitmap; } - return 0; + return false; } /* @@ -7342,7 +7344,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, gpa_t bitmap; if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) - return 1; + return true; /* * The MSR_BITMAP page is divided into four 1024-byte bitmaps, @@ -7361,10 +7363,10 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, if (msr_index < 1024*8) { unsigned char b; if (kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1)) - return 1; + return true; return 1 & (b >> (msr_index & 7)); } else - return 1; /* let L1 handle the wrong parameter */ + return true; /* let L1 handle the wrong parameter */ } /* @@ -7386,7 +7388,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, case 0: if (vmcs12->cr0_guest_host_mask & (val ^ vmcs12->cr0_read_shadow)) - return 1; + return true; break; case 3: if ((vmcs12->cr3_target_count >= 1 && @@ -7397,37 +7399,37 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, vmcs12->cr3_target_value2 == val) || (vmcs12->cr3_target_count >= 4 && vmcs12->cr3_target_value3 == val)) - return 0; + return false; if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) - return 1; + return true; break; case 4: if (vmcs12->cr4_guest_host_mask & (vmcs12->cr4_read_shadow ^ val)) - return 1; + return true; break; case 8: if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) - return 1; + return true; break; } break; case 2: /* clts */ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && (vmcs12->cr0_read_shadow & X86_CR0_TS)) - return 1; + return true; break; case 1: /* mov from cr */ switch (cr) { case 3: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR3_STORE_EXITING) - return 1; + return true; break; case 8: if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_CR8_STORE_EXITING) - return 1; + return true; break; } break; @@ -7438,14 +7440,14 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, */ if 
(vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) - return 1; + return true; if ((vmcs12->cr0_guest_host_mask & 0x1) && !(vmcs12->cr0_read_shadow & 0x1) && (val & 0x1)) - return 1; + return true; break; } - return 0; + return false; } /* @@ -7468,48 +7470,48 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) KVM_ISA_VMX); if (vmx->nested.nested_run_pending) - return 0; + return false; if (unlikely(vmx->fail)) { pr_info_ratelimited("%s failed vm entry %x\n", __func__, vmcs_read32(VM_INSTRUCTION_ERROR)); - return 1; + return true; } switch (exit_reason) { case EXIT_REASON_EXCEPTION_NMI: if (!is_exception(intr_info)) - return 0; + return false; else if (is_page_fault(intr_info)) return enable_ept; else if (is_no_device(intr_info) && !(vmcs12->guest_cr0 & X86_CR0_TS)) - return 0; + return false; return vmcs12->exception_bitmap & (1u << (intr_info & INTR_INFO_VECTOR_MASK)); case EXIT_REASON_EXTERNAL_INTERRUPT: - return 0; + return false; case EXIT_REASON_TRIPLE_FAULT: - return 1; + return true; case EXIT_REASON_PENDING_INTERRUPT: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING); case EXIT_REASON_NMI_WINDOW: return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING); case EXIT_REASON_TASK_SWITCH: - return 1; + return true; case EXIT_REASON_CPUID: if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa) - return 0; - return 1; + return false; + return true; case EXIT_REASON_HLT: return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); case EXIT_REASON_INVD: - return 1; + return true; case EXIT_REASON_INVLPG: return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); case EXIT_REASON_RDPMC: return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); - case EXIT_REASON_RDTSC: + case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: @@ -7521,7 +7523,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) * VMX instructions trap unconditionally. This allows L1 to * emulate them for its L2 guest, i.e., allows 3-level nesting! */ - return 1; + return true; case EXIT_REASON_CR_ACCESS: return nested_vmx_exit_handled_cr(vcpu, vmcs12); case EXIT_REASON_DR_ACCESS: @@ -7532,7 +7534,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_MSR_WRITE: return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); case EXIT_REASON_INVALID_STATE: - return 1; + return true; case EXIT_REASON_MWAIT_INSTRUCTION: return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); case EXIT_REASON_MONITOR_INSTRUCTION: @@ -7542,7 +7544,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) nested_cpu_has2(vmcs12, SECONDARY_EXEC_PAUSE_LOOP_EXITING); case EXIT_REASON_MCE_DURING_VMENTRY: - return 0; + return false; case EXIT_REASON_TPR_BELOW_THRESHOLD: return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); case EXIT_REASON_APIC_ACCESS: @@ -7551,7 +7553,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) case EXIT_REASON_APIC_WRITE: case EXIT_REASON_EOI_INDUCED: /* apic_write and eoi_induced should exit unconditionally. */ - return 1; + return true; case EXIT_REASON_EPT_VIOLATION: /* * L0 always deals with the EPT violation. 
If nested EPT is @@ -7559,7 +7561,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) * missing in the guest EPT table (EPT12), the EPT violation * will be injected with nested_ept_inject_page_fault() */ - return 0; + return false; case EXIT_REASON_EPT_MISCONFIG: /* * L2 never uses directly L1's EPT, but rather L0's own EPT @@ -7567,11 +7569,11 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) * (EPT on EPT). So any problems with the structure of the * table is L0's fault. */ - return 0; + return false; case EXIT_REASON_WBINVD: return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); case EXIT_REASON_XSETBV: - return 1; + return true; case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: /* * This should never happen, since it is not possible to @@ -7581,7 +7583,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) */ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); default: - return 1; + return true; } } @@ -8516,6 +8518,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) exec_control); } } + if (nested && !vmx->rdtscp_enabled) + vmx->nested.nested_vmx_secondary_ctls_high &= + ~SECONDARY_EXEC_RDTSCP; } /* Exposing INVPCID only when PCID is exposed */ @@ -9145,8 +9150,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) exec_control &= ~SECONDARY_EXEC_RDTSCP; /* Take the following fields only from vmcs12 */ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | - SECONDARY_EXEC_APIC_REGISTER_VIRT); + SECONDARY_EXEC_APIC_REGISTER_VIRT); if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) exec_control |= vmcs12->secondary_vm_exec_control; @@ -9518,7 +9524,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) vmcs12->launch_state = 1; if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) - return kvm_emulate_halt(vcpu); + return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5573d633144c..a284c927551e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4477,7 +4477,8 @@ mmio: return X86EMUL_CONTINUE; } -int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, +static int emulator_read_write(struct x86_emulate_ctxt *ctxt, + unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, const struct read_write_emulator_ops *ops) @@ -4540,7 +4541,7 @@ static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, exception, &read_emultor); } -int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, +static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, @@ -4707,7 +4708,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } -int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) +int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; @@ -4724,19 +4725,29 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) wbinvd(); return X86EMUL_CONTINUE; } + +int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) +{ + kvm_x86_ops->skip_emulated_instruction(vcpu); + return kvm_emulate_wbinvd_noskip(vcpu); +} EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); + + static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { - kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); + kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt)); } -int emulator_get_dr(struct x86_emulate_ctxt 
*ctxt, int dr, unsigned long *dest) +static int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long *dest) { return kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } -int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) +static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, + unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); @@ -5818,7 +5829,7 @@ void kvm_arch_exit(void) free_percpu(shared_msrs); } -int kvm_emulate_halt(struct kvm_vcpu *vcpu) +int kvm_vcpu_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { @@ -5829,6 +5840,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) return 0; } } +EXPORT_SYMBOL_GPL(kvm_vcpu_halt); + +int kvm_emulate_halt(struct kvm_vcpu *vcpu) +{ + kvm_x86_ops->skip_emulated_instruction(vcpu); + return kvm_vcpu_halt(vcpu); +} EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) @@ -5905,7 +5923,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) lapic_irq.dest_id = apicid; lapic_irq.delivery_mode = APIC_DM_REMRD; - kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); + kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) @@ -5913,6 +5931,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) unsigned long nr, a0, a1, a2, a3, ret; int op_64_bit, r = 1; + kvm_x86_ops->skip_emulated_instruction(vcpu); + if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); @@ -7430,7 +7450,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) { - kvm_kvfree(free->arch.rmap[i]); + kvfree(free->arch.rmap[i]); free->arch.rmap[i] = NULL; } if (i == 0) @@ -7438,7 +7458,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, if (!dont || free->arch.lpage_info[i - 1] != dont->arch.lpage_info[i - 1]) { - kvm_kvfree(free->arch.lpage_info[i - 1]); + kvfree(free->arch.lpage_info[i - 1]); free->arch.lpage_info[i - 1] = NULL; } } @@ -7492,12 +7512,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, out_free: for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) { - kvm_kvfree(slot->arch.rmap[i]); + kvfree(slot->arch.rmap[i]); slot->arch.rmap[i] = NULL; if (i == 0) continue; - kvm_kvfree(slot->arch.lpage_info[i - 1]); + kvfree(slot->arch.lpage_info[i - 1]); slot->arch.lpage_info[i - 1] = NULL; } return -ENOMEM; diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 4a0890f815c4..08f41caada45 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST_GUEST bool "Lguest guest support" - depends on X86_32 && PARAVIRT + depends on X86_32 && PARAVIRT && PCI select TTY select VIRTUALIZATION select VIRTIO @@ -8,7 +8,7 @@ config LGUEST_GUEST help Lguest is a tiny in-kernel hypervisor. Selecting this will allow your kernel to boot under lguest. This option will increase - your kernel size by about 6k. If in doubt, say N. + your kernel size by about 10k. If in doubt, say N. If you say Y here, make sure you say Y (or M) to the virtio block and net drivers which lguest needs. 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6ac273832f28..e4695985f9de 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info, struct list_head *list) { int ret; - struct resource_entry *entry; + struct resource_entry *entry, *tmp; sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); info->bridge = device; @@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info, dev_dbg(&device->dev, "no IO and memory resources present in _CRS\n"); else - resource_list_for_each_entry(entry, list) - entry->res->name = info->name; + resource_list_for_each_entry_safe(entry, tmp, list) { + if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || + (entry->res->flags & IORESOURCE_DISABLED)) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } } struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 1bbedc4b0f88..3005f0c89f2e 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void) intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); else { intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); - pr_info("ARCH: Uknown SoC, assuming PENWELL!\n"); + pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); } out: diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 9793322751e0..30933760ee5f 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c @@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode) cycle_t ret; u64 last; u32 version; + u32 migrate_count; u8 flags; unsigned cpu, cpu1; /* - * Note: hypervisor must guarantee that: - * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. - * 2. that per-CPU pvclock time info is updated if the - * underlying CPU changes. - * 3. that version is increased whenever underlying CPU - * changes. - * + * When looping to get a consistent (time-info, tsc) pair, we + * also need to deal with the possibility we can switch vcpus, + * so make sure we always re-fetch time-info for the current vcpu. 
*/ do { cpu = __getcpu() & VGETCPU_CPU_MASK; @@ -104,6 +101,8 @@ static notrace cycle_t vread_pvclock(int *mode) pvti = get_pvti(cpu); + migrate_count = pvti->migrate_count; + version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); /* @@ -115,7 +114,8 @@ static notrace cycle_t vread_pvclock(int *mode) cpu1 = __getcpu() & VGETCPU_CPU_MASK; } while (unlikely(cpu != cpu1 || (pvti->pvti.version & 1) || - pvti->pvti.version != version)); + pvti->pvti.version != version || + pvti->migrate_count != migrate_count)); if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) *mode = VCLOCK_NONE; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bd8b8459c3d0..5240f563076d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val) BUG_ON(val); } #endif + +static u64 xen_read_msr_safe(unsigned int msr, int *err) +{ + u64 val; + + val = native_read_msr_safe(msr, err); + switch (msr) { + case MSR_IA32_APICBASE: +#ifdef CONFIG_X86_X2APIC + if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) +#endif + val &= ~X2APIC_ENABLE; + break; + } + return val; +} + static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .wbinvd = native_wbinvd, - .read_msr = native_read_msr_safe, + .read_msr = xen_read_msr_safe, .write_msr = xen_write_msr_safe, .read_tsc = native_read_tsc, @@ -1741,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void) #ifdef CONFIG_X86_32 i386_start_kernel(); #else + cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ x86_64_start_reservations((char *)__pa_symbol(&boot_params)); #endif } |