Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 74
-rw-r--r--  arch/arm64/Kconfig.debug | 23
-rw-r--r--  arch/arm64/Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/arm/juno.dts | 2
-rw-r--r--  arch/arm64/boot/dts/exynos/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos7-espresso.dts | 84
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi | 588
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos7.dtsi | 530
-rw-r--r--  arch/arm64/boot/dts/freescale/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls2085a-simu.dts | 65
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls2085a.dtsi | 163
-rw-r--r--  arch/arm64/boot/dts/mediatek/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 38
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 168
-rw-r--r--  arch/arm64/configs/defconfig | 5
-rw-r--r--  arch/arm64/include/asm/bitrev.h | 19
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 5
-rw-r--r--  arch/arm64/include/asm/cachetype.h | 29
-rw-r--r--  arch/arm64/include/asm/compat.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 8
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 2
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 6
-rw-r--r--  arch/arm64/include/asm/cputype.h | 17
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 11
-rw-r--r--  arch/arm64/include/asm/efi.h | 30
-rw-r--r--  arch/arm64/include/asm/esr.h | 118
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 1
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 43
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 2
-rw-r--r--  arch/arm64/include/asm/io.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 73
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 50
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 10
-rw-r--r--  arch/arm64/include/asm/kvm_mmio.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 55
-rw-r--r--  arch/arm64/include/asm/memory.h | 10
-rw-r--r--  arch/arm64/include/asm/mmu.h | 5
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 32
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 7
-rw-r--r--  arch/arm64/include/asm/suspend.h | 2
-rw-r--r--  arch/arm64/include/asm/syscalls.h | 30
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 4
-rw-r--r--  arch/arm64/include/asm/unistd.h | 5
-rw-r--r--  arch/arm64/include/asm/unistd32.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 9
-rw-r--r--  arch/arm64/include/uapi/asm/ucontext.h (renamed from arch/arm64/include/asm/ucontext.h) | 8
-rw-r--r--  arch/arm64/kernel/Makefile | 6
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 205
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 3
-rw-r--r--  arch/arm64/kernel/cacheinfo.c | 128
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 20
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 34
-rw-r--r--  arch/arm64/kernel/efi-stub.c | 14
-rw-r--r--  arch/arm64/kernel/efi.c | 356
-rw-r--r--  arch/arm64/kernel/entry.S | 66
-rw-r--r--  arch/arm64/kernel/entry32.S (renamed from arch/arm64/kernel/sys32.S) | 34
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 2
-rw-r--r--  arch/arm64/kernel/insn.c | 47
-rw-r--r--  arch/arm64/kernel/module.c | 4
-rw-r--r--  arch/arm64/kernel/pci.c | 22
-rw-r--r--  arch/arm64/kernel/psci.c | 2
-rw-r--r--  arch/arm64/kernel/setup.c | 22
-rw-r--r--  arch/arm64/kernel/signal.c | 2
-rw-r--r--  arch/arm64/kernel/signal32.c | 11
-rw-r--r--  arch/arm64/kernel/smp.c | 10
-rw-r--r--  arch/arm64/kernel/suspend.c | 21
-rw-r--r--  arch/arm64/kernel/sys.c | 5
-rw-r--r--  arch/arm64/kernel/sys32.c | 51
-rw-r--r--  arch/arm64/kernel/traps.c | 50
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/arm64/kvm/Kconfig | 3
-rw-r--r--  arch/arm64/kvm/Makefile | 2
-rw-r--r--  arch/arm64/kvm/emulate.c | 5
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 50
-rw-r--r--  arch/arm64/kvm/hyp.S | 40
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 14
-rw-r--r--  arch/arm64/kvm/reset.c | 1
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 122
-rw-r--r--  arch/arm64/kvm/trace.h | 55
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 14
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 116
-rw-r--r--  arch/arm64/mm/dump.c | 31
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 6
-rw-r--r--  arch/arm64/mm/init.c | 33
-rw-r--r--  arch/arm64/mm/ioremap.c | 1
-rw-r--r--  arch/arm64/mm/mm.h | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 342
-rw-r--r--  arch/arm64/mm/proc.S | 14
93 files changed, 3321 insertions, 1031 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1f9a20a3677..1b8e97331ffb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -39,6 +39,7 @@ config ARM64
select HARDIRQS_SW_RESEND
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_BITREVERSE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
@@ -148,11 +149,65 @@ source "kernel/Kconfig.freezer"
menu "Platform selection"
+config ARCH_EXYNOS
+ bool
+ help
+ This enables support for Samsung Exynos SoC family
+
+config ARCH_EXYNOS7
+ bool "ARMv8 based Samsung Exynos7"
+ select ARCH_EXYNOS
+ select COMMON_CLK_SAMSUNG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select HAVE_S3C_RTC if RTC_CLASS
+ select PINCTRL
+ select PINCTRL_EXYNOS
+
+ help
+ This enables support for Samsung Exynos7 SoC family
+
+config ARCH_FSL_LS2085A
+ bool "Freescale LS2085A SOC"
+ help
+ This enables support for Freescale LS2085A SOC.
+
+config ARCH_MEDIATEK
+ bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
+ select ARM_GIC
+ help
+ Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
+
config ARCH_SEATTLE
bool "AMD Seattle SoC Family"
help
This enables support for AMD Seattle SOC Family
+config ARCH_TEGRA
+ bool "NVIDIA Tegra SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select PINCTRL
+ select RESET_CONTROLLER
+ help
+ This enables support for the NVIDIA Tegra SoC family.
+
+config ARCH_TEGRA_132_SOC
+ bool "NVIDIA Tegra132 SoC"
+ depends on ARCH_TEGRA
+ select PINCTRL_TEGRA124
+ select USB_ULPI if USB_PHY
+ select USB_ULPI_VIEWPORT if USB_PHY
+ help
+ Enable support for NVIDIA Tegra132 SoC, based on the Denver
+ ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
+ but contains an NVIDIA Denver CPU complex in place of
+ Tegra124's "4+1" Cortex-A15 CPU complex.
+
config ARCH_THUNDER
bool "Cavium Inc. Thunder SoC Family"
help
@@ -349,7 +404,6 @@ config ARM64_VA_BITS_42
config ARM64_VA_BITS_48
bool "48-bit"
- depends on !ARM_SMMU
endchoice
@@ -540,6 +594,21 @@ config CP15_BARRIER_EMULATION
If unsure, say Y
+config SETEND_EMULATION
+ bool "Emulate SETEND instruction"
+ help
+ The SETEND instruction alters the data-endianness of the
+ AArch32 EL0, and is deprecated in ARMv8.
+
+ Say Y here to enable software emulation of the instruction
+ for AArch32 userspace code.
+
+ Note: All the cpus on the system must have mixed endian support at EL0
+ for this feature to be enabled. If a new CPU - which doesn't support mixed
+ endian - is hotplugged in after this feature has been enabled, there could
+ be unexpected results in the applications.
+
+ If unsure, say Y
endif
endmenu
@@ -627,9 +696,6 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
def_bool y
-config ARM64_CPU_SUSPEND
- def_bool PM_SLEEP
-
endmenu
menu "CPU Power Management"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 5fdd6dce8061..4a8741073c90 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -66,4 +66,27 @@ config DEBUG_SET_MODULE_RONX
against certain classes of kernel exploits.
If in doubt, say "N".
+config DEBUG_RODATA
+ bool "Make kernel text and rodata read-only"
+ help
+ If this is set, kernel text and rodata will be made read-only. This
+ is to help catch accidental or malicious attempts to change the
+ kernel's executable code. Additionally splits rodata from kernel
+ text so it can be made explicitly non-executable.
+
+ If in doubt, say Y
+
+config DEBUG_ALIGN_RODATA
+ depends on DEBUG_RODATA && !ARM64_64K_PAGES
+ bool "Align linker sections up to SECTION_SIZE"
+ help
+ If this option is enabled, sections that may potentially be marked as
+ read only or non-executable will be aligned up to the section size of
+ the kernel. This prevents sections from being split into pages and
+ avoids a potential TLB penalty. The downside is an increase in
+ alignment and potentially wasted space. Turn on this option if
+ performance is more important than memory pressure.
+
+ If in doubt, say N
+
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1c43cec971b5..69ceedc982a5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,8 +15,6 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
@@ -50,7 +48,6 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
-libs-y += $(LIBGCC)
libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
# Default target when executing plain make
@@ -85,6 +82,7 @@ vdso_install:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ $(Q)$(MAKE) $(clean)=$(boot)/dts
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 3b8d427c3985..e0350caf049e 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -2,7 +2,8 @@ dts-dirs += amd
dts-dirs += apm
dts-dirs += arm
dts-dirs += cavium
+dts-dirs += exynos
+dts-dirs += freescale
+dts-dirs += mediatek
-always := $(dtb-y)
subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index cb3073e4e7a8..d429129ecb3d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -22,7 +22,7 @@
};
chosen {
- stdout-path = &soc_uart0;
+ stdout-path = "serial0:115200n8";
};
psci {
diff --git a/arch/arm64/boot/dts/exynos/Makefile b/arch/arm64/boot/dts/exynos/Makefile
new file mode 100644
index 000000000000..20310e5b6d6f
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_EXYNOS7) += exynos7-espresso.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
new file mode 100644
index 000000000000..5424cc450f72
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
@@ -0,0 +1,84 @@
+/*
+ * SAMSUNG Exynos7 Espresso board device tree source
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/dts-v1/;
+#include "exynos7.dtsi"
+
+/ {
+ model = "Samsung Exynos7 Espresso board based on EXYNOS7";
+ compatible = "samsung,exynos7-espresso", "samsung,exynos7";
+
+ aliases {
+ serial0 = &serial_2;
+ mshc0 = &mmc_0;
+ mshc2 = &mmc_2;
+ };
+
+ chosen {
+ linux,stdout-path = &serial_2;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0x0 0xC0000000>;
+ };
+};
+
+&fin_pll {
+ clock-frequency = <24000000>;
+};
+
+&serial_2 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&watchdog {
+ status = "okay";
+};
+
+&adc {
+ status = "okay";
+};
+
+&mmc_0 {
+ status = "okay";
+ num-slots = <1>;
+ broken-cd;
+ cap-mmc-highspeed;
+ non-removable;
+ card-detect-delay = <200>;
+ clock-frequency = <800000000>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_qrdy &sd0_bus1 &sd0_bus4 &sd0_bus8>;
+ bus-width = <8>;
+};
+
+&mmc_2 {
+ status = "okay";
+ num-slots = <1>;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ clock-frequency = <400000000>;
+ samsung,dw-mshc-ciu-div = <3>;
+ samsung,dw-mshc-sdr-timing = <2 3>;
+ samsung,dw-mshc-ddr-timing = <1 2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
+ bus-width = <4>;
+ disable-wp;
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi
new file mode 100644
index 000000000000..2eef4a279131
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7-pinctrl.dtsi
@@ -0,0 +1,588 @@
+/*
+ * Samsung's Exynos7 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung's Exynos7 SoC pin-mux and pin-config options are listed as
+ * device tree nodes in this file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+&pinctrl_alive {
+ gpa0: gpa0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <2>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>;
+ };
+
+ gpa1: gpa1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ interrupt-parent = <&gic>;
+ #interrupt-cells = <2>;
+ interrupts = <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+ };
+
+ gpa2: gpa2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpa3: gpa3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&pinctrl_bus0 {
+ gpb0: gpb0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc0: gpc0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc1: gpc1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc2: gpc2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpc3: gpc3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd0: gpd0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd1: gpd1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd2: gpd2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd4: gpd4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd5: gpd5 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd6: gpd6 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd7: gpd7 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpd8: gpd8 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg0: gpg0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpg3: gpg3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ hs_i2c10_bus: hs-i2c10-bus {
+ samsung,pins = "gpb0-1", "gpb0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c11_bus: hs-i2c11-bus {
+ samsung,pins = "gpb0-3", "gpb0-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c2_bus: hs-i2c2-bus {
+ samsung,pins = "gpd0-3", "gpd0-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart0_data: uart0-data {
+ samsung,pins = "gpd0-0", "gpd0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart0_fctl: uart0-fctl {
+ samsung,pins = "gpd0-2", "gpd0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart2_data: uart2-data {
+ samsung,pins = "gpd1-4", "gpd1-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c3_bus: hs-i2c3-bus {
+ samsung,pins = "gpd1-3", "gpd1-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_data: uart1-data {
+ samsung,pins = "gpd1-0", "gpd1-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart1_fctl: uart1-fctl {
+ samsung,pins = "gpd1-2", "gpd1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c0_bus: hs-i2c0-bus {
+ samsung,pins = "gpd2-1", "gpd2-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c1_bus: hs-i2c1-bus {
+ samsung,pins = "gpd2-3", "gpd2-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c9_bus: hs-i2c9-bus {
+ samsung,pins = "gpd2-7", "gpd2-6";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm0_out: pwm0-out {
+ samsung,pins = "gpd2-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm1_out: pwm1-out {
+ samsung,pins = "gpd2-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm2_out: pwm2-out {
+ samsung,pins = "gpd2-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ pwm3_out: pwm3-out {
+ samsung,pins = "gpd2-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c8_bus: hs-i2c8-bus {
+ samsung,pins = "gpd5-3", "gpd5-2";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ uart3_data: uart3-data {
+ samsung,pins = "gpd5-0", "gpd5-1";
+ samsung,pin-function = <3>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi2_bus: spi2-bus {
+ samsung,pins = "gpd5-0", "gpd5-1", "gpd5-2", "gpd5-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi1_bus: spi1-bus {
+ samsung,pins = "gpd6-2", "gpd6-3", "gpd6-4", "gpd6-5";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ spi0_bus: spi0-bus {
+ samsung,pins = "gpd8-0", "gpd8-1", "gpd6-0", "gpd6-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c4_bus: hs-i2c4-bus {
+ samsung,pins = "gpg3-1", "gpg3-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+
+ hs_i2c5_bus: hs-i2c5-bus {
+ samsung,pins = "gpg3-3", "gpg3-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pinctrl_nfc {
+ gpj0: gpj0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ hs_i2c6_bus: hs-i2c6-bus {
+ samsung,pins = "gpj0-1", "gpj0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pinctrl_touch {
+ gpj1: gpj1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ hs_i2c7_bus: hs-i2c7-bus {
+ samsung,pins = "gpj1-1", "gpj1-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pinctrl_ff {
+ gpg4: gpg4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ spi3_bus: spi3-bus {
+ samsung,pins = "gpg4-0", "gpg4-1", "gpg4-2", "gpg4-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pinctrl_ese {
+ gpv7: gpv7 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ spi4_bus: spi4-bus {
+ samsung,pins = "gpv7-0", "gpv7-1", "gpv7-2", "gpv7-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <0>;
+ };
+};
+
+&pinctrl_fsys0 {
+ gpr4: gpr4 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sd2_clk: sd2-clk {
+ samsung,pins = "gpr4-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cmd: sd2-cmd {
+ samsung,pins = "gpr4-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_cd: sd2-cd {
+ samsung,pins = "gpr4-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus1: sd2-bus-width1 {
+ samsung,pins = "gpr4-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd2_bus4: sd2-bus-width4 {
+ samsung,pins = "gpr4-4", "gpr4-5", "gpr4-6";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+};
+
+&pinctrl_fsys1 {
+ gpr0: gpr0 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr1: gpr1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr2: gpr2 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpr3: gpr3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ sd0_clk: sd0-clk {
+ samsung,pins = "gpr0-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_cmd: sd0-cmd {
+ samsung,pins = "gpr0-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_ds: sd0-ds {
+ samsung,pins = "gpr0-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_qrdy: sd0-qrdy {
+ samsung,pins = "gpr0-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus1: sd0-bus-width1 {
+ samsung,pins = "gpr1-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus4: sd0-bus-width4 {
+ samsung,pins = "gpr1-1", "gpr1-2", "gpr1-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd0_bus8: sd0-bus-width8 {
+ samsung,pins = "gpr1-4", "gpr1-5", "gpr1-6", "gpr1-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <3>;
+ };
+
+ sd1_clk: sd1-clk {
+ samsung,pins = "gpr2-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_cmd: sd1-cmd {
+ samsung,pins = "gpr2-1";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <0>;
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_ds: sd1-ds {
+ samsung,pins = "gpr2-2";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <6>;
+ };
+
+ sd1_qrdy: sd1-qrdy {
+ samsung,pins = "gpr2-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <6>;
+ };
+
+ sd1_int: sd1-int {
+ samsung,pins = "gpr2-4";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <1>;
+ samsung,pin-drv = <6>;
+ };
+
+ sd1_bus1: sd1-bus-width1 {
+ samsung,pins = "gpr3-0";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus4: sd1-bus-width4 {
+ samsung,pins = "gpr3-1", "gpr3-2", "gpr3-3";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <2>;
+ };
+
+ sd1_bus8: sd1-bus-width8 {
+ samsung,pins = "gpr3-4", "gpr3-5", "gpr3-6", "gpr3-7";
+ samsung,pin-function = <2>;
+ samsung,pin-pud = <3>;
+ samsung,pin-drv = <2>;
+ };
+};
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
new file mode 100644
index 000000000000..d7a37c3a6b52
--- /dev/null
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -0,0 +1,530 @@
+/*
+ * SAMSUNG EXYNOS7 SoC device tree source
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/clock/exynos7-clk.h>
+
+/ {
+ compatible = "samsung,exynos7";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ pinctrl0 = &pinctrl_alive;
+ pinctrl1 = &pinctrl_bus0;
+ pinctrl2 = &pinctrl_nfc;
+ pinctrl3 = &pinctrl_touch;
+ pinctrl4 = &pinctrl_ff;
+ pinctrl5 = &pinctrl_ese;
+ pinctrl6 = &pinctrl_fsys0;
+ pinctrl7 = &pinctrl_fsys1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x1>;
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x2>;
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x18000000>;
+
+ chipid@10000000 {
+ compatible = "samsung,exynos4210-chipid";
+ reg = <0x10000000 0x100>;
+ };
+
+ fin_pll: xxti {
+ compatible = "fixed-clock";
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
+ gic: interrupt-controller@11001000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x11001000 0x1000>,
+ <0x11002000 0x1000>,
+ <0x11004000 0x2000>,
+ <0x11006000 0x2000>;
+ };
+
+ clock_topc: clock-controller@10570000 {
+ compatible = "samsung,exynos7-clock-topc";
+ reg = <0x10570000 0x10000>;
+ #clock-cells = <1>;
+ };
+
+ clock_top0: clock-controller@105d0000 {
+ compatible = "samsung,exynos7-clock-top0";
+ reg = <0x105d0000 0xb000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_topc DOUT_SCLK_BUS0_PLL>,
+ <&clock_topc DOUT_SCLK_BUS1_PLL>,
+ <&clock_topc DOUT_SCLK_CC_PLL>,
+ <&clock_topc DOUT_SCLK_MFC_PLL>;
+ clock-names = "fin_pll", "dout_sclk_bus0_pll",
+ "dout_sclk_bus1_pll", "dout_sclk_cc_pll",
+ "dout_sclk_mfc_pll";
+ };
+
+ clock_top1: clock-controller@105e0000 {
+ compatible = "samsung,exynos7-clock-top1";
+ reg = <0x105e0000 0xb000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_topc DOUT_SCLK_BUS0_PLL>,
+ <&clock_topc DOUT_SCLK_BUS1_PLL>,
+ <&clock_topc DOUT_SCLK_CC_PLL>,
+ <&clock_topc DOUT_SCLK_MFC_PLL>;
+ clock-names = "fin_pll", "dout_sclk_bus0_pll",
+ "dout_sclk_bus1_pll", "dout_sclk_cc_pll",
+ "dout_sclk_mfc_pll";
+ };
+
+ clock_ccore: clock-controller@105b0000 {
+ compatible = "samsung,exynos7-clock-ccore";
+ reg = <0x105b0000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_topc DOUT_ACLK_CCORE_133>;
+ clock-names = "fin_pll", "dout_aclk_ccore_133";
+ };
+
+ clock_peric0: clock-controller@13610000 {
+ compatible = "samsung,exynos7-clock-peric0";
+ reg = <0x13610000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_top0 DOUT_ACLK_PERIC0>,
+ <&clock_top0 CLK_SCLK_UART0>;
+ clock-names = "fin_pll", "dout_aclk_peric0_66",
+ "sclk_uart0";
+ };
+
+ clock_peric1: clock-controller@14c80000 {
+ compatible = "samsung,exynos7-clock-peric1";
+ reg = <0x14c80000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_top0 DOUT_ACLK_PERIC1>,
+ <&clock_top0 CLK_SCLK_UART1>,
+ <&clock_top0 CLK_SCLK_UART2>,
+ <&clock_top0 CLK_SCLK_UART3>;
+ clock-names = "fin_pll", "dout_aclk_peric1_66",
+ "sclk_uart1", "sclk_uart2", "sclk_uart3";
+ };
+
+ clock_peris: clock-controller@10040000 {
+ compatible = "samsung,exynos7-clock-peris";
+ reg = <0x10040000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_topc DOUT_ACLK_PERIS>;
+ clock-names = "fin_pll", "dout_aclk_peris_66";
+ };
+
+ clock_fsys0: clock-controller@10e90000 {
+ compatible = "samsung,exynos7-clock-fsys0";
+ reg = <0x10e90000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_top1 DOUT_ACLK_FSYS0_200>,
+ <&clock_top1 DOUT_SCLK_MMC2>;
+ clock-names = "fin_pll", "dout_aclk_fsys0_200",
+ "dout_sclk_mmc2";
+ };
+
+ clock_fsys1: clock-controller@156e0000 {
+ compatible = "samsung,exynos7-clock-fsys1";
+ reg = <0x156e0000 0xd00>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>, <&clock_top1 DOUT_ACLK_FSYS1_200>,
+ <&clock_top1 DOUT_SCLK_MMC0>,
+ <&clock_top1 DOUT_SCLK_MMC1>;
+ clock-names = "fin_pll", "dout_aclk_fsys1_200",
+ "dout_sclk_mmc0", "dout_sclk_mmc1";
+ };
+
+ serial_0: serial@13630000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x13630000 0x100>;
+ interrupts = <0 440 0>;
+ clocks = <&clock_peric0 PCLK_UART0>,
+ <&clock_peric0 SCLK_UART0>;
+ clock-names = "uart", "clk_uart_baud0";
+ status = "disabled";
+ };
+
+ serial_1: serial@14c20000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x14c20000 0x100>;
+ interrupts = <0 456 0>;
+ clocks = <&clock_peric1 PCLK_UART1>,
+ <&clock_peric1 SCLK_UART1>;
+ clock-names = "uart", "clk_uart_baud0";
+ status = "disabled";
+ };
+
+ serial_2: serial@14c30000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x14c30000 0x100>;
+ interrupts = <0 457 0>;
+ clocks = <&clock_peric1 PCLK_UART2>,
+ <&clock_peric1 SCLK_UART2>;
+ clock-names = "uart", "clk_uart_baud0";
+ status = "disabled";
+ };
+
+ serial_3: serial@14c40000 {
+ compatible = "samsung,exynos4210-uart";
+ reg = <0x14c40000 0x100>;
+ interrupts = <0 458 0>;
+ clocks = <&clock_peric1 PCLK_UART3>,
+ <&clock_peric1 SCLK_UART3>;
+ clock-names = "uart", "clk_uart_baud0";
+ status = "disabled";
+ };
+
+ pinctrl_alive: pinctrl@10580000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x10580000 0x1000>;
+
+ wakeup-interrupt-controller {
+ compatible = "samsung,exynos7-wakeup-eint";
+ interrupt-parent = <&gic>;
+ interrupts = <0 16 0>;
+ };
+ };
+
+ pinctrl_bus0: pinctrl@13470000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x13470000 0x1000>;
+ interrupts = <0 383 0>;
+ };
+
+ pinctrl_nfc: pinctrl@14cd0000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x14cd0000 0x1000>;
+ interrupts = <0 473 0>;
+ };
+
+ pinctrl_touch: pinctrl@14ce0000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x14ce0000 0x1000>;
+ interrupts = <0 474 0>;
+ };
+
+ pinctrl_ff: pinctrl@14c90000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x14c90000 0x1000>;
+ interrupts = <0 475 0>;
+ };
+
+ pinctrl_ese: pinctrl@14ca0000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x14ca0000 0x1000>;
+ interrupts = <0 476 0>;
+ };
+
+ pinctrl_fsys0: pinctrl@10e60000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x10e60000 0x1000>;
+ interrupts = <0 221 0>;
+ };
+
+ pinctrl_fsys1: pinctrl@15690000 {
+ compatible = "samsung,exynos7-pinctrl";
+ reg = <0x15690000 0x1000>;
+ interrupts = <0 203 0>;
+ };
+
+ hsi2c_0: hsi2c@13640000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13640000 0x1000>;
+ interrupts = <0 441 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c0_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C0>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_1: hsi2c@13650000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13650000 0x1000>;
+ interrupts = <0 442 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c1_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C1>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_2: hsi2c@14e60000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x14e60000 0x1000>;
+ interrupts = <0 459 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c2_bus>;
+ clocks = <&clock_peric1 PCLK_HSI2C2>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_3: hsi2c@14e70000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x14e70000 0x1000>;
+ interrupts = <0 460 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c3_bus>;
+ clocks = <&clock_peric1 PCLK_HSI2C3>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_4: hsi2c@13660000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13660000 0x1000>;
+ interrupts = <0 443 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c4_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C4>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_5: hsi2c@13670000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13670000 0x1000>;
+ interrupts = <0 444 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c5_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C5>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_6: hsi2c@14e00000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x14e00000 0x1000>;
+ interrupts = <0 461 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c6_bus>;
+ clocks = <&clock_peric1 PCLK_HSI2C6>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_7: hsi2c@13e10000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13e10000 0x1000>;
+ interrupts = <0 462 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c7_bus>;
+ clocks = <&clock_peric1 PCLK_HSI2C7>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_8: hsi2c@14e20000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x14e20000 0x1000>;
+ interrupts = <0 463 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c8_bus>;
+ clocks = <&clock_peric1 PCLK_HSI2C8>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_9: hsi2c@13680000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13680000 0x1000>;
+ interrupts = <0 445 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c9_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C9>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_10: hsi2c@13690000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x13690000 0x1000>;
+ interrupts = <0 446 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c10_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C10>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_11: hsi2c@136a0000 {
+ compatible = "samsung,exynos7-hsi2c";
+ reg = <0x136a0000 0x1000>;
+ interrupts = <0 447 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hs_i2c11_bus>;
+ clocks = <&clock_peric0 PCLK_HSI2C11>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0xff01>,
+ <1 14 0xff01>,
+ <1 11 0xff01>,
+ <1 10 0xff01>;
+ };
+
+ pmu_system_controller: system-controller@105c0000 {
+ compatible = "samsung,exynos7-pmu", "syscon";
+ reg = <0x105c0000 0x5000>;
+ };
+
+ rtc: rtc@10590000 {
+ compatible = "samsung,s3c6410-rtc";
+ reg = <0x10590000 0x100>;
+ interrupts = <0 355 0>, <0 356 0>;
+ clocks = <&clock_ccore PCLK_RTC>;
+ clock-names = "rtc";
+ status = "disabled";
+ };
+
+ watchdog: watchdog@101d0000 {
+ compatible = "samsung,exynos7-wdt";
+ reg = <0x101d0000 0x100>;
+ interrupts = <0 110 0>;
+ clocks = <&clock_peris PCLK_WDT>;
+ clock-names = "watchdog";
+ samsung,syscon-phandle = <&pmu_system_controller>;
+ status = "disabled";
+ };
+
+ mmc_0: mmc@15740000 {
+ compatible = "samsung,exynos7-dw-mshc-smu";
+ interrupts = <0 201 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x15740000 0x2000>;
+ clocks = <&clock_fsys1 ACLK_MMC0>,
+ <&clock_top1 CLK_SCLK_MMC0>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc_1: mmc@15750000 {
+ compatible = "samsung,exynos7-dw-mshc";
+ interrupts = <0 202 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x15750000 0x2000>;
+ clocks = <&clock_fsys1 ACLK_MMC1>,
+ <&clock_top1 CLK_SCLK_MMC1>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ mmc_2: mmc@15560000 {
+ compatible = "samsung,exynos7-dw-mshc-smu";
+ interrupts = <0 216 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x15560000 0x2000>;
+ clocks = <&clock_fsys0 ACLK_MMC2>,
+ <&clock_top1 CLK_SCLK_MMC2>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x40>;
+ status = "disabled";
+ };
+
+ adc: adc@13620000 {
+ compatible = "samsung,exynos7-adc";
+ reg = <0x13620000 0x100>;
+ interrupts = <0 448 0>;
+ clocks = <&clock_peric0 PCLK_ADCIF>;
+ clock-names = "adc";
+ #io-channel-cells = <1>;
+ io-channel-ranges;
+ status = "disabled";
+ };
+
+ pwm: pwm@136c0000 {
+ compatible = "samsung,exynos4210-pwm";
+ reg = <0x136c0000 0x100>;
+ samsung,pwm-outputs = <0>, <1>, <2>, <3>;
+ #pwm-cells = <3>;
+ clocks = <&clock_peric0 PCLK_PWM>;
+ clock-names = "timers";
+ };
+ };
+};
+
+#include "exynos7-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
new file mode 100644
index 000000000000..4f2de3e789ee
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_FSL_LS2085A) += fsl-ls2085a-simu.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2085a-simu.dts b/arch/arm64/boot/dts/freescale/fsl-ls2085a-simu.dts
new file mode 100644
index 000000000000..82e2a6fccc64
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2085a-simu.dts
@@ -0,0 +1,65 @@
+/*
+ * Device Tree file for Freescale LS2085a software Simulator model
+ *
+ * Copyright (C) 2014, Freescale Semiconductor
+ *
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+/include/ "fsl-ls2085a.dtsi"
+
+/ {
+ model = "Freescale Layerscape 2085a software Simulator model";
+ compatible = "fsl,ls2085a-simu", "fsl,ls2085a";
+
+ ethernet@2210000 {
+ compatible = "smsc,lan91c111";
+ reg = <0x0 0x2210000 0x0 0x100>;
+ interrupts = <0 58 0x1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2085a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2085a.dtsi
new file mode 100644
index 000000000000..e281ceb338c3
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2085a.dtsi
@@ -0,0 +1,163 @@
+/*
+ * Device Tree Include file for Freescale Layerscape-2085A family SoC.
+ *
+ * Copyright (C) 2014, Freescale Semiconductor
+ *
+ * Bhupesh Sharma <bhupesh.sharma@freescale.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ * MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+ compatible = "fsl,ls2085a";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ /*
+ * We expect the enable-method for cpu's to be "psci", but this
+ * is dependent on the SoC FW, which will fill this in.
+ *
+ * Currently supported enable-method is psci v0.2
+ */
+
+ /* We have 4 clusters having 2 Cortex-A57 cores each */
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x1>;
+ };
+
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x100>;
+ };
+
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x101>;
+ };
+
+ cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x200>;
+ };
+
+ cpu@201 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x201>;
+ };
+
+ cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x300>;
+ };
+
+ cpu@301 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x0 0x301>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x80000000>;
+ /* DRAM space - 1, size : 2 GB DRAM */
+ };
+
+ gic: interrupt-controller@6000000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */
+ <0x0 0x06100000 0 0x100000>; /* GICR (RD_base + SGI_base) */
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <1 9 0x4>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
+ <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
+ <1 11 0x8>, /* Virtual PPI, active-low */
+ <1 10 0x8>; /* Hypervisor PPI, active-low */
+ };
+
+ serial0: serial@21c0500 {
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0500 0x0 0x100>;
+ clock-frequency = <0>; /* Updated by bootloader */
+ interrupts = <0 32 0x1>; /* edge triggered */
+ };
+
+ serial1: serial@21c0600 {
+ device_type = "serial";
+ compatible = "fsl,ns16550", "ns16550a";
+ reg = <0x0 0x21c0600 0x0 0x100>;
+ clock-frequency = <0>; /* Updated by bootloader */
+ interrupts = <0 32 0x1>; /* edge triggered */
+ };
+
+ fsl_mc: fsl-mc@80c000000 {
+ compatible = "fsl,qoriq-mc";
+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
new file mode 100644
index 000000000000..3ce24622b231
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
new file mode 100644
index 000000000000..43d54017b779
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Eddie Huang <eddie.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt8173.dtsi"
+
+/ {
+ model = "mediatek,mt8173-evb";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x80000000>;
+ };
+
+ chosen { };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
new file mode 100644
index 000000000000..8554ec31dd9e
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Eddie Huang <eddie.huang@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "mediatek,mt8173";
+ interrupt-parent = <&sysirq>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu2>;
+ };
+ core1 {
+ cpu = <&cpu3>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x001>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x100>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57";
+ reg = <0x101>;
+ enable-method = "psci";
+ };
+ };
+
+ psci {
+ compatible = "arm,psci";
+ method = "smc";
+ cpu_suspend = <0x84000001>;
+ cpu_off = <0x84000002>;
+ cpu_on = <0x84000003>;
+ };
+
+ uart_clk: dummy26m {
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ #clock-cells = <0>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ ranges;
+
+ sysirq: intpol-controller@10200620 {
+ compatible = "mediatek,mt8173-sysirq",
+ "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200620 0 0x20>;
+ };
+
+ gic: interrupt-controller@10220000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ reg = <0 0x10221000 0 0x1000>,
+ <0 0x10222000 0 0x2000>,
+ <0 0x10224000 0 0x2000>,
+ <0 0x10226000 0 0x2000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ uart0: serial@11002000 {
+ compatible = "mediatek,mt8173-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11002000 0 0x400>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@11003000 {
+ compatible = "mediatek,mt8173-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x400>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@11004000 {
+ compatible = "mediatek,mt8173-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11004000 0 0x400>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart3: serial@11005000 {
+ compatible = "mediatek,mt8173-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11005000 0 0x400>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+ };
+
+};
+
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5376d908eabe..be1f12a5a5f0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -31,6 +31,8 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_FSL_LS2085A=y
+CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
@@ -45,6 +47,8 @@ CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM64_CPUIDLE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -86,6 +90,7 @@ CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_MT6577=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
diff --git a/arch/arm64/include/asm/bitrev.h b/arch/arm64/include/asm/bitrev.h
new file mode 100644
index 000000000000..a5a0c3660137
--- /dev/null
+++ b/arch/arm64/include/asm/bitrev.h
@@ -0,0 +1,19 @@
+#ifndef __ASM_BITREV_H
+#define __ASM_BITREV_H
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ __asm__ ("rbit %w0, %w1" : "=r" (x) : "r" (x));
+ return x;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ return __arch_bitrev32((u32)x) >> 16;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ return __arch_bitrev32((u32)x) >> 24;
+}
+
+#endif
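
For reference, a stand-alone sketch (not part of the patch) of the semantics the new rbit-based helpers are expected to match; the 16- and 8-bit variants simply shift the reversed 32-bit value back down:

#include <assert.h>
#include <stdint.h>

/* Portable reference: reverse the bit order of a 32-bit value. */
static uint32_t bitrev32_ref(uint32_t x)
{
	uint32_t r = 0;

	for (int i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	assert(bitrev32_ref(0x00000001u) == 0x80000000u);            /* __arch_bitrev32 */
	assert((uint16_t)(bitrev32_ref(0x0001u) >> 16) == 0x8000u);   /* __arch_bitrev16 */
	assert((uint8_t)(bitrev32_ref(0x01u) >> 24) == 0x80u);        /* __arch_bitrev8  */
	return 0;
}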
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 7ae31a2cc6c0..67d309cc3b6b 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -152,4 +152,9 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4c631a0a3609..da2fc9e3cedd 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -39,24 +39,41 @@
extern unsigned long __icache_flags;
+/*
+ * NumSets, bits[27:13] - (Number of sets in cache) - 1
+ * Associativity, bits[12:3] - (Associativity of cache) - 1
+ * LineSize, bits[2:0] - (Log2(Number of words in cache line)) - 2
+ */
+#define CCSIDR_EL1_WRITE_THROUGH BIT(31)
+#define CCSIDR_EL1_WRITE_BACK BIT(30)
+#define CCSIDR_EL1_READ_ALLOCATE BIT(29)
+#define CCSIDR_EL1_WRITE_ALLOCATE BIT(28)
#define CCSIDR_EL1_LINESIZE_MASK 0x7
#define CCSIDR_EL1_LINESIZE(x) ((x) & CCSIDR_EL1_LINESIZE_MASK)
-
+#define CCSIDR_EL1_ASSOCIATIVITY_SHIFT 3
+#define CCSIDR_EL1_ASSOCIATIVITY_MASK 0x3ff
+#define CCSIDR_EL1_ASSOCIATIVITY(x) \
+ (((x) >> CCSIDR_EL1_ASSOCIATIVITY_SHIFT) & CCSIDR_EL1_ASSOCIATIVITY_MASK)
#define CCSIDR_EL1_NUMSETS_SHIFT 13
-#define CCSIDR_EL1_NUMSETS_MASK (0x7fff << CCSIDR_EL1_NUMSETS_SHIFT)
+#define CCSIDR_EL1_NUMSETS_MASK 0x7fff
#define CCSIDR_EL1_NUMSETS(x) \
- (((x) & CCSIDR_EL1_NUMSETS_MASK) >> CCSIDR_EL1_NUMSETS_SHIFT)
+ (((x) >> CCSIDR_EL1_NUMSETS_SHIFT) & CCSIDR_EL1_NUMSETS_MASK)
+
+#define CACHE_LINESIZE(x) (16 << CCSIDR_EL1_LINESIZE(x))
+#define CACHE_NUMSETS(x) (CCSIDR_EL1_NUMSETS(x) + 1)
+#define CACHE_ASSOCIATIVITY(x) (CCSIDR_EL1_ASSOCIATIVITY(x) + 1)
-extern u64 __attribute_const__ icache_get_ccsidr(void);
+extern u64 __attribute_const__ cache_get_ccsidr(u64 csselr);
+/* Helpers for Level 1 Instruction cache csselr = 1L */
static inline int icache_get_linesize(void)
{
- return 16 << CCSIDR_EL1_LINESIZE(icache_get_ccsidr());
+ return CACHE_LINESIZE(cache_get_ccsidr(1L));
}
static inline int icache_get_numsets(void)
{
- return 1 + CCSIDR_EL1_NUMSETS(icache_get_ccsidr());
+ return CACHE_NUMSETS(cache_get_ccsidr(1L));
}
/*
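
The CCSIDR_EL1 field layout documented in the new comment (NumSets in bits [27:13], Associativity in [12:3], LineSize in [2:0], each encoded minus one or minus two) can be checked with a small stand-alone sketch; the register value below is a constructed example, not one read from hardware:

#include <assert.h>
#include <stdint.h>

#define LINESIZE(x)      ((x) & 0x7)             /* CCSIDR_EL1_LINESIZE()      */
#define ASSOCIATIVITY(x) (((x) >> 3) & 0x3ff)    /* CCSIDR_EL1_ASSOCIATIVITY() */
#define NUMSETS(x)       (((x) >> 13) & 0x7fff)  /* CCSIDR_EL1_NUMSETS()       */

int main(void)
{
	/* Hypothetical cache: 128 sets, 4 ways, 64-byte lines. */
	uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

	unsigned int linesize = 16u << LINESIZE(ccsidr);    /* CACHE_LINESIZE()      -> 64  */
	unsigned int numsets  = NUMSETS(ccsidr) + 1;        /* CACHE_NUMSETS()       -> 128 */
	unsigned int assoc    = ASSOCIATIVITY(ccsidr) + 1;  /* CACHE_ASSOCIATIVITY() -> 4   */

	assert(linesize * numsets * assoc == 32 * 1024);    /* 32 KiB total */
	return 0;
}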
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 3fb053fa6e98..7fbed6919b54 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -161,7 +161,6 @@ typedef struct compat_siginfo {
int si_code;
union {
- /* The padding is the same size as AArch64. */
int _pad[128/sizeof(int) - 3];
/* kill() */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 6f8e2ef9094a..da301ee7395c 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -28,8 +28,6 @@ struct device_node;
* enable-method property.
* @cpu_init: Reads any data necessary for a specific enable-method from the
* devicetree, for a given cpu node and proposed logical id.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- * devicetree, for a given cpu node and proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -42,6 +40,8 @@ struct device_node;
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ * devicetree, for a given cpu node and proposed logical id.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
@@ -49,7 +49,6 @@ struct device_node;
struct cpu_operations {
const char *name;
int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -58,7 +57,8 @@ struct cpu_operations {
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_IDLE
+ int (*cpu_init_idle)(struct device_node *, unsigned int);
int (*cpu_suspend)(unsigned long);
#endif
};
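
With cpu_init_idle and cpu_suspend now guarded by CONFIG_CPU_IDLE instead of the removed ARM64_CPU_SUSPEND symbol, an enable-method backend would be wired up roughly as in the sketch below (illustrative only; the example_* functions are placeholders, not an actual backend such as PSCI):

static const struct cpu_operations example_cpu_ops = {
	.name		= "example",
	.cpu_init	= example_cpu_init,
	.cpu_prepare	= example_cpu_prepare,
	.cpu_boot	= example_cpu_boot,
#ifdef CONFIG_CPU_IDLE
	.cpu_init_idle	= example_cpu_init_idle,	/* parse idle states from the DT */
	.cpu_suspend	= example_cpu_suspend,		/* enter the requested idle state */
#endif
};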
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 07547ccc1f2b..b6c16d5f622f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -52,6 +52,8 @@ static inline void cpus_set_cap(unsigned int num)
}
void check_local_cpu_errata(void);
+bool cpu_supports_mixed_endian_el0(void);
+bool system_supports_mixed_endian_el0(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index b52a9932e2b1..0710654631e7 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -3,11 +3,17 @@
#ifdef CONFIG_CPU_IDLE
extern int cpu_init_idle(unsigned int cpu);
+extern int cpu_suspend(unsigned long arg);
#else
static inline int cpu_init_idle(unsigned int cpu)
{
return -EOPNOTSUPP;
}
+
+static inline int cpu_suspend(unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 8adb986a3086..a84ec605bed8 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -72,6 +72,18 @@
#define APM_CPU_PART_POTENZA 0x000
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
+#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
+#define ID_AA64MMFR0_BIGEND_SHIFT 8
+#define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT)
+#define ID_AA64MMFR0_BIGEND(mmfr0) \
+ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
+
+#define SCTLR_EL1_CP15BEN (0x1 << 5)
+#define SCTLR_EL1_SED (0x1 << 8)
+
#ifndef __ASSEMBLY__
/*
@@ -104,6 +116,11 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CTR_EL0);
}
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
+{
+ return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
+ (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
+}
#endif /* __ASSEMBLY__ */
#endif
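id_aa64mmfr0_mixed_endian_el0() only inspects the BigEnd and BigEndEL0 fields of ID_AA64MMFR0_EL1; either one reading 0x1 is enough. A stand-alone sketch of the same decode, with a fabricated register value in place of read_cpuid():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Field extractors mirroring the new cputype.h macros (illustrative copies). */
#define BIGENDEL0(m)	(((m) >> 16) & 0xf)
#define BIGEND(m)	(((m) >> 8) & 0xf)

static bool mixed_endian_el0(uint64_t mmfr0)
{
	/* Global mixed-endian support, or EL0-only support, both qualify. */
	return BIGEND(mmfr0) == 0x1 || BIGENDEL0(mmfr0) == 0x1;
}

int main(void)
{
	uint64_t mmfr0 = 0x0000000000000122ULL;	/* fabricated: BigEnd == 1 */

	printf("mixed-endian EL0: %s\n", mixed_endian_el0(mmfr0) ? "yes" : "no");
	return 0;
}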
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9ce3e680ae1c..6932bb57dba0 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops coherent_swiotlb_dma_ops;
-extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
@@ -47,23 +45,18 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return __generic_dma_ops(dev);
}
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
- dev->archdata.dma_ops = ops;
-}
-
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
dev->archdata.dma_coherent = coherent;
- if (coherent)
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
+ if (!dev)
+ return false;
return dev->archdata.dma_coherent;
}
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a34fd3b12e2b..ef572206f1c3 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -6,29 +6,33 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efi_idmap_init(void);
#else
#define efi_init()
-#define efi_idmap_init()
#endif
#define efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
efi_status_t __s; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__s = __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
__s; \
})
#define __efi_call_virt(f, ...) \
({ \
- efi_##f##_t *__f = efi.systab->runtime->f; \
+ efi_##f##_t *__f; \
\
kernel_neon_begin(); \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
__f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
kernel_neon_end(); \
})
@@ -44,4 +48,22 @@ extern void efi_idmap_init(void);
#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define EFI_ALLOC_ALIGN SZ_64K
+
+/*
+ * On ARM systems, virtually remapped UEFI runtime services are set up in two
+ * distinct stages:
+ * - The stub retrieves the final version of the memory map from UEFI, populates
+ * the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
+ * service to communicate the new mapping to the firmware (note that the new
+ * mapping is not live at this time).
+ * - During an early initcall(), the EFI system table is permanently remapped
+ * and the virtual remapping of the UEFI Runtime Services regions is loaded
+ * into a private set of page tables. If this all succeeds, the Runtime
+ * Services are enabled and the EFI_RUNTIME_SERVICES bit is set.
+ */
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
#endif /* _ASM_EFI_H */
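The rewritten efi_call_virt() follows a strict bracket: install the private EFI mapping, only then dereference the runtime-service pointer, make the call, and unload the mapping before leaving. A hedged user-space sketch of that call discipline; the service table, the load/unload stubs and the GNU C statement expression are stand-ins, not the kernel's implementation:

#include <stdio.h>

struct runtime_services {
	long (*get_time)(void);			/* stand-in for one runtime service */
};

static struct runtime_services rt;		/* "firmware" table, filled in below */

static void virtmap_load(void)   { puts("load private EFI page tables"); }
static void virtmap_unload(void) { puts("restore previous mm"); }

static long fake_get_time(void)  { return 42; }

/* Resolve the pointer only while the EFI virtual mapping is live. */
#define call_rt(f, ...)					\
({							\
	long __ret;					\
	virtmap_load();					\
	__ret = rt.f(__VA_ARGS__);			\
	virtmap_unload();				\
	__ret;						\
})

int main(void)
{
	rt.get_time = fake_get_time;
	printf("time = %ld\n", call_rt(get_time));
	return 0;
}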
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 72674f4c3871..92bbae381598 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,40 +18,90 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
-#define ESR_EL1_WRITE (1 << 6)
-#define ESR_EL1_CM (1 << 8)
-#define ESR_EL1_IL (1 << 25)
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+/* Unallocated EC: 0x0D */
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12)
+#define ESR_ELx_EC_SMC32 (0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16)
+#define ESR_ELx_EC_SMC64 (0x17)
+#define ESR_ELx_EC_SYS64 (0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f)
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A)
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
-#define ESR_EL1_EC_SHIFT (26)
-#define ESR_EL1_EC_UNKNOWN (0x00)
-#define ESR_EL1_EC_WFI (0x01)
-#define ESR_EL1_EC_CP15_32 (0x03)
-#define ESR_EL1_EC_CP15_64 (0x04)
-#define ESR_EL1_EC_CP14_MR (0x05)
-#define ESR_EL1_EC_CP14_LS (0x06)
-#define ESR_EL1_EC_FP_ASIMD (0x07)
-#define ESR_EL1_EC_CP10_ID (0x08)
-#define ESR_EL1_EC_CP14_64 (0x0C)
-#define ESR_EL1_EC_ILL_ISS (0x0E)
-#define ESR_EL1_EC_SVC32 (0x11)
-#define ESR_EL1_EC_SVC64 (0x15)
-#define ESR_EL1_EC_SYS64 (0x18)
-#define ESR_EL1_EC_IABT_EL0 (0x20)
-#define ESR_EL1_EC_IABT_EL1 (0x21)
-#define ESR_EL1_EC_PC_ALIGN (0x22)
-#define ESR_EL1_EC_DABT_EL0 (0x24)
-#define ESR_EL1_EC_DABT_EL1 (0x25)
-#define ESR_EL1_EC_SP_ALIGN (0x26)
-#define ESR_EL1_EC_FP_EXC32 (0x28)
-#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERROR (0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
-#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1 (0x33)
-#define ESR_EL1_EC_WATCHPT_EL0 (0x34)
-#define ESR_EL1_EC_WATCHPT_EL1 (0x35)
-#define ESR_EL1_EC_BKPT32 (0x38)
-#define ESR_EL1_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_SHIFT (26)
+#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL (UL(1) << 25)
+#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISV (UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT (22)
+#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE (UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT (16)
+#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF (UL(1) << 15)
+#define ESR_ELx_AR (UL(1) << 14)
+#define ESR_ELx_EA (UL(1) << 9)
+#define ESR_ELx_CM (UL(1) << 8)
+#define ESR_ELx_S1PTW (UL(1) << 7)
+#define ESR_ELx_WNR (UL(1) << 6)
+#define ESR_ELx_FSC (0x3F)
+#define ESR_ELx_FSC_TYPE (0x3C)
+#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_FAULT (0x04)
+#define ESR_ELx_FSC_PERM (0x0C)
+#define ESR_ELx_CV (UL(1) << 24)
+#define ESR_ELx_COND_SHIFT (20)
+#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
+#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ESR_H */
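With the unified ESR_ELx_* layout, classifying a syndrome is one shift-and-mask for the EC plus a mask for the ISS. A small sketch of that decode on a hand-built syndrome word (the value and the two-entry name table are illustrative, not esr_get_class_string()):

#include <stdint.h>
#include <stdio.h>

#define EC_SHIFT	26
#define EC_MASK		(0x3Full << EC_SHIFT)
#define IL_BIT		(1ull << 25)
#define ISS_MASK	(IL_BIT - 1)

#define EC_SVC64	0x15
#define EC_DABT_LOW	0x24

static const char *ec_name(unsigned int ec)
{
	switch (ec) {
	case EC_SVC64:    return "SVC (AArch64)";
	case EC_DABT_LOW: return "data abort, lower EL";
	default:          return "other";
	}
}

int main(void)
{
	/* Fabricated: data abort from a lower EL, 32-bit instruction, FSC = external abort */
	uint64_t esr = ((uint64_t)EC_DABT_LOW << EC_SHIFT) | IL_BIT | 0x10;
	unsigned int ec = (esr & EC_MASK) >> EC_SHIFT;

	printf("EC=0x%02x (%s) ISS=0x%llx\n", ec, ec_name(ec),
	       (unsigned long long)(esr & ISS_MASK));
	return 0;
}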
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 9ef6eca905ca..defa0ff98250 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ FIX_TEXT_POKE0,
__end_of_fixed_addresses
};
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 007618b8188c..a2daf1293028 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -76,7 +76,6 @@
fpsimd_restore_fpcr x\tmpnr, \state
.endm
-.altmacro
.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
mrs x\tmpnr1, fpsr
str w\numnr, [\state, #8]
@@ -86,11 +85,22 @@
add \state, \state, x\numnr, lsl #4
sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
br x\tmpnr1
- .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
- .irp qb, %(qa + 1)
- stp q\qa, q\qb, [\state, # -16 * \qa - 16]
- .endr
- .endr
+ stp q30, q31, [\state, #-16 * 30 - 16]
+ stp q28, q29, [\state, #-16 * 28 - 16]
+ stp q26, q27, [\state, #-16 * 26 - 16]
+ stp q24, q25, [\state, #-16 * 24 - 16]
+ stp q22, q23, [\state, #-16 * 22 - 16]
+ stp q20, q21, [\state, #-16 * 20 - 16]
+ stp q18, q19, [\state, #-16 * 18 - 16]
+ stp q16, q17, [\state, #-16 * 16 - 16]
+ stp q14, q15, [\state, #-16 * 14 - 16]
+ stp q12, q13, [\state, #-16 * 12 - 16]
+ stp q10, q11, [\state, #-16 * 10 - 16]
+ stp q8, q9, [\state, #-16 * 8 - 16]
+ stp q6, q7, [\state, #-16 * 6 - 16]
+ stp q4, q5, [\state, #-16 * 4 - 16]
+ stp q2, q3, [\state, #-16 * 2 - 16]
+ stp q0, q1, [\state, #-16 * 0 - 16]
0:
.endm
@@ -103,10 +113,21 @@
add \state, \state, x\tmpnr2, lsl #4
sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
br x\tmpnr1
- .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
- .irp qb, %(qa + 1)
- ldp q\qa, q\qb, [\state, # -16 * \qa - 16]
- .endr
- .endr
+ ldp q30, q31, [\state, #-16 * 30 - 16]
+ ldp q28, q29, [\state, #-16 * 28 - 16]
+ ldp q26, q27, [\state, #-16 * 26 - 16]
+ ldp q24, q25, [\state, #-16 * 24 - 16]
+ ldp q22, q23, [\state, #-16 * 22 - 16]
+ ldp q20, q21, [\state, #-16 * 20 - 16]
+ ldp q18, q19, [\state, #-16 * 18 - 16]
+ ldp q16, q17, [\state, #-16 * 16 - 16]
+ ldp q14, q15, [\state, #-16 * 14 - 16]
+ ldp q12, q13, [\state, #-16 * 12 - 16]
+ ldp q10, q11, [\state, #-16 * 10 - 16]
+ ldp q8, q9, [\state, #-16 * 8 - 16]
+ ldp q6, q7, [\state, #-16 * 6 - 16]
+ ldp q4, q5, [\state, #-16 * 4 - 16]
+ ldp q2, q3, [\state, #-16 * 2 - 16]
+ ldp q0, q1, [\state, #-16 * 0 - 16]
0:
.endm
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index e8a3268a891c..6aae421f4d73 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 6
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 949c406d4df4..540f7c0aea82 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
+#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
@@ -145,8 +146,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* I/O port access primitives.
*/
#define arch_has_dev_port() (1)
-#define IO_SPACE_LIMIT (SZ_32M - 1)
-#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
/*
* String version of I/O memory access operations.
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 8afb863f5a9e..94674eb7e7bb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
+#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/types.h>
@@ -184,77 +185,11 @@
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)
-/* Exception Syndrome Register (ESR) bits */
-#define ESR_EL2_EC_SHIFT (26)
-#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL (UL(1) << 25)
-#define ESR_EL2_ISS (ESR_EL2_IL - 1)
-#define ESR_EL2_ISV_SHIFT (24)
-#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
-#define ESR_EL2_SAS_SHIFT (22)
-#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
-#define ESR_EL2_SSE (1 << 21)
-#define ESR_EL2_SRT_SHIFT (16)
-#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
-#define ESR_EL2_SF (1 << 15)
-#define ESR_EL2_AR (1 << 14)
-#define ESR_EL2_EA (1 << 9)
-#define ESR_EL2_CM (1 << 8)
-#define ESR_EL2_S1PTW (1 << 7)
-#define ESR_EL2_WNR (1 << 6)
-#define ESR_EL2_FSC (0x3f)
-#define ESR_EL2_FSC_TYPE (0x3c)
-
-#define ESR_EL2_CV_SHIFT (24)
-#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
-#define ESR_EL2_COND_SHIFT (20)
-#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
-
-
-#define FSC_FAULT (0x04)
-#define FSC_PERM (0x0c)
+/* For compatibility with fault code shared with 32-bit */
+#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
-#define ESR_EL2_EC_UNKNOWN (0x00)
-#define ESR_EL2_EC_WFI (0x01)
-#define ESR_EL2_EC_CP15_32 (0x03)
-#define ESR_EL2_EC_CP15_64 (0x04)
-#define ESR_EL2_EC_CP14_MR (0x05)
-#define ESR_EL2_EC_CP14_LS (0x06)
-#define ESR_EL2_EC_FP_ASIMD (0x07)
-#define ESR_EL2_EC_CP10_ID (0x08)
-#define ESR_EL2_EC_CP14_64 (0x0C)
-#define ESR_EL2_EC_ILL_ISS (0x0E)
-#define ESR_EL2_EC_SVC32 (0x11)
-#define ESR_EL2_EC_HVC32 (0x12)
-#define ESR_EL2_EC_SMC32 (0x13)
-#define ESR_EL2_EC_SVC64 (0x15)
-#define ESR_EL2_EC_HVC64 (0x16)
-#define ESR_EL2_EC_SMC64 (0x17)
-#define ESR_EL2_EC_SYS64 (0x18)
-#define ESR_EL2_EC_IABT (0x20)
-#define ESR_EL2_EC_IABT_HYP (0x21)
-#define ESR_EL2_EC_PC_ALIGN (0x22)
-#define ESR_EL2_EC_DABT (0x24)
-#define ESR_EL2_EC_DABT_HYP (0x25)
-#define ESR_EL2_EC_SP_ALIGN (0x26)
-#define ESR_EL2_EC_FP_EXC32 (0x28)
-#define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERROR (0x2F)
-#define ESR_EL2_EC_BREAKPT (0x30)
-#define ESR_EL2_EC_BREAKPT_HYP (0x31)
-#define ESR_EL2_EC_SOFTSTP (0x32)
-#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
-#define ESR_EL2_EC_WATCHPT (0x34)
-#define ESR_EL2_EC_WATCHPT_HYP (0x35)
-#define ESR_EL2_EC_BKPT32 (0x38)
-#define ESR_EL2_EC_VECTOR32 (0x3A)
-#define ESR_EL2_EC_BRK64 (0x3C)
-
-#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
-
-#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
-
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 483842180f8f..4f7310fa77f0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -126,6 +126,7 @@ extern char __kvm_hyp_vector[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..17e92f05b1fe 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,10 +23,13 @@
#define __ARM64_KVM_EMULATE_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
+#include <asm/cputype.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -41,6 +44,18 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+}
+
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr_el2 = hcr;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
@@ -126,70 +141,75 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
+static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+}
+
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
- return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
- return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+ return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+ return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
-static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu_sys_reg(vcpu, MPIDR_EL1);
+ return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..8ac3c70fe3c6 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -59,6 +59,9 @@ struct kvm_arch {
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* The maximum number of vCPUs depends on the used GIC model */
+ int max_vcpus;
+
/* Interrupt controller */
struct vgic_dist vgic;
@@ -116,9 +119,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;
@@ -162,6 +162,7 @@ struct kvm_vm_stat {
};
struct kvm_vcpu_stat {
+ u32 halt_successful_poll;
u32 halt_wakeup;
};
@@ -199,6 +200,7 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
@@ -206,6 +208,8 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int kvm_perf_init(void);
int kvm_perf_teardown(void);
+struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index fc2f689c0694..9f52beb7cb13 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -40,6 +40,7 @@ struct kvm_exit_mmio {
u8 data[8];
u32 len;
bool is_write;
+ void *private;
};
static inline void kvm_prepare_mmio(struct kvm_run *run,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..6458b5373142 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -118,6 +118,27 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
pmd_val(*pmd) |= PMD_S2_RDWR;
}
+static inline void kvm_set_s2pte_readonly(pte_t *pte)
+{
+ pte_val(*pte) = (pte_val(*pte) & ~PTE_S2_RDWR) | PTE_S2_RDONLY;
+}
+
+static inline bool kvm_s2pte_readonly(pte_t *pte)
+{
+ return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+}
+
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+{
+ pmd_val(*pmd) = (pmd_val(*pmd) & ~PMD_S2_RDWR) | PMD_S2_RDONLY;
+}
+
+static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+{
+ return (pmd_val(*pmd) & PMD_S2_RDWR) == PMD_S2_RDONLY;
+}
+
+
#define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end)
#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
@@ -243,24 +264,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
+ void *va = page_address(pfn_to_page(pfn));
+
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
+ kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 6486b2bfd562..f800d45ea226 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,6 +33,12 @@
#define UL(x) _AC(x, UL)
/*
+ * Size of the PCI I/O space. This must remain a power of two so that
+ * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
+ */
+#define PCI_IO_SIZE SZ_16M
+
+/*
* PAGE_OFFSET - the virtual address of the start of the kernel image (top
* (VA_BITS - 1))
* VA_BITS - the maximum number of bits for virtual addresses.
@@ -45,7 +51,9 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
+#define PCI_IO_END (MODULES_VADDR - SZ_2M)
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
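The memory.h hunk above carves a 16 MB PCI I/O window out of the space just below the module area, and io.h now derives IO_SPACE_LIMIT from it; because PCI_IO_SIZE is a power of two, the limit doubles as a mask for port numbers. A sketch of the resulting layout, with VA_BITS fixed at 48 purely for the printout:

#include <stdio.h>

#define SZ_2M		0x00200000UL
#define SZ_16M		0x01000000UL
#define SZ_64M		0x04000000UL

#define VA_BITS		48			/* assumed for this example */
#define PAGE_OFFSET	(0xffffffffffffffffUL << (VA_BITS - 1))
#define MODULES_END	PAGE_OFFSET
#define MODULES_VADDR	(MODULES_END - SZ_64M)
#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	(MODULES_VADDR - SZ_2M)
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP	(PCI_IO_START - SZ_2M)
#define IO_SPACE_LIMIT	(PCI_IO_SIZE - 1)

int main(void)
{
	printf("PCI I/O window: 0x%016lx .. 0x%016lx\n", PCI_IO_START, PCI_IO_END - 1);
	printf("FIXADDR_TOP:    0x%016lx\n", FIXADDR_TOP);
	/* IO_SPACE_LIMIT used as a mask keeps any port number inside the window. */
	printf("port 0x3f8 -> offset 0x%lx\n", 0x3f8UL & IO_SPACE_LIMIT);
	return 0;
}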
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index c2f006c48bdb..3d311761e3c2 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -31,7 +31,8 @@ extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
-/* create an identity mapping for memory (or io if map_io is true) */
-extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
+extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot);
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 88174e0bfafe..5f930cc9ea83 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -119,6 +119,7 @@
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */
#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 210d632aa5ad..16449c535e50 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,7 +25,6 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
#define PTE_WRITE (_AT(pteval_t, 1) << 57)
@@ -46,7 +45,7 @@
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
-#define FIRST_USER_ADDRESS 0
+#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
@@ -264,6 +263,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
+static inline pgprot_t mk_sect_prot(pgprot_t prot)
+{
+ return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
+}
+
/*
* THP definitions.
*/
@@ -337,9 +341,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud) (0)
+#define pud_table(pud) (1)
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
+#define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
+ PUD_TYPE_TABLE)
#endif
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
@@ -469,13 +476,12 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-8: swap type
- * bits 9-57: swap offset
+ * bits 2-7: swap type
+ * bits 8-57: swap offset
*/
-#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
-#define __SWP_OFFSET_BITS 49
+#define __SWP_OFFSET_BITS 50
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
@@ -493,18 +499,6 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
-/*
- * Encode and decode a file entry:
- * bits 0-1: present (must be zero)
- * bit 2: PTE_FILE
- * bits 3-57: file offset / PAGE_SIZE
- */
-#define pte_file(pte) (pte_val(pte) & PTE_FILE)
-#define pte_to_pgoff(x) (pte_val(x) >> 3)
-#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-
-#define PTE_FILE_MAX_BITS 55
-
extern int kern_addr_valid(unsigned long addr);
#include <asm-generic/pgtable.h>
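With PTE_FILE gone, bit 2 is free, so the swap type field drops to bits 2-7 and the offset gains a bit. A stand-alone round trip of the new encoding, using the same shift and width constants as the header:

#include <stdint.h>
#include <stdio.h>

#define SWP_TYPE_SHIFT		2
#define SWP_TYPE_BITS		6
#define SWP_OFFSET_BITS		50
#define SWP_TYPE_MASK		((1ULL << SWP_TYPE_BITS) - 1)
#define SWP_OFFSET_SHIFT	(SWP_TYPE_BITS + SWP_TYPE_SHIFT)
#define SWP_OFFSET_MASK		((1ULL << SWP_OFFSET_BITS) - 1)

static uint64_t mk_swp_entry(unsigned int type, uint64_t offset)
{
	/* Bits 0-1 stay zero so the entry is never mistaken for a present PTE. */
	return ((uint64_t)(type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT) |
	       ((offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT);
}

int main(void)
{
	uint64_t e = mk_swp_entry(3, 0x12345);

	printf("entry=0x%llx type=%llu offset=0x%llx\n",
	       (unsigned long long)e,
	       (unsigned long long)((e >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK),
	       (unsigned long long)((e >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK));
	return 0;
}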
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41ed9e13795e..d6dd9fdbc3be 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -58,6 +58,13 @@
#define COMPAT_PSR_Z_BIT 0x40000000
#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+#else
+#define COMPAT_PSR_ENDSTATE 0
+#endif
+
/*
* These are 'magic' values for PTRACE_PEEKUSR that return info about where a
* process is located in memory.
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 456d67c1f0fa..003802f58963 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -23,6 +23,4 @@ struct sleep_save_sp {
extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
-extern int cpu_suspend(unsigned long);
-
#endif
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
deleted file mode 100644
index 48fe7c600e98..000000000000
--- a/arch/arm64/include/asm/syscalls.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SYSCALLS_H
-#define __ASM_SYSCALLS_H
-
-#include <linux/linkage.h>
-#include <linux/compiler.h>
-#include <linux/signal.h>
-
-/*
- * System call wrappers implemented in kernel/entry.S.
- */
-asmlinkage long sys_rt_sigreturn_wrapper(void);
-
-#include <asm-generic/syscalls.h>
-
-#endif /* __ASM_SYSCALLS_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 459bf8e53208..702e1e6a0d80 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -48,7 +48,6 @@ struct thread_info {
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
- struct restart_block restart_block;
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
@@ -60,9 +59,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b780c6c76eec..3bc498c250dc 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,10 +44,13 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 387
+#define __NR_compat_syscalls 388
#endif
#define __ARCH_WANT_SYS_CLONE
+
+#ifndef __COMPAT_SYSCALL_NR
#include <uapi/asm/unistd.h>
+#endif
#define NR_syscalls (__NR_syscalls)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 8893cebcea5b..27224426e0bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
__SYSCALL(__NR_memfd_create, sys_memfd_create)
#define __NR_bpf 386
__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index 942376d37d22..825b0fe51c2b 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -18,4 +18,5 @@ header-y += siginfo.h
header-y += signal.h
header-y += stat.h
header-y += statfs.h
+header-y += ucontext.h
header-y += unistd.h
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 8e38878c87c6..3ef77a466018 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -78,6 +78,13 @@ struct kvm_regs {
#define KVM_VGIC_V2_DIST_SIZE 0x1000
#define KVM_VGIC_V2_CPU_SIZE 0x2000
+/* Supported VGICv3 address types */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
+
+#define KVM_VGIC_V3_DIST_SIZE SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
+
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
@@ -161,6 +168,8 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
+#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/uapi/asm/ucontext.h
index 42e04c877428..791de8e89e35 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/uapi/asm/ucontext.h
@@ -13,8 +13,10 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_UCONTEXT_H
-#define __ASM_UCONTEXT_H
+#ifndef _UAPI__ASM_UCONTEXT_H
+#define _UAPI__ASM_UCONTEXT_H
+
+#include <linux/types.h>
struct ucontext {
unsigned long uc_flags;
@@ -27,4 +29,4 @@ struct ucontext {
struct sigcontext uc_mcontext;
};
-#endif /* __ASM_UCONTEXT_H */
+#endif /* _UAPI__ASM_UCONTEXT_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index eaa77ed7766a..bef04afd6031 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -16,10 +16,10 @@ arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
- cpuinfo.o cpu_errata.o alternative.o
+ cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o \
+ sys_compat.o entry32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
@@ -27,7 +27,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
arm64-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
arm64-obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c363671d7509..7922c2e710ca 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -19,6 +19,7 @@
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
+#include <asm/cpufeature.h>
#define CREATE_TRACE_POINTS
#include "trace-events-emulation.h"
@@ -85,6 +86,57 @@ static void remove_emulation_hooks(struct insn_emulation_ops *ops)
pr_notice("Removed %s emulation handler\n", ops->name);
}
+static void enable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->ops->set_hw_mode)
+ insn->ops->set_hw_mode(true);
+}
+
+static void disable_insn_hw_mode(void *data)
+{
+ struct insn_emulation *insn = (struct insn_emulation *)data;
+ if (insn->ops->set_hw_mode)
+ insn->ops->set_hw_mode(false);
+}
+
+/* Run set_hw_mode(mode) on all active CPUs */
+static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+{
+ if (!insn->ops->set_hw_mode)
+ return -EINVAL;
+ if (enable)
+ on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+ else
+ on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+ return 0;
+}
+
+/*
+ * Run set_hw_mode for all insns on a starting CPU.
+ * Returns:
+ * 0 - If all the hooks ran successfully.
+ * -EINVAL - At least one hook is not supported by the CPU.
+ */
+static int run_all_insn_set_hw_mode(unsigned long cpu)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct insn_emulation *insn;
+
+ raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+ list_for_each_entry(insn, &insn_emulation, node) {
+ bool enable = (insn->current_mode == INSN_HW);
+ if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
+ pr_warn("CPU[%ld] cannot support the emulation of %s\n",
+ cpu, insn->ops->name);
+ rc = -EINVAL;
+ }
+ }
+ raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+ return rc;
+}
+
static int update_insn_emulation_mode(struct insn_emulation *insn,
enum insn_emulation_mode prev)
{
@@ -97,10 +149,8 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
remove_emulation_hooks(insn->ops);
break;
case INSN_HW:
- if (insn->ops->set_hw_mode) {
- insn->ops->set_hw_mode(false);
+ if (!run_all_cpu_set_hw_mode(insn, false))
pr_notice("Disabled %s support\n", insn->ops->name);
- }
break;
}
@@ -111,10 +161,9 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
register_emulation_hooks(insn->ops);
break;
case INSN_HW:
- if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+ ret = run_all_cpu_set_hw_mode(insn, true);
+ if (!ret)
pr_notice("Enabled %s support\n", insn->ops->name);
- else
- ret = -EINVAL;
break;
}
@@ -133,6 +182,8 @@ static void register_insn_emulation(struct insn_emulation_ops *ops)
switch (ops->status) {
case INSN_DEPRECATED:
insn->current_mode = INSN_EMULATE;
+ /* Disable the HW mode if it was turned on at early boot time */
+ run_all_cpu_set_hw_mode(insn, false);
insn->max = INSN_HW;
break;
case INSN_OBSOLETE:
@@ -453,8 +504,6 @@ ret:
return 0;
}
-#define SCTLR_EL1_CP15BEN (1 << 5)
-
static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
@@ -465,48 +514,13 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
asm volatile("msr sctlr_el1, %0" : : "r" (val));
}
-static void enable_cp15_ben(void *info)
-{
- config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
-}
-
-static void disable_cp15_ben(void *info)
-{
- config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
-}
-
-static int cpu_hotplug_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
-{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- enable_cp15_ben(NULL);
- return NOTIFY_DONE;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- disable_cp15_ben(NULL);
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
- .notifier_call = cpu_hotplug_notify,
-};
-
static int cp15_barrier_set_hw_mode(bool enable)
{
- if (enable) {
- register_cpu_notifier(&cpu_hotplug_notifier);
- on_each_cpu(enable_cp15_ben, NULL, true);
- } else {
- unregister_cpu_notifier(&cpu_hotplug_notifier);
- on_each_cpu(disable_cp15_ben, NULL, true);
- }
-
- return true;
+ if (enable)
+ config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+ else
+ config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+ return 0;
}
static struct undef_hook cp15_barrier_hooks[] = {
@@ -534,6 +548,93 @@ static struct insn_emulation_ops cp15_barrier_ops = {
.set_hw_mode = cp15_barrier_set_hw_mode,
};
+static int setend_set_hw_mode(bool enable)
+{
+ if (!cpu_supports_mixed_endian_el0())
+ return -EINVAL;
+
+ if (enable)
+ config_sctlr_el1(SCTLR_EL1_SED, 0);
+ else
+ config_sctlr_el1(0, SCTLR_EL1_SED);
+ return 0;
+}
+
+static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
+{
+ char *insn;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+ if (big_endian) {
+ insn = "setend be";
+ regs->pstate |= COMPAT_PSR_E_BIT;
+ } else {
+ insn = "setend le";
+ regs->pstate &= ~COMPAT_PSR_E_BIT;
+ }
+
+ trace_instruction_emulation(insn, regs->pc);
+ pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
+ current->comm, (unsigned long)current->pid, regs->pc);
+
+ return 0;
+}
+
+static int a32_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 9) & 1);
+ regs->pc += 4;
+ return rc;
+}
+
+static int t16_setend_handler(struct pt_regs *regs, u32 instr)
+{
+ int rc = compat_setend_handler(regs, (instr >> 3) & 1);
+ regs->pc += 2;
+ return rc;
+}
+
+static struct undef_hook setend_hooks[] = {
+ {
+ .instr_mask = 0xfffffdff,
+ .instr_val = 0xf1010000,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = COMPAT_PSR_MODE_USR,
+ .fn = a32_setend_handler,
+ },
+ {
+ /* Thumb mode */
+ .instr_mask = 0x0000fff7,
+ .instr_val = 0x0000b650,
+ .pstate_mask = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
+ .pstate_val = (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
+ .fn = t16_setend_handler,
+ },
+ {}
+};
+
+static struct insn_emulation_ops setend_ops = {
+ .name = "setend",
+ .status = INSN_DEPRECATED,
+ .hooks = setend_hooks,
+ .set_hw_mode = setend_set_hw_mode,
+};
+
+static int insn_cpu_hotplug_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int rc = 0;
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
+ rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
+
+ return notifier_from_errno(rc);
+}
+
+static struct notifier_block insn_cpu_hotplug_notifier = {
+ .notifier_call = insn_cpu_hotplug_notify,
+};
+
/*
* Invoked as late_initcall, since not needed before init spawned.
*/
@@ -545,6 +646,14 @@ static int __init armv8_deprecated_init(void)
if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
register_insn_emulation(&cp15_barrier_ops);
+ if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
+ if (system_supports_mixed_endian_el0())
+ register_insn_emulation(&setend_ops);
+ else
+ pr_info("setend instruction emulation is not supported on the system\n");
+ }
+
+ register_cpu_notifier(&insn_cpu_hotplug_notifier);
register_insn_emulation_sysctl(ctl_abi);
return 0;
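Both setend handlers boil down to reading the E bit of the trapped instruction: bit 9 in the A32 encoding and bit 3 in the Thumb encoding, matching the masks in setend_hooks. A stand-alone sketch of that decode on two example instruction words (no trap machinery involved):

#include <stdint.h>
#include <stdio.h>

/* A32 SETEND: 0xf1010000 with E in bit 9; T16 SETEND: 0xb650 with E in bit 3. */
static int a32_setend_is_be(uint32_t instr) { return (instr >> 9) & 1; }
static int t16_setend_is_be(uint32_t instr) { return (instr >> 3) & 1; }

int main(void)
{
	uint32_t a32 = 0xf1010200;	/* SETEND BE (A32 encoding) */
	uint32_t t16 = 0x0000b650;	/* SETEND LE (Thumb encoding) */

	printf("A32 0x%08x -> %s\n", a32, a32_setend_is_be(a32) ? "BE" : "LE");
	printf("T16 0x%08x -> %s\n", t16, t16_setend_is_be(t16) ? "BE" : "LE");
	return 0;
}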
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 9a9fce090d58..f7fa65d4c352 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -140,6 +140,7 @@ int main(void)
DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+ DEFINE(VGIC_V3_CPU_SRE, offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
@@ -152,7 +153,7 @@ int main(void)
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b8629d52fba9
--- /dev/null
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -0,0 +1,128 @@
+/*
+ * ARM64 cacheinfo support
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/compiler.h>
+#include <linux/of.h>
+
+#include <asm/cachetype.h>
+#include <asm/processor.h>
+
+#define MAX_CACHE_LEVEL 7 /* Max 7 levels supported */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level) \
+ (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+static inline enum cache_type get_cache_type(int level)
+{
+ u64 clidr;
+
+ if (level > MAX_CACHE_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+ asm volatile ("mrs %x0, clidr_el1" : "=r" (clidr));
+ return CLIDR_CTYPE(clidr, level);
+}
+
+/*
+ * Cache Size Selection Register (CSSELR) selects which Cache Size ID
+ * Register (CCSIDR) is accessible by specifying the required cache
+ * level and the cache type. Callers must ensure that no one else
+ * changes CSSELR, i.e. call this only in non-preemptible context.
+ */
+u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
+{
+ u64 ccsidr;
+
+ WARN_ON(preemptible());
+
+ /* Put value into CSSELR */
+ asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+ isb();
+ /* Read result out of CCSIDR */
+ asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr));
+
+ return ccsidr;
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+ enum cache_type type, unsigned int level)
+{
+ bool is_icache = type & CACHE_TYPE_INST;
+ u64 tmp = cache_get_ccsidr((level - 1) << 1 | is_icache);
+
+ this_leaf->level = level;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = CACHE_LINESIZE(tmp);
+ this_leaf->number_of_sets = CACHE_NUMSETS(tmp);
+ this_leaf->ways_of_associativity = CACHE_ASSOCIATIVITY(tmp);
+ this_leaf->size = this_leaf->number_of_sets *
+ this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
+ this_leaf->attributes =
+ ((tmp & CCSIDR_EL1_WRITE_THROUGH) ? CACHE_WRITE_THROUGH : 0) |
+ ((tmp & CCSIDR_EL1_WRITE_BACK) ? CACHE_WRITE_BACK : 0) |
+ ((tmp & CCSIDR_EL1_READ_ALLOCATE) ? CACHE_READ_ALLOCATE : 0) |
+ ((tmp & CCSIDR_EL1_WRITE_ALLOCATE) ? CACHE_WRITE_ALLOCATE : 0);
+}
+
+static int __init_cache_level(unsigned int cpu)
+{
+ unsigned int ctype, level, leaves;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+ ctype = get_cache_type(level);
+ if (ctype == CACHE_TYPE_NOCACHE) {
+ level--;
+ break;
+ }
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ }
+
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+static int __populate_cache_leaves(unsigned int cpu)
+{
+ unsigned int level, idx;
+ enum cache_type type;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ type = get_cache_type(level);
+ if (type == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, type, level);
+ }
+ }
+ return 0;
+}
+
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
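__init_cache_level() walks CLIDR_EL1 three bits per level: Ctype 0 terminates the walk, and a "separate" type contributes both an instruction leaf and a data leaf. A sketch of that walk over a fabricated CLIDR value (the type constants are local stand-ins for the cacheinfo enum):

#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL		7
#define CTYPE_SHIFT(l)		(3 * ((l) - 1))
#define CTYPE(clidr, l)		(((clidr) >> CTYPE_SHIFT(l)) & 7)

enum { NOCACHE = 0, INST_ONLY = 1, DATA_ONLY = 2, SEPARATE = 3, UNIFIED = 4 };

int main(void)
{
	/* Fabricated CLIDR: L1 has separate I/D caches, L2 is unified, nothing above. */
	uint64_t clidr = (SEPARATE << CTYPE_SHIFT(1)) |
			 ((uint64_t)UNIFIED << CTYPE_SHIFT(2));
	unsigned int level, leaves = 0;

	for (level = 1; level <= MAX_LEVEL; level++) {
		unsigned int ctype = CTYPE(clidr, level);

		if (ctype == NOCACHE) {
			level--;		/* last populated level */
			break;
		}
		leaves += (ctype == SEPARATE) ? 2 : 1;
	}
	printf("levels=%u leaves=%u\n", level, leaves);	/* prints levels=2 leaves=3 */
	return 0;
}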
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 19d17f51db37..5c0896647fd1 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -29,3 +29,23 @@ int cpu_init_idle(unsigned int cpu)
of_node_put(cpu_node);
return ret;
}
+
+/**
+ * cpu_suspend() - function to enter a low-power idle state
+ * @arg: argument to pass to CPU suspend operations
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the CPU suspend hook is not
+ * initialized, or the CPU operations back-end error code otherwise.
+ */
+int cpu_suspend(unsigned long arg)
+{
+ int cpu = smp_processor_id();
+
+ /*
+ * If cpu_ops have not been registered, or the suspend hook has not
+ * been initialized, the cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
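The new cpu_suspend() is a thin dispatcher: if no enable-method registered a suspend hook for this CPU it fails with -EOPNOTSUPP, otherwise the back-end decides. A hedged sketch of that dispatch shape; the ops table, NR_CPUS and the PSCI-like stub below are illustrative, not the kernel objects:

#include <errno.h>
#include <stdio.h>

struct cpu_operations {
	int (*cpu_suspend)(unsigned long arg);
};

#define NR_CPUS 4
static const struct cpu_operations *cpu_ops[NR_CPUS];

static int psci_like_suspend(unsigned long arg)
{
	printf("entering idle state %lu\n", arg);
	return 0;
}

static const struct cpu_operations psci_like_ops = {
	.cpu_suspend = psci_like_suspend,
};

static int cpu_suspend(unsigned int cpu, unsigned long arg)
{
	/* Fail early when no back-end registered a suspend hook for this CPU. */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
		return -EOPNOTSUPP;
	return cpu_ops[cpu]->cpu_suspend(arg);
}

int main(void)
{
	cpu_ops[0] = &psci_like_ops;
	printf("cpu0: %d\n", cpu_suspend(0, 1));	/* dispatches to the stub */
	printf("cpu1: %d\n", cpu_suspend(1, 1));	/* -EOPNOTSUPP: no ops */
	return 0;
}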
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 07d435cf2eea..929855691dae 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -35,6 +35,7 @@
*/
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
+static bool mixed_endian_el0 = true;
static char *icache_policy_str[] = {
[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
@@ -68,6 +69,26 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+bool cpu_supports_mixed_endian_el0(void)
+{
+ return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+}
+
+bool system_supports_mixed_endian_el0(void)
+{
+ return mixed_endian_el0;
+}
+
+static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
+{
+ mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
+}
+
+static void update_cpu_features(struct cpuinfo_arm64 *info)
+{
+ update_mixed_endian_el0_support(info);
+}
+
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
if ((boot & mask) == (cur & mask))
@@ -215,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
+ update_cpu_features(info);
}
void cpuinfo_store_cpu(void)
@@ -231,15 +253,3 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
}
-
-u64 __attribute_const__ icache_get_ccsidr(void)
-{
- u64 ccsidr;
-
- WARN_ON(preemptible());
-
- /* Select L1 I-cache and read its size ID register */
- asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
- : "=r"(ccsidr) : "r"(1L));
- return ccsidr;
-}
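system_supports_mixed_endian_el0() starts out true and every CPU that comes online ANDs in its own capability, so a single core without mixed-endian support at EL0 disables the feature system-wide. A sketch of that accumulation over fabricated per-CPU ID register values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIGEND(m)	(((m) >> 8) & 0xf)	/* illustrative field extractors */
#define BIGENDEL0(m)	(((m) >> 16) & 0xf)

static bool mixed_endian_el0 = true;		/* optimistic until a CPU objects */

static void update_mixed_endian_el0(uint64_t mmfr0)
{
	mixed_endian_el0 &= (BIGEND(mmfr0) == 1) || (BIGENDEL0(mmfr0) == 1);
}

int main(void)
{
	/* Fabricated ID_AA64MMFR0_EL1 values: the third CPU lacks mixed-endian support. */
	uint64_t mmfr0[] = { 0x122, 0x122, 0x022 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		update_mixed_endian_el0(mmfr0[i]);

	printf("system mixed-endian EL0: %s\n", mixed_endian_el0 ? "yes" : "no");
	return 0;
}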
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index d27dd982ff26..f5374065ad53 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,13 +13,13 @@
#include <asm/efi.h>
#include <asm/sections.h>
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
- unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image)
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2bb4347d0edf..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,27 +11,46 @@
*
*/
+#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
struct efi_memory_map memmap;
-static efi_runtime_services_t *runtime;
-
static u64 efi_system_table;
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+ .mm_rb = RB_ROOT,
+ .pgd = efi_pgd,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+ INIT_MM_CONTEXT(efi_mm)
+};
+
static int uefi_debug __initdata;
static int __init uefi_debug_setup(char *str)
{
@@ -48,30 +67,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
return 0;
}
-static void __init efi_setup_idmap(void)
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
{
- struct memblock_region *r;
efi_memory_desc_t *md;
- u64 paddr, npages, size;
- for_each_memblock(memory, r)
- create_id_mapping(r->base, r->size, 0);
-
- /* map runtime io spaces */
for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
- create_id_mapping(paddr, size, 1);
+ if (md->virt_addr == 0)
+ /* no virtual mapping has been installed by the stub */
+ break;
+ if (md->virt_addr <= addr &&
+ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+ return md->phys_addr + addr - md->virt_addr;
}
+ return addr;
}
static int __init uefi_init(void)
{
efi_char16_t *c16;
+ void *config_tables;
+ u64 table_size;
char vendor[100] = "unknown";
int i, retval;
@@ -99,7 +121,7 @@ static int __init uefi_init(void)
efi.systab->hdr.revision & 0xffff);
/* Show what we know for posterity */
- c16 = early_memremap(efi.systab->fw_vendor,
+ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor));
if (c16) {
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +134,14 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
- retval = efi_config_init(NULL);
+ table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+ config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+ table_size);
+ retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+ sizeof(efi_config_table_64_t), NULL);
+
+ early_memunmap(config_tables, table_size);
out:
early_memunmap(efi.systab, sizeof(efi_system_table_t));
return retval;
@@ -163,9 +191,7 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md) ||
- md->type == EFI_BOOT_SERVICES_CODE ||
- md->type == EFI_BOOT_SERVICES_DATA) {
+ if (is_reserve_region(md)) {
memblock_reserve(paddr, size);
if (uefi_debug)
pr_cont("*");
@@ -178,123 +204,6 @@ static __init void reserve_regions(void)
set_bit(EFI_MEMMAP, &efi.flags);
}
-
-static u64 __init free_one_region(u64 start, u64 end)
-{
- u64 size = end - start;
-
- if (uefi_debug)
- pr_info(" EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
-
- free_bootmem_late(start, size);
- return size;
-}
-
-static u64 __init free_region(u64 start, u64 end)
-{
- u64 map_start, map_end, total = 0;
-
- if (end <= start)
- return total;
-
- map_start = (u64)memmap.phys_map;
- map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
- map_start &= PAGE_MASK;
-
- if (start < map_end && end > map_start) {
- /* region overlaps UEFI memmap */
- if (start < map_start)
- total += free_one_region(start, map_start);
-
- if (map_end < end)
- total += free_one_region(map_end, end);
- } else
- total += free_one_region(start, end);
-
- return total;
-}
-
-static void __init free_boot_services(void)
-{
- u64 total_freed = 0;
- u64 keep_end, free_start, free_end;
- efi_memory_desc_t *md;
-
- /*
- * If kernel uses larger pages than UEFI, we have to be careful
-	 * not to inadvertently free memory we want to keep if there is
- * overlap at the kernel page size alignment. We do not want to
- * free is_reserve_region() memory nor the UEFI memmap itself.
- *
- * The memory map is sorted, so we keep track of the end of
- * any previous region we want to keep, remember any region
- * we want to free and defer freeing it until we encounter
- * the next region we want to keep. This way, before freeing
- * it, we can clip it as needed to avoid freeing memory we
- * want to keep for UEFI.
- */
-
- keep_end = 0;
- free_start = 0;
-
- for_each_efi_memory_desc(&memmap, md) {
- u64 paddr, npages, size;
-
- if (is_reserve_region(md)) {
- /*
- * We don't want to free any memory from this region.
- */
- if (free_start) {
- /* adjust free_end then free region */
- if (free_end > md->phys_addr)
- free_end -= PAGE_SIZE;
- total_freed += free_region(free_start, free_end);
- free_start = 0;
- }
- keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
- continue;
- }
-
- if (md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA) {
- /* no need to free this region */
- continue;
- }
-
- /*
- * We want to free memory from this region.
- */
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
-
- if (free_start) {
- if (paddr <= free_end)
- free_end = paddr + size;
- else {
- total_freed += free_region(free_start, free_end);
- free_start = paddr;
- free_end = paddr + size;
- }
- } else {
- free_start = paddr;
- free_end = paddr + size;
- }
- if (free_start < keep_end) {
- free_start += PAGE_SIZE;
- if (free_start >= free_end)
- free_start = 0;
- }
- }
- if (free_start)
- total_freed += free_region(free_start, free_end);
-
- if (total_freed)
- pr_info("Freed 0x%llx bytes of EFI boot services memory",
- total_freed);
-}
-
void __init efi_init(void)
{
struct efi_fdt_params params;
@@ -317,159 +226,100 @@ void __init efi_init(void)
return;
reserve_regions();
+ early_memunmap(memmap.map, params.mmap_size);
}
-void __init efi_idmap_init(void)
+static bool __init efi_virtmap_init(void)
{
- if (!efi_enabled(EFI_BOOT))
- return;
-
- /* boot time idmap_pg_dir is incomplete, so fill in missing parts */
- efi_setup_idmap();
- early_memunmap(memmap.map, memmap.map_end - memmap.map);
-}
-
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
- u64 paddr, vaddr, npages, size;
-
- paddr = md->phys_addr;
- npages = md->num_pages;
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
+ efi_memory_desc_t *md;
- if (is_normal_ram(md))
- vaddr = (__force u64)ioremap_cache(paddr, size);
- else
- vaddr = (__force u64)ioremap(paddr, size);
+ for_each_efi_memory_desc(&memmap, md) {
+ u64 paddr, npages, size;
+ pgprot_t prot;
- if (!vaddr) {
- pr_err("Unable to remap 0x%llx pages @ %p\n",
- npages, (void *)paddr);
- return 0;
- }
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
- /* adjust for any rounding when EFI and system pagesize differs */
- md->virt_addr = vaddr + (md->phys_addr - paddr);
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
- if (uefi_debug)
- pr_info(" EFI remap 0x%012llx => %p\n",
+ pr_info(" EFI remap 0x%016llx => %p\n",
md->phys_addr, (void *)md->virt_addr);
- memcpy(*new, md, memmap.desc_size);
- *new += memmap.desc_size;
-
- return 1;
+ /*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable, everything else can be mapped with the XN bits
+ * set.
+ */
+ if (!is_normal_ram(md))
+ prot = __pgprot(PROT_DEVICE_nGnRE);
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ prot = PAGE_KERNEL_EXEC;
+ else
+ prot = PAGE_KERNEL;
+
+ create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+ }
+ return true;
}
/*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
*/
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
{
- efi_memory_desc_t *md;
- phys_addr_t virtmap_phys;
- void *virtmap, *virt_md;
- efi_status_t status;
u64 mapsize;
- int count = 0;
- unsigned long flags;
if (!efi_enabled(EFI_BOOT)) {
pr_info("EFI services will not be available.\n");
return -1;
}
- mapsize = memmap.map_end - memmap.map;
-
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return -1;
}
pr_info("Remapping and enabling EFI services.\n");
- /* replace early memmap mapping with permanent mapping */
+
+ mapsize = memmap.map_end - memmap.map;
memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
mapsize);
- memmap.map_end = memmap.map + mapsize;
-
- efi.memmap = &memmap;
-
- /* Map the runtime regions */
- virtmap = kmalloc(mapsize, GFP_KERNEL);
- if (!virtmap) {
- pr_err("Failed to allocate EFI virtual memmap\n");
+ if (!memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
return -1;
}
- virtmap_phys = virt_to_phys(virtmap);
- virt_md = virtmap;
-
- for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
- continue;
- if (!remap_region(md, &virt_md))
- goto err_unmap;
- ++count;
- }
+ memmap.map_end = memmap.map + mapsize;
+ efi.memmap = &memmap;
- efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+ efi.systab = (__force void *)ioremap_cache(efi_system_table,
+ sizeof(efi_system_table_t));
if (!efi.systab) {
- /*
- * If we have no virtual mapping for the System Table at this
- * point, the memory map doesn't cover the physical offset where
- * it resides. This means the System Table will be inaccessible
- * to Runtime Services themselves once the virtual mapping is
- * installed.
- */
- pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
- goto err_unmap;
+ pr_err("Failed to remap EFI System Table\n");
+ return -1;
}
set_bit(EFI_SYSTEM_TABLES, &efi.flags);
- local_irq_save(flags);
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-
- /* Call SetVirtualAddressMap with the physical address of the map */
- runtime = efi.systab->runtime;
- efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
- status = efi.set_virtual_address_map(count * memmap.desc_size,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)virtmap_phys);
- cpu_set_reserved_ttbr0();
- flush_tlb_all();
- local_irq_restore(flags);
-
- kfree(virtmap);
-
- free_boot_services();
-
- if (status != EFI_SUCCESS) {
- pr_err("Failed to set EFI virtual address map! [%lx]\n",
- status);
+ if (!efi_virtmap_init()) {
+ pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
return -1;
}
/* Set up runtime services function pointers */
- runtime = efi.systab->runtime;
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi.runtime_version = efi.systab->hdr.revision;
return 0;
-
-err_unmap:
- /* unmap all mappings that succeeded: there are 'count' of those */
- for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
- md = virt_md;
- iounmap((__force void __iomem *)md->virt_addr);
- }
- kfree(virtmap);
- return -1;
}
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
static int __init arm64_dmi_init(void)
{
@@ -484,3 +334,23 @@ static int __init arm64_dmi_init(void)
return 0;
}
core_initcall(arm64_dmi_init);
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+ if (icache_is_aivivt())
+ __flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+ efi_set_pgd(current->active_mm);
+ preempt_enable();
+}
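A minimal sketch of how a caller might bracket a Runtime Services invocation with the pair above, so the firmware executes on efi_mm's page tables; the wrapper name is hypothetical and efi.get_time() is used only as a convenient example of a runtime call:

#include <linux/efi.h>
#include <asm/efi.h>

/* Illustrative only: run one runtime call under the EFI virtual mapping. */
static efi_status_t example_efi_get_time(efi_time_t *tm)
{
	efi_status_t status;

	efi_virtmap_load();			/* switch to efi_mm's page tables */
	status = efi.get_time(tm, NULL);	/* firmware sees its own mappings */
	efi_virtmap_unload();			/* back to current->active_mm */

	return status;
}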
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fd4fa374e5d2..cf21bb3bf752 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid)
el1_sync:
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
- lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
+ lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1
+ cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_da:
@@ -318,7 +318,7 @@ el1_dbg:
/*
* Debug exception handling
*/
- cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
@@ -375,26 +375,26 @@ el1_preempt:
el0_sync:
kernel_entry 0
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
@@ -403,37 +403,37 @@ el0_sync:
el0_sync_compat:
kernel_entry 0, 32
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap
+ cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_svc_compat:
/*
* AArch32 syscall handling
*/
- adr stbl, compat_sys_call_table // load compat syscall table pointer
+ adrp stbl, compat_sys_call_table // load compat syscall table pointer
uxtw scno, w7 // syscall number in w7 (r7)
mov sc_nr, #__NR_compat_syscalls
b el0_svc_naked
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/entry32.S
index 423a5b3fc2be..9a8f6ae2530e 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -27,26 +27,26 @@
* System call wrappers for the AArch32 compatibility layer.
*/
-compat_sys_sigreturn_wrapper:
+ENTRY(compat_sys_sigreturn_wrapper)
mov x0, sp
mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_sigreturn
ENDPROC(compat_sys_sigreturn_wrapper)
-compat_sys_rt_sigreturn_wrapper:
+ENTRY(compat_sys_rt_sigreturn_wrapper)
mov x0, sp
mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_rt_sigreturn
ENDPROC(compat_sys_rt_sigreturn_wrapper)
-compat_sys_statfs64_wrapper:
+ENTRY(compat_sys_statfs64_wrapper)
mov w3, #84
cmp w1, #88
csel w1, w3, w1, eq
b compat_sys_statfs64
ENDPROC(compat_sys_statfs64_wrapper)
-compat_sys_fstatfs64_wrapper:
+ENTRY(compat_sys_fstatfs64_wrapper)
mov w3, #84
cmp w1, #88
csel w1, w3, w1, eq
@@ -58,33 +58,33 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* in registers or that take 32-bit parameters which require sign
* extension.
*/
-compat_sys_pread64_wrapper:
+ENTRY(compat_sys_pread64_wrapper)
regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
-compat_sys_pwrite64_wrapper:
+ENTRY(compat_sys_pwrite64_wrapper)
regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
-compat_sys_truncate64_wrapper:
+ENTRY(compat_sys_truncate64_wrapper)
regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
-compat_sys_ftruncate64_wrapper:
+ENTRY(compat_sys_ftruncate64_wrapper)
regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
-compat_sys_readahead_wrapper:
+ENTRY(compat_sys_readahead_wrapper)
regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
-compat_sys_fadvise64_64_wrapper:
+ENTRY(compat_sys_fadvise64_64_wrapper)
mov w6, w1
regs_to_64 x1, x2, x3
regs_to_64 x2, x4, x5
@@ -92,24 +92,14 @@ compat_sys_fadvise64_64_wrapper:
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
-compat_sys_sync_file_range2_wrapper:
+ENTRY(compat_sys_sync_file_range2_wrapper)
regs_to_64 x2, x2, x3
regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
-compat_sys_fallocate_wrapper:
+ENTRY(compat_sys_fallocate_wrapper)
regs_to_64 x2, x2, x3
regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
-
-#undef __SYSCALL
-#define __SYSCALL(x, y) .quad y // x
-
-/*
- * The system calls table must be 4KB aligned.
- */
- .align 12
-ENTRY(compat_sys_call_table)
-#include <asm/unistd32.h>
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index df1cf15377b4..98bbe06e469c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -894,7 +894,7 @@ static struct notifier_block hw_breakpoint_reset_nb = {
.notifier_call = hw_breakpoint_reset_notify,
};
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7e9327a0986d..27d4864577e5 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -17,14 +17,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/stop_machine.h>
+#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/fixmap.h>
#include <asm/insn.h>
#define AARCH64_INSN_SF_BIT BIT(31)
@@ -72,6 +77,29 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool module = !core_kernel_text(uintaddr);
+ struct page *page;
+
+ if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ page = vmalloc_to_page(addr);
+ else
+ page = virt_to_page(addr);
+
+ BUG_ON(!page);
+ set_fixmap(fixmap, page_to_phys(page));
+
+ return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
/*
* In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
* little-endian.
@@ -88,10 +116,27 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
return ret;
}
+static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
insn = cpu_to_le32(insn);
- return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+ return __aarch64_insn_write(addr, insn);
}
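For context, a caller that patches a single instruction would pair the write above with I-cache maintenance on the real address, since the store lands through the FIX_TEXT_POKE0 alias. A rough sketch, assuming the usual arm64 flush_icache_range() helper is available (the function name below is illustrative):

#include <asm/cacheflush.h>
#include <asm/insn.h>

/* Illustrative only: replace one instruction and make it visible to execution. */
static int example_poke_insn(void *addr, u32 insn)
{
	int ret;

	ret = aarch64_insn_write(addr, insn);	/* writes via the fixmap alias */
	if (ret)
		return ret;

	/* Clean the D-cache and invalidate the I-cache for the patched VA. */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + AARCH64_INSN_SIZE);
	return 0;
}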
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 9b6f71db2709..67bf4107f6ef 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -35,8 +35,8 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
- __builtin_return_address(0));
+ GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
enum aarch64_reloc_op {
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index ce5836c14ec1..6f93c24ca801 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
return 0;
}
-
-
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
-static bool dt_domain_found = false;
-
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
-{
- int domain = of_get_pci_domain_nr(parent->of_node);
-
- if (domain >= 0) {
- dt_domain_found = true;
- } else if (dt_domain_found == true) {
- dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
- parent->of_node->full_name);
- return;
- } else {
- domain = pci_get_new_domain_nr();
- }
-
- bus->domain_nr = domain;
-}
-#endif
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index f1dbca7d5c96..3425f311c49e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -540,8 +540,6 @@ const struct cpu_operations cpu_psci_ops = {
.name = "psci",
#ifdef CONFIG_CPU_IDLE
.cpu_init_idle = cpu_psci_cpu_init_idle,
-#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 20fe2932ad0c..e8420f635bd4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -40,6 +40,7 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
+#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
@@ -322,25 +323,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
-/*
- * Limit the memory size that was specified via FDT.
- */
-static int __init early_mem(char *p)
-{
- phys_addr_t limit;
-
- if (!p)
- return 1;
-
- limit = memparse(p, &p) & PAGE_MASK;
- pr_notice("Memory limited to %lldMB\n", limit >> 20);
-
- memblock_enforce_memory_limit(limit);
-
- return 0;
-}
-early_param("mem", early_mem);
-
static void __init request_standard_resources(void)
{
struct memblock_region *region;
@@ -401,7 +383,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
request_standard_resources();
- efi_idmap_init();
early_ioremap_reset();
unflatten_device_tree();
@@ -425,6 +406,7 @@ void __init setup_arch(char **cmdline_p)
static int __init arm64_device_init(void)
{
+ of_iommu_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6fa792137eda..660ccf9f7524 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -131,7 +131,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
struct rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 128-bit boundary, then 'sp' should
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 5a1ba6e80d4e..c20a300e2213 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -347,7 +347,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
struct compat_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -381,7 +381,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
struct compat_rt_sigframe __user *frame;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/*
* Since we stacked the signal on a 64-bit boundary,
@@ -440,7 +440,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
{
compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
compat_ulong_t retcode;
- compat_ulong_t spsr = regs->pstate & ~PSR_f;
+ compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
int thumb;
/* Check if the handler is written for ARM or Thumb */
@@ -454,6 +454,9 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* The IT state must be cleared for both ARM and Thumb-2 */
spsr &= ~COMPAT_PSR_IT_MASK;
+ /* Restore the original endianness */
+ spsr |= COMPAT_PSR_ENDSTATE;
+
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
@@ -501,7 +504,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the compat FSR WnR */
- __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+ __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 7ae6ee085261..328b8ce4b007 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -65,7 +65,6 @@ struct secondary_data secondary_data;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
- IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
@@ -483,7 +482,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
- S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_IRQ_WORK, "IRQ work interrupts"),
@@ -527,7 +525,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
void arch_send_call_function_single_ipi(int cpu)
{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
#ifdef CONFIG_IRQ_WORK
@@ -585,12 +583,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
- case IPI_CALL_FUNC_SINGLE:
- irq_enter();
- generic_smp_call_function_single_interrupt();
- irq_exit();
- break;
-
case IPI_CPU_STOP:
irq_enter();
ipi_cpu_stop(cpu);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 2d6b6065fe7f..d7daf45ae7a2 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,7 +1,6 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include <asm/cpu_ops.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
@@ -51,26 +50,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
hw_breakpoint_restore = hw_bp_restore;
}
-/**
- * cpu_suspend() - function to enter a low-power state
- * @arg: argument to pass to CPU suspend operations
- *
- * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
- * operations back-end error code otherwise.
- */
-int cpu_suspend(unsigned long arg)
-{
- int cpu = smp_processor_id();
-
- /*
- * If cpu_ops have not been registered or suspend
- * has not been initialized, cpu_suspend call fails early.
- */
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
- return -EOPNOTSUPP;
- return cpu_ops[cpu]->cpu_suspend(arg);
-}
-
/*
* __cpu_suspend
*
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 3fa98ff14f0e..75151aaf1a52 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -39,10 +39,9 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
/*
* Wrappers to pass the pt_regs argument.
*/
+asmlinkage long sys_rt_sigreturn_wrapper(void);
#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
-#include <asm/syscalls.h>
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = sym,
@@ -50,7 +49,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
* The sys_call_table array must be 4K aligned to be accessible from
* kernel/entry.S.
*/
-void *sys_call_table[__NR_syscalls] __aligned(4096) = {
+void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
new file mode 100644
index 000000000000..2d5ab3c90b82
--- /dev/null
+++ b/arch/arm64/kernel/sys32.c
@@ -0,0 +1,51 @@
+/*
+ * arch/arm64/kernel/sys32.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Needed to avoid conflicting __NR_* macros between uapi/asm/unistd.h and
+ * asm/unistd32.h.
+ */
+#define __COMPAT_SYSCALL_NR
+
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+
+asmlinkage long compat_sys_sigreturn_wrapper(void);
+asmlinkage long compat_sys_rt_sigreturn_wrapper(void);
+asmlinkage long compat_sys_statfs64_wrapper(void);
+asmlinkage long compat_sys_fstatfs64_wrapper(void);
+asmlinkage long compat_sys_pread64_wrapper(void);
+asmlinkage long compat_sys_pwrite64_wrapper(void);
+asmlinkage long compat_sys_truncate64_wrapper(void);
+asmlinkage long compat_sys_ftruncate64_wrapper(void);
+asmlinkage long compat_sys_readahead_wrapper(void);
+asmlinkage long compat_sys_fadvise64_64_wrapper(void);
+asmlinkage long compat_sys_sync_file_range2_wrapper(void);
+asmlinkage long compat_sys_fallocate_wrapper(void);
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = sym,
+
+/*
+ * The compat_sys_call_table array must be 4K aligned to be accessible from
+ * kernel/entry.S.
+ */
+void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = {
+ [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd32.h>
+};
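To make the table construction concrete: every __SYSCALL(nr, sym) line pulled in from asm/unistd32.h expands to a designated initializer that overrides the sys_ni_syscall default for that slot. A toy table built with the same pattern (macro name, size and entries below are purely illustrative):

#include <linux/syscalls.h>

#define EXAMPLE_SYSCALL(nr, sym)	[nr] = sym,

void * const example_call_table[4] = {
	[0 ... 3] = sys_ni_syscall,	/* every slot defaults to ENOSYS */
	EXAMPLE_SYSCALL(1, sys_exit)	/* selected slots are then overridden */
	EXAMPLE_SYSCALL(3, sys_read)
};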
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0a801e3743d5..1ef2940df13c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/atomic.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
return sys_ni_syscall();
}
+static const char *esr_class_str[] = {
+ [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
+ [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
+ [ESR_ELx_EC_WFx] = "WFI/WFE",
+ [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
+ [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
+ [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
+ [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
+ [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
+ [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+ [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+ [ESR_ELx_EC_ILL] = "PSTATE.IL",
+ [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
+ [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
+ [ESR_ELx_EC_SMC32] = "SMC (AArch32)",
+ [ESR_ELx_EC_SVC64] = "SVC (AArch64)",
+ [ESR_ELx_EC_HVC64] = "HVC (AArch64)",
+ [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
+ [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
+ [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
+ [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
+ [ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
+ [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
+ [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
+ [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
+ [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
+ [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
+ [ESR_ELx_EC_SERROR] = "SError",
+ [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
+ [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
+ [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
+ [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
+ [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
+ [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
+ [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
+ [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
+ [ESR_ELx_EC_BRK64] = "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+ return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+}
+
/*
* bad_mode handles the impossible case in the exception vector.
*/
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
- pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
- handler[reason], esr);
+ pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+ handler[reason], esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9965ec87cbec..5d9d2dca530d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include "image.h"
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
#define PECOFF_EDATA_PADDING
#endif
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
+#define ALIGN_DEBUG_RO . = ALIGN(1<<SECTION_SHIFT);
+#define ALIGN_DEBUG_RO_MIN(min) ALIGN_DEBUG_RO
+#else
+#define ALIGN_DEBUG_RO
+#define ALIGN_DEBUG_RO_MIN(min) . = ALIGN(min);
+#endif
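In effect, with CONFIG_DEBUG_ALIGN_RODATA enabled ALIGN_DEBUG_RO_MIN(min) ignores its argument and pads the boundary up to a full section (1 << SECTION_SHIFT, i.e. 2 MiB with 4K pages), so text and rodata can later be mapped with section-granular permissions; with it disabled, only the requested minimum alignment (PAGE_SIZE or 16 below) is applied.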
+
SECTIONS
{
/*
@@ -71,6 +80,7 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+ ALIGN_DEBUG_RO
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
*(.got) /* Global offset table */
}
+ ALIGN_DEBUG_RO
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
+ ALIGN_DEBUG_RO
_etext = .; /* End of text and rodata section */
- . = ALIGN(PAGE_SIZE);
+ ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
__init_begin = .;
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
}
- . = ALIGN(16);
+
+ ALIGN_DEBUG_RO_MIN(16)
.init.data : {
INIT_DATA
INIT_SETUP(16)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ba85e9ea388..f5590c81d95f 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -22,10 +22,13 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_ARM_VGIC
select KVM_ARM_TIMER
+ select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select SRCU
---help---
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 32a096174b94..4e6e09ee4033 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -21,7 +21,9 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o
kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o
kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
index 124418d17049..f87d8fbaa48d 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/arch/arm64/kvm/emulate.c
@@ -22,6 +22,7 @@
*/
#include <linux/kvm_host.h>
+#include <asm/esr.h>
#include <asm/kvm_emulate.h>
/*
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
- if (esr & ESR_EL2_CV)
- return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+ if (esr & ESR_ELx_CV)
+ return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
return -1;
}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 34b8bd0711e9..524fa25671fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -21,17 +21,25 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
+
+#include <asm/esr.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
+ trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
+ kvm_vcpu_hvc_get_imm(vcpu));
+
ret = kvm_psci_call(vcpu);
if (ret < 0) {
kvm_inject_undefined(vcpu);
@@ -61,10 +69,13 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+ if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+ trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
kvm_vcpu_on_spin(vcpu);
- else
+ } else {
+ trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
kvm_vcpu_block(vcpu);
+ }
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
@@ -72,29 +83,30 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static exit_handle_fn arm_exit_handlers[] = {
- [ESR_EL2_EC_WFI] = kvm_handle_wfx,
- [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
- [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
- [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
- [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
- [ESR_EL2_EC_HVC32] = handle_hvc,
- [ESR_EL2_EC_SMC32] = handle_smc,
- [ESR_EL2_EC_HVC64] = handle_hvc,
- [ESR_EL2_EC_SMC64] = handle_smc,
- [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
- [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
- [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_WFx] = kvm_handle_wfx,
+ [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
+ [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
+ [ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32,
+ [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_ELx_EC_HVC32] = handle_hvc,
+ [ESR_ELx_EC_SMC32] = handle_smc,
+ [ESR_ELx_EC_HVC64] = handle_hvc,
+ [ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
- u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x\n",
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
+ kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
+ hsr, esr_get_class_string(hsr));
BUG();
}
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..5befd010e232 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,15 +17,16 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/memory.h>
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/memory.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1014,6 +1015,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
+ lsr x1, x1, #12
tlbi ipas2e1is, x1
/*
* We have to ensure completion of the invalidation at Stage-2,
@@ -1030,6 +1032,28 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
+/**
+ * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
+ * @struct kvm *kvm - pointer to kvm structure
+ *
+ * Invalidates all Stage 1 and 2 TLB entries for current VMID.
+ */
+ENTRY(__kvm_tlb_flush_vmid)
+ dsb ishst
+
+ kern_hyp_va x0
+ ldr x2, [x0, #KVM_VTTBR]
+ msr vttbr_el2, x2
+ isb
+
+ tlbi vmalls12e1is
+ dsb ish
+ isb
+
+ msr vttbr_el2, xzr
+ ret
+ENDPROC(__kvm_tlb_flush_vmid)
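On the C side this new entry point would be reached through the HYP call trampoline; a minimal sketch, assuming the existing kvm_call_hyp() helper used for the other __kvm_* routines (the wrapper name is hypothetical):

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>

/* Illustrative only: drop all Stage 1 and 2 TLB entries for one VM's VMID. */
static void example_flush_vm_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}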
+
ENTRY(__kvm_flush_vm_context)
dsb ishst
tlbi alle1is
@@ -1140,9 +1164,9 @@ el1_sync: // Guest trapped into EL2
push x2, x3
mrs x1, esr_el2
- lsr x2, x1, #ESR_EL2_EC_SHIFT
+ lsr x2, x1, #ESR_ELx_EC_SHIFT
- cmp x2, #ESR_EL2_EC_HVC64
+ cmp x2, #ESR_ELx_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
@@ -1177,13 +1201,13 @@ el1_trap:
* x1: ESR
* x2: ESR_EC
*/
- cmp x2, #ESR_EL2_EC_DABT
- mov x0, #ESR_EL2_EC_IABT
+ cmp x2, #ESR_ELx_EC_DABT_LOW
+ mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
- and x2, x1, #ESR_EL2_FSC_TYPE
+ and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 81a02a8762b0..f02530e726f6 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
* instruction set. Report an external synchronous abort.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
/*
* Here, the guest runs in AArch64 mode when in EL1. If we get
* an AArch32 fault, it means we managed to trap an EL0 fault.
*/
if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
- esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
else
- esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
if (!is_iabt)
- esr |= ESR_EL1_EC_DABT_EL0;
+ esr |= ESR_ELx_EC_DABT_LOW;
- vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+ u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
*vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
* set.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
- vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..c370b4014799 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,17 +20,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/mm.h>
#include <linux/kvm_host.h>
+#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
+
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
#include <trace/events/kvm.h>
#include "sys_regs.h"
@@ -69,68 +72,31 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
-static void do_dc_cisw(u32 val)
-{
- asm volatile("dc cisw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
- asm volatile("dc csw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- do_dc_cisw(val);
- break;
-
- case 10: /* DCCSW */
- do_dc_csw(val);
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
@@ -143,24 +109,27 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
+ * Trap handler for the GICv3 SGI generation system register.
+ * Forward the request to the VGIC emulation.
+ * The cp15_64 code makes sure this automatically works
+ * for both AArch64 and AArch32 accesses.
*/
-static bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
+static bool access_gic_sgi(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
{
- access_vm_reg(vcpu, p, r);
+ u64 val;
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr_el2 &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
+ if (!p->is_write)
+ return read_from_write_only(vcpu, p);
+
+ val = *vcpu_reg(vcpu, p->Rt);
+ vgic_v3_dispatch_sgi(vcpu, val);
return true;
}
@@ -252,10 +221,19 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
+ u64 mpidr;
+
/*
- * Simply map the vcpu_id into the Aff0 field of the MPIDR.
+ * Map the vcpu_id into the first three affinity level fields of
+ * the MPIDR. We limit the number of VCPUs in level 0 due to a
+ * limitation to 16 CPUs in that level in the ICC_SGIxR registers
+ * of the GICv3 to be able to address each CPU directly when
+ * sending IPIs.
*/
- vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
+ mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
+ mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
+ mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
+ vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
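A quick worked example of the packing above, assuming MPIDR_LEVEL_SHIFT(0..2) of 0, 8 and 16: for vcpu_id 21 (0x15), Aff0 = 21 & 0x0f = 5, Aff1 = (21 >> 4) & 0xff = 1 and Aff2 = (21 >> 12) & 0xff = 0, so the guest reads MPIDR_EL1 = (1ULL << 31) | (1 << 8) | 5 = 0x80000105.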
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
@@ -377,7 +355,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
@@ -425,6 +403,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
NULL, reset_val, VBAR_EL1, 0 },
+ /* ICC_SGI1R_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
+ access_gic_sgi },
/* ICC_SRE_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
trap_raz_wi },
@@ -657,7 +638,9 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
+
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
@@ -704,6 +687,7 @@ static const struct sys_reg_desc cp15_regs[] = {
static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
@@ -815,12 +799,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
int cp;
switch(hsr_ec) {
- case ESR_EL2_EC_CP15_32:
- case ESR_EL2_EC_CP15_64:
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
cp = 15;
break;
- case ESR_EL2_EC_CP14_MR:
- case ESR_EL2_EC_CP14_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_64:
cp = 14;
break;
default:
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
new file mode 100644
index 000000000000..157416e963f2
--- /dev/null
+++ b/arch/arm64/kvm/trace.h
@@ -0,0 +1,55 @@
+#if !defined(_TRACE_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ARM64_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+TRACE_EVENT(kvm_wfx_arm64,
+ TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
+ TP_ARGS(vcpu_pc, is_wfe),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(bool, is_wfe)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->is_wfe = is_wfe;
+ ),
+
+ TP_printk("guest executed wf%c at: 0x%08lx",
+ __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_hvc_arm64,
+ TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
+ TP_ARGS(vcpu_pc, r0, imm),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(unsigned long, r0)
+ __field(unsigned long, imm)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->r0 = r0;
+ __entry->imm = imm;
+ ),
+
+ TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ __entry->vcpu_pc, __entry->r0, __entry->imm)
+);
+
+#endif /* _TRACE_ARM64_KVM_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
index d16046999e06..617a012a0107 100644
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -148,17 +148,18 @@
* x0: Register pointing to VCPU struct
*/
.macro restore_vgic_v3_state
- // Disable SRE_EL1 access. Necessary, otherwise
- // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens...
- msr_s ICC_SRE_EL1, xzr
- isb
-
// Compute the address of struct vgic_cpu
add x3, x0, #VCPU_VGIC_CPU
// Restore all interesting registers
ldr w4, [x3, #VGIC_V3_CPU_HCR]
ldr w5, [x3, #VGIC_V3_CPU_VMCR]
+ ldr w25, [x3, #VGIC_V3_CPU_SRE]
+
+ msr_s ICC_SRE_EL1, x25
+
+ // make sure SRE is valid before writing the other registers
+ isb
msr_s ICH_HCR_EL2, x4
msr_s ICH_VMCR_EL2, x5
@@ -244,9 +245,12 @@
dsb sy
// Prevent the guest from touching the GIC system registers
+ // if SRE isn't enabled for GICv3 emulation
+ cbnz x25, 1f
mrs_s x5, ICC_SRE_EL2
and x5, x5, #~ICC_SRE_EL2_ENABLE
msr_s ICC_SRE_EL2, x5
+1:
.endm
ENTRY(__save_vgic_v3_state)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d92094203913..0a24b9b8c698 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,16 +134,17 @@ static void __dma_free_coherent(struct device *dev, size_t size,
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
-static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+static void *__dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
+ bool coherent = is_device_dma_coherent(dev);
size = PAGE_ALIGN(size);
- if (!(flags & __GFP_WAIT)) {
+ if (!coherent && !(flags & __GFP_WAIT)) {
struct page *page = NULL;
void *addr = __alloc_from_pool(size, &page);
@@ -151,13 +152,16 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
return addr;
-
}
ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
+ /* no need for non-cacheable mapping if coherent */
+ if (coherent)
+ return ptr;
+
/* remove any dirty cache lines on the kernel alias */
__dma_flush_range(ptr, ptr + size);
@@ -179,15 +183,17 @@ no_mem:
return NULL;
}
-static void __dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static void __dma_free(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
- if (__free_from_pool(vaddr, size))
- return;
- vunmap(vaddr);
+ if (!is_device_dma_coherent(dev)) {
+ if (__free_from_pool(vaddr, size))
+ return;
+ vunmap(vaddr);
+ }
__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}
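A sketch of what the merged allocator above means from the driver side (names illustrative; the driver-visible API does not change, only the behaviour selected by is_device_dma_coherent()):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Illustrative only: same call for coherent and non-coherent devices. */
static void *example_dma_buffer(struct device *dev, dma_addr_t *handle)
{
	/*
	 * Coherent device: the cacheable allocation is returned directly.
	 * Non-coherent device: a non-cacheable remapped alias is returned
	 * and cache maintenance is applied, as in __dma_alloc() above.
	 */
	return dma_alloc_coherent(dev, SZ_4K, handle, GFP_KERNEL);
}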
@@ -199,7 +205,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr;
dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
return dev_addr;
}
@@ -209,7 +216,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}
@@ -221,9 +229,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int i, ret;
ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- for_each_sg(sgl, sg, ret, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, ret, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
return ret;
}
@@ -236,9 +245,10 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}
@@ -246,7 +256,8 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev,
dma_addr_t dev_addr, size_t size,
enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}
@@ -255,7 +266,8 @@ static void __swiotlb_sync_single_for_device(struct device *dev,
enum dma_data_direction dir)
{
swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ if (!is_device_dma_coherent(dev))
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}
static void __swiotlb_sync_sg_for_cpu(struct device *dev,
@@ -265,9 +277,10 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}
@@ -279,9 +292,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
int i;
swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
- for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ if (!is_device_dma_coherent(dev))
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
}
/* vma->vm_page_prot must be set appropriately before calling this function */
@@ -308,28 +322,20 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-static int __swiotlb_mmap_noncoherent(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
-{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
- return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-static int __swiotlb_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+static int __swiotlb_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
- /* Just use whatever page_prot attributes were specified */
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-struct dma_map_ops noncoherent_swiotlb_dma_ops = {
- .alloc = __dma_alloc_noncoherent,
- .free = __dma_free_noncoherent,
- .mmap = __swiotlb_mmap_noncoherent,
+static struct dma_map_ops swiotlb_dma_ops = {
+ .alloc = __dma_alloc,
+ .free = __dma_free,
+ .mmap = __swiotlb_mmap,
.map_page = __swiotlb_map_page,
.unmap_page = __swiotlb_unmap_page,
.map_sg = __swiotlb_map_sg_attrs,
@@ -341,24 +347,6 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = {
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
-EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
-
-struct dma_map_ops coherent_swiotlb_dma_ops = {
- .alloc = __dma_alloc_coherent,
- .free = __dma_free_coherent,
- .mmap = __swiotlb_mmap_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -427,7 +415,7 @@ static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- dma_ops = &noncoherent_swiotlb_dma_ops;
+ dma_ops = &swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
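The change in this file replaces the separate coherent/non-coherent dma_map_ops tables with a single table whose callbacks test is_device_dma_coherent() at runtime. A minimal userspace C sketch of that idea (fake_device, cache_clean and sync_for_device are illustrative stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_device {
	const char *name;
	bool dma_coherent;		/* mirrors is_device_dma_coherent() */
};

/* Stand-in for __dma_map_area(): CPU cache maintenance on the buffer. */
static void cache_clean(const char *who, void *addr, size_t len)
{
	printf("%s: clean %zu bytes at %p\n", who, len, addr);
}

/* One callback serves both device types; coherent masters skip maintenance. */
static void sync_for_device(struct fake_device *dev, void *addr, size_t len)
{
	if (!dev->dma_coherent)
		cache_clean(dev->name, addr, len);
}

int main(void)
{
	char buf[64];
	struct fake_device noncoherent = { "dma0", false };
	struct fake_device coherent    = { "dma1", true  };

	sync_for_device(&noncoherent, buf, sizeof(buf));	/* does maintenance */
	sync_for_device(&coherent, buf, sizeof(buf));		/* no-op */
	return 0;
}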
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index cf33f33333cc..74c256744b25 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -14,13 +14,18 @@
* of the License.
*/
#include <linux/debugfs.h>
+#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
+#include <asm/memory.h>
#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
#define LOWEST_ADDR (UL(0xffffffffffffffff) << VA_BITS)
@@ -36,10 +41,10 @@ enum address_markers_idx {
VMEMMAP_START_NR,
VMEMMAP_END_NR,
#endif
- PCI_START_NR,
- PCI_END_NR,
FIXADDR_START_NR,
FIXADDR_END_NR,
+ PCI_START_NR,
+ PCI_END_NR,
MODULES_START_NR,
MODULES_END_NR,
KERNEL_SPACE_NR,
@@ -52,10 +57,10 @@ static struct addr_marker address_markers[] = {
{ 0, "vmemmap start" },
{ 0, "vmemmap end" },
#endif
- { (unsigned long) PCI_IOBASE, "PCI I/O start" },
- { (unsigned long) PCI_IOBASE + SZ_16M, "PCI I/O end" },
{ FIXADDR_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
+ { PCI_IO_START, "PCI I/O start" },
+ { PCI_IO_END, "PCI I/O end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ PAGE_OFFSET, "Kernel Mapping" },
@@ -245,10 +250,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
+ if (pmd_none(*pmd) || pmd_sect(*pmd)) {
note_page(st, addr, 3, pmd_val(*pmd));
- else
+ } else {
+ BUG_ON(pmd_bad(*pmd));
walk_pte(st, pmd, addr);
+ }
}
}
@@ -260,10 +267,12 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
+ if (pud_none(*pud) || pud_sect(*pud)) {
note_page(st, addr, 2, pud_val(*pud));
- else
+ } else {
+ BUG_ON(pud_bad(*pud));
walk_pmd(st, pud, addr);
+ }
}
}
@@ -275,10 +284,12 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = start + i * PGDIR_SIZE;
- if (pgd_none(*pgd) || pgd_bad(*pgd))
+ if (pgd_none(*pgd)) {
note_page(st, addr, 1, pgd_val(*pgd));
- else
+ } else {
+ BUG_ON(pgd_bad(*pgd));
walk_pud(st, pgd, addr);
+ }
}
}
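The walker change above stops treating p*d_bad() entries as something to print and skip: empty and leaf entries are recorded, and anything else must be a valid table or the walk hits BUG_ON(). A standalone C sketch of the same control flow, using made-up entry types:

#include <assert.h>
#include <stdio.h>

enum kind { ENTRY_NONE, ENTRY_LEAF, ENTRY_TABLE, ENTRY_BAD };

struct entry {
	enum kind kind;
	unsigned long val;
};

static void note(unsigned long addr, unsigned long val)
{
	printf("0x%012lx: 0x%lx\n", addr, val);
}

static void walk(const struct entry *tbl, int n, unsigned long base,
		 unsigned long step)
{
	for (int i = 0; i < n; i++) {
		unsigned long addr = base + i * step;

		if (tbl[i].kind == ENTRY_NONE || tbl[i].kind == ENTRY_LEAF) {
			note(addr, tbl[i].val);
		} else {
			/* Counterpart of BUG_ON(pmd_bad(*pmd)) etc.: a corrupt
			 * entry aborts the dump instead of being skipped. */
			assert(tbl[i].kind == ENTRY_TABLE);
			/* a real walker would descend one level here */
		}
	}
}

int main(void)
{
	struct entry tbl[] = {
		{ ENTRY_NONE, 0 }, { ENTRY_LEAF, 0x200000 }, { ENTRY_TABLE, 0x1000 },
	};

	walk(tbl, 3, 0, 0x200000);
	return 0;
}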
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c11cd27ca8f5..96da13167d4a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (esr & ESR_LNX_EXEC) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
+ } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 023747bf4dd7..2de9d2e59d96 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
}
#endif
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
-{
- return ERR_PTR(-EINVAL);
-}
-
int pmd_huge(pmd_t pmd)
{
return !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bac492c12fcc..71145f952070 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
#include <linux/efi.h>
#include <asm/fixmap.h>
+#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -136,10 +137,29 @@ static void arm64_memory_present(void)
}
#endif
+static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+
+/*
+ * Limit the memory size that was specified via FDT.
+ */
+static int __init early_mem(char *p)
+{
+ if (!p)
+ return 1;
+
+ memory_limit = memparse(p, &p) & PAGE_MASK;
+ pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
void __init arm64_memblock_init(void)
{
phys_addr_t dma_phys_limit = 0;
+ memblock_enforce_memory_limit(memory_limit);
+
/*
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
@@ -277,8 +297,8 @@ void __init mem_init(void)
" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n"
" 0x%16lx - 0x%16lx (%6ld MB actual)\n"
#endif
- " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n"
+ " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n"
" modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
" memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
" .init : 0x%p" " - 0x%p" " (%6ld KB)\n"
@@ -291,8 +311,8 @@ void __init mem_init(void)
MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)),
#endif
- MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
MLK(FIXADDR_START, FIXADDR_TOP),
+ MLM(PCI_IO_START, PCI_IO_END),
MLM(MODULES_VADDR, MODULES_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory),
MLK_ROUNDUP(__init_begin, __init_end),
@@ -325,6 +345,7 @@ void __init mem_init(void)
void free_initmem(void)
{
+ fixup_init();
free_initmem_default(0);
free_alternatives_memory();
}
@@ -335,14 +356,8 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
+ if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- }
}
static int __init keepinitrd_setup(char *__unused)
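The new early_mem() handler caps memblock at the size passed via mem= on the command line, parsed with memparse() (which accepts K/M/G style suffixes) and rounded down to a page boundary. A userspace sketch of that parsing, with parse_size() standing in for memparse():

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK	(~0xfffULL)

/* Stand-in for memparse(): a number with an optional K/M/G suffix. */
static unsigned long long parse_size(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (toupper((unsigned char)*end)) {
	case 'G': v <<= 10;	/* fall through */
	case 'M': v <<= 10;	/* fall through */
	case 'K': v <<= 10; break;
	default: break;
	}
	return v;
}

int main(void)
{
	const char *arg = "512M";	/* as if booted with mem=512M */
	unsigned long long limit = parse_size(arg) & PAGE_MASK;

	printf("Memory limited to %lluMB\n", limit >> 20);
	return 0;
}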
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index cbb99c8f1e04..01e88c8bcab0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -62,6 +62,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
if (!area)
return NULL;
addr = (unsigned long)area->addr;
+ area->phys_addr = phys_addr;
err = ioremap_page_range(addr, addr + size, phys_addr, prot);
if (err) {
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 50c3351df9c7..ef47d99b5cbc 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1 +1,3 @@
extern void __init bootmem_init(void);
+
+void fixup_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6032f3e3056a..c6daaf6c6f97 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,8 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
@@ -45,80 +47,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-struct cachepolicy {
- const char policy[16];
- u64 mair;
- u64 tcr;
-};
-
-static struct cachepolicy cache_policies[] __initdata = {
- {
- .policy = "uncached",
- .mair = 0x44, /* inner, outer non-cacheable */
- .tcr = TCR_IRGN_NC | TCR_ORGN_NC,
- }, {
- .policy = "writethrough",
- .mair = 0xaa, /* inner, outer write-through, read-allocate */
- .tcr = TCR_IRGN_WT | TCR_ORGN_WT,
- }, {
- .policy = "writeback",
- .mair = 0xee, /* inner, outer write-back, read-allocate */
- .tcr = TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
- }
-};
-
-/*
- * These are useful for identifying cache coherency problems by allowing the
- * cache or the cache and writebuffer to be turned off. It changes the Normal
- * memory caching attributes in the MAIR_EL1 register.
- */
-static int __init early_cachepolicy(char *p)
-{
- int i;
- u64 tmp;
-
- for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
- int len = strlen(cache_policies[i].policy);
-
- if (memcmp(p, cache_policies[i].policy, len) == 0)
- break;
- }
- if (i == ARRAY_SIZE(cache_policies)) {
- pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
- return 0;
- }
-
- flush_cache_all();
-
- /*
- * Modify MT_NORMAL attributes in MAIR_EL1.
- */
- asm volatile(
- " mrs %0, mair_el1\n"
- " bfi %0, %1, %2, #8\n"
- " msr mair_el1, %0\n"
- " isb\n"
- : "=&r" (tmp)
- : "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
-
- /*
- * Modify TCR PTW cacheability attributes.
- */
- asm volatile(
- " mrs %0, tcr_el1\n"
- " bic %0, %0, %2\n"
- " orr %0, %0, %1\n"
- " msr tcr_el1, %0\n"
- " isb\n"
- : "=&r" (tmp)
- : "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
-
- flush_cache_all();
-
- return 0;
-}
-early_param("cachepolicy", early_cachepolicy);
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -133,19 +61,42 @@ EXPORT_SYMBOL(phys_mem_access_prot);
static void __init *early_alloc(unsigned long sz)
{
void *ptr = __va(memblock_alloc(sz, sz));
+ BUG_ON(!ptr);
memset(ptr, 0, sz);
return ptr;
}
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+/*
+ * remap a PMD into pages
+ */
+static void split_pmd(pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = pmd_pfn(*pmd);
+ int i = 0;
+
+ do {
+ /*
+ * Need to have the least restrictive permissions available;
+ * permissions will be fixed up later.
+ */
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ pfn++;
+ } while (pte++, i++, i < PTRS_PER_PTE);
+}
+
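split_pmd() above rewrites one section (block) mapping as PTRS_PER_PTE page entries with consecutive page frame numbers, so the same physical range stays mapped while permissions can later be tightened per page. A standalone sketch of the arithmetic, assuming 4K pages and a 2MB section:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	512

int main(void)
{
	unsigned long block_pa = 0x80200000UL;	/* physical base of a 2MB block */
	unsigned long pfn = block_pa >> PAGE_SHIFT;
	unsigned long pte[PTRS_PER_PTE];

	/* One page entry per 4K page, frame number advancing by one each time
	 * (attribute bits omitted; the kernel uses PAGE_KERNEL_EXEC here). */
	for (int i = 0; i < PTRS_PER_PTE; i++, pfn++)
		pte[i] = pfn << PAGE_SHIFT;

	/* The 512 entries span exactly the original 2MB block. */
	printf("first 0x%lx, last 0x%lx\n", pte[0], pte[PTRS_PER_PTE - 1]);
	return 0;
}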
+static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
unsigned long end, unsigned long pfn,
- pgprot_t prot)
+ pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pte_t *pte;
- if (pmd_none(*pmd)) {
- pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ if (pmd_none(*pmd) || pmd_sect(*pmd)) {
+ pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+ if (pmd_sect(*pmd))
+ split_pmd(pmd, pte);
__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+ flush_tlb_all();
}
BUG_ON(pmd_bad(*pmd));
@@ -156,30 +107,42 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- int map_io)
+void split_pud(pud_t *old_pud, pmd_t *pmd)
+{
+ unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
+ pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
+ int i = 0;
+
+ do {
+ set_pmd(pmd, __pmd(addr | prot));
+ addr += PMD_SIZE;
+ } while (pmd++, i++, i < PTRS_PER_PMD);
+}
+
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pmd_t *pmd;
unsigned long next;
- pmdval_t prot_sect;
- pgprot_t prot_pte;
-
- if (map_io) {
- prot_sect = PROT_SECT_DEVICE_nGnRE;
- prot_pte = __pgprot(PROT_DEVICE_nGnRE);
- } else {
- prot_sect = PROT_SECT_NORMAL_EXEC;
- prot_pte = PAGE_KERNEL_EXEC;
- }
/*
* Check for initial section mappings in the pgd/pud and remove them.
*/
- if (pud_none(*pud) || pud_bad(*pud)) {
- pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
- pud_populate(&init_mm, pud, pmd);
+ if (pud_none(*pud) || pud_sect(*pud)) {
+ pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+ if (pud_sect(*pud)) {
+ /*
+ * need the existing 1G of mappings to remain
+ * present
+ */
+ split_pud(pud, pmd);
+ }
+ pud_populate(mm, pud, pmd);
+ flush_tlb_all();
}
+ BUG_ON(pud_bad(*pud));
pmd = pmd_offset(pud, addr);
do {
@@ -187,31 +150,51 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
/* try section mapping first */
if (((addr | next | phys) & ~SECTION_MASK) == 0) {
pmd_t old_pmd =*pmd;
- set_pmd(pmd, __pmd(phys | prot_sect));
+ set_pmd(pmd, __pmd(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* Check for previous table entries created during
* boot (__create_page_tables) and flush them.
*/
- if (!pmd_none(old_pmd))
+ if (!pmd_none(old_pmd)) {
flush_tlb_all();
+ if (pmd_table(old_pmd)) {
+ phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+ if (!WARN_ON_ONCE(slab_is_available()))
+ memblock_free(table, PAGE_SIZE);
+ }
+ }
} else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
- prot_pte);
+ prot, alloc);
}
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- int map_io)
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+ unsigned long phys)
+{
+ if (PAGE_SHIFT != 12)
+ return false;
+
+ if (((addr | next | phys) & ~PUD_MASK) != 0)
+ return false;
+
+ return true;
+}
+
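use_1G_block() folds the previously open-coded test into a helper: OR the virtual start, the virtual end and the physical address together, and the result masked with ~PUD_MASK is zero only when all three are 1GB aligned (and only with 4K pages, where PUD_SHIFT is 30). A quick standalone check of that expression:

#include <stdbool.h>
#include <stdio.h>

#define PUD_SHIFT	30
#define PUD_MASK	(~((1ULL << PUD_SHIFT) - 1))

static bool use_1g_block(unsigned long long addr, unsigned long long next,
			 unsigned long long phys)
{
	return ((addr | next | phys) & ~PUD_MASK) == 0;
}

int main(void)
{
	/* Everything 1GB aligned: a single block entry can cover the range. */
	printf("%d\n", use_1g_block(0xffffffc000000000ULL,
				    0xffffffc040000000ULL, 0x80000000ULL));
	/* Physical address only 2MB aligned: fall back to PMD/PTE mappings. */
	printf("%d\n", use_1g_block(0xffffffc000000000ULL,
				    0xffffffc040000000ULL, 0x80200000ULL));
	return 0;
}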
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ phys_addr_t phys, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
pud_t *pud;
unsigned long next;
if (pgd_none(*pgd)) {
- pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
- pgd_populate(&init_mm, pgd, pud);
+ pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+ pgd_populate(mm, pgd, pud);
}
BUG_ON(pgd_bad(*pgd));
@@ -222,10 +205,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
/*
* For 4K granule only, attempt to put down a 1GB block
*/
- if (!map_io && (PAGE_SHIFT == 12) &&
- ((addr | next | phys) & ~PUD_MASK) == 0) {
+ if (use_1G_block(addr, next, phys)) {
pud_t old_pud = *pud;
- set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+ set_pud(pud, __pud(phys |
+ pgprot_val(mk_sect_prot(prot))));
/*
* If we have an old value for a pud, it will
@@ -235,12 +218,15 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Look up the old pmd table and free it.
*/
if (!pud_none(old_pud)) {
- phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
- memblock_free(table, PAGE_SIZE);
flush_tlb_all();
+ if (pud_table(old_pud)) {
+ phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+ if (!WARN_ON_ONCE(slab_is_available()))
+ memblock_free(table, PAGE_SIZE);
+ }
}
} else {
- alloc_init_pmd(pud, addr, next, phys, map_io);
+ alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
}
phys += next - addr;
} while (pud++, addr = next, addr != end);
@@ -250,9 +236,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
* Create the page directory entries and any necessary page tables for the
* mapping specified by 'md'.
*/
-static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
- unsigned long virt, phys_addr_t size,
- int map_io)
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+ phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ void *(*alloc)(unsigned long size))
{
unsigned long addr, length, end, next;
@@ -262,31 +249,95 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
end = addr + length;
do {
next = pgd_addr_end(addr, end);
- alloc_init_pud(pgd, addr, next, phys, map_io);
+ alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
phys += next - addr;
} while (pgd++, addr = next, addr != end);
}
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
- phys_addr_t size)
+static void *late_alloc(unsigned long size)
+{
+ void *ptr;
+
+ BUG_ON(size > PAGE_SIZE);
+ ptr = (void *)__get_free_page(PGALLOC_GFP);
+ BUG_ON(!ptr);
+ return ptr;
+}
+
+static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
&phys, virt);
return;
}
- __create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+ __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+ size, prot, early_alloc);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot)
+{
+ __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+ late_alloc);
}
-void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot)
{
- if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
- pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+ if (virt < VMALLOC_START) {
+ pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+ &phys, virt);
return;
}
- __create_mapping(&idmap_pg_dir[pgd_index(addr)],
- addr, addr, size, map_io);
+
+ return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+ phys, virt, size, prot, late_alloc);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ /*
+ * Set up the executable regions using the existing section mappings
+ * for now. This will get more fine-grained later once all memory
+ * is mapped.
+ */
+ unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+
+ if (end < kernel_x_start) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL);
+ } else if (start >= kernel_x_end) {
+ create_mapping(start, __phys_to_virt(start),
+ end - start, PAGE_KERNEL);
+ } else {
+ if (start < kernel_x_start)
+ create_mapping(start, __phys_to_virt(start),
+ kernel_x_start - start,
+ PAGE_KERNEL);
+ create_mapping(kernel_x_start,
+ __phys_to_virt(kernel_x_start),
+ kernel_x_end - kernel_x_start,
+ PAGE_KERNEL_EXEC);
+ if (kernel_x_end < end)
+ create_mapping(kernel_x_end,
+ __phys_to_virt(kernel_x_end),
+ end - kernel_x_end,
+ PAGE_KERNEL);
+ }
+
}
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+ create_mapping(start, __phys_to_virt(start), end - start,
+ PAGE_KERNEL_EXEC);
+}
+#endif
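With CONFIG_DEBUG_RODATA, __map_memblock() splits each memory block into at most three ranges so that only the section-aligned window around the kernel text is mapped executable. A userspace sketch of that range splitting, with purely illustrative addresses:

#include <stdio.h>

static void map(unsigned long start, unsigned long end, const char *prot)
{
	printf("map 0x%lx-0x%lx %s\n", start, end, prot);
}

/* kx_start/kx_end: section-aligned window around the kernel text. */
static void map_block(unsigned long start, unsigned long end,
		      unsigned long kx_start, unsigned long kx_end)
{
	if (end < kx_start || start >= kx_end) {
		/* Block does not overlap the text window at all. */
		map(start, end, "PAGE_KERNEL");
		return;
	}
	if (start < kx_start)
		map(start, kx_start, "PAGE_KERNEL");
	map(kx_start, kx_end, "PAGE_KERNEL_EXEC");
	if (kx_end < end)
		map(kx_end, end, "PAGE_KERNEL");
}

int main(void)
{
	/* A block of RAM running below, through and above the kernel image. */
	map_block(0x80000000UL, 0xc0000000UL, 0x80200000UL, 0x81000000UL);
	return 0;
}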
static void __init map_mem(void)
{
@@ -332,14 +383,53 @@ static void __init map_mem(void)
memblock_set_current_limit(limit);
}
#endif
-
- create_mapping(start, __phys_to_virt(start), end - start);
+ __map_memblock(start, end);
}
/* Limit no longer required. */
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
+void __init fixup_executable(void)
+{
+#ifdef CONFIG_DEBUG_RODATA
+ /* Now that we are actually fully mapped, make the start/end more fine-grained. */
+ if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+ unsigned long aligned_start = round_down(__pa(_stext),
+ SECTION_SIZE);
+
+ create_mapping(aligned_start, __phys_to_virt(aligned_start),
+ __pa(_stext) - aligned_start,
+ PAGE_KERNEL);
+ }
+
+ if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+ unsigned long aligned_end = round_up(__pa(__init_end),
+ SECTION_SIZE);
+ create_mapping(__pa(__init_end), (unsigned long)__init_end,
+ aligned_end - __pa(__init_end),
+ PAGE_KERNEL);
+ }
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ create_mapping_late(__pa(_stext), (unsigned long)_stext,
+ (unsigned long)_etext - (unsigned long)_stext,
+ PAGE_KERNEL_EXEC | PTE_RDONLY);
+
+}
+#endif
+
+void fixup_init(void)
+{
+ create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+ (unsigned long)__init_end - (unsigned long)__init_begin,
+ PAGE_KERNEL);
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps and sets up the zero page.
@@ -349,13 +439,7 @@ void __init paging_init(void)
void *zero_page;
map_mem();
-
- /*
- * Finally flush the caches and tlb to ensure that we're in a
- * consistent state.
- */
- flush_cache_all();
- flush_tlb_all();
+ fixup_executable();
/* allocate the zero page. */
zero_page = early_alloc(PAGE_SIZE);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4e778b13291b..28eebfb6af76 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -102,7 +102,7 @@ ENTRY(cpu_do_idle)
ret
ENDPROC(cpu_do_idle)
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
/**
* cpu_do_suspend - save CPU registers context
*
@@ -244,14 +244,18 @@ ENTRY(__cpu_setup)
ENDPROC(__cpu_setup)
/*
+ * We set the desired value explicitly, including that of the
+ * reserved bits. The EE and E0E bits were set early in
+ * el2_setup and are left untouched below.
+ *
* n n T
* U E WT T UD US IHBS
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
- * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
+ * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
+ * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x000802e2 // clear
- .word 0x0405d11d // set
+ .word 0xfcffffff // clear
+ .word 0x34d5d91d // set
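The widened crval pair means __cpu_setup now defines the full SCTLR_EL1 value itself, except for EE (bit 25) and E0E (bit 24), which are excluded from the clear mask 0xfcffffff and keep the values set earlier by el2_setup. A sketch of how such a clear/set pair is applied, using the masks from the hunk above and an arbitrary starting register value:

#include <stdint.h>
#include <stdio.h>

static uint32_t apply_crval(uint32_t reg, uint32_t clear, uint32_t set)
{
	/* Zero the bits named in "clear", then force on the bits in "set". */
	return (reg & ~clear) | set;
}

int main(void)
{
	uint32_t clear = 0xfcffffff;	/* everything except EE (25) and E0E (24) */
	uint32_t set   = 0x34d5d91d;	/* desired value, reserved bits included */

	/* Whatever SCTLR_EL1 happened to contain at entry... */
	uint32_t sctlr = 0x00c50838;

	/* ...comes out fully defined apart from the preserved EE/E0E bits. */
	printf("0x%08x -> 0x%08x\n", (unsigned)sctlr,
	       (unsigned)apply_crval(sctlr, clear, set));
	return 0;
}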