Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                                        2
-rw-r--r--  arch/arm64/Kconfig.platforms                              1
-rw-r--r--  arch/arm64/Makefile                                       2
-rw-r--r--  arch/arm64/boot/dts/broadcom/ns2-svk.dts                  2
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi            1
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi            1
-rw-r--r--  arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi      2
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts           3
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts    4
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi         12
-rw-r--r--  arch/arm64/include/asm/alternative.h                      2
-rw-r--r--  arch/arm64/include/asm/cpucaps.h                         40
-rw-r--r--  arch/arm64/include/asm/cpufeature.h                      22
-rw-r--r--  arch/arm64/include/asm/exec.h                             3
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h                     11
-rw-r--r--  arch/arm64/include/asm/lse.h                              1
-rw-r--r--  arch/arm64/include/asm/memory.h                           2
-rw-r--r--  arch/arm64/include/asm/module.h                           5
-rw-r--r--  arch/arm64/include/asm/percpu.h                         120
-rw-r--r--  arch/arm64/include/asm/processor.h                        6
-rw-r--r--  arch/arm64/include/asm/sysreg.h                           2
-rw-r--r--  arch/arm64/include/asm/uaccess.h                          8
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c                     36
-rw-r--r--  arch/arm64/kernel/cpu_errata.c                            3
-rw-r--r--  arch/arm64/kernel/cpufeature.c                           10
-rw-r--r--  arch/arm64/kernel/head.S                                  3
-rw-r--r--  arch/arm64/kernel/process.c                              18
-rw-r--r--  arch/arm64/kernel/sleep.S                                 2
-rw-r--r--  arch/arm64/kernel/smp.c                                   1
-rw-r--r--  arch/arm64/kernel/suspend.c                              11
-rw-r--r--  arch/arm64/kernel/traps.c                                30
-rw-r--r--  arch/arm64/mm/fault.c                                    15
-rw-r--r--  arch/arm64/mm/init.c                                     26
-rw-r--r--  arch/arm64/mm/numa.c                                      9
34 files changed, 251 insertions, 165 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
config RANDOMIZE_MODULE_REGION_FULL
bool "Randomize the module region independently from the core kernel"
- depends on RANDOMIZE_BASE
+ depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
default y
help
Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index cfbdf02ef566..101794f5ce10 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
help
This enables support for Socionext UniPhier SoC family.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie -Bsymbolic
+LDFLAGS_vmlinux += -pie -shared -Bsymbolic
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 2d7872a36b91..b09f3bc5c6c1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -164,6 +164,8 @@
nand-ecc-mode = "hw";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
+ nand-bus-width = <16>;
+ brcm,nand-oob-sector-size = <16>;
#address-cells = <1>;
#size-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 220ac7057d12..97d331ec2500 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 337da90bd7da..7f0dc13b4087 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
+ fsl,erratum-a008585;
};
pmu {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e5e3ed678b6f..602e2c2e9a4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
- clocks = <&cpm_syscon0 0 3>;
+ clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 46cdddfcea6c..e5eeca2c2456 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -116,7 +116,6 @@
cap-mmc-highspeed;
clock-frequency = <150000000>;
disable-wp;
- keep-power-in-suspend;
non-removable;
num-slots = <1>;
vmmc-supply = <&vcc_io>;
@@ -258,8 +257,6 @@
};
vcc_sd: SWITCH_REG1 {
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vcc_sd";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 5797933ef80e..ea0a8eceefd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
vin-supply = <&vcc_io>;
};
@@ -201,7 +199,6 @@
bus-width = <8>;
cap-mmc-highspeed;
disable-wp;
- keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
@@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed;
card-detect-delay = <200>;
- keep-power-in-suspend;
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 08fd7cf7769c..56a1b2e92cf3 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -257,18 +257,18 @@
reg = <0x59801000 0x400>;
};
- mioctrl@59810000 {
- compatible = "socionext,uniphier-mioctrl",
+ sdctrl@59810000 {
+ compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
- mio_clk: clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ sd_clk: clock {
+ compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>;
};
- mio_rst: reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ sd_rst: reset {
+ compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>;
};
};
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 39feb85a6931..6e1cb8c5af4d 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,7 +1,7 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
-#include <asm/cpufeature.h>
+#include <asm/cpucaps.h>
#include <asm/insn.h>
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
new file mode 100644
index 000000000000..87b446535185
--- /dev/null
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -0,0 +1,40 @@
+/*
+ * arch/arm64/include/asm/cpucaps.h
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUCAPS_H
+#define __ASM_CPUCAPS_H
+
+#define ARM64_WORKAROUND_CLEAN_CACHE 0
+#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
+#define ARM64_WORKAROUND_CAVIUM_23154 6
+#define ARM64_WORKAROUND_834220 7
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+#define ARM64_HAS_VIRT_HOST_EXTN 11
+#define ARM64_WORKAROUND_CAVIUM_27456 12
+#define ARM64_HAS_32BIT_EL0 13
+#define ARM64_HYP_OFFSET_LOW 14
+#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+
+#define ARM64_NCAPS 16
+
+#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..0bc0b1de90c4 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -11,6 +11,7 @@
#include <linux/jump_label.h>
+#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>
@@ -24,25 +25,6 @@
#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
#define cpu_feature(x) ilog2(HWCAP_ ## x)
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_UAO 9
-#define ARM64_ALT_PAN_NOT_UAO 10
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_HYP_OFFSET_LOW 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
-
-#define ARM64_NCAPS 16
-
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
@@ -94,7 +76,7 @@ struct arm64_cpu_capabilities {
u16 capability;
int def_scope; /* default scope */
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- void (*enable)(void *); /* Called on all active CPUs */
+ int (*enable)(void *); /* Called on all active CPUs */
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H
+#include <linux/sched.h>
+
extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd9d5fd788f5..f5ea0ba70f07 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+ kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 23acc00be32d..fc756e22c84c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -5,7 +5,6 @@
#include <linux/stringify.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ba62df8c6e35..b71086d25195 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
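
Note: the hunk above is pure macro hygiene. __page_to_voff() declared its parameter as "kaddr" but its body referenced "page", so it only expanded correctly because every caller happened to pass an argument literally named "page". A minimal userspace sketch of the same hazard (toy macros, not the kernel's):

    #include <stdio.h>

    #define BAD_DOUBLE(kaddr)  ((page) * 2)   /* body ignores its parameter */
    #define GOOD_DOUBLE(page)  ((page) * 2)

    int main(void)
    {
            int page = 21;
            int p = 10;

            printf("%d\n", BAD_DOUBLE(page)); /* 42, but only because the argument is named 'page' */
            printf("%d\n", GOOD_DOUBLE(p));   /* 20, correct for any argument name */
            /* BAD_DOUBLE(p) would still expand to ((page) * 2) and silently read 'page' */
            return 0;
    }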
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
+#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f59288c..5394c8405e66 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
\
switch (size) { \
case 1: \
- do { \
- asm ("//__per_cpu_" #op "_1\n" \
- "ldxrb %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "1: ldxrb %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrb %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u8 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrb %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 2: \
- do { \
- asm ("//__per_cpu_" #op "_2\n" \
- "ldxrh %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "1: ldxrh %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrh %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u16 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrh %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 4: \
- do { \
- asm ("//__per_cpu_" #op "_4\n" \
- "ldxr %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "1: ldxr %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxr %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u32 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 8: \
- do { \
- asm ("//__per_cpu_" #op "_8\n" \
- "ldxr %[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "1: ldxr %[ret], %[ptr]\n" \
#asm_op " %[ret], %[ret], %[val]\n" \
- "stxr %w[loop], %[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u64 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
default: \
BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
switch (size) {
case 1:
- do {
- asm ("//__percpu_xchg_1\n"
- "ldxrb %w[ret], %[ptr]\n"
- "stxrb %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u8 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_1\n"
+ "1: ldxrb %w[ret], %[ptr]\n"
+ " stxrb %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
break;
case 2:
- do {
- asm ("//__percpu_xchg_2\n"
- "ldxrh %w[ret], %[ptr]\n"
- "stxrh %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u16 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_2\n"
+ "1: ldxrh %w[ret], %[ptr]\n"
+ " stxrh %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
break;
case 4:
- do {
- asm ("//__percpu_xchg_4\n"
- "ldxr %w[ret], %[ptr]\n"
- "stxr %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u32 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_4\n"
+ "1: ldxr %w[ret], %[ptr]\n"
+ " stxr %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
break;
case 8:
- do {
- asm ("//__percpu_xchg_8\n"
- "ldxr %[ret], %[ptr]\n"
- "stxr %w[loop], %[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u64 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_8\n"
+ "1: ldxr %[ret], %[ptr]\n"
+ " stxr %w[loop], %[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
break;
default:
BUILD_BUG();
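
Note: the ldxr/stxr sequence is the same on both sides of these hunks; what changes is that the retry loop moves from a C do/while wrapped around the asm into the asm itself (the 1:/cbnz pair), so the exclusive load and store can never be separated by compiler-generated code between iterations. A standalone sketch of the resulting shape, written as an AArch64 atomic add (illustrative only, not the kernel's helper):

    static inline unsigned long atomic_add_return_sketch(unsigned long *ptr,
                                                         unsigned long val)
    {
            unsigned long ret, tmp;

            asm volatile(
            "1:     ldxr    %[ret], %[v]\n"           /* load-exclusive */
            "       add     %[ret], %[ret], %[val]\n"
            "       stxr    %w[tmp], %[ret], %[v]\n"  /* tmp != 0 if the exclusive store failed */
            "       cbnz    %w[tmp], 1b"              /* retry without leaving the asm block */
            : [ret] "=&r" (ret), [tmp] "=&r" (tmp), [v] "+Q" (*ptr)
            : [val] "Ir" (val)
            : "memory");

            return ret;
    }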
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)v; \
- asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \
+ asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
static inline void config_sctlr_el1(u32 clear, u32 set)
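
Note: the one-character change above matters for the zero case. The "rZ" constraint lets the compiler substitute the zero register when the value is 0, and the x operand modifier forces the 64-bit register name (xzr rather than wzr), which is what the msr_s encoding expects. A sketch of the idiom in isolation (kernel context only; the target register here is chosen purely for illustration):

    static inline void write_tpidr_sketch(u64 v)
    {
            /* "%x0" prints the 64-bit name, so v == 0 is emitted as xzr */
            asm volatile("msr tpidr_el1, %x0" : : "rZ" (v));
    }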
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index bcaf6fba1b65..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
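
Note: untagged_addr() works because TTBR0 (userspace) addresses have bit 55 clear, so sign-extending from bit 55 zeroes the top-byte tag, while kernel addresses (bit 55 set) keep their leading ones. A userspace sketch, with sign_extend64() re-implemented locally for illustration (the kernel's version lives in <linux/bitops.h>):

    #include <stdint.h>
    #include <stdio.h>

    static inline int64_t sign_extend64(uint64_t value, int index)
    {
            int shift = 63 - index;
            return (int64_t)(value << shift) >> shift;
    }

    int main(void)
    {
            uint64_t tagged = 0x5a00ffff12345678ULL; /* tag 0x5a in bits 63:56 */

            /* bit 55 is 0 for a user pointer, so the tag byte is cleared: */
            printf("%016llx\n", (unsigned long long)sign_extend64(tagged, 55));
            /* prints 0000ffff12345678 */
            return 0;
    }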
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e162..b0988bb1bf64 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
/*
* Error-checking SWP macros implemented using ldxr{b}/stxr{b}
*/
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS 4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
__asm__ __volatile__( \
+ " mov %w3, %w7\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- "0: ldxr"B" %w2, [%3]\n" \
- "1: stxr"B" %w0, %w1, [%3]\n" \
+ "0: ldxr"B" %w2, [%4]\n" \
+ "1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
- " mov %w0, %w4\n" \
+ " sub %w3, %w3, #1\n" \
+ " cbnz %w3, 0b\n" \
+ " mov %w0, %w5\n" \
" b 3f\n" \
"2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "4: mov %w0, %w5\n" \
+ "4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- : "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
: "memory")
-#define __user_swp_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "b")
/*
* Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
}
while (1) {
- unsigned long temp;
+ unsigned long temp, temp2;
if (type == TYPE_SWPB)
- __user_swpb_asm(*data, address, res, temp);
+ __user_swpb_asm(*data, address, res, temp, temp2);
else
- __user_swp_asm(*data, address, res, temp);
+ __user_swp_asm(*data, address, res, temp, temp2);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
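
Note: the new temp2 register implements the forward-progress bound. It is preloaded with __SWP_LL_SC_LOOPS, decremented on every failed store-exclusive, and when it reaches zero the macro returns -EAGAIN so the C loop above can check signal_pending() before retrying. The control flow, sketched in plain C (the real code keeps everything inside one asm block; the weak compare-exchange below merely stands in for a stxr that can fail spuriously):

    #include <errno.h>

    #define SWP_LL_SC_LOOPS 4  /* mirrors __SWP_LL_SC_LOOPS above */

    static int swp_once_sketch(unsigned int *addr, unsigned int *data)
    {
            int attempts = SWP_LL_SC_LOOPS;

            while (attempts--) {
                    unsigned int old = __atomic_load_n(addr, __ATOMIC_RELAXED);

                    /* weak CAS may fail spuriously, like a store-exclusive */
                    if (__atomic_compare_exchange_n(addr, &old, *data, 1,
                                                    __ATOMIC_RELAXED,
                                                    __ATOMIC_RELAXED)) {
                            *data = old;  /* SWP returns the previous value */
                            return 0;
                    }
            }
            return -EAGAIN;  /* bounded: the caller decides whether to retry */
    }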
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
{
/* Clear SCTLR_EL1.UCT */
config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ return 0;
}
#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
#define pr_fmt(fmt) "CPU features: " fmt
#include <linux/bsearch.h>
+#include <linux/cpumask.h>
#include <linux/sort.h>
+#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
if (caps->enable && cpus_have_cap(caps->capability))
- on_each_cpu(caps->enable, NULL, true);
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+ * uses an IPI, giving us a PSTATE that disappears when
+ * we return.
+ */
+ stop_machine(caps->enable, NULL, cpu_online_mask);
}
/*
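
Note: the comment added in this hunk is the heart of the change. An IPI handler's PSTATE is restored from the exception frame on return, so any SET_PSTATE_* done there is lost, whereas stop_machine() runs the callback in a scheduled thread. The enable callbacks accordingly now return int to match stop_machine()'s cpu_stop_fn_t. A sketch of an enable hook under this scheme (hypothetical feature name):

    /* Runs once on every online CPU, in stop_machine() context. */
    static int cpu_enable_widget_sketch(void *__unused)
    {
            /* SCTLR/PSTATE changes made here persist, because this runs
             * in a thread rather than in an interrupt handler, e.g.: */
            /* config_sctlr_el1(SCTLR_EL1_XYZ, 0); */
            /* asm(SET_PSTATE_PAN(1)); */
            return 0;  /* cpu_stop_fn_t returns int */
    }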
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
- msr mdcr_el2, x0 // all PMU counters from EL1
4:
+ csel x0, xzr, x0, lt // all PMU counters from EL1
+ msr mdcr_el2, x0 // (if they exist)
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 27b2f1387df4..01753cd7d3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
+#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
regs->pc, lr, regs->pstate);
printk("sp : %016llx\n", sp);
- for (i = top_reg; i >= 0; i--) {
+
+ i = top_reg;
+
+ while (i >= 0) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
- if (i % 2 == 0)
- printk("\n");
+ i--;
+
+ if (i % 2 == 0) {
+ pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+ i--;
+ }
+
+ pr_cont("\n");
}
printk("\n");
}
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
}
/* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
{
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
if (task_thread_info(next)->addr_limit == KERNEL_DS)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b8799e7c79de..1bec41b5fda3 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
#ifdef CONFIG_KASAN
mov x0, sp
- bl kasan_unpoison_remaining_stack
+ bl kasan_unpoison_task_stack_below
#endif
ldp x19, x20, [x29, #16]
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
bootcpu_valid = true;
+ early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <asm/alternative.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
+#include <asm/exec.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
set_my_cpu_offset(per_cpu_offset(cpu));
/*
+ * PSTATE was not saved over suspend/resume, re-enable any detected
+ * features that might not have been set correctly.
+ */
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+ uao_thread_switch(current);
+
+ /*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled
* through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
{
config_sctlr_el1(SCTLR_EL1_UCI, 0);
+ return 0;
}
#define __user_cache_maint(insn, address, res) \
- asm volatile ( \
- "1: " insn ", %1\n" \
- " mov %w0, #0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %w2\n" \
- " b 2b\n" \
- " .popsection\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ if (untagged_addr(address) >= user_addr_max()) \
+ res = -EFAULT; \
+ else \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT) )
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/preempt.h>
+#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+ return 0;
}
#endif /* CONFIG_ARM64_PAN */
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
* We need to enable the feature at runtime (instead of adding it to
* PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
*/
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
{
asm(SET_PSTATE_UAO(1));
+ return 0;
}
#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 21c489bdeb4e..212c4d1e2f26 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
- pr_cont(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
- pr_cont(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(MODULES_VADDR, MODULES_END));
- pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(VMALLOC_START, VMALLOC_END));
- pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_text, _etext));
- pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__start_rodata, __init_begin));
- pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__init_begin, __init_end));
- pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_sdata, _edata));
- pr_cont(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__bss_start, __bss_stop));
- pr_cont(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
+ pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
MLK(FIXADDR_START, FIXADDR_TOP));
- pr_cont(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- pr_cont(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
+ pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
- pr_cont(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
+ pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
(unsigned long)virt_to_page(high_memory)));
#endif
- pr_cont(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(__phys_to_virt(memblock_start_of_DRAM()),
(unsigned long)high_memory));
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 778a985c8a70..4b32168cf91a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- return node_distance(from, to);
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
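
Note: the one-line fix above restores a type distinction. The percpu allocator hands pcpu_cpu_distance() CPU numbers, but node_distance() is indexed by NUMA node ids, so each CPU must be mapped to its node first or the distances are read from the wrong rows of the distance matrix. Spelled out (same logic as the fixed line, names hypothetical):

    static int pcpu_cpu_distance_sketch(unsigned int from_cpu, unsigned int to_cpu)
    {
            int from_nid = early_cpu_to_node(from_cpu); /* e.g. CPU 5 -> node 1 */
            int to_nid   = early_cpu_to_node(to_cpu);

            return node_distance(from_nid, to_nid);
    }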
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ if (start_pfn < end_pfn)
+ pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ else
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);