Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 3
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 110
-rw-r--r--  arch/arm64/boot/dts/arm/juno-motherboard.dtsi | 31
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 3
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 2
-rw-r--r--  arch/arm64/crypto/crc32-arm64.c | 22
-rw-r--r--  arch/arm64/crypto/sha1-ce-glue.c | 3
-rw-r--r--  arch/arm64/crypto/sha2-ce-glue.c | 3
-rw-r--r--  arch/arm64/include/asm/acpi.h | 19
-rw-r--r--  arch/arm64/include/asm/alternative-asm.h | 29
-rw-r--r--  arch/arm64/include/asm/alternative.h | 46
-rw-r--r--  arch/arm64/include/asm/barrier.h | 2
-rw-r--r--  arch/arm64/include/asm/boot.h | 14
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 5
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 27
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 8
-rw-r--r--  arch/arm64/include/asm/cpuidle.h | 8
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 18
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 15
-rw-r--r--  arch/arm64/include/asm/futex.h | 4
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 4
-rw-r--r--  arch/arm64/include/asm/insn.h | 3
-rw-r--r--  arch/arm64/include/asm/io.h | 9
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 23
-rw-r--r--  arch/arm64/include/asm/mm-arch-hooks.h | 15
-rw-r--r--  arch/arm64/include/asm/mmu.h | 1
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 7
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 19
-rw-r--r--  arch/arm64/include/asm/psci.h | 12
-rw-r--r--  arch/arm64/include/asm/smp.h | 2
-rw-r--r--  arch/arm64/include/asm/smp_plat.h | 16
-rw-r--r--  arch/arm64/include/asm/suspend.h | 2
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 14
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 2
-rw-r--r--  arch/arm64/include/asm/topology.h | 2
-rw-r--r--  arch/arm64/kernel/acpi.c | 123
-rw-r--r--  arch/arm64/kernel/alternative.c | 74
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 1
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 72
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 16
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 11
-rw-r--r--  arch/arm64/kernel/entry.S | 37
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 31
-rw-r--r--  arch/arm64/kernel/head.S | 52
-rw-r--r--  arch/arm64/kernel/insn.c | 60
-rw-r--r--  arch/arm64/kernel/perf_event.c | 10
-rw-r--r--  arch/arm64/kernel/process.c | 62
-rw-r--r--  arch/arm64/kernel/psci.c | 244
-rw-r--r--  arch/arm64/kernel/setup.c | 37
-rw-r--r--  arch/arm64/kernel/signal32.c | 4
-rw-r--r--  arch/arm64/kernel/sleep.S | 9
-rw-r--r--  arch/arm64/kernel/smp.c | 246
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 8
-rw-r--r--  arch/arm64/kernel/suspend.c | 9
-rw-r--r--  arch/arm64/kernel/traps.c | 5
-rw-r--r--  arch/arm64/kernel/vdso/Makefile | 4
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 11
-rw-r--r--  arch/arm64/kvm/Kconfig | 1
-rw-r--r--  arch/arm64/kvm/Makefile | 2
-rw-r--r--  arch/arm64/kvm/hyp.S | 26
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S | 3
-rw-r--r--  arch/arm64/kvm/vgic-v3-switch.S | 2
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/arm64/mm/cache.S | 75
-rw-r--r--  arch/arm64/mm/context.c | 8
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 92
-rw-r--r--  arch/arm64/mm/dump.c | 2
-rw-r--r--  arch/arm64/mm/fault.c | 14
-rw-r--r--  arch/arm64/mm/flush.c | 1
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 7
-rw-r--r--  arch/arm64/mm/init.c | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 66
-rw-r--r--  arch/arm64/mm/proc.S | 46
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 2
77 files changed, 1198 insertions, 794 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7796af4b1d6f..290ed648aa11 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,5 +1,6 @@
config ARM64
def_bool y
+ select ACPI_CCA_REQUIRED if ACPI
select ACPI_GENERIC_GSI if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -22,6 +23,7 @@ config ARM64
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select EDAC_SUPPORT
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
select GENERIC_ALLOCATOR
@@ -71,6 +73,7 @@ config ARM64
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
+ select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
select OF
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index c8d3e0e86678..0bb287ca0a98 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -374,6 +374,111 @@
};
};
+ msi: msi@79000000 {
+ compatible = "apm,xgene1-msi";
+ msi-controller;
+ reg = <0x00 0x79000000 0x0 0x900000>;
+ interrupts = < 0x0 0x10 0x4
+ 0x0 0x11 0x4
+ 0x0 0x12 0x4
+ 0x0 0x13 0x4
+ 0x0 0x14 0x4
+ 0x0 0x15 0x4
+ 0x0 0x16 0x4
+ 0x0 0x17 0x4
+ 0x0 0x18 0x4
+ 0x0 0x19 0x4
+ 0x0 0x1a 0x4
+ 0x0 0x1b 0x4
+ 0x0 0x1c 0x4
+ 0x0 0x1d 0x4
+ 0x0 0x1e 0x4
+ 0x0 0x1f 0x4>;
+ };
+
+ csw: csw@7e200000 {
+ compatible = "apm,xgene-csw", "syscon";
+ reg = <0x0 0x7e200000 0x0 0x1000>;
+ };
+
+ mcba: mcba@7e700000 {
+ compatible = "apm,xgene-mcb", "syscon";
+ reg = <0x0 0x7e700000 0x0 0x1000>;
+ };
+
+ mcbb: mcbb@7e720000 {
+ compatible = "apm,xgene-mcb", "syscon";
+ reg = <0x0 0x7e720000 0x0 0x1000>;
+ };
+
+ efuse: efuse@1054a000 {
+ compatible = "apm,xgene-efuse", "syscon";
+ reg = <0x0 0x1054a000 0x0 0x20>;
+ };
+
+ edac@78800000 {
+ compatible = "apm,xgene-edac";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ regmap-csw = <&csw>;
+ regmap-mcba = <&mcba>;
+ regmap-mcbb = <&mcbb>;
+ regmap-efuse = <&efuse>;
+ reg = <0x0 0x78800000 0x0 0x100>;
+ interrupts = <0x0 0x20 0x4>,
+ <0x0 0x21 0x4>,
+ <0x0 0x27 0x4>;
+
+ edacmc@7e800000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x7e800000 0x0 0x1000>;
+ memory-controller = <0>;
+ };
+
+ edacmc@7e840000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x7e840000 0x0 0x1000>;
+ memory-controller = <1>;
+ };
+
+ edacmc@7e880000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x7e880000 0x0 0x1000>;
+ memory-controller = <2>;
+ };
+
+ edacmc@7e8c0000 {
+ compatible = "apm,xgene-edac-mc";
+ reg = <0x0 0x7e8c0000 0x0 0x1000>;
+ memory-controller = <3>;
+ };
+
+ edacpmd@7c000000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x7c000000 0x0 0x200000>;
+ pmd-controller = <0>;
+ };
+
+ edacpmd@7c200000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x7c200000 0x0 0x200000>;
+ pmd-controller = <1>;
+ };
+
+ edacpmd@7c400000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x7c400000 0x0 0x200000>;
+ pmd-controller = <2>;
+ };
+
+ edacpmd@7c600000 {
+ compatible = "apm,xgene-edac-pmd";
+ reg = <0x0 0x7c600000 0x0 0x200000>;
+ pmd-controller = <3>;
+ };
+ };
+
pcie0: pcie@1f2b0000 {
status = "disabled";
device_type = "pci";
@@ -395,6 +500,7 @@
0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
dma-coherent;
clocks = <&pcie0clk 0>;
+ msi-parent = <&msi>;
};
pcie1: pcie@1f2c0000 {
@@ -418,6 +524,7 @@
0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
dma-coherent;
clocks = <&pcie1clk 0>;
+ msi-parent = <&msi>;
};
pcie2: pcie@1f2d0000 {
@@ -441,6 +548,7 @@
0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
dma-coherent;
clocks = <&pcie2clk 0>;
+ msi-parent = <&msi>;
};
pcie3: pcie@1f500000 {
@@ -464,6 +572,7 @@
0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
dma-coherent;
clocks = <&pcie3clk 0>;
+ msi-parent = <&msi>;
};
pcie4: pcie@1f510000 {
@@ -487,6 +596,7 @@
0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
dma-coherent;
clocks = <&pcie4clk 0>;
+ msi-parent = <&msi>;
};
serial0: serial@1c020000 {
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index c138b95a8356..351c95bda89e 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -21,6 +21,20 @@
clock-output-names = "juno_mb:clk25mhz";
};
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "juno_mb:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "juno_mb:refclk32khz";
+ };
+
motherboard {
compatible = "arm,vexpress,v2p-p1", "simple-bus";
#address-cells = <2>; /* SMB chipselect number and offset */
@@ -66,6 +80,15 @@
#size-cells = <1>;
ranges = <0 3 0 0x200000>;
+ v2m_sysctl: sysctl@020000 {
+ compatible = "arm,sp810", "arm,primecell";
+ reg = <0x020000 0x1000>;
+ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
+ clock-names = "refclk", "timclk", "apb_pclk";
+ #clock-cells = <1>;
+ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ };
+
mmci@050000 {
compatible = "arm,pl180", "arm,primecell";
reg = <0x050000 0x1000>;
@@ -106,16 +129,16 @@
compatible = "arm,sp804", "arm,primecell";
reg = <0x110000 0x10000>;
interrupts = <9>;
- clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
- clock-names = "timclken1", "apb_pclk";
+ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
};
v2m_timer23: timer@120000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x120000 0x10000>;
interrupts = <9>;
- clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
- clock-names = "timclken1", "apb_pclk";
+ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
};
rtc@170000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 43d54017b779..d0ab012fa379 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -16,7 +16,8 @@
#include "mt8173.dtsi"
/ {
- model = "mediatek,mt8173-evb";
+ model = "MediaTek MT8173 evaluation board";
+ compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
aliases {
serial0 = &uart0;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2ed7449d9273..daefbf0329a6 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -180,6 +180,7 @@ CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 6c348df5bf36..3303e8a7b837 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -13,7 +13,7 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
-#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
#include <linux/module.h>
#include "aes-ce-setkey.h"
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
index 9499199924ae..6a37c3c6b11d 100644
--- a/arch/arm64/crypto/crc32-arm64.c
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+ put_unaligned_le32(ctx->crc, out);
+ return 0;
+}
+
+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+ put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
return 0;
}
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+ mctx->key = 0;
+ return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
mctx->key = ~0;
return 0;
}
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksumc_update,
- .final = chksum_final,
+ .final = chksumc_final,
.finup = chksumc_finup,
.digest = chksumc_digest,
.descsize = sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
.cra_alignmask = 0,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
- .cra_init = crc32_cra_init,
+ .cra_init = crc32c_cra_init,
}
};
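The split above means the "crc32" and "crc32c" transforms no longer share their seed and finalisation helpers. A minimal sketch of the resulting conventions, as wired up in this driver only (hw_crc() is a hypothetical stand-in for the hardware CRC helpers such as crc32_arm64_le_hw(), declared here just to keep the sketch self-contained):

u32 hw_crc(u32 seed, const u8 *data, unsigned int len);	/* hypothetical stand-in */

/* "crc32": seed 0, digest written out as-is (crc32_cra_init + chksum_final). */
static u32 crc32_digest_sketch(const u8 *data, unsigned int len)
{
	return hw_crc(0, data, len);
}

/* "crc32c": seed ~0, digest bit-inverted on output (crc32c_cra_init + chksumc_final). */
static u32 crc32c_digest_sketch(const u8 *data, unsigned int len)
{
	return ~hw_crc(~0, data, len);
}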
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 114e7cc5de8c..aefda9868627 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
+ struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+
+ sctx->finalize = 0;
kernel_neon_begin_partial(16);
sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
kernel_neon_end();
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 1340e44c048b..7cd587564a41 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
+ struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+ sctx->finalize = 0;
kernel_neon_begin_partial(28);
sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
kernel_neon_end();
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59c05d8ea4a0..39248d3adf5d 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -16,6 +16,7 @@
#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/cputype.h>
+#include <asm/psci.h>
#include <asm/smp_plat.h>
/* Basic configuration for ACPI */
@@ -39,18 +40,6 @@ extern int acpi_disabled;
extern int acpi_noirq;
extern int acpi_pci_disabled;
-/* 1 to indicate PSCI 0.2+ is implemented */
-static inline bool acpi_psci_present(void)
-{
- return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
-}
-
-/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
-static inline bool acpi_psci_use_hvc(void)
-{
- return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
-}
-
static inline void disable_acpi(void)
{
acpi_disabled = 1;
@@ -88,9 +77,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
#else
-static inline bool acpi_psci_present(void) { return false; }
-static inline bool acpi_psci_use_hvc(void) { return false; }
static inline void acpi_init_cpus(void) { }
#endif /* CONFIG_ACPI */
+static inline const char *acpi_get_enable_method(int cpu)
+{
+ return acpi_psci_present() ? "psci" : NULL;
+}
#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
deleted file mode 100644
index 919a67855b63..000000000000
--- a/arch/arm64/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ALTERNATIVE_ASM_H
-#define __ASM_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
- .word \orig_offset - .
- .word \alt_offset - .
- .hword \feature
- .byte \orig_len
- .byte \alt_len
-.endm
-
-.macro alternative_insn insn1 insn2 cap
-661: \insn1
-662: .pushsection .altinstructions, "a"
- altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
- .popsection
- .pushsection .altinstr_replacement, "ax"
-663: \insn2
-664: .popsection
- .if ((664b-663b) != (662b-661b))
- .error "Alternatives instruction length mismatch"
- .endif
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d261f01e2bae..c385a0c4057f 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H
+#ifndef __ASSEMBLY__
+
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
@@ -24,7 +26,20 @@ void free_alternatives_memory(void);
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */
-/* alternative assembly primitive: */
+/*
+ * alternative assembly primitive:
+ *
+ * If any of these .org directive fail, it means that insn1 and insn2
+ * don't have the same length. This used to be written as
+ *
+ * .if ((664b-663b) != (662b-661b))
+ * .error "Alternatives instruction length mismatch"
+ * .endif
+ *
+ * but most assemblers die if insn1 or insn2 have a .inst. This should
+ * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
+ * containing commit 4e4d08cf7399b606 or c1baaddf8861).
+ */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
"661:\n\t" \
oldinstr "\n" \
@@ -37,8 +52,31 @@ void free_alternatives_memory(void);
newinstr "\n" \
"664:\n\t" \
".popsection\n\t" \
- ".if ((664b-663b) != (662b-661b))\n\t" \
- " .error \"Alternatives instruction length mismatch\"\n\t"\
- ".endif\n"
+ ".org . - (664b-663b) + (662b-661b)\n\t" \
+ ".org . - (662b-661b) + (664b-663b)\n"
+
+#else
+
+.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
+ .word \orig_offset - .
+ .word \alt_offset - .
+ .hword \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+.macro alternative_insn insn1 insn2 cap
+661: \insn1
+662: .pushsection .altinstructions, "a"
+ altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+663: \insn2
+664: .popsection
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
+.endm
+
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ALTERNATIVE_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 71f19c4dc0de..0fa47c4275cb 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define nop() asm volatile("nop");
#define smp_mb__before_atomic() smp_mb()
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
new file mode 100644
index 000000000000..81151b67b26b
--- /dev/null
+++ b/arch/arm64/include/asm/boot.h
@@ -0,0 +1,14 @@
+
+#ifndef __ASM_BOOT_H
+#define __ASM_BOOT_H
+
+#include <asm/sizes.h>
+
+/*
+ * arm64 requires the DTB to be 8 byte aligned and
+ * not exceed 2MB in size.
+ */
+#define MIN_FDT_ALIGN 8
+#define MAX_FDT_SIZE SZ_2M
+
+#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 67d309cc3b6b..c75b8d027eb1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,10 +40,6 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT or ASID-tagged VIVT I-cache.
*
- * flush_cache_all()
- *
- * Unconditionally clean and invalidate the entire cache.
- *
* flush_cache_mm(mm)
*
* Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
* - kaddr - page address
* - size - region size
*/
-extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 5a31d6716914..8f03446cf89f 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -19,15 +19,15 @@
#include <linux/init.h>
#include <linux/threads.h>
-struct device_node;
-
/**
* struct cpu_operations - Callback operations for hotplugging CPUs.
*
* @name: Name of the property as appears in a devicetree cpu node's
- * enable-method property.
- * @cpu_init: Reads any data necessary for a specific enable-method from the
- * devicetree, for a given cpu node and proposed logical id.
+ * enable-method property. On systems booting with ACPI, @name
+ * identifies the struct cpu_operations entry corresponding to
+ * the boot protocol specified in the ACPI MADT table.
+ * @cpu_init: Reads any data necessary for a specific enable-method for a
+ * proposed logical id.
* @cpu_prepare: Early one-time preparation step for a cpu. If there is a
* mechanism for doing so, tests whether it is possible to boot
* the given CPU.
@@ -40,15 +40,15 @@ struct device_node;
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
- * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
+ * a proposed logical id.
* @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
* to wrong parameters or error conditions. Called from the
* CPU being suspended. Must be called with IRQs disabled.
*/
struct cpu_operations {
const char *name;
- int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_init)(unsigned int);
int (*cpu_prepare)(unsigned int);
int (*cpu_boot)(unsigned int);
void (*cpu_postboot)(void);
@@ -58,14 +58,17 @@ struct cpu_operations {
int (*cpu_kill)(unsigned int cpu);
#endif
#ifdef CONFIG_CPU_IDLE
- int (*cpu_init_idle)(struct device_node *, unsigned int);
+ int (*cpu_init_idle)(unsigned int);
int (*cpu_suspend)(unsigned long);
#endif
};
extern const struct cpu_operations *cpu_ops[NR_CPUS];
-int __init cpu_read_ops(struct device_node *dn, int cpu);
-void __init cpu_read_bootcpu_ops(void);
-const struct cpu_operations *cpu_get_ops(const char *name);
+int __init cpu_read_ops(int cpu);
+
+static inline void __init cpu_read_bootcpu_ops(void)
+{
+ cpu_read_ops(0);
+}
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 82cb9f98ba1a..c1044218a63a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -24,8 +24,9 @@
#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
+#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_NCAPS 3
+#define ARM64_NCAPS 4
#ifndef __ASSEMBLY__
@@ -38,6 +39,11 @@ struct arm64_cpu_capabilities {
u32 midr_model;
u32 midr_range_min, midr_range_max;
};
+
+ struct { /* Feature register checking */
+ u64 register_mask;
+ u64 register_value;
+ };
};
};
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 141b2fcabaa6..0f74f05d662a 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -5,20 +5,16 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_init(unsigned int cpu);
-extern int cpu_suspend(unsigned long arg);
+extern int arm_cpuidle_suspend(int index);
#else
static inline int arm_cpuidle_init(unsigned int cpu)
{
return -EOPNOTSUPP;
}
-static inline int cpu_suspend(unsigned long arg)
+static inline int arm_cpuidle_suspend(int index)
{
return -EOPNOTSUPP;
}
#endif
-static inline int arm_cpuidle_suspend(int index)
-{
- return cpu_suspend(index);
-}
#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9437e3dc5833..f0d6d0bfe55c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,6 +18,7 @@
#ifdef __KERNEL__
+#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -28,13 +29,23 @@
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops dummy_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
- if (unlikely(!dev) || !dev->archdata.dma_ops)
+ if (unlikely(!dev))
return dma_ops;
- else
+ else if (dev->archdata.dma_ops)
return dev->archdata.dma_ops;
+ else if (acpi_disabled)
+ return dma_ops;
+
+ /*
+ * When ACPI is enabled, if arch_set_dma_ops is not called,
+ * we will disable device DMA capability by setting it
+ * to dummy_dma_ops.
+ */
+ return &dummy_dma_ops;
}
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -48,6 +59,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
+ if (!acpi_disabled && !dev->archdata.dma_ops)
+ dev->archdata.dma_ops = dma_ops;
+
dev->archdata.dma_coherent = coherent;
}
#define arch_setup_dma_ops arch_setup_dma_ops
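With the fallback above, an ACPI-described device that was never given DMA ops ends up on dummy_dma_ops, which rejects every DMA operation. A minimal sketch of what that looks like from a driver, assuming ACPI is enabled and no ops were installed for the device (the probe function is hypothetical):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Sketch only: with dummy_dma_ops in place the coherent allocation fails. */
static int hypothetical_probe(struct device *dev)
{
	dma_addr_t dma;
	void *buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);

	if (!buf) {
		dev_warn(dev, "DMA unusable (no coherency information?)\n");
		return -ENODEV;
	}

	dma_free_coherent(dev, SZ_4K, buf, dma);
	return 0;
}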
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 95e6b6dcbe37..c0739187a920 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
+#include <asm/boot.h>
#include <asm/page.h>
/*
@@ -32,6 +33,20 @@
*/
enum fixed_addresses {
FIX_HOLE,
+
+ /*
+ * Reserve a virtual window for the FDT that is 2 MB larger than the
+ * maximum supported size, and put it at the top of the fixmap region.
+ * The additional space ensures that any FDT that does not exceed
+ * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
+ * 2 MB alignment boundaries.
+ *
+ * Keep this at the top so it remains 2 MB aligned.
+ */
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+ FIX_FDT_END,
+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
__end_of_permanent_fixed_addresses,
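The sizing argument in the comment above can be checked with a couple of lines of arithmetic: the FDT base is aligned down to 2 MB for the fixmap mapping, so the residual offset is below 2 MB and the 4 MB window always covers a full MAX_FDT_SIZE blob. A sketch of that check (the helper is illustrative only; the real mapping is done by fixmap_remap_fdt()):

/* Sketch only: offset < SZ_2M, so offset + MAX_FDT_SIZE <= FIX_FDT_SIZE. */
static bool fdt_fits_fixmap_window(phys_addr_t dt_phys)
{
	phys_addr_t base = dt_phys & ~(phys_addr_t)(SZ_2M - 1);
	size_t offset = dt_phys - base;		/* 0 .. SZ_2M - 1 */

	return offset + MAX_FDT_SIZE <= FIX_FDT_SIZE;	/* always true */
}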
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5f750dc96e0f..74069b3bd919 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- pagefault_disable(); /* implies preempt_disable() */
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
- pagefault_enable(); /* subsumes preempt_enable() */
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 5b7ca8ace95f..734c17e89e94 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -86,10 +86,6 @@ static inline int prepare_hugepage_range(struct file *file,
return 0;
}
-static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
-{
-}
-
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f81b328d9cf4..30e50eb54a67 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
#undef __AARCH64_INSN_FUNCS
bool aarch64_insn_is_nop(u32 insn);
+bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
int shift,
enum aarch64_insn_variant variant,
enum aarch64_insn_logic_type type);
+s32 aarch64_get_branch_offset(u32 insn);
+u32 aarch64_set_branch_offset(u32 insn, s32 offset);
bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 540f7c0aea82..44be1e03ed65 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -117,10 +117,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* ordering rules but do not guarantee any ordering relative to Normal memory
* accesses.
*/
-#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; })
-#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; })
-#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; })
-#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; })
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
@@ -170,6 +170,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define iounmap __iounmap
/*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 4f7310fa77f0..3c5fe685a2d6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -27,7 +27,7 @@
#define MPIDR_EL1 1 /* MultiProcessor Affinity Register */
#define CSSELR_EL1 2 /* Cache Size Selection Register */
#define SCTLR_EL1 3 /* System Control Register */
-#define ACTLR_EL1 4 /* Auxilliary Control Register */
+#define ACTLR_EL1 4 /* Auxiliary Control Register */
#define CPACR_EL1 5 /* Coprocessor Access Control */
#define TTBR0_EL1 6 /* Translation Table Base Register 0 */
#define TTBR1_EL1 7 /* Translation Table Base Register 1 */
@@ -132,11 +132,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
-extern char __save_vgic_v2_state[];
-extern char __restore_vgic_v2_state[];
-extern char __save_vgic_v3_state[];
-extern char __restore_vgic_v3_state[];
-
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f0f58c9beec0..2709db2a7eac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -221,29 +221,6 @@ struct vgic_sr_vectors {
void *restore_vgic;
};
-static inline void vgic_arch_setup(const struct vgic_params *vgic)
-{
- extern struct vgic_sr_vectors __vgic_sr_vectors;
-
- switch(vgic->type)
- {
- case VGIC_V2:
- __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
- __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
- break;
-
-#ifdef CONFIG_ARM_GIC_V3
- case VGIC_V3:
- __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
- __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
- break;
-#endif
-
- default:
- BUG();
- }
-}
-
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
new file mode 100644
index 000000000000..562b655f5ba9
--- /dev/null
+++ b/arch/arm64/include/asm/mm-arch-hooks.h
@@ -0,0 +1,15 @@
+/*
+ * Architecture specific mm hooks
+ *
+ * Copyright (C) 2015, IBM Corporation
+ * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
+#define _ASM_ARM64_MM_ARCH_HOOKS_H
+
+#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3d311761e3c2..79fcfb048884 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index d26d1d53c0d7..6471773db6fd 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ (regs)->regs[29] = (unsigned long) __builtin_frame_address(0); \
+ (regs)->sp = current_stack_pointer; \
+ (regs)->pstate = PSR_MODE_EL1h; \
+}
+
#endif
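perf_arch_fetch_caller_regs() above builds a minimal pt_regs snapshot (PC, x29 frame pointer, SP, PSTATE) so software events can be unwound from the point they were raised. A hedged usage sketch (core perf normally reaches this macro through perf_fetch_caller_regs(); the surrounding function is hypothetical):

/* Sketch only: capture the caller's register view for a software event. */
static void report_hypothetical_sw_event(void)
{
	struct pt_regs regs;

	perf_fetch_caller_regs(&regs);		/* expands to the macro above */
	perf_sw_event(PERF_COUNT_SW_DUMMY, 1, &regs, 0);
}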
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b791b8..14ad6e4e87d1 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,12 +28,8 @@
struct mm_struct;
struct cpu_suspend_ctx;
-extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
- unsigned long addr) __attribute__((noreturn));
extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d2c37a1df0eb..e4c893e54f01 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -78,13 +78,30 @@ struct cpu_context {
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
- unsigned long tp_value;
+ unsigned long tp_value; /* TLS register */
+#ifdef CONFIG_COMPAT
+ unsigned long tp2_value;
+#endif
struct fpsimd_state fpsimd_state;
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
};
+#ifdef CONFIG_COMPAT
+#define task_user_tls(t) \
+({ \
+ unsigned long *__tls; \
+ if (is_compat_thread(task_thread_info(t))) \
+ __tls = &(t)->thread.tp2_value; \
+ else \
+ __tls = &(t)->thread.tp_value; \
+ __tls; \
+ })
+#else
+#define task_user_tls(t) (&(t)->thread.tp_value)
+#endif
+
#define INIT_THREAD { }
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
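task_user_tls() picks the slot that holds the user-visible TLS value: compat (AArch32) tasks use the new tp2_value field, while native tasks keep using tp_value. A minimal sketch of a context-switch-style save, mirroring how process.c uses the macro (the wrapper name is hypothetical):

/* Sketch only: store the outgoing task's tpidr_el0 into the right slot. */
static void save_user_tls(struct task_struct *tsk)
{
	unsigned long tpidr;

	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
	*task_user_tls(tsk) = tpidr;
}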
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 2454bc59c916..49d7e1aaebdc 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,7 +14,15 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-int psci_dt_init(void);
-int psci_acpi_init(void);
+int __init psci_dt_init(void);
+
+#ifdef CONFIG_ACPI
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index bf22650b1a78..db02be81b90a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -42,7 +42,7 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
* Discover the set of possible CPUs and determine their
* SMP operations.
*/
-extern void of_smp_init_cpus(void);
+extern void smp_init_cpus(void);
/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 8dcd61e32176..7abf7570c00f 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -19,6 +19,8 @@
#ifndef __ASM_SMP_PLAT_H
#define __ASM_SMP_PLAT_H
+#include <linux/cpumask.h>
+
#include <asm/types.h>
struct mpidr_hash {
@@ -39,6 +41,20 @@ static inline u32 mpidr_hash_size(void)
*/
extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+/*
+ * Retrieve logical cpu index corresponding to a given MPIDR.Aff*
+ * - mpidr: MPIDR.Aff* bits to be used for the look-up
+ *
+ * Returns the cpu logical index or -EINVAL on look-up error
+ */
+static inline int get_logical_index(u64 mpidr)
+{
+ int cpu;
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+ if (cpu_logical_map(cpu) == mpidr)
+ return cpu;
+ return -EINVAL;
+}
void __init do_post_cpus_up_work(void);
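get_logical_index() is the inverse of cpu_logical_map(): it scans the logical map for a given set of MPIDR affinity bits. A hedged usage sketch (the wrapper function is hypothetical):

/* Sketch only: translate a firmware-reported MPIDR into a logical CPU id. */
static int logical_cpu_from_mpidr(u64 mpidr)
{
	int cpu = get_logical_index(mpidr & MPIDR_HWID_BITMASK);

	if (cpu < 0)
		pr_warn("no logical CPU for MPIDR 0x%llx\n", mpidr);
	return cpu;				/* logical index or -EINVAL */
}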
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 003802f58963..59a5b0f1e81c 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,6 @@ struct sleep_save_sp {
phys_addr_t save_ptr_stash_phys;
};
-extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void cpu_resume(void);
#endif
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7a18fabbe0f6..57f110bea6a8 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,8 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <linux/signal.h>
+#include <linux/ratelimit.h>
#include <linux/reboot.h>
struct pt_regs;
@@ -41,9 +43,19 @@ struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
-void soft_restart(unsigned long);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+#define show_unhandled_signals_ratelimited() \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ bool __show_ratelimited = false; \
+ if (show_unhandled_signals && __ratelimit(&_rs)) \
+ __show_ratelimited = true; \
+ __show_ratelimited; \
+})
+
#define UDBG_UNDEFINED (1 << 0)
#define UDBG_SYSCALL (1 << 1)
#define UDBG_BADABORT (1 << 2)
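show_unhandled_signals_ratelimited() wraps the existing show_unhandled_signals knob with a per-call-site ratelimit, so fault paths can log unhandled user faults without flooding the console. A hedged sketch of a typical call site (the surrounding fault-handler variables are illustrative, not taken from this patch):

/* Sketch only: rate-limited diagnostics before delivering SIGSEGV. */
if (unhandled_signal(current, SIGSEGV) &&
    show_unhandled_signals_ratelimited()) {
	pr_info("%s[%d]: unhandled page fault at 0x%lx\n",
		current->comm, task_pid_nr(current), addr);
	show_regs(regs);
}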
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3bb05b98616..934815d45eda 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,8 +28,6 @@
* TLB Management
* ==============
*
- * The arch/arm64/mm/tlb.S files implement these methods.
- *
* The TLB specific code is expected to perform whatever tests it needs
* to determine if it should invalidate the TLB for each call. Start
* addresses are inclusive and end addresses are exclusive; it is safe to
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 7ebcd31ce51c..225ec3524fbf 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -18,7 +18,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 8b839558838e..19de7537e7d3 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled);
int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
-/* Processors with enabled flag and sane MPIDR */
-static int enabled_cpus;
-
-/* Boot CPU is valid or not in MADT */
-static bool bootcpu_valid __initdata;
-
static bool param_acpi_off __initdata;
static bool param_acpi_force __initdata;
@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
early_memunmap(map, size);
}
-/**
- * acpi_map_gic_cpu_interface - generates a logical cpu number
- * and map to MPIDR represented by GICC structure
- */
-static void __init
-acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+bool __init acpi_psci_present(void)
{
- int i;
- u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
- bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
-
- if (mpidr == INVALID_HWID) {
- pr_info("Skip MADT cpu entry with invalid MPIDR\n");
- return;
- }
-
- total_cpus++;
- if (!enabled)
- return;
-
- if (enabled_cpus >= NR_CPUS) {
- pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
- NR_CPUS, total_cpus, mpidr);
- return;
- }
-
- /* Check if GICC structure of boot CPU is available in the MADT */
- if (cpu_logical_map(0) == mpidr) {
- if (bootcpu_valid) {
- pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
- mpidr);
- return;
- }
-
- bootcpu_valid = true;
- }
-
- /*
- * Duplicate MPIDRs are a recipe for disaster. Scan
- * all initialized entries and check for
- * duplicates. If any is found just ignore the CPU.
- */
- for (i = 1; i < enabled_cpus; i++) {
- if (cpu_logical_map(i) == mpidr) {
- pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
- mpidr);
- return;
- }
- }
-
- if (!acpi_psci_present())
- return;
-
- cpu_ops[enabled_cpus] = cpu_get_ops("psci");
- /* CPU 0 was already initialized */
- if (enabled_cpus) {
- if (!cpu_ops[enabled_cpus])
- return;
-
- if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
- return;
-
- /* map the logical cpu id to cpu MPIDR */
- cpu_logical_map(enabled_cpus) = mpidr;
- }
-
- enabled_cpus++;
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}
-static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
- const unsigned long end)
+/* Whether HVC must be used instead of SMC as the PSCI conduit */
+bool __init acpi_psci_use_hvc(void)
{
- struct acpi_madt_generic_interrupt *processor;
-
- processor = (struct acpi_madt_generic_interrupt *)header;
-
- if (BAD_MADT_ENTRY(processor, end))
- return -EINVAL;
-
- acpi_table_print_madt_entry(header);
- acpi_map_gic_cpu_interface(processor);
- return 0;
-}
-
-/* Parse GIC cpu interface entries in MADT for SMP init */
-void __init acpi_init_cpus(void)
-{
- int count, i;
-
- /*
- * do a partial walk of MADT to determine how many CPUs
- * we have including disabled CPUs, and get information
- * we need for SMP init
- */
- count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
- acpi_parse_gic_cpu_interface, 0);
-
- if (!count) {
- pr_err("No GIC CPU interface entries present\n");
- return;
- } else if (count < 0) {
- pr_err("Error parsing GIC CPU interface entry\n");
- return;
- }
-
- if (!bootcpu_valid) {
- pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
- return;
- }
-
- for (i = 0; i < enabled_cpus; i++)
- set_cpu_possible(i, true);
-
- /* Make boot-up look pretty */
- pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
+ return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
/*
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 21033bba9390..221b98312f0c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -27,6 +27,10 @@
#include <asm/insn.h>
#include <linux/stop_machine.h>
+#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
+#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
+#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
struct alt_region {
@@ -35,42 +39,47 @@ struct alt_region {
};
/*
- * Decode the imm field of a b/bl instruction, and return the byte
- * offset as a signed value (so it can be used when computing a new
- * branch target).
+ * Check if the target PC is within an alternative block.
*/
-static s32 get_branch_offset(u32 insn)
+static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
- s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+ unsigned long replptr;
+
+ if (kernel_text_address(pc))
+ return 1;
+
+ replptr = (unsigned long)ALT_REPL_PTR(alt);
+ if (pc >= replptr && pc <= (replptr + alt->alt_len))
+ return 0;
- /* sign-extend the immediate before turning it into a byte offset */
- return (imm << 6) >> 4;
+ /*
+ * Branching into *another* alternate sequence is doomed, and
+ * we're not even trying to fix it up.
+ */
+ BUG();
}
-static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
+static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
{
u32 insn;
- aarch64_insn_read(altinsnptr, &insn);
+ insn = le32_to_cpu(*altinsnptr);
- /* Stop the world on instructions we don't support... */
- BUG_ON(aarch64_insn_is_cbz(insn));
- BUG_ON(aarch64_insn_is_cbnz(insn));
- BUG_ON(aarch64_insn_is_bcond(insn));
- /* ... and there is probably more. */
-
- if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
- enum aarch64_insn_branch_type type;
+ if (aarch64_insn_is_branch_imm(insn)) {
+ s32 offset = aarch64_get_branch_offset(insn);
unsigned long target;
- if (aarch64_insn_is_b(insn))
- type = AARCH64_INSN_BRANCH_NOLINK;
- else
- type = AARCH64_INSN_BRANCH_LINK;
+ target = (unsigned long)altinsnptr + offset;
- target = (unsigned long)altinsnptr + get_branch_offset(insn);
- insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
- target, type);
+ /*
+ * If we're branching inside the alternate sequence,
+ * do not rewrite the instruction, as it is already
+ * correct. Otherwise, generate the new instruction.
+ */
+ if (branch_insn_requires_update(alt, target)) {
+ offset = target - (unsigned long)insnptr;
+ insn = aarch64_set_branch_offset(insn, offset);
+ }
}
return insn;
@@ -80,11 +89,11 @@ static int __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
- u8 *origptr, *replptr;
+ u32 *origptr, *replptr;
for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
- int i;
+ int i, nr_inst;
if (!cpus_have_cap(alt->cpufeature))
continue;
@@ -93,16 +102,17 @@ static int __apply_alternatives(void *alt_region)
pr_info_once("patching kernel code\n");
- origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
- replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
+ origptr = ALT_ORIG_PTR(alt);
+ replptr = ALT_REPL_PTR(alt);
+ nr_inst = alt->alt_len / sizeof(insn);
- for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
- insn = get_alt_insn(origptr + i, replptr + i);
- aarch64_insn_write(origptr + i, insn);
+ for (i = 0; i < nr_inst; i++) {
+ insn = get_alt_insn(alt, origptr + i, replptr + i);
+ *(origptr + i) = cpu_to_le32(insn);
}
flush_icache_range((uintptr_t)origptr,
- (uintptr_t)(origptr + alt->alt_len));
+ (uintptr_t)(origptr + nr_inst));
}
return 0;
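The heart of the rework above is the branch fix-up: an immediate branch copied out of .altinstr_replacement still encodes an offset relative to its replacement-area address, so it must be rebased onto the patch site unless its target lies inside the replacement sequence itself. A condensed sketch of that arithmetic, using the same names as get_alt_insn():

/* Sketch only: rebase a PC-relative branch from altinsnptr to insnptr. */
unsigned long target = (unsigned long)altinsnptr +
		       aarch64_get_branch_offset(insn);

if (branch_insn_requires_update(alt, target))
	insn = aarch64_set_branch_offset(insn, target - (unsigned long)insnptr);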
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index da675cc5dfae..c99701a34d7b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -127,7 +127,6 @@ int main(void)
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
- DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index fb8ff9ba467a..5ea337dd2f15 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -16,11 +16,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <asm/cpu_ops.h>
-#include <asm/smp_plat.h>
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>
+#include <asm/acpi.h>
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
extern const struct cpu_operations smp_spin_table_ops;
extern const struct cpu_operations cpu_psci_ops;
@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL,
};
-const struct cpu_operations * __init cpu_get_ops(const char *name)
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops = supported_cpu_ops;
@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name)
return NULL;
}
+static const char *__init cpu_read_enable_method(int cpu)
+{
+ const char *enable_method;
+
+ if (acpi_disabled) {
+ struct device_node *dn = of_get_cpu_node(cpu, NULL);
+
+ if (!dn) {
+ if (!cpu)
+ pr_err("Failed to find device node for boot cpu\n");
+ return NULL;
+ }
+
+ enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g.
+ * when spin-table is used for secondaries).
+ * Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ }
+ } else {
+ enable_method = acpi_get_enable_method(cpu);
+ if (!enable_method)
+ pr_err("Unsupported ACPI enable-method\n");
+ }
+
+ return enable_method;
+}
/*
- * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ * Read a cpu's enable method and record it in cpu_ops.
*/
-int __init cpu_read_ops(struct device_node *dn, int cpu)
+int __init cpu_read_ops(int cpu)
{
- const char *enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- /*
- * The boot CPU may not have an enable method (e.g. when
- * spin-table is used for secondaries). Don't warn spuriously.
- */
- if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
- return -ENOENT;
- }
+ const char *enable_method = cpu_read_enable_method(cpu);
+
+ if (!enable_method)
+ return -ENODEV;
cpu_ops[cpu] = cpu_get_ops(enable_method);
if (!cpu_ops[cpu]) {
- pr_warn("%s: unsupported enable-method property: %s\n",
- dn->full_name, enable_method);
+ pr_warn("Unsupported enable-method: %s\n", enable_method);
return -EOPNOTSUPP;
}
return 0;
}
-
-void __init cpu_read_bootcpu_ops(void)
-{
- struct device_node *dn = of_get_cpu_node(0, NULL);
- if (!dn) {
- pr_err("Failed to find device node for boot cpu\n");
- return;
- }
- cpu_read_ops(dn, 0);
-}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3d9967e43d89..5ad86ceac010 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -22,7 +22,23 @@
#include <asm/cpu.h>
#include <asm/cpufeature.h>
+static bool
+has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+{
+ u64 val;
+
+ val = read_cpuid(id_aa64pfr0_el1);
+ return (val & entry->register_mask) == entry->register_value;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+ .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+ .matches = has_id_aa64pfr0_feature,
+ .register_mask = (0xf << 24),
+ .register_value = (1 << 24),
+ },
{},
};
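The new table entry matches when ID_AA64PFR0_EL1 bits [27:24] (the GIC field) read 1, i.e. the GICv3 system-register CPU interface is implemented. A hedged sketch of how a consumer might branch on the capability once feature detection has run (both helpers below are hypothetical):

/* Sketch only: choose the GIC CPU interface access method at runtime. */
if (cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF))
	gic_enable_sysreg_cpuif();	/* hypothetical: ICC_* system registers */
else
	gic_enable_mmio_cpuif();	/* hypothetical: legacy MMIO GICC frame */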
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index a78143a5c99f..7ce589ca54a4 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -18,15 +18,10 @@
int arm_cpuidle_init(unsigned int cpu)
{
int ret = -EOPNOTSUPP;
- struct device_node *cpu_node = of_cpu_device_node_get(cpu);
-
- if (!cpu_node)
- return -ENODEV;
if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
- ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+ ret = cpu_ops[cpu]->cpu_init_idle(cpu);
- of_node_put(cpu_node);
return ret;
}
@@ -37,7 +32,7 @@ int arm_cpuidle_init(unsigned int cpu)
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
*/
-int cpu_suspend(unsigned long arg)
+int arm_cpuidle_suspend(int index)
{
int cpu = smp_processor_id();
@@ -47,5 +42,5 @@ int cpu_suspend(unsigned long arg)
*/
if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
return -EOPNOTSUPP;
- return cpu_ops[cpu]->cpu_suspend(arg);
+ return cpu_ops[cpu]->cpu_suspend(index);
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 959fe8733560..a7691a378668 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,7 +21,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
@@ -124,21 +124,24 @@
msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
- alternative_insn \
- "nop", \
- "tbz x22, #4, 1f", \
- ARM64_WORKAROUND_845719
+
+#undef SEQUENCE_ORG
+#undef SEQUENCE_ALT
+
#ifdef CONFIG_PID_IN_CONTEXTIDR
- alternative_insn \
- "nop; nop", \
- "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
- ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG "nop ; nop ; nop"
+#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
+
#else
- alternative_insn \
- "nop", \
- "msr contextidr_el1, xzr; 1:", \
- ARM64_WORKAROUND_845719
+
+#define SEQUENCE_ORG "nop ; nop"
+#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
+
#endif
+
+ alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
+
#endif
.endif
msr elr_el1, x21 // set up the return data
@@ -517,6 +520,7 @@ el0_sp_pc:
mrs x26, far_el1
// enable interrupts before calling the main handler
enable_dbg_and_irq
+ ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
@@ -608,11 +612,16 @@ ENDPROC(cpu_switch_to)
*/
ret_fast_syscall:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ and x2, x1, #_TIF_SYSCALL_WORK
+ cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
cbnz x2, fast_work_pending
enable_step_tsk x1, x2
kernel_exit 0, ret = 1
+ret_fast_syscall_trace:
+ enable_irq // enable interrupts
+ b __sys_trace_return
/*
* Ok, we need to do extra processing, enter the slow path.
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3dca15634e69..44d6f7545505 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void)
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
+#ifdef CONFIG_HOTPLUG_CPU
+static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ per_cpu(fpsimd_last_state, cpu) = NULL;
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
+ .notifier_call = fpsimd_cpu_hotplug_notifier,
+};
+
+static inline void fpsimd_hotplug_init(void)
+{
+ register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
+}
+
+#else
+static inline void fpsimd_hotplug_init(void) { }
+#endif
+
/*
* FP/SIMD support code initialisation.
*/
@@ -315,6 +345,7 @@ static int __init fpsimd_init(void)
elf_hwcap |= HWCAP_ASIMD;
fpsimd_pm_init();
+ fpsimd_hotplug_init();
return 0;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 19f915e8f6e0..c0ff3ce4299e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -237,8 +237,6 @@ ENTRY(stext)
bl el2_setup // Drop to EL1, w20=cpu_boot_mode
adrp x24, __PHYS_OFFSET
bl set_cpu_boot_mode_flag
-
- bl __vet_fdt
bl __create_page_tables // x25=TTBR0, x26=TTBR1
/*
* The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -270,24 +268,6 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
- * Determine validity of the x21 FDT pointer.
- * The dtb must be 8-byte aligned and live in the first 512M of memory.
- */
-__vet_fdt:
- tst x21, #0x7
- b.ne 1f
- cmp x21, x24
- b.lt 1f
- mov x0, #(1 << 29)
- add x0, x0, x24
- cmp x21, x0
- b.ge 1f
- ret
-1:
- mov x21, #0
- ret
-ENDPROC(__vet_fdt)
-/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt)
* required to get the kernel running. The following sections are required:
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
- * been enabled, including the FDT blob (TTBR1)
- * - pgd entry for fixed mappings (TTBR1)
+ * been enabled
*/
__create_page_tables:
adrp x25, idmap_pg_dir
@@ -382,7 +361,7 @@ __create_page_tables:
* Create the identity mapping.
*/
mov x0, x25 // idmap_pg_dir
- adrp x3, KERNEL_START // __pa(KERNEL_START)
+ adrp x3, __idmap_text_start // __pa(__idmap_text_start)
#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -405,11 +384,11 @@ __create_page_tables:
/*
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
- * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+ * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
- * the physical address of KERNEL_END.
+ * the physical address of __idmap_text_end.
*/
- adrp x5, KERNEL_END
+ adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
b.ge 1f // .. then skip additional level
@@ -424,8 +403,8 @@ __create_page_tables:
#endif
create_pgd_entry x0, x3, x5, x6
- mov x5, x3 // __pa(KERNEL_START)
- adr_l x6, KERNEL_END // __pa(KERNEL_END)
+ mov x5, x3 // __pa(__idmap_text_start)
+ adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
create_block_map x0, x7, x3, x5, x6
/*
@@ -439,22 +418,6 @@ __create_page_tables:
create_block_map x0, x7, x3, x5, x6
/*
- * Map the FDT blob (maximum 2MB; must be within 512MB of
- * PHYS_OFFSET).
- */
- mov x3, x21 // FDT phys address
- and x3, x3, #~((1 << 21) - 1) // 2MB aligned
- mov x6, #PAGE_OFFSET
- sub x5, x3, x24 // subtract PHYS_OFFSET
- tst x5, #~((1 << 29) - 1) // within 512MB?
- csel x21, xzr, x21, ne // zero the FDT pointer
- b.ne 1f
- add x5, x5, x6 // __va(FDT blob)
- add x6, x5, #1 << 21 // 2MB for the FDT blob
- sub x6, x6, #1 // inclusive range
- create_block_map x0, x7, x3, x5, x6
-1:
- /*
* Since the page tables have been populated with non-cacheable
* accesses (MMU disabled), invalidate the idmap and swapper page
* tables again to remove any speculatively loaded cache lines.
@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched)
*
* other registers depend on the function called upon completion
*/
+ .section ".idmap.text", "ax"
__enable_mmu:
ldr x5, =vectors
msr vbar_el1, x5
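
The T0SZ comment rewrite above leans on a convenient property: since T0SZ is 64 minus the number of address bits the ID map needs, it equals the leading-zero count of the physical address of __idmap_text_end. A standalone sketch of the same calculation (not part of the patch; the address is hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical physical address of __idmap_text_end. */
	uint64_t idmap_text_end = 0x40811000ULL;

	/* T0SZ == 64 - (bits needed), i.e. the leading-zero count. */
	int t0sz = __builtin_clzll(idmap_text_end);

	printf("max T0SZ = %d -> %d-bit ID map range\n", t0sz, 64 - t0sz);
	return 0;
}
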
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 924902083e47..dd9671cd0bb2 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
}
}
+bool aarch64_insn_is_branch_imm(u32 insn)
+{
+ return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
+ aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn));
+}
+
static DEFINE_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+/*
+ * Decode the imm field of a branch, and return the byte offset as a
+ * signed value (so it can be used when computing a new branch
+ * target).
+ */
+s32 aarch64_get_branch_offset(u32 insn)
+{
+ s32 imm;
+
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+ return (imm << 6) >> 4;
+ }
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
+ return (imm << 13) >> 11;
+ }
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
+ imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
+ return (imm << 18) >> 16;
+ }
+
+ /* Unhandled instruction */
+ BUG();
+}
+
+/*
+ * Encode the displacement of a branch in the imm field and return the
+ * updated instruction.
+ */
+u32 aarch64_set_branch_offset(u32 insn, s32 offset)
+{
+ if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
+ aarch64_insn_is_bcond(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
+ offset >> 2);
+
+ if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
+ offset >> 2);
+
+ /* Unhandled instruction */
+ BUG();
+}
+
bool aarch32_insn_is_wide(u32 insn)
{
return insn >= 0xe800;
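
The new decode helpers use a shift-up/arithmetic-shift-down idiom: moving the immediate field to the top of the word sign-extends it, and shifting back down by a smaller amount scales it to a byte offset in the same step. A standalone sketch of the B/BL case (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Mirror of the IMM_26 case: sign-extend imm26 and multiply by 4. */
static int32_t decode_b_offset(uint32_t insn)
{
	uint32_t imm26 = insn & 0x03ffffff;

	return (int32_t)(imm26 << 6) >> 4;
}

int main(void)
{
	assert(decode_b_offset(0x14000002) == 8);	/* b  . + 8 */
	assert(decode_b_offset(0x17ffffff) == -4);	/* b  . - 4 */
	return 0;
}
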
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 23f25acf43a9..702591f6180a 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
}
err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
"arm-pmu", armpmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
@@ -1315,15 +1315,15 @@ static int armpmu_device_probe(struct platform_device *pdev)
if (!cpu_pmu)
return -ENODEV;
- irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
- if (!irqs)
- return -ENOMEM;
-
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
if (irq >= 0 && irq_is_percpu(irq))
return 0;
+ irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
for (i = 0; i < pdev->num_resources; ++i) {
struct device_node *dn;
int cpu;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b96f45..223b093c9440 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
-void soft_restart(unsigned long addr)
-{
- setup_mm_for_reboot();
- cpu_soft_restart(virt_to_phys(cpu_reset), addr);
- /* Should never get here */
- BUG();
-}
-
/*
* Function pointers to optional machine specific functions
*/
@@ -136,9 +128,7 @@ void machine_power_off(void)
/*
* Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
* provide a HW restart implementation, to ensure that all CPUs reset at once.
* This is required so that any code running after reset on the primary CPU
* doesn't have to co-ordinate with other CPUs to ensure they aren't still
@@ -243,7 +233,8 @@ void release_thread(struct task_struct *dead_task)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- fpsimd_preserve_current_state();
+ if (current->mm)
+ fpsimd_preserve_current_state();
*dst = *src;
return 0;
}
@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
unsigned long stk_sz, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
- unsigned long tls = p->thread.tp_value;
memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;
- if (is_compat_thread(task_thread_info(p))) {
- if (stack_start)
+
+ /*
+ * Read the current TLS pointer from tpidr_el0 as it may be
+ * out-of-sync with the saved value.
+ */
+ asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
+
+ if (stack_start) {
+ if (is_compat_thread(task_thread_info(p)))
childregs->compat_sp = stack_start;
- } else {
- /*
- * Read the current TLS pointer from tpidr_el0 as it may be
- * out-of-sync with the saved value.
- */
- asm("mrs %0, tpidr_el0" : "=r" (tls));
- if (stack_start) {
- /* 16-byte aligned stack mandatory on AArch64 */
- if (stack_start & 15)
- return -EINVAL;
+ /* 16-byte aligned stack mandatory on AArch64 */
+ else if (stack_start & 15)
+ return -EINVAL;
+ else
childregs->sp = stack_start;
- }
}
+
/*
* If a TLS pointer was passed to clone (4th argument), use it
* for the new thread.
*/
if (clone_flags & CLONE_SETTLS)
- tls = childregs->regs[3];
+ p->thread.tp_value = childregs->regs[3];
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
- p->thread.tp_value = tls;
ptrace_hw_copy_thread(p);
@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next)
{
unsigned long tpidr, tpidrro;
- if (!is_compat_task()) {
- asm("mrs %0, tpidr_el0" : "=r" (tpidr));
- current->thread.tp_value = tpidr;
- }
+ asm("mrs %0, tpidr_el0" : "=r" (tpidr));
+ *task_user_tls(current) = tpidr;
- if (is_compat_thread(task_thread_info(next))) {
- tpidr = 0;
- tpidrro = next->thread.tp_value;
- } else {
- tpidr = next->thread.tp_value;
- tpidrro = 0;
- }
+ tpidr = *task_user_tls(next);
+ tpidrro = is_compat_thread(task_thread_info(next)) ?
+ next->thread.tp_value : 0;
asm(
" msr tpidr_el0, %0\n"
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea18cb53921e..869f202748e8 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) "psci: " fmt
-#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
@@ -25,8 +24,8 @@
#include <linux/slab.h>
#include <uapi/linux/psci.h>
-#include <asm/acpi.h>
#include <asm/compiler.h>
+#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
@@ -37,16 +36,31 @@
#define PSCI_POWER_STATE_TYPE_STANDBY 0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
+static bool psci_power_state_loses_context(u32 state)
+{
+ return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
+}
+
+static bool psci_power_state_is_valid(u32 state)
+{
+ const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
+ PSCI_0_2_POWER_STATE_TYPE_MASK |
+ PSCI_0_2_POWER_STATE_AFFL_MASK;
+
+ return !(state & ~valid_mask);
+}
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+ int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int (*migrate)(unsigned long cpuid);
int (*affinity_info)(unsigned long target_affinity,
@@ -56,23 +70,21 @@ struct psci_operations {
static struct psci_operations psci_ops;
-static int (*invoke_psci_fn)(u64, u64, u64, u64);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
-asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
- PSCI_FN_AFFINITY_INFO,
- PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
-static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
static u32 psci_function_id[PSCI_FN_MAX];
@@ -92,56 +104,28 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
}
-static u32 psci_power_state_pack(struct psci_power_state state)
-{
- return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
- & PSCI_0_2_POWER_STATE_ID_MASK) |
- ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
- & PSCI_0_2_POWER_STATE_TYPE_MASK) |
- ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
- & PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static void psci_power_state_unpack(u32 power_state,
- struct psci_power_state *state)
-{
- state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
- PSCI_0_2_POWER_STATE_ID_SHIFT;
- state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
- PSCI_0_2_POWER_STATE_TYPE_SHIFT;
- state->affinity_level =
- (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
- PSCI_0_2_POWER_STATE_AFFL_SHIFT;
-}
-
-static int psci_get_version(void)
+static u32 psci_get_version(void)
{
- int err;
-
- err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
- return err;
+ return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
-static int psci_cpu_suspend(struct psci_power_state state,
- unsigned long entry_point)
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
{
int err;
- u32 fn, power_state;
+ u32 fn;
fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
- power_state = psci_power_state_pack(state);
- err = invoke_psci_fn(fn, power_state, entry_point, 0);
+ err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_off(struct psci_power_state state)
+static int psci_cpu_off(u32 state)
{
int err;
- u32 fn, power_state;
+ u32 fn;
fn = psci_function_id[PSCI_FN_CPU_OFF];
- power_state = psci_power_state_pack(state);
- err = invoke_psci_fn(fn, power_state, 0, 0);
+ err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
@@ -168,30 +152,29 @@ static int psci_migrate(unsigned long cpuid)
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
- err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
- return err;
+ return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
+ lowest_affinity_level, 0);
}
static int psci_migrate_info_type(void)
{
- int err;
- u32 fn;
+ return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
- fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
- err = invoke_psci_fn(fn, 0, 0, 0);
- return err;
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+ return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
}
-static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
- unsigned int cpu)
+static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{
int i, ret, count = 0;
- struct psci_power_state *psci_states;
- struct device_node *state_node;
+ u32 *psci_states;
+ struct device_node *state_node, *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
/*
* If the PSCI cpu_suspend function hook has not been initialized
@@ -215,13 +198,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
return -ENOMEM;
for (i = 0; i < count; i++) {
- u32 psci_power_state;
+ u32 state;
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
ret = of_property_read_u32(state_node,
"arm,psci-suspend-param",
- &psci_power_state);
+ &state);
if (ret) {
pr_warn(" * %s missing arm,psci-suspend-param property\n",
state_node->full_name);
@@ -230,9 +213,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
}
of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", psci_power_state,
- i);
- psci_power_state_unpack(psci_power_state, &psci_states[i]);
+ pr_debug("psci-power-state %#x index %d\n", state, i);
+ if (!psci_power_state_is_valid(state)) {
+ pr_warn("Invalid PSCI power state %#x\n", state);
+ ret = -EINVAL;
+ goto free_mem;
+ }
+ psci_states[i] = state;
}
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
@@ -275,6 +262,46 @@ static void psci_sys_poweroff(void)
invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+ unsigned long cpuid;
+ int type, cpu;
+
+ type = psci_ops.migrate_info_type();
+
+ if (type == PSCI_0_2_TOS_MP) {
+ pr_info("Trusted OS migration not required\n");
+ return;
+ }
+
+ if (type == PSCI_RET_NOT_SUPPORTED) {
+ pr_info("MIGRATE_INFO_TYPE not supported.\n");
+ return;
+ }
+
+ if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+ type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+ pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+ return;
+ }
+
+ cpuid = psci_migrate_info_up_cpu();
+ if (cpuid & ~MPIDR_HWID_BITMASK) {
+ pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+ cpuid);
+ return;
+ }
+
+ cpu = get_logical_index(cpuid);
+ resident_cpu = cpu >= 0 ? cpu : -1;
+
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -290,11 +317,8 @@ static void __init psci_0_2_set_functions(void)
psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
psci_ops.migrate = psci_migrate;
- psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
psci_ops.affinity_info = psci_affinity_info;
- psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
- PSCI_0_2_FN_MIGRATE_INFO_TYPE;
psci_ops.migrate_info_type = psci_migrate_info_type;
arm_pm_restart = psci_sys_reset;
@@ -307,32 +331,26 @@ static void __init psci_0_2_set_functions(void)
*/
static int __init psci_probe(void)
{
- int ver = psci_get_version();
-
- if (ver == PSCI_RET_NOT_SUPPORTED) {
- /*
- * PSCI versions >=0.2 mandates implementation of
- * PSCI_VERSION.
- */
- pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
- return -EOPNOTSUPP;
- } else {
- pr_info("PSCIv%d.%d detected in firmware.\n",
- PSCI_VERSION_MAJOR(ver),
- PSCI_VERSION_MINOR(ver));
-
- if (PSCI_VERSION_MAJOR(ver) == 0 &&
- PSCI_VERSION_MINOR(ver) < 2) {
- pr_err("Conflicting PSCI version detected.\n");
- return -EINVAL;
- }
+ u32 ver = psci_get_version();
+
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+ pr_err("Conflicting PSCI version detected.\n");
+ return -EINVAL;
}
psci_0_2_set_functions();
+ psci_init_migrate();
+
return 0;
}
+typedef int (*psci_initcall_t)(const struct device_node *);
+
/*
* PSCI init function for PSCI versions >=0.2
*
@@ -421,6 +439,7 @@ int __init psci_dt_init(void)
return init_fn(np);
}
+#ifdef CONFIG_ACPI
/*
* We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
* explicitly clarified in SBBR
@@ -441,10 +460,11 @@ int __init psci_acpi_init(void)
return psci_probe();
}
+#endif
#ifdef CONFIG_SMP
-static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+static int __init cpu_psci_cpu_init(unsigned int cpu)
{
return 0;
}
@@ -469,11 +489,21 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
+static bool psci_tos_resident_on(int cpu)
+{
+ return cpu == resident_cpu;
+}
+
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
if (!psci_ops.cpu_off)
return -EOPNOTSUPP;
+
+ /* Trusted OS will deny CPU_OFF */
+ if (psci_tos_resident_on(cpu))
+ return -EPERM;
+
return 0;
}
@@ -484,9 +514,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
* There are no known implementations of PSCI actually using the
* power state field, pass a sensible default for now.
*/
- struct psci_power_state state = {
- .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
- };
+ u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+ PSCI_0_2_POWER_STATE_TYPE_SHIFT;
ret = psci_ops.cpu_off(state);
@@ -498,7 +527,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
int err, i;
if (!psci_ops.affinity_info)
- return 1;
+ return 0;
/*
* cpu_kill could race with cpu_die and we can
* potentially end up declaring this cpu undead
@@ -509,7 +538,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
pr_info("CPU%d killed.\n", cpu);
- return 1;
+ return 0;
}
msleep(10);
@@ -518,15 +547,14 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
cpu, err);
- /* Make op_cpu_kill() fail. */
- return 0;
+ return -ETIMEDOUT;
}
#endif
#endif
static int psci_suspend_finisher(unsigned long index)
{
- struct psci_power_state *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume));
@@ -535,7 +563,7 @@ static int psci_suspend_finisher(unsigned long index)
static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
{
int ret;
- struct psci_power_state *state = __this_cpu_read(psci_power_state);
+ u32 *state = __this_cpu_read(psci_power_state);
/*
* idle state index 0 corresponds to wfi, should never be called
* from the cpu_suspend operations
@@ -543,10 +571,10 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
if (WARN_ON_ONCE(!index))
return -EINVAL;
- if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+ if (!psci_power_state_loses_context(state[index - 1]))
ret = psci_ops.cpu_suspend(state[index - 1], 0);
else
- ret = __cpu_suspend(index, psci_suspend_finisher);
+ ret = cpu_suspend(index, psci_suspend_finisher);
return ret;
}
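
With struct psci_power_state gone in favour of the raw u32 format from the PSCI spec, the validity and "loses context" checks reduce to mask tests. A minimal sketch of what the new representation encodes (not part of the patch; the constants are assumed to match include/uapi/linux/psci.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATE_ID_MASK	0x0000ffffU	/* bits [15:0] */
#define STATE_TYPE_MASK	(1U << 16)	/* 0 = standby, 1 = power down */
#define STATE_AFFL_MASK	(3U << 24)	/* affinity level */

static bool state_is_valid(uint32_t state)
{
	return !(state & ~(STATE_ID_MASK | STATE_TYPE_MASK | STATE_AFFL_MASK));
}

static bool state_loses_context(uint32_t state)
{
	return state & STATE_TYPE_MASK;
}

int main(void)
{
	uint32_t off_state = 1U << 16;	/* what cpu_psci_cpu_die() now passes */

	printf("valid=%d loses_context=%d\n",
	       state_is_valid(off_state), state_loses_context(off_state));
	return 0;
}
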
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 74753132c3ac..ffd3970721bf 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -105,18 +105,6 @@ static struct resource mem_res[] = {
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
-void __init early_print(const char *str, ...)
-{
- char buf[256];
- va_list ap;
-
- va_start(ap, str);
- vsnprintf(buf, sizeof(buf), str, ap);
- va_end(ap);
-
- printk("%s", buf);
-}
-
/*
* The recorded values of x0 .. x3 upon kernel entry.
*/
@@ -326,12 +314,14 @@ static void __init setup_processor(void)
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
- early_print("\n"
- "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
- "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
- "\nPlease check your bootloader.\n",
- dt_phys, phys_to_virt(dt_phys));
+ void *dt_virt = fixmap_remap_fdt(dt_phys);
+
+ if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+ pr_crit("\n"
+ "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+ "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
+ "\nPlease check your bootloader.",
+ &dt_phys, dt_virt);
while (true)
cpu_relax();
@@ -374,8 +364,6 @@ void __init setup_arch(char **cmdline_p)
{
setup_processor();
- setup_machine_fdt(__fdt_pointer);
-
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
@@ -386,6 +374,8 @@ void __init setup_arch(char **cmdline_p)
early_fixmap_init();
early_ioremap_init();
+ setup_machine_fdt(__fdt_pointer);
+
parse_early_param();
/*
@@ -408,16 +398,13 @@ void __init setup_arch(char **cmdline_p)
if (acpi_disabled) {
unflatten_device_tree();
psci_dt_init();
- cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
- of_smp_init_cpus();
-#endif
} else {
psci_acpi_init();
- acpi_init_cpus();
}
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
+ smp_init_cpus();
smp_build_mpidr_hash();
#endif
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d26fcd4cd6e6..1670f15ef69e 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -370,7 +370,7 @@ badframe:
if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
- regs->pc, regs->sp);
+ regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0;
}
@@ -407,7 +407,7 @@ badframe:
if (show_unhandled_signals)
pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
current->comm, task_pid_nr(current), __func__,
- regs->pc, regs->sp);
+ regs->pc, regs->compat_sp);
force_sig(SIGSEGV, current);
return 0;
}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186cdd452..803cfea41962 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
/*
* x0 must contain the sctlr value retrieved from restored context
*/
+ .pushsection ".idmap.text", "ax"
ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1
isb
br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu)
+ .popsection
cpu_resume_after_mmu:
mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16]
@@ -162,15 +164,12 @@ ENTRY(cpu_resume)
#else
mov x7, xzr
#endif
- adrp x0, sleep_save_sp
- add x0, x0, #:lo12:sleep_save_sp
- ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
- adrp x1, sleep_idmap_phys
/* load physical address of identity map page table in x1 */
- ldr x1, [x1, #:lo12:sleep_idmap_phys]
+ adrp x1, idmap_pg_dir
mov sp, x2
/*
* cpu_do_resume expects x0 to contain context physical address
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2cb008177252..4b2121bd7f9c 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
* time and hope that it's dead, so let's skip the wait and just hope.
*/
if (!cpu_ops[cpu]->cpu_kill)
- return 1;
+ return 0;
return cpu_ops[cpu]->cpu_kill(cpu);
}
-static DECLARE_COMPLETION(cpu_died);
-
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
void __cpu_die(unsigned int cpu)
{
- if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ int err;
+
+ if (!cpu_wait_death(cpu, 5)) {
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
* verify that it has really left the kernel before we consider
* clobbering anything it might still be using.
*/
- if (!op_cpu_kill(cpu))
- pr_warn("CPU%d may not have shut down cleanly\n", cpu);
+ err = op_cpu_kill(cpu);
+ if (err)
+ pr_warn("CPU%d may not have shut down cleanly: %d\n",
+ cpu, err);
}
/*
@@ -294,7 +297,7 @@ void cpu_die(void)
local_irq_disable();
/* Tell __cpu_die() that this CPU is now safe to dispose of */
- complete(&cpu_died);
+ (void)cpu_report_death();
/*
* Actually shutdown the CPU. This must never fail. The specific hotplug
@@ -318,57 +321,158 @@ void __init smp_prepare_boot_cpu(void)
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
+static u64 __init of_get_cpu_mpidr(struct device_node *dn)
+{
+ const __be32 *cell;
+ u64 hwid;
+
+ /*
+ * A cpu node with missing "reg" property is
+ * considered invalid to build a cpu_logical_map
+ * entry.
+ */
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell) {
+ pr_err("%s: missing reg property\n", dn->full_name);
+ return INVALID_HWID;
+ }
+
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+ /*
+ * Non affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK) {
+ pr_err("%s: invalid reg property\n", dn->full_name);
+ return INVALID_HWID;
+ }
+ return hwid;
+}
+
+/*
+ * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
+ * entries and check for duplicates. If any is found just ignore the
+ * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
+ * matching valid MPIDR values.
+ */
+static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
+{
+ unsigned int i;
+
+ for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
+ if (cpu_logical_map(i) == hwid)
+ return true;
+ return false;
+}
+
+/*
+ * Initialize cpu operations for a logical cpu and
+ * set it in the possible mask on success
+ */
+static int __init smp_cpu_setup(int cpu)
+{
+ if (cpu_read_ops(cpu))
+ return -ENODEV;
+
+ if (cpu_ops[cpu]->cpu_init(cpu))
+ return -ENODEV;
+
+ set_cpu_possible(cpu, true);
+
+ return 0;
+}
+
+static bool bootcpu_valid __initdata;
+static unsigned int cpu_count = 1;
+
+#ifdef CONFIG_ACPI
+/*
+ * acpi_map_gic_cpu_interface - parse processor MADT entry
+ *
+ * Carry out sanity checks on MADT processor entry and initialize
+ * cpu_logical_map on success
+ */
+static void __init
+acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
+{
+ u64 hwid = processor->arm_mpidr;
+
+ if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
+ pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
+ return;
+ }
+
+ if (!(processor->flags & ACPI_MADT_ENABLED)) {
+ pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
+ return;
+ }
+
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
+ return;
+ }
+
+ /* Check if GICC structure of boot CPU is available in the MADT */
+ if (cpu_logical_map(0) == hwid) {
+ if (bootcpu_valid) {
+ pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
+ hwid);
+ return;
+ }
+ bootcpu_valid = true;
+ return;
+ }
+
+ if (cpu_count >= NR_CPUS)
+ return;
+
+ /* map the logical cpu id to cpu MPIDR */
+ cpu_logical_map(cpu_count) = hwid;
+
+ cpu_count++;
+}
+
+static int __init
+acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(header);
+
+ acpi_map_gic_cpu_interface(processor);
+
+ return 0;
+}
+#else
+#define acpi_table_parse_madt(...) do { } while (0)
+#endif
+
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
* cpus. Assumes that cpu_logical_map(0) has already been initialized.
*/
-void __init of_smp_init_cpus(void)
+void __init of_parse_and_init_cpus(void)
{
struct device_node *dn = NULL;
- unsigned int i, cpu = 1;
- bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
- const u32 *cell;
- u64 hwid;
+ u64 hwid = of_get_cpu_mpidr(dn);
- /*
- * A cpu node with missing "reg" property is
- * considered invalid to build a cpu_logical_map
- * entry.
- */
- cell = of_get_property(dn, "reg", NULL);
- if (!cell) {
- pr_err("%s: missing reg property\n", dn->full_name);
+ if (hwid == INVALID_HWID)
goto next;
- }
- hwid = of_read_number(cell, of_n_addr_cells(dn));
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%s: invalid reg property\n", dn->full_name);
+ if (is_mpidr_duplicate(cpu_count, hwid)) {
+ pr_err("%s: duplicate cpu reg properties in the DT\n",
+ dn->full_name);
goto next;
}
/*
- * Duplicate MPIDRs are a recipe for disaster. Scan
- * all initialized entries and check for
- * duplicates. If any is found just ignore the cpu.
- * cpu_logical_map was initialized to INVALID_HWID to
- * avoid matching valid MPIDR values.
- */
- for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
- if (cpu_logical_map(i) == hwid) {
- pr_err("%s: duplicate cpu reg properties in the DT\n",
- dn->full_name);
- goto next;
- }
- }
-
- /*
* The numbering scheme requires that the boot CPU
* must be assigned logical id 0. Record it so that
* the logical map built from DT is validated and can
@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
continue;
}
- if (cpu >= NR_CPUS)
- goto next;
-
- if (cpu_read_ops(dn, cpu) != 0)
- goto next;
-
- if (cpu_ops[cpu]->cpu_init(dn, cpu))
+ if (cpu_count >= NR_CPUS)
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
- cpu_logical_map(cpu) = hwid;
+ cpu_logical_map(cpu_count) = hwid;
next:
- cpu++;
+ cpu_count++;
}
+}
+
+/*
+ * Enumerate the possible CPU set from the device tree or ACPI and build the
+ * cpu logical map array containing MPIDR values related to logical
+ * cpus. Assumes that cpu_logical_map(0) has already been initialized.
+ */
+void __init smp_init_cpus(void)
+{
+ int i;
- /* sanity check */
- if (cpu > NR_CPUS)
- pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n",
- cpu, NR_CPUS);
+ if (acpi_disabled)
+ of_parse_and_init_cpus();
+ else
+ /*
+ * do a walk of MADT to determine how many CPUs
+ * we have including disabled CPUs, and get information
+ * we need for SMP init
+ */
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ acpi_parse_gic_cpu_interface, 0);
+
+ if (cpu_count > NR_CPUS)
+ pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
+ cpu_count, NR_CPUS);
if (!bootcpu_valid) {
- pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n");
+ pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
return;
}
/*
- * All the cpus that made it to the cpu_logical_map have been
- * validated so set them as possible cpus.
+ * We need to set the cpu_logical_map entries before enabling
+ * the cpus so that cpu processor description entries (DT cpu nodes
+ * and ACPI MADT entries) can be retrieved by matching the cpu hwid
+ * with entries in cpu_logical_map while initializing the cpus.
+ * If the cpu set-up fails, invalidate the cpu_logical_map entry.
*/
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_logical_map(i) != INVALID_HWID)
- set_cpu_possible(i, true);
+ for (i = 1; i < NR_CPUS; i++) {
+ if (cpu_logical_map(i) != INVALID_HWID) {
+ if (smp_cpu_setup(i))
+ cpu_logical_map(i) = INVALID_HWID;
+ }
+ }
}
void __init smp_prepare_cpus(unsigned int max_cpus)
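
of_get_cpu_mpidr() rejects any DT "reg" value with bits set outside the MPIDR affinity fields. A standalone sketch of that check (not part of the patch; the mask is assumed to match MPIDR_HWID_BITMASK, with Aff3 in bits [39:32] and Aff2..Aff0 in bits [23:0]):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWID_BITMASK	0xff00ffffffULL

static bool reg_is_valid_hwid(uint64_t hwid)
{
	return !(hwid & ~HWID_BITMASK);
}

int main(void)
{
	printf("%d\n", reg_is_valid_hwid(0x100));	/* Aff1 = 1: accepted */
	printf("%d\n", reg_is_valid_hwid(1ULL << 31));	/* reserved bit set: rejected */
	return 0;
}
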
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 14944e5b28da..aef3605a8c47 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -49,8 +49,14 @@ static void write_pen_release(u64 val)
}
-static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
+static int smp_spin_table_cpu_init(unsigned int cpu)
{
+ struct device_node *dn;
+
+ dn = of_get_cpu_node(cpu, NULL);
+ if (!dn)
+ return -ENODEV;
+
/*
* Determine the address from which the CPU is polling.
*/
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45ae7a2..8297d502217e 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
}
/*
- * __cpu_suspend
+ * cpu_suspend
*
* arg: argument to pass to the finisher function
* fn: finisher function pointer
*
*/
-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
struct mm_struct *mm = current->active_mm;
int ret;
@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* We are resuming from reset with TTBR0_EL1 set to the
* idmap to enable the MMU; restore the active_mm mappings in
* TTBR0_EL1 unless the active_mm == &init_mm, in which case
- * the thread entered __cpu_suspend with TTBR0_EL1 set to
+ * the thread entered cpu_suspend with TTBR0_EL1 set to
* reserved TTBR0 page tables and should be restored as such.
*/
if (mm == &init_mm)
@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
}
struct sleep_save_sp sleep_save_sp;
-phys_addr_t sleep_idmap_phys;
static int __init cpu_suspend_init(void)
{
@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
sleep_save_sp.save_ptr_stash = ctx_ptr;
sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
- sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
- __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
return 0;
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1ef2940df13c..a12251c074a8 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
- if (show_unhandled_signals && unhandled_signal(current, SIGILL) &&
- printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
pr_info("%s[%d]: undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
dump_instr(KERN_INFO, regs);
@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
}
#endif
- if (show_unhandled_signals && printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited()) {
pr_info("%s[%d]: syscall %d\n", current->comm,
task_pid_nr(current), (int)regs->syscallno);
dump_instr("", regs);
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index ff3bddea482d..f6fe17d88da5 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
+# down to collect2, resulting in silent corruption of the vDSO image.
+ccflags-y += -Wl,-shared
+
obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c3fe..98073332e2d0 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
*(.hyp.text) \
VMLINUX_SYMBOL(__hyp_text_end) = .;
+#define IDMAP_TEXT \
+ . = ALIGN(SZ_4K); \
+ VMLINUX_SYMBOL(__idmap_text_start) = .; \
+ *(.idmap.text) \
+ VMLINUX_SYMBOL(__idmap_text_end) = .;
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
HYPERVISOR_TEXT
+ IDMAP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
}
/*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
* and should not cross a page boundary.
*/
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+ "ID map text too big or misaligned")
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
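
The new ASSERT uses the same trick as the HYP one: measuring from the page-aligned-down start to the end catches both a section larger than a page and one that straddles a page boundary. A standalone sketch of the arithmetic (not part of the patch; addresses hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000ULL

static int fits_one_page(uint64_t start, uint64_t end)
{
	return end - (start & ~(SZ_4K - 1)) <= SZ_4K;
}

int main(void)
{
	/* 2 KB section within one page: accepted. */
	printf("%d\n", fits_one_page(0xffff000000080000ULL, 0xffff000000080800ULL));
	/* 2.25 KB section crossing a page boundary: rejected. */
	printf("%d\n", fits_one_page(0xffff000000080800ULL, 0xffff000000081100ULL));
	return 0;
}
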
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 5105e297ed5f..bfffe8f4bd53 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -28,6 +28,7 @@ config KVM
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
+ select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
---help---
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index d5904f876cdb..f90f4aa7f88d 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 5befd010e232..17a8fb14f428 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,8 +17,10 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
@@ -50,8 +52,8 @@
stp x29, lr, [x3, #80]
mrs x19, sp_el0
- mrs x20, elr_el2 // EL1 PC
- mrs x21, spsr_el2 // EL1 pstate
+ mrs x20, elr_el2 // pc before entering el2
+ mrs x21, spsr_el2 // pstate before entering el2
stp x19, x20, [x3, #96]
str x21, [x3, #112]
@@ -82,8 +84,8 @@
ldr x21, [x3, #16]
msr sp_el0, x19
- msr elr_el2, x20 // EL1 PC
- msr spsr_el2, x21 // EL1 pstate
+ msr elr_el2, x20 // pc on return from el2
+ msr spsr_el2, x21 // pstate on return from el2
add x3, x2, #CPU_XREG_OFFSET(19)
ldp x19, x20, [x3]
@@ -808,10 +810,7 @@
* Call into the vgic backend for state saving
*/
.macro save_vgic_state
- adr x24, __vgic_sr_vectors
- ldr x24, [x24, VGIC_SAVE_FN]
- kern_hyp_va x24
- blr x24
+ alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE
neg x25, x25
@@ -828,10 +827,7 @@
orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25
msr hcr_el2, x24
- adr x24, __vgic_sr_vectors
- ldr x24, [x24, #VGIC_RESTORE_FN]
- kern_hyp_va x24
- blr x24
+ alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
.endm
.macro save_timer_state
@@ -1062,12 +1058,6 @@ ENTRY(__kvm_flush_vm_context)
ret
ENDPROC(__kvm_flush_vm_context)
- // struct vgic_sr_vectors __vgi_sr_vectors;
- .align 3
-ENTRY(__vgic_sr_vectors)
- .skip VGIC_SR_VECTOR_SZ
-ENDPROC(__vgic_sr_vectors)
-
__kvm_hyp_panic:
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
index f002fe1c3700..3f000712a85d 100644
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -47,7 +47,6 @@ __save_vgic_v2_state:
add x3, x0, #VCPU_VGIC_CPU
/* Save all interesting registers */
- ldr w4, [x2, #GICH_HCR]
ldr w5, [x2, #GICH_VMCR]
ldr w6, [x2, #GICH_MISR]
ldr w7, [x2, #GICH_EISR0]
@@ -55,7 +54,6 @@ __save_vgic_v2_state:
ldr w9, [x2, #GICH_ELRSR0]
ldr w10, [x2, #GICH_ELRSR1]
ldr w11, [x2, #GICH_APR]
-CPU_BE( rev w4, w4 )
CPU_BE( rev w5, w5 )
CPU_BE( rev w6, w6 )
CPU_BE( rev w7, w7 )
@@ -64,7 +62,6 @@ CPU_BE( rev w9, w9 )
CPU_BE( rev w10, w10 )
CPU_BE( rev w11, w11 )
- str w4, [x3, #VGIC_V2_CPU_HCR]
str w5, [x3, #VGIC_V2_CPU_VMCR]
str w6, [x3, #VGIC_V2_CPU_MISR]
CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
index 617a012a0107..3c20730ddff5 100644
--- a/arch/arm64/kvm/vgic-v3-switch.S
+++ b/arch/arm64/kvm/vgic-v3-switch.S
@@ -48,13 +48,11 @@
dsb st
// Save all interesting registers
- mrs_s x4, ICH_HCR_EL2
mrs_s x5, ICH_VMCR_EL2
mrs_s x6, ICH_MISR_EL2
mrs_s x7, ICH_EISR_EL2
mrs_s x8, ICH_ELSR_EL2
- str w4, [x3, #VGIC_V3_CPU_HCR]
str w5, [x3, #VGIC_V3_CPU_VMCR]
str w6, [x3, #VGIC_V3_CPU_MISR]
str w7, [x3, #VGIC_V3_CPU_EISR]
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 773d37a14039..9d84feb41a16 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,3 +4,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
+
+CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e1562e..bdeb5d38c2dd 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -22,84 +22,11 @@
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include "proc-macros.S"
/*
- * __flush_dcache_all()
- *
- * Flush the whole D-cache.
- *
- * Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
- dmb sy // ensure ordering with previous memory accesses
- mrs x0, clidr_el1 // read clidr
- and x3, x0, #0x7000000 // extract loc from clidr
- lsr x3, x3, #23 // left align loc bit field
- cbz x3, finished // if loc is 0, then no need to clean
- mov x10, #0 // start clean at cache level 0
-loop1:
- add x2, x10, x10, lsr #1 // work out 3x current cache level
- lsr x1, x0, x2 // extract cache type bits from clidr
- and x1, x1, #7 // mask of the bits for current cache only
- cmp x1, #2 // see what cache we have at this level
- b.lt skip // skip if no cache, or just i-cache
- save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
- msr csselr_el1, x10 // select current cache level in csselr
- isb // isb to sych the new cssr&csidr
- mrs x1, ccsidr_el1 // read the new ccsidr
- restore_irqs x9
- and x2, x1, #7 // extract the length of the cache lines
- add x2, x2, #4 // add 4 (line length offset)
- mov x4, #0x3ff
- and x4, x4, x1, lsr #3 // find maximum number on the way size
- clz w5, w4 // find bit position of way size increment
- mov x7, #0x7fff
- and x7, x7, x1, lsr #13 // extract max number of the index size
-loop2:
- mov x9, x4 // create working copy of max way size
-loop3:
- lsl x6, x9, x5
- orr x11, x10, x6 // factor way and cache number into x11
- lsl x6, x7, x2
- orr x11, x11, x6 // factor index number into x11
- dc cisw, x11 // clean & invalidate by set/way
- subs x9, x9, #1 // decrement the way
- b.ge loop3
- subs x7, x7, #1 // decrement the index
- b.ge loop2
-skip:
- add x10, x10, #2 // increment cache number
- cmp x3, x10
- b.gt loop1
-finished:
- mov x10, #0 // swith back to cache level 0
- msr csselr_el1, x10 // select current cache level in csselr
- dsb sy
- isb
- ret
-ENDPROC(__flush_dcache_all)
-
-/*
- * flush_cache_all()
- *
- * Flush the entire cache system. The data cache flush is now achieved
- * using atomic clean / invalidates working outwards from L1 cache. This
- * is done using Set/Way based cache maintainance instructions. The
- * instruction cache can still be invalidated back to the point of
- * unification in a single instruction.
- */
-ENTRY(flush_cache_all)
- mov x12, lr
- bl __flush_dcache_all
- mov x0, #0
- ic ialluis // I+BTB cache invalidate
- ret x12
-ENDPROC(flush_cache_all)
-
-/*
* flush_icache_range(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index baa758d37021..76c1e6cd36fc 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -92,6 +92,14 @@ static void reset_context(void *info)
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = current->active_mm;
+ /*
+ * current->active_mm could be init_mm for the idle thread immediately
+ * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
+ * the reserved value, so no need to reset any context.
+ */
+ if (mm == &init_mm)
+ return;
+
smp_rmb();
asid = cpu_last_asid + cpu;
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b0bd4e5fd5cf..d16a1cead23f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -414,6 +414,98 @@ out:
return -ENOMEM;
}
+/********************************************
+ * The following APIs are for dummy DMA ops *
+ ********************************************/
+
+static void *__dummy_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return NULL;
+}
+
+static void __dummy_free(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ return -ENXIO;
+}
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return DMA_ERROR_CODE;
+}
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ return 0;
+}
+
+static void __dummy_unmap_sg(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+}
+
+static void __dummy_sync_single(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static void __dummy_sync_sg(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+}
+
+static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+ return 1;
+}
+
+static int __dummy_dma_supported(struct device *hwdev, u64 mask)
+{
+ return 0;
+}
+
+struct dma_map_ops dummy_dma_ops = {
+ .alloc = __dummy_alloc,
+ .free = __dummy_free,
+ .mmap = __dummy_mmap,
+ .map_page = __dummy_map_page,
+ .unmap_page = __dummy_unmap_page,
+ .map_sg = __dummy_map_sg,
+ .unmap_sg = __dummy_unmap_sg,
+ .sync_single_for_cpu = __dummy_sync_single,
+ .sync_single_for_device = __dummy_sync_single,
+ .sync_sg_for_cpu = __dummy_sync_sg,
+ .sync_sg_for_device = __dummy_sync_sg,
+ .mapping_error = __dummy_mapping_error,
+ .dma_supported = __dummy_dma_supported,
+};
+EXPORT_SYMBOL(dummy_dma_ops);
+
static int __init arm64_dma_init(void)
{
int ret;
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 74c256744b25..f3d6221cd5bd 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -328,10 +328,12 @@ static int ptdump_init(void)
for (j = 0; j < pg_level[i].num; j++)
pg_level[i].mask |= pg_level[i].bits[j].mask;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
address_markers[VMEMMAP_START_NR].start_address =
(unsigned long)virt_to_page(PAGE_OFFSET);
address_markers[VMEMMAP_END_NR].start_address =
(unsigned long)virt_to_page(high_memory);
+#endif
pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
&ptdump_fops);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 96da13167d4a..b1fc69cd1499 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
{
struct siginfo si;
- if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
- printk_ratelimit()) {
+ if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
addr, esr);
@@ -211,7 +210,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* If we're in an interrupt or have no user context, we must not take
* the fault.
*/
- if (in_atomic() || !mm)
+ if (faulthandler_disabled() || !mm)
goto no_context;
if (user_mode(regs))
@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
struct siginfo info;
+ struct task_struct *tsk = current;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
+ pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
+ tsk->comm, task_pid_nr(tsk),
+ esr_get_class_string(esr), (void *)regs->pc,
+ (void *)regs->sp);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)addr;
- arm64_notify_die("", regs, &info, esr);
+ arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
static struct fault_info debug_fault_info[] = {
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index b6f14e8d2121..4dfa3975ce5b 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 2de9d2e59d96..cccc4af87a03 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -31,13 +31,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
-#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-#endif
-
int pmd_huge(pmd_t pmd)
{
return !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 597831bdddf3..ad87ce826cce 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void)
* memmap entries are valid from the bank end aligned to
* MAX_ORDER_NR_PAGES.
*/
- prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+ prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
MAX_ORDER_NR_PAGES);
}
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b8b664422d3..82d3435bf14f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
+#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx,
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+ int granularity, size, offset;
+ void *dt_virt;
+
+ /*
+ * Check whether the physical FDT address is set and meets the minimum
+ * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+ * at least 8 bytes so that we can always access the size field of the
+ * FDT header after mapping the first chunk, double check here if that
+ * is indeed the case.
+ */
+ BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+ if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+ return NULL;
+
+ /*
+ * Make sure that the FDT region can be mapped without the need to
+ * allocate additional translation table pages, so that it is safe
+ * to call create_mapping() this early.
+ *
+ * On 64k pages, the FDT will be mapped using PTEs, so we need to
+ * be in the same PMD as the rest of the fixmap.
+ * On 4k pages, we'll use section mappings for the FDT so we only
+ * have to be in the same PUD.
+ */
+ BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
+
+ granularity = PAGE_SIZE;
+ } else {
+ BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
+ __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
+
+ granularity = PMD_SIZE;
+ }
+
+ offset = dt_phys % granularity;
+ dt_virt = (void *)dt_virt_base + offset;
+
+ /* map the first chunk so we can read the size from the header */
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ granularity, prot);
+
+ if (fdt_check_header(dt_virt) != 0)
+ return NULL;
+
+ size = fdt_totalsize(dt_virt);
+ if (size > MAX_FDT_SIZE)
+ return NULL;
+
+ if (offset + size > granularity)
+ create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+ round_up(offset + size, granularity), prot);
+
+ memblock_reserve(dt_phys, size);
+
+ return dt_virt;
+}
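
fixmap_remap_fdt() keeps the blob's offset within its mapping granule, so the returned virtual address is simply the granule-aligned fixmap slot plus that offset, and the first granule-sized mapping is already enough to read fdt_totalsize(). A worked sketch of the address arithmetic (not part of the patch; addresses hypothetical, granularity as with 4K pages where PMD_SIZE is 2 MB):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t granularity = 2ULL << 20;		/* PMD_SIZE on 4K pages */
	const uint64_t dt_virt_base = 0xffffffbffe800000ULL;	/* hypothetical FIX_FDT slot */
	uint64_t dt_phys = 0x82070000ULL;			/* hypothetical DT blob */

	uint64_t offset = dt_phys % granularity;

	/* First mapping covers one granule, enough to read the FDT header. */
	printf("map PA %#llx at VA %#llx, blob visible at %#llx\n",
	       (unsigned long long)(dt_phys - offset),
	       (unsigned long long)dt_virt_base,
	       (unsigned long long)(dt_virt_base + offset));
	return 0;
}
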
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19b9b..39139a3aa16d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,52 +46,6 @@
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
- * cpu_cache_off()
- *
- * Turn the CPU D-cache off.
- */
-ENTRY(cpu_cache_off)
- mrs x0, sctlr_el1
- bic x0, x0, #1 << 2 // clear SCTLR.C
- msr sctlr_el1, x0
- isb
- ret
-ENDPROC(cpu_cache_off)
-
-/*
- * cpu_reset(loc)
- *
- * Perform a soft reset of the system. Put the CPU into the same state
- * as it would be if it had been reset, and branch to what would be the
- * reset vector. It must be executed with the flat identity mapping.
- *
- * - loc - location to jump to for soft reset
- */
- .align 5
-ENTRY(cpu_reset)
- mrs x1, sctlr_el1
- bic x1, x1, #1
- msr sctlr_el1, x1 // disable the MMU
- isb
- ret x0
-ENDPROC(cpu_reset)
-
-ENTRY(cpu_soft_restart)
- /* Save address of cpu_reset() and reset address */
- mov x19, x0
- mov x20, x1
-
- /* Turn D-cache off */
- bl cpu_cache_off
-
- /* Push out all dirty data, and ensure cache is empty */
- bl flush_cache_all
-
- mov x0, x20
- ret x19
-ENDPROC(cpu_soft_restart)
-
-/*
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index edba042b2325..dc6a4842683a 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -487,7 +487,7 @@ emit_cond_jmp:
return -EINVAL;
}
- imm64 = (u64)insn1.imm << 32 | imm;
+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
emit_a64_mov_i64(dst, imm64, ctx);
return 1;
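
The one-character fix above matters because of C's integer conversions: OR-ing a signed 32-bit low half into a 64-bit value sign-extends it first and can wipe out the high half. A standalone sketch of the failure mode (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t hi = 0x12345678, lo = -1;	/* the two halves of a BPF_LD_IMM64 */

	uint64_t bad  = (uint64_t)hi << 32 | lo;		/* lo sign-extends to 64 bits */
	uint64_t good = (uint64_t)hi << 32 | (uint32_t)lo;	/* the patched form */

	printf("bad  = %#llx\n", (unsigned long long)bad);	/* 0xffffffffffffffff */
	printf("good = %#llx\n", (unsigned long long)good);	/* 0x12345678ffffffff */
	return 0;
}
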