author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-05 02:01:43 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-05 02:01:43 +0300
commit     23221d997b3d28cb80c4d4d1b4bd36610f8e12fc (patch)
tree       2abbdfd8f44c4d61440f780bad2f584ef301e6a3 /arch/arm64/include
parent     5b1f3dc927a2681cb339b05156f828f83bfa1b80 (diff)
parent     65896545b69ffaac947c12e11d3dcc57fd1fb772 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "Nothing particularly stands out here, probably because people were
  tied up with spectre/meltdown stuff last time around. Still, the main
  pieces are:

   - Rework of our CPU features framework so that we can whitelist CPUs
     that don't require kpti even in a heterogeneous system

   - Support for the IDC/DIC architecture extensions, which allow us to
     elide instruction and data cache maintenance when writing out
     instructions

   - Removal of the large memory model which resulted in suboptimal
     codegen by the compiler and increased the use of literal pools,
     which could potentially be used as ROP gadgets since they are
     mapped as executable

   - Rework of forced signal delivery so that the siginfo_t is
     well-formed and handling of show_unhandled_signals is consolidated
     and made consistent between different fault types

   - More siginfo cleanup based on the initial patches from Eric
     Biederman

   - Workaround for Cortex-A55 erratum #1024718

   - Some small ACPI IORT updates and cleanups from Lorenzo Pieralisi

   - Misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
  arm64: uaccess: Fix omissions from usercopy whitelist
  arm64: fpsimd: Split cpu field out from struct fpsimd_state
  arm64: tlbflush: avoid writing RES0 bits
  arm64: cmpxchg: Include linux/compiler.h in asm/cmpxchg.h
  arm64: move percpu cmpxchg implementation from cmpxchg.h to percpu.h
  arm64: cmpxchg: Include build_bug.h instead of bug.h for BUILD_BUG
  arm64: lse: Include compiler_types.h and export.h for out-of-line LL/SC
  arm64: fpsimd: include <linux/init.h> in fpsimd.h
  drivers/perf: arm_pmu_platform: do not warn about affinity on uniprocessor
  perf: arm_spe: include linux/vmalloc.h for vmap()
  Revert "arm64: Revert L1_CACHE_SHIFT back to 6 (64-byte cache line size)"
  arm64: cpufeature: Avoid warnings due to unused symbols
  arm64: Add work around for Arm Cortex-A55 Erratum 1024718
  arm64: Delay enabling hardware DBM feature
  arm64: Add MIDR encoding for Arm Cortex-A55 and Cortex-A35
  arm64: capabilities: Handle shared entries
  arm64: capabilities: Add support for checks based on a list of MIDRs
  arm64: Add helpers for checking CPU MIDR against a range
  arm64: capabilities: Clean up midr range helpers
  arm64: capabilities: Change scope of VHE to Boot CPU feature
  ...
Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/assembler.h        34
-rw-r--r--  arch/arm64/include/asm/cache.h             4
-rw-r--r--  arch/arm64/include/asm/cacheflush.h        3
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h          29
-rw-r--r--  arch/arm64/include/asm/cpucaps.h           6
-rw-r--r--  arch/arm64/include/asm/cpufeature.h      262
-rw-r--r--  arch/arm64/include/asm/cputype.h          43
-rw-r--r--  arch/arm64/include/asm/esr.h               9
-rw-r--r--  arch/arm64/include/asm/fpsimd.h           34
-rw-r--r--  arch/arm64/include/asm/lse.h               3
-rw-r--r--  arch/arm64/include/asm/module.h            2
-rw-r--r--  arch/arm64/include/asm/percpu.h           29
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h     1
-rw-r--r--  arch/arm64/include/asm/processor.h        47
-rw-r--r--  arch/arm64/include/asm/sysreg.h            3
-rw-r--r--  arch/arm64/include/asm/system_misc.h      11
-rw-r--r--  arch/arm64/include/asm/tlbflush.h         25
-rw-r--r--  arch/arm64/include/asm/traps.h             8
-rw-r--r--  arch/arm64/include/asm/virt.h              6
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h        4
-rw-r--r--  arch/arm64/include/uapi/asm/siginfo.h     21
21 files changed, 413 insertions, 171 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3c78835bba94..053d83e8db6f 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -202,25 +202,15 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC when running
- * in core kernel context. In module context, a movz/movk sequence
- * is used, since modules may be loaded far away from the kernel
- * when KASLR is in effect.
+ * <symbol> is within the range +/- 4 GB of the PC.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
*/
.macro adr_l, dst, sym
-#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
-#else
- movz \dst, #:abs_g3:\sym
- movk \dst, #:abs_g2_nc:\sym
- movk \dst, #:abs_g1_nc:\sym
- movk \dst, #:abs_g0_nc:\sym
-#endif
.endm
/*
@@ -231,7 +221,6 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
-#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -239,15 +228,6 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
-#else
- .ifb \tmp
- adr_l \dst, \sym
- ldr \dst, [\dst]
- .else
- adr_l \tmp, \sym
- ldr \dst, [\tmp]
- .endif
-#endif
.endm
/*
@@ -257,28 +237,18 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
-#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
-#else
- adr_l \tmp, \sym
- str \src, [\tmp]
-#endif
.endm
/*
- * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
- * non-module code
+ * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
* @sym: The name of the per-cpu variable
* @tmp: scratch register
*/
.macro adr_this_cpu, dst, sym, tmp
-#ifndef MODULE
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
-#else
- adr_l \dst, \sym
-#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
alternative_else
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ea9bb4e0e9bb..9bbffc7a301f 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -20,8 +20,12 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
+#define CTR_DMINLINE_SHIFT 16
+#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
+#define CTR_IDC_SHIFT 28
+#define CTR_DIC_SHIFT 29
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
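The new CTR_EL0 field positions above are enough to probe for the IDC/DIC
extensions by hand. A minimal sketch, assuming a raw CTR_EL0 value is already
in hand (the helper names here are made up for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define CTR_IDC_SHIFT 28
    #define CTR_DIC_SHIFT 29

    /* CTR_EL0.IDC: D-cache clean to PoU not required for I/D coherence */
    static bool ctr_idc(uint64_t ctr) { return (ctr >> CTR_IDC_SHIFT) & 1; }

    /* CTR_EL0.DIC: I-cache invalidation not required for I/D coherence */
    static bool ctr_dic(uint64_t ctr) { return (ctr >> CTR_DIC_SHIFT) & 1; }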
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index bef9f418f089..7dfcec4700fe 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -133,6 +133,9 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
+ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
+ return;
+
asm("ic ialluis");
dsb(ish);
}
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index ae852add053d..4f5fd2a36e6e 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -18,7 +18,8 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
-#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -196,32 +197,6 @@ __CMPXCHG_GEN(_mb)
__ret; \
})
-/* this_cpu_cmpxchg */
-#define _protect_cmpxchg_local(pcp, o, n) \
-({ \
- typeof(*raw_cpu_ptr(&(pcp))) __ret; \
- preempt_disable(); \
- __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
- preempt_enable(); \
- __ret; \
-})
-
-#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-({ \
- int __ret; \
- preempt_disable(); \
- __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
- raw_cpu_ptr(&(ptr2)), \
- o1, o2, n1, n2); \
- preempt_enable(); \
- __ret; \
-})
-
#define __CMPWAIT_CASE(w, sz, name) \
static inline void __cmpwait_case_##name(volatile void *ptr, \
unsigned long val) \
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index bb263820de13..21bb624e0a7a 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -45,7 +45,11 @@
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
#define ARM64_HAS_RAS_EXTN 26
+#define ARM64_WORKAROUND_843419 27
+#define ARM64_HAS_CACHE_IDC 28
+#define ARM64_HAS_CACHE_DIC 29
+#define ARM64_HW_DBM 30
-#define ARM64_NCAPS 27
+#define ARM64_NCAPS 31
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 060e3a4008ab..09b0f2a80c8f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -10,6 +10,7 @@
#define __ASM_CPUFEATURE_H
#include <asm/cpucaps.h>
+#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
@@ -89,24 +90,231 @@ struct arm64_ftr_reg {
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
-/* scope of capability check */
-enum {
- SCOPE_SYSTEM,
- SCOPE_LOCAL_CPU,
-};
+/*
+ * CPU capabilities:
+ *
+ * We use arm64_cpu_capabilities to represent system features, errata
+ * workarounds (both used internally by the kernel and tracked in
+ * cpu_hwcaps) and ELF HWCAPs (which are exposed to userspace).
+ *
+ * To support systems with heterogeneous CPUs, we need to make sure that we
+ * detect the capabilities correctly on the system and take appropriate
+ * measures to ensure there are no incompatibilities.
+ *
+ * This comment tries to explain how we treat the capabilities.
+ * Each capability has the following list of attributes :
+ *
+ * 1) Scope of Detection : The system detects a given capability by
+ * performing some checks at runtime. This could be, e.g., checking the
+ * value of a field in a CPU ID feature register or checking the CPU
+ * model. The capability provides a callback (@matches()) to perform
+ * the check. Scope defines how the checks should be performed. There
+ * are three cases:
+ *
+ * a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
+ * matches. This implies we have to run the check on all the
+ * booting CPUs until the system decides that the state of the
+ * capability is finalised. (See section 2 below.)
+ * Or
+ * b) SCOPE_SYSTEM: check all the CPUs and "detect" only if all the
+ * CPUs match. This implies we run the check only once, when the
+ * system decides to finalise the state of the capability. If the
+ * capability relies on a field in one of the CPU ID feature
+ * registers, we use the sanitised value of the register from the
+ * CPU feature infrastructure to make the decision.
+ * Or
+ * c) SCOPE_BOOT_CPU: Check only on the primary boot CPU to detect the
+ * feature. This category is for features that are "finalised"
+ * (or used) by the kernel very early even before the SMP cpus
+ * are brought up.
+ *
+ * The process of detection is usually denoted by "updating" the
+ * capability state in the code.
+ *
+ * 2) Finalise the state : The kernel should finalise the state of a
+ * capability at some point during its execution and take any necessary
+ * actions. Usually this is done after all the boot-time enabled CPUs
+ * are brought up by the kernel, so that it can make a better decision
+ * based on the available set of CPUs. However, there
+ * are some special cases, where the action is taken during the early
+ * boot by the primary boot CPU. (e.g, running the kernel at EL2 with
+ * Virtualisation Host Extensions). The kernel usually disallows any
+ * changes to the state of a capability once it finalises the capability
+ * and takes any action, as it may be impossible to execute the actions
+ * safely. A CPU brought up after a capability is "finalised" is
+ * referred to as a "late CPU" w.r.t. the capability. E.g., all
+ * secondary CPUs are treated as "late CPUs" for capabilities
+ * determined by the boot CPU.
+ *
+ * At the moment there are two passes of finalising the capabilities.
+ * a) Boot CPU scope capabilities - Finalised by primary boot CPU via
+ * setup_boot_cpu_capabilities().
+ * b) Everything except (a) - Run via setup_system_capabilities().
+ *
+ * 3) Verification: When a CPU is brought online (e.g, by user or by the
+ * kernel), the kernel should make sure that it is safe to use the CPU,
+ * by verifying that the CPU is compliant with the state of the
+ * capabilities finalised already. This happens via :
+ *
+ * secondary_start_kernel()-> check_local_cpu_capabilities()
+ *
+ * As explained in (2) above, capabilities could be finalised at
+ * different points in the execution. Each newly booted CPU is verified
+ * against the capabilities that have been finalised by the time it
+ * boots.
+ *
+ * a) SCOPE_BOOT_CPU : All CPUs are verified against the capability
+ * except for the primary boot CPU.
+ *
+ * b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM: All CPUs hotplugged on by the
+ * user after the kernel boot are verified against the capability.
+ *
+ * If there is a conflict, the kernel takes an action based on the
+ * severity (e.g., a CPU could be prevented from booting or cause a
+ * kernel panic). The CPU is allowed to "affect" the state of the
+ * capability, if it has not been finalised already. See section 5
+ * for more details on conflicts.
+ *
+ * 4) Action: As mentioned in (2), the kernel can take an action for each
+ * detected capability, on all CPUs in the system. Appropriate actions
+ * include turning on an architectural feature, modifying the control
+ * registers (e.g., SCTLR, TCR etc.) or patching the kernel via
+ * alternatives. The kernel patching is batched and performed at a
+ * later point. The actions are always initiated only after the
+ * capability is finalised. This is usually denoted by "enabling" the
+ * capability. The actions are initiated as follows :
+ * a) The action is triggered on all online CPUs, after the capability
+ * is finalised, invoked within the stop_machine() context from
+ * enable_cpu_capabilities().
+ *
+ * b) For any late CPU brought up after (1), the action is triggered
+ * via:
+ *
+ * check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
+ *
+ * 5) Conflicts: Based on the state of the capability on a late CPU vs.
+ * the system state, we could have the following combinations :
+ *
+ * x-----------------------------x
+ * | Type | System | Late CPU |
+ * |-----------------------------|
+ * | a | y | n |
+ * |-----------------------------|
+ * | b | n | y |
+ * x-----------------------------x
+ *
+ * Two separate flag bits are defined to indicate whether each kind of
+ * conflict can be allowed:
+ * ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case(a) is allowed
+ * ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case(b) is allowed
+ *
+ * Case (a) is not permitted for a capability that the system requires
+ * all CPUs to have in order for the capability to be enabled. This is
+ * typical for capabilities that represent enhanced functionality.
+ *
+ * Case (b) is not permitted for a capability that must be enabled
+ * during boot if any CPU in the system requires it in order to run
+ * safely. This is typical for erratum workarounds that cannot be
+ * enabled after the corresponding capability is finalised.
+ *
+ * In some non-typical cases either both (a) and (b), or neither,
+ * should be permitted. This can be described by including neither
+ * or both flags in the capability's type field.
+ */
+
+
+/*
+ * Decide how the capability is detected.
+ * On any local CPU vs System wide vs the primary boot CPU
+ */
+#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
+#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
+/*
+ * The capability is detected on the boot CPU and is used by the kernel
+ * during early boot, i.e., the capability should be "detected" and
+ * "enabled" as early as possible on all booting CPUs.
+ */
+#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
+#define ARM64_CPUCAP_SCOPE_MASK \
+ (ARM64_CPUCAP_SCOPE_SYSTEM | \
+ ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_SCOPE_BOOT_CPU)
+
+#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
+#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
+#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
+
+/*
+ * Is it permitted for a late CPU to have this capability when the
+ * system hasn't already enabled it?
+ */
+#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
+/* Is it safe for a late CPU to miss this capability when the system has it? */
+#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
+
+/*
+ * CPU errata workarounds that need to be enabled at boot time if one or
+ * more CPUs in the system require it. When one of these capabilities
+ * has been enabled, it is safe to allow any CPU to boot that doesn't
+ * require the workaround. However, it is not safe if a "late" CPU
+ * requires a workaround and the system hasn't enabled it already.
+ */
+#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time based on system-wide value of a
+ * feature. It is safe for a late CPU to have this feature even though
+ * the system hasn't enabled it, although the feature will not be used
+ * by Linux in this case. If the system has enabled this feature already,
+ * then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_SYSTEM_FEATURE \
+ (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time based on feature of one or more CPUs.
+ * All possible conflicts for a late CPU are ignored.
+ */
+#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+
+/*
+ * CPU feature detected at boot time, on one or more CPUs. A late CPU
+ * is not allowed to have the capability when the system doesn't have it.
+ * It is OK for a late CPU to miss the feature.
+ */
+#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
+
+/*
+ * CPU feature used early in the boot based on the boot CPU. All secondary
+ * CPUs must match the state of the capability as detected by the boot CPU.
+ */
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
- int def_scope; /* default scope */
+ u16 type;
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- int (*enable)(void *); /* Called on all active CPUs */
+ /*
+ * Take the appropriate actions to enable this capability for this CPU.
+ * For each successfully booted CPU, this method is called for each
+ * globally detected capability.
+ */
+ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
union {
struct { /* To be used for erratum handling only */
- u32 midr_model;
- u32 midr_range_min, midr_range_max;
+ struct midr_range midr_range;
+ const struct arm64_midr_revidr {
+ u32 midr_rv; /* revision/variant */
+ u32 revidr_mask;
+ } * const fixed_revs;
};
+ const struct midr_range *midr_range_list;
struct { /* Feature register checking */
u32 sys_reg;
u8 field_pos;
@@ -115,9 +323,38 @@ struct arm64_cpu_capabilities {
bool sign;
unsigned long hwcap;
};
+ /*
+ * A list of "matches/cpu_enable" pairs for the same
+ * "capability" of the same "type" as described by the parent.
+ * Only matches(), cpu_enable() and fields relevant to these
+ * methods are significant in the list. cpu_enable() is
+ * invoked only if the corresponding entry matches().
+ * However, if a cpu_enable() method is associated
+ * with multiple matches(), care should be taken that either
+ * the match criteria are mutually exclusive, or that the
+ * method is robust against being called multiple times.
+ */
+ const struct arm64_cpu_capabilities *match_list;
};
};
+static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
+{
+ return cap->type & ARM64_CPUCAP_SCOPE_MASK;
+}
+
+static inline bool
+cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
+}
+
+static inline bool
+cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
+{
+ return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
+}
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
@@ -236,15 +473,8 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
}
void __init setup_cpu_features(void);
-
-void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- const char *info);
-void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);
-void update_cpu_errata_workarounds(void);
-void __init enable_errata_workarounds(void);
-void verify_local_cpu_errata_workarounds(void);
u64 read_sanitised_ftr_reg(u32 id);
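To make the conflict table in section (5) of the comment above concrete,
here is a hedged sketch of how a late-CPU conflict could be evaluated with
the cpucap_late_cpu_*() helpers introduced in this diff; this is
illustrative only, not the kernel's actual verification code:

    /* Returns true if a late CPU conflicts with the finalised state. */
    static bool late_cpu_conflicts(const struct arm64_cpu_capabilities *cap,
                                   bool cpu_has, bool system_has)
    {
            if (system_has && !cpu_has)     /* case (a) in the table */
                    return !cpucap_late_cpu_optional(cap);
            if (!system_has && cpu_has)     /* case (b) in the table */
                    return !cpucap_late_cpu_permitted(cap);
            return false;                   /* states agree: no conflict */
    }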
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 350c76a1d15b..30014a9f8f2b 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -83,6 +83,8 @@
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A
+#define ARM_CPU_PART_CORTEX_A35 0xD04
+#define ARM_CPU_PART_CORTEX_A55 0xD05
#define APM_CPU_PART_POTENZA 0x000
@@ -102,6 +104,8 @@
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
+#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
@@ -118,6 +122,45 @@
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
/*
+ * Represent a range of MIDR values for a given CPU model and a
+ * range of variant/revision values.
+ *
+ * @model - CPU model as defined by MIDR_CPU_MODEL
+ * @rv_min - Minimum value for the variant/revision as defined by
+ * MIDR_CPU_VAR_REV
+ * @rv_max - Maximum value for the variant/revision for the range.
+ */
+struct midr_range {
+ u32 model;
+ u32 rv_min;
+ u32 rv_max;
+};
+
+#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \
+ { \
+ .model = m, \
+ .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \
+ .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
+ }
+
+#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+
+static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+{
+ return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
+ range->rv_min, range->rv_max);
+}
+
+static inline bool
+is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
+{
+ while (ranges->model)
+ if (is_midr_in_range(midr, ranges++))
+ return true;
+ return false;
+}
+
+/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
* rather than directly reading processor_id or read_cpuid() directly.
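Note that is_midr_in_range_list() treats a zero model as the end of the
list, so callers terminate their arrays with an empty entry. A hedged usage
sketch (the CPU selection here is hypothetical):

    static const struct midr_range example_cpus[] = {
            MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
            MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  /* r0p0 .. r1p0 */
            {},                                       /* terminator */
    };

    /* bool hit = is_midr_in_range_list(read_cpuid_id(), example_cpus); */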
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 803443d74926..ce70c3ffb993 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -240,6 +240,15 @@
(((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
ESR_ELx_SYS64_ISS_OP2_SHIFT))
+/*
+ * ISS field definitions for floating-point exception traps
+ * (FP_EXC_32/FP_EXC_64).
+ *
+ * (The FPEXC_* constants are used instead for common bits.)
+ */
+
+#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
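Per the architecture, TFV (bit [23]) indicates whether the trapped-fault
flags in an FP-exception syndrome are valid, so a handler consults it before
decoding them. A one-line sketch:

    /* Only trust the FPEXC_* trap flags in esr when TFV is set. */
    static bool esr_fp_flags_valid(unsigned long esr)
    {
            return esr & ESR_ELx_FP_EXC_TFV;
    }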
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 8857a0f0d0f7..aa7162ae93e3 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -22,33 +22,9 @@
#ifndef __ASSEMBLY__
#include <linux/cache.h>
+#include <linux/init.h>
#include <linux/stddef.h>
-/*
- * FP/SIMD storage area has:
- * - FPSR and FPCR
- * - 32 128-bit data registers
- *
- * Note that user_fpsimd forms a prefix of this structure, which is
- * relied upon in the ptrace FP/SIMD accessors.
- */
-struct fpsimd_state {
- union {
- struct user_fpsimd_state user_fpsimd;
- struct {
- __uint128_t vregs[32];
- u32 fpsr;
- u32 fpcr;
- /*
- * For ptrace compatibility, pad to next 128-bit
- * boundary here if extending this struct.
- */
- };
- };
- /* the id of the last cpu to have restored this state */
- unsigned int cpu;
-};
-
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK 0xf800009f
@@ -62,8 +38,8 @@ struct fpsimd_state {
struct task_struct;
-extern void fpsimd_save_state(struct fpsimd_state *state);
-extern void fpsimd_load_state(struct fpsimd_state *state);
+extern void fpsimd_save_state(struct user_fpsimd_state *state);
+extern void fpsimd_load_state(struct user_fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
@@ -83,7 +59,9 @@ extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
-extern int sve_kernel_enable(void *);
+
+struct arm64_cpu_capabilities;
+extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern int __ro_after_init sve_max_vl;
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index eec95768eaad..8262325e2fc6 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,8 +4,11 @@
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#include <linux/compiler_types.h>
+#include <linux/export.h>
#include <linux/stringify.h>
#include <asm/alternative.h>
+#include <asm/cpucaps.h>
#ifdef __ASSEMBLER__
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 4f766178fa6f..b6dbbe3123a9 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -39,6 +39,8 @@ struct mod_arch_specific {
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym);
+u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
+
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 43393208229e..9234013e759e 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,7 +16,10 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <linux/preempt.h>
+
#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
#include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off)
@@ -197,6 +200,32 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret;
}
+/* this_cpu_cmpxchg */
+#define _protect_cmpxchg_local(pcp, o, n) \
+({ \
+ typeof(*raw_cpu_ptr(&(pcp))) __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
+ preempt_enable(); \
+ __ret; \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+ int __ret; \
+ preempt_disable(); \
+ __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
+ raw_cpu_ptr(&(ptr2)), \
+ o1, o2, n1, n2); \
+ preempt_enable(); \
+ __ret; \
+})
+
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
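The this_cpu_cmpxchg*() family moved here wraps cmpxchg_local() in
preempt_disable()/preempt_enable() so the task cannot migrate between
resolving the per-CPU pointer and the compare-and-exchange. A hedged usage
sketch (the per-CPU variable is hypothetical):

    static DEFINE_PER_CPU(unsigned long, example_flag);

    static bool try_claim_this_cpu(void)
    {
            /* Atomically move this CPU's copy from 0 to 1. */
            return this_cpu_cmpxchg(example_flag, 0, 1) == 0;
    }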
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index cdfe3e657a9e..fd208eac9f2a 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -291,6 +291,7 @@
#define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+#define TCR_NFD1 (UL(1) << 54)
/*
* TTBR.
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fce604e3e599..767598932549 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -34,10 +34,12 @@
#ifdef __KERNEL__
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
#include <linux/string.h>
#include <asm/alternative.h>
-#include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
@@ -103,11 +105,19 @@ struct cpu_context {
struct thread_struct {
struct cpu_context cpu_context; /* cpu context */
- unsigned long tp_value; /* TLS register */
-#ifdef CONFIG_COMPAT
- unsigned long tp2_value;
-#endif
- struct fpsimd_state fpsimd_state;
+
+ /*
+ * Whitelisted fields for hardened usercopy:
+ * Maintainers must ensure manually that this contains no
+ * implicit padding.
+ */
+ struct {
+ unsigned long tp_value; /* TLS register */
+ unsigned long tp2_value;
+ struct user_fpsimd_state fpsimd_state;
+ } uw;
+
+ unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
unsigned int sve_vl; /* SVE vector length */
unsigned int sve_vl_onexec; /* SVE vl after next exec */
@@ -116,14 +126,17 @@ struct thread_struct {
struct debug_info debug; /* debugging */
};
-/*
- * Everything usercopied to/from thread_struct is statically-sized, so
- * no hardened usercopy whitelist is needed.
- */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
- *offset = *size = 0;
+ /* Verify that there is no padding among the whitelisted fields: */
+ BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
+ sizeof_field(struct thread_struct, uw.tp_value) +
+ sizeof_field(struct thread_struct, uw.tp2_value) +
+ sizeof_field(struct thread_struct, uw.fpsimd_state));
+
+ *offset = offsetof(struct thread_struct, uw);
+ *size = sizeof_field(struct thread_struct, uw);
}
#ifdef CONFIG_COMPAT
@@ -131,13 +144,13 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
({ \
unsigned long *__tls; \
if (is_compat_thread(task_thread_info(t))) \
- __tls = &(t)->thread.tp2_value; \
+ __tls = &(t)->thread.uw.tp2_value; \
else \
- __tls = &(t)->thread.tp_value; \
+ __tls = &(t)->thread.uw.tp_value; \
__tls; \
})
#else
-#define task_user_tls(t) (&(t)->thread.tp_value)
+#define task_user_tls(t) (&(t)->thread.uw.tp_value)
#endif
/* Sync TPIDR_EL0 back to thread_struct for current */
@@ -227,9 +240,9 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-int cpu_enable_pan(void *__unused);
-int cpu_enable_cache_maint_trap(void *__unused);
-int cpu_clear_disr(void *__unused);
+void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
+void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
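For context on how the uw whitelist is consumed: hardened usercopy only
permits copies into thread_struct that fall inside the reported window. A
simplified sketch of the idea, not the actual mm/usercopy.c logic:

    static bool thread_struct_copy_ok(unsigned long off, unsigned long len)
    {
            unsigned long wl_off, wl_size;

            arch_thread_struct_whitelist(&wl_off, &wl_size);
            return off >= wl_off && off + len <= wl_off + wl_size;
    }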
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 0e1960c59197..e7b9f154e476 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -490,6 +490,7 @@
#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_TS_SHIFT 52
#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40
@@ -511,6 +512,7 @@
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_DIT_SHIFT 48
#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24
@@ -568,6 +570,7 @@
#define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 07aa8e3c5630..28893a0b141d 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -45,17 +45,6 @@ extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-#define show_unhandled_signals_ratelimited() \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- bool __show_ratelimited = false; \
- if (show_unhandled_signals && __ratelimit(&_rs)) \
- __show_ratelimited = true; \
- __show_ratelimited; \
-})
-
int handle_guest_sea(phys_addr_t addr, unsigned int esr);
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 9e82dd79c7db..dfc61d73f740 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -60,6 +60,15 @@
__tlbi(op, (arg) | USER_ASID_FLAG); \
} while (0)
+/* This macro creates a properly formatted VA operand for the TLBI */
+#define __TLBI_VADDR(addr, asid) \
+ ({ \
+ unsigned long __ta = (addr) >> 12; \
+ __ta &= GENMASK_ULL(43, 0); \
+ __ta |= (unsigned long)(asid) << 48; \
+ __ta; \
+ })
+
/*
* TLB Management
* ==============
@@ -117,7 +126,7 @@ static inline void flush_tlb_all(void)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- unsigned long asid = ASID(mm) << 48;
+ unsigned long asid = __TLBI_VADDR(0, ASID(mm));
dsb(ishst);
__tlbi(aside1is, asid);
@@ -128,7 +137,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr)
{
- unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
+ unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
dsb(ishst);
__tlbi(vale1is, addr);
@@ -146,7 +155,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
bool last_level)
{
- unsigned long asid = ASID(vma->vm_mm) << 48;
+ unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
if ((end - start) > MAX_TLB_RANGE) {
@@ -154,8 +163,8 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
return;
}
- start = asid | (start >> 12);
- end = asid | (end >> 12);
+ start = __TLBI_VADDR(start, asid);
+ end = __TLBI_VADDR(end, asid);
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
@@ -185,8 +194,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
return;
}
- start >>= 12;
- end >>= 12;
+ start = __TLBI_VADDR(start, 0);
+ end = __TLBI_VADDR(end, 0);
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
@@ -202,7 +211,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long uaddr)
{
- unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+ unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
__tlbi(vae1is, addr);
__tlbi_user(vae1is, addr);
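__TLBI_VADDR keeps the operand within the architected fields: VA>>12 in
bits [43:0], the ASID in bits [63:48], and the RES0 bits in between cleared,
which is the point of the "avoid writing RES0 bits" change. A worked example
with made-up values:

    /* addr = 0x0000ffff00001000 (user VA), asid = 42 (0x2a) */
    unsigned long ta = __TLBI_VADDR(0x0000ffff00001000UL, 42);
    /* ta == 0x002a000ffff00001 */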
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 178e338d2889..c320f3bf6c57 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -35,10 +35,10 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
-void force_signal_inject(int signal, int code, struct pt_regs *regs,
- unsigned long address);
-
-void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+void force_signal_inject(int signal, int code, unsigned long address);
+void arm64_notify_segfault(unsigned long addr);
+void arm64_force_sig_info(struct siginfo *info, const char *str,
+ struct task_struct *tsk);
/*
* Move regs->pc to next instruction and do necessary setup before it
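With pt_regs dropped from the prototype, callers of force_signal_inject()
now pass just the signal, si_code and faulting address. A sketch of what a
caller might look like under the new prototype (the handler itself is
hypothetical):

    static void example_undef_handler(struct pt_regs *regs)
    {
            /* The siginfo_t is assembled internally; see the well-formed
             * siginfo rework mentioned in the merge message. */
            force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
    }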
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index c5f89442785c..9d1e24e030b3 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -102,12 +102,6 @@ static inline bool has_vhe(void)
return false;
}
-#ifdef CONFIG_ARM64_VHE
-extern void verify_cpu_run_el(void);
-#else
-static inline void verify_cpu_run_el(void) {}
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index f018c3deea3b..17c65c8f33cb 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -44,5 +44,9 @@
#define HWCAP_SHA512 (1 << 21)
#define HWCAP_SVE (1 << 22)
#define HWCAP_ASIMDFHM (1 << 23)
+#define HWCAP_DIT (1 << 24)
+#define HWCAP_USCAT (1 << 25)
+#define HWCAP_ILRCPC (1 << 26)
+#define HWCAP_FLAGM (1 << 27)
#endif /* _UAPI__ASM_HWCAP_H */
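The new HWCAP bits surface to userspace through the auxiliary vector, so a
program can probe them with glibc's getauxval(). A minimal sketch (the
fallback define mirrors the value above for builds against older headers):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_DIT
    #define HWCAP_DIT (1 << 24)
    #endif

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("DIT %ssupported\n", (hwcap & HWCAP_DIT) ? "" : "not ");
            return 0;
    }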
diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h
index 9b4d91277742..574d12f86039 100644
--- a/arch/arm64/include/uapi/asm/siginfo.h
+++ b/arch/arm64/include/uapi/asm/siginfo.h
@@ -21,25 +21,4 @@
#include <asm-generic/siginfo.h>
-/*
- * SIGFPE si_codes
- */
-#ifdef __KERNEL__
-#define FPE_FIXME 0 /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-/*
- * SIGBUS si_codes
- */
-#ifdef __KERNEL__
-#define BUS_FIXME 0 /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
-/*
- * SIGTRAP si_codes
- */
-#ifdef __KERNEL__
-#define TRAP_FIXME 0 /* Broken dup of SI_USER */
-#endif /* __KERNEL__ */
-
#endif