Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile                 |   4
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c  |   3
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c             |   4
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c       |   3
-rw-r--r--  arch/arm64/kernel/asm-offsets.c            |   7
-rw-r--r--  arch/arm64/kernel/cacheinfo.c              |  13
-rw-r--r--  arch/arm64/kernel/cpu-reset.h              |   2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c             |  33
-rw-r--r--  arch/arm64/kernel/cpufeature.c             | 426
-rw-r--r--  arch/arm64/kernel/cpuinfo.c                |   2
-rw-r--r--  arch/arm64/kernel/efi-entry.S              |   9
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S           |  13
-rw-r--r--  arch/arm64/kernel/head.S                   |  73
-rw-r--r--  arch/arm64/kernel/hibernate.c              |  26
-rw-r--r--  arch/arm64/kernel/insn.c                   |  34
-rw-r--r--  arch/arm64/kernel/probes/decode-insn.h     |   2
-rw-r--r--  arch/arm64/kernel/probes/simulate-insn.c   |  18
-rw-r--r--  arch/arm64/kernel/process.c                |   6
-rw-r--r--  arch/arm64/kernel/psci.c                   |   3
-rw-r--r--  arch/arm64/kernel/setup.c                  |  13
-rw-r--r--  arch/arm64/kernel/smccc-call.S             |  23
-rw-r--r--  arch/arm64/kernel/smp.c                    |   6
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c         |   3
-rw-r--r--  arch/arm64/kernel/topology.c               |  10
-rw-r--r--  arch/arm64/kernel/traps.c                  |  13
-rw-r--r--  arch/arm64/kernel/vdso.c                   |   8
26 files changed, 504 insertions, 253 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7d66bbaafc0c..1606c6b2a280 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -55,3 +55,7 @@ obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
extra-y += $(head-y) vmlinux.lds
+
+ifeq ($(CONFIG_DEBUG_EFI),y)
+AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
+endif
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index a32b4011d711..1f5655cd9cc9 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/acpi.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cpu_ops.h>
@@ -109,7 +110,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+ writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
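This is the first of several virt_to_phys()/__pa() to __pa_symbol() conversions in this series. A rough sketch of the distinction being enforced, assuming CONFIG_DEBUG_VIRTUAL semantics (the kmalloc'd buffer name is purely illustrative):

	/*
	 * __pa()/virt_to_phys() are for linear-map addresses only;
	 * __pa_symbol() is for kernel-image symbols such as secondary_entry.
	 * With CONFIG_DEBUG_VIRTUAL, passing an image symbol to __pa()
	 * warns, so symbol users are converted throughout this series.
	 */
	phys_addr_t entry_pa = __pa_symbol(secondary_entry);	/* image symbol */
	phys_addr_t buf_pa   = virt_to_phys(buf);	/* 'buf' from kmalloc(): linear map */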
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 78f368039c79..e9c4dc9e0ada 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -73,5 +73,5 @@ NOKPROBE_SYMBOL(_mcount);
#endif
/* arm-smccc */
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
+EXPORT_SYMBOL(__arm_smccc_smc);
+EXPORT_SYMBOL(__arm_smccc_hvc);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index ecf9298a12d4..657977e77ec8 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -19,6 +19,7 @@
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
+#include <asm/kprobes.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
@@ -636,7 +637,7 @@ static int __init armv8_deprecated_init(void)
if (system_supports_mixed_endian_el0())
register_insn_emulation(&setend_ops);
else
- pr_info("setend instruction emulation is not supported on the system");
+ pr_info("setend instruction emulation is not supported on this system\n");
}
cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index bc049afc73a7..b3bb7ef97bc8 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -143,8 +143,11 @@ int main(void)
DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
#endif
- DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
- DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
+ DEFINE(ARM_SMCCC_RES_X0_OFFS, offsetof(struct arm_smccc_res, a0));
+ DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index 9617301f76b5..3f2250fc391b 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -84,7 +84,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
static int __init_cache_level(unsigned int cpu)
{
- unsigned int ctype, level, leaves;
+ unsigned int ctype, level, leaves, of_level;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
@@ -97,6 +97,17 @@ static int __init_cache_level(unsigned int cpu)
leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
}
+ of_level = of_find_last_cache_level(cpu);
+ if (level < of_level) {
+ /*
+ * Some external caches are not specified in CLIDR_EL1;
+ * the information may be available in the device tree.
+ * Only unified external caches are considered here.
+ */
+ leaves += (of_level - level);
+ level = of_level;
+ }
+
this_cpu_ci->num_levels = level;
this_cpu_ci->num_leaves = leaves;
return 0;
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index d4e9ecb264f0..6c2b1b4f57c9 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -24,7 +24,7 @@ static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
- restart = (void *)virt_to_phys(__cpu_soft_restart);
+ restart = (void *)__pa_symbol(__cpu_soft_restart);
cpu_install_idmap();
restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index b75e917aac46..f6cc67e7626e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -79,8 +79,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
- (1 << MIDR_VARIANT_SHIFT) | 2),
+ MIDR_RANGE(MIDR_CORTEX_A57,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, 2)),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
@@ -88,8 +89,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 834220",
.capability = ARM64_WORKAROUND_834220,
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
- (1 << MIDR_VARIANT_SHIFT) | 2),
+ MIDR_RANGE(MIDR_CORTEX_A57,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, 2)),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
@@ -113,8 +115,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
.desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456,
- MIDR_RANGE(MIDR_THUNDERX, 0x00,
- (1 << MIDR_VARIANT_SHIFT) | 1),
+ MIDR_RANGE(MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, 1)),
},
{
/* Cavium ThunderX, T81 pass 1.0 */
@@ -130,6 +133,24 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.def_scope = SCOPE_LOCAL_CPU,
.enable = cpu_enable_trap_ctr_access,
},
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+ {
+ .desc = "Qualcomm Technologies Falkor erratum 1003",
+ .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+ MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(0, 0)),
+ },
+#endif
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+ {
+ .desc = "Qualcomm Technologies Falkor erratum 1009",
+ .capability = ARM64_WORKAROUND_REPEAT_TLBI,
+ MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(0, 0)),
+ },
+#endif
{
}
};
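The open-coded (1 << MIDR_VARIANT_SHIFT) | rev bounds above give way to a named helper. A minimal sketch, assuming the <asm/cputype.h> definition introduced alongside this series:

	#define MIDR_CPU_VAR_REV(var, rev) \
		(((var) << MIDR_VARIANT_SHIFT) | (rev))

	/* "Cortex-A57 r0p0 - r1p2" thus reads as the range
	 * MIDR_CPU_VAR_REV(0, 0) .. MIDR_CPU_VAR_REV(1, 2). */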
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index fdf8f045929f..abda8e861865 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -23,12 +23,14 @@
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
+#include <asm/traps.h>
#include <asm/virt.h>
unsigned long elf_hwcap __read_mostly;
@@ -52,9 +54,10 @@ EXPORT_SYMBOL(cpu_hwcaps);
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
-#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
{ \
.sign = SIGNED, \
+ .visible = VISIBLE, \
.strict = STRICT, \
.type = TYPE, \
.shift = SHIFT, \
@@ -63,12 +66,12 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
}
/* Define a feature with unsigned values */
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
/* Define a feature with a signed value */
-#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
#define ARM64_FTR_END \
{ \
@@ -80,85 +83,80 @@ static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+/*
+ * NOTE: Any changes to the visibility of features should be kept in
+ * sync with the documentation of the CPU feature register ABI.
+ */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
/* Linux doesn't care about the EL3 */
- ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
- ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
/*
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
*/
- ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_ctr[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine.
* If we have differing I-cache policies, report it as the weakest - AIVIVT.
*/
- ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT), /* L1Ip */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
ARM64_FTR_END,
};
@@ -168,79 +166,78 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
};
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
- ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+ /*
+ * We can instantiate multiple PMU instances with different levels
+ * of support.
+ */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_mvfr2[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* FPMisc */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* SIMDMisc */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_dczid[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1), /* DZP */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* BS */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_isar5[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* ac2 */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0), /* RAZ */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 12, 4, 0), /* State3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 8, 4, 0), /* State2 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 4, 4, 0), /* State1 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 4, 0), /* State0 */
ARM64_FTR_END,
};
static const struct arm64_ftr_bits ftr_id_dfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
- S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
ARM64_FTR_END,
};
@@ -251,29 +248,24 @@ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
* id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
*/
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
ARM64_FTR_END,
};
-static const struct arm64_ftr_bits ftr_generic[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
+/* Table for a single 32bit feature value */
+static const struct arm64_ftr_bits ftr_single32[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
ARM64_FTR_END,
};
-static const struct arm64_ftr_bits ftr_generic32[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
- ARM64_FTR_END,
-};
-
-static const struct arm64_ftr_bits ftr_aa64raz[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
+static const struct arm64_ftr_bits ftr_raz[] = {
ARM64_FTR_END,
};
@@ -314,15 +306,15 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),
+ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
- ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),
+ ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
- ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),
+ ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@@ -334,7 +326,7 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
/* Op1 = 3, CRn = 14, CRm = 0 */
- ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
+ ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};
static int search_cmp_ftr_reg(const void *id, const void *regp)
@@ -410,25 +402,43 @@ static void __init sort_ftr_regs(void)
/*
* Initialise the CPU feature register from Boot CPU values.
* Also initialises the strict_mask for the register.
+ * Any bits that are not covered by an arm64_ftr_bits entry are considered
+ * RES0 for the system-wide value, and must strictly match.
*/
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
u64 val = 0;
u64 strict_mask = ~0x0ULL;
+ u64 user_mask = 0;
+ u64 valid_mask = 0;
+
const struct arm64_ftr_bits *ftrp;
struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
BUG_ON(!reg);
for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+ u64 ftr_mask = arm64_ftr_mask(ftrp);
s64 ftr_new = arm64_ftr_value(ftrp, new);
val = arm64_ftr_set_value(ftrp, val, ftr_new);
+
+ valid_mask |= ftr_mask;
if (!ftrp->strict)
- strict_mask &= ~arm64_ftr_mask(ftrp);
+ strict_mask &= ~ftr_mask;
+ if (ftrp->visible)
+ user_mask |= ftr_mask;
+ else
+ reg->user_val = arm64_ftr_set_value(ftrp,
+ reg->user_val,
+ ftrp->safe_val);
}
+
+ val &= valid_mask;
+
reg->sys_val = val;
reg->strict_mask = strict_mask;
+ reg->user_mask = user_mask;
}
void __init init_cpu_features(struct cpuinfo_arm64 *info)
@@ -635,6 +645,9 @@ u64 read_system_reg(u32 id)
return regp->sys_val;
}
+#define read_sysreg_case(r) \
+ case r: return read_sysreg_s(r)
+
/*
* __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
* Read the system register on the current CPU
@@ -642,36 +655,37 @@ u64 read_system_reg(u32 id)
static u64 __raw_read_system_reg(u32 sys_id)
{
switch (sys_id) {
- case SYS_ID_PFR0_EL1: return read_cpuid(ID_PFR0_EL1);
- case SYS_ID_PFR1_EL1: return read_cpuid(ID_PFR1_EL1);
- case SYS_ID_DFR0_EL1: return read_cpuid(ID_DFR0_EL1);
- case SYS_ID_MMFR0_EL1: return read_cpuid(ID_MMFR0_EL1);
- case SYS_ID_MMFR1_EL1: return read_cpuid(ID_MMFR1_EL1);
- case SYS_ID_MMFR2_EL1: return read_cpuid(ID_MMFR2_EL1);
- case SYS_ID_MMFR3_EL1: return read_cpuid(ID_MMFR3_EL1);
- case SYS_ID_ISAR0_EL1: return read_cpuid(ID_ISAR0_EL1);
- case SYS_ID_ISAR1_EL1: return read_cpuid(ID_ISAR1_EL1);
- case SYS_ID_ISAR2_EL1: return read_cpuid(ID_ISAR2_EL1);
- case SYS_ID_ISAR3_EL1: return read_cpuid(ID_ISAR3_EL1);
- case SYS_ID_ISAR4_EL1: return read_cpuid(ID_ISAR4_EL1);
- case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR4_EL1);
- case SYS_MVFR0_EL1: return read_cpuid(MVFR0_EL1);
- case SYS_MVFR1_EL1: return read_cpuid(MVFR1_EL1);
- case SYS_MVFR2_EL1: return read_cpuid(MVFR2_EL1);
-
- case SYS_ID_AA64PFR0_EL1: return read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64DFR0_EL1: return read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64MMFR0_EL1: return read_cpuid(ID_AA64MMFR0_EL1);
- case SYS_ID_AA64MMFR1_EL1: return read_cpuid(ID_AA64MMFR1_EL1);
- case SYS_ID_AA64MMFR2_EL1: return read_cpuid(ID_AA64MMFR2_EL1);
- case SYS_ID_AA64ISAR0_EL1: return read_cpuid(ID_AA64ISAR0_EL1);
- case SYS_ID_AA64ISAR1_EL1: return read_cpuid(ID_AA64ISAR1_EL1);
-
- case SYS_CNTFRQ_EL0: return read_cpuid(CNTFRQ_EL0);
- case SYS_CTR_EL0: return read_cpuid(CTR_EL0);
- case SYS_DCZID_EL0: return read_cpuid(DCZID_EL0);
+ read_sysreg_case(SYS_ID_PFR0_EL1);
+ read_sysreg_case(SYS_ID_PFR1_EL1);
+ read_sysreg_case(SYS_ID_DFR0_EL1);
+ read_sysreg_case(SYS_ID_MMFR0_EL1);
+ read_sysreg_case(SYS_ID_MMFR1_EL1);
+ read_sysreg_case(SYS_ID_MMFR2_EL1);
+ read_sysreg_case(SYS_ID_MMFR3_EL1);
+ read_sysreg_case(SYS_ID_ISAR0_EL1);
+ read_sysreg_case(SYS_ID_ISAR1_EL1);
+ read_sysreg_case(SYS_ID_ISAR2_EL1);
+ read_sysreg_case(SYS_ID_ISAR3_EL1);
+ read_sysreg_case(SYS_ID_ISAR4_EL1);
+ read_sysreg_case(SYS_ID_ISAR5_EL1);
+ read_sysreg_case(SYS_MVFR0_EL1);
+ read_sysreg_case(SYS_MVFR1_EL1);
+ read_sysreg_case(SYS_MVFR2_EL1);
+
+ read_sysreg_case(SYS_ID_AA64PFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64DFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64DFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
+ read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
+
+ read_sysreg_case(SYS_CNTFRQ_EL0);
+ read_sysreg_case(SYS_CTR_EL0);
+ read_sysreg_case(SYS_DCZID_EL0);
+
default:
BUG();
return 0;
@@ -720,13 +734,11 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry,
static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
u32 midr = read_cpuid_id();
- u32 rv_min, rv_max;
/* Cavium ThunderX pass 1.x and 2.x */
- rv_min = 0;
- rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
-
- return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+ return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
@@ -737,7 +749,7 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
int __unused)
{
- phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
+ phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
/*
* Activate the lower HYP offset only if:
@@ -806,7 +818,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
.min_field_value = 1,
- .enable = cpu_enable_uao,
+ /*
+ * We rely on stop_machine() calling uao_thread_switch() to set
+ * UAO immediately after patching.
+ */
},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
@@ -868,6 +883,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
@@ -933,6 +949,8 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
+ /* We support emulation of accesses to CPU ID feature registers */
+ elf_hwcap |= HWCAP_CPUID;
for (; hwcaps->matches; hwcaps++)
if (hwcaps->matches(hwcaps, hwcaps->def_scope))
cap_set_elf_hwcap(hwcaps);
@@ -1120,3 +1138,101 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
+
+/*
+ * We emulate only the following system register space.
+ * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
+ * See Table C5-6 System instruction encodings for System register accesses,
+ * ARMv8 ARM(ARM DDI 0487A.f) for more details.
+ */
+static inline bool __attribute_const__ is_emulated(u32 id)
+{
+ return (sys_reg_Op0(id) == 0x3 &&
+ sys_reg_CRn(id) == 0x0 &&
+ sys_reg_Op1(id) == 0x0 &&
+ (sys_reg_CRm(id) == 0 ||
+ ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
+}
+
+/*
+ * With CRm == 0, reg should be one of :
+ * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
+ */
+static inline int emulate_id_reg(u32 id, u64 *valp)
+{
+ switch (id) {
+ case SYS_MIDR_EL1:
+ *valp = read_cpuid_id();
+ break;
+ case SYS_MPIDR_EL1:
+ *valp = SYS_MPIDR_SAFE_VAL;
+ break;
+ case SYS_REVIDR_EL1:
+ /* IMPLEMENTATION DEFINED values are emulated with 0 */
+ *valp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int emulate_sys_reg(u32 id, u64 *valp)
+{
+ struct arm64_ftr_reg *regp;
+
+ if (!is_emulated(id))
+ return -EINVAL;
+
+ if (sys_reg_CRm(id) == 0)
+ return emulate_id_reg(id, valp);
+
+ regp = get_arm64_ftr_reg(id);
+ if (regp)
+ *valp = arm64_ftr_reg_user_value(regp);
+ else
+ /*
+ * The untracked registers are either IMPLEMENTATION DEFINED
+ * (e.g, ID_AFR0_EL1) or reserved RAZ.
+ */
+ *valp = 0;
+ return 0;
+}
+
+static int emulate_mrs(struct pt_regs *regs, u32 insn)
+{
+ int rc;
+ u32 sys_reg, dst;
+ u64 val;
+
+ /*
+ * sys_reg values are defined as used in mrs/msr instruction.
+ * shift the imm value to get the encoding.
+ */
+ sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
+ rc = emulate_sys_reg(sys_reg, &val);
+ if (!rc) {
+ dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+ pt_regs_write_reg(regs, dst, val);
+ regs->pc += 4;
+ }
+
+ return rc;
+}
+
+static struct undef_hook mrs_hook = {
+ .instr_mask = 0xfff00000,
+ .instr_val = 0xd5300000,
+ .pstate_mask = COMPAT_PSR_MODE_MASK,
+ .pstate_val = PSR_MODE_EL0t,
+ .fn = emulate_mrs,
+};
+
+static int __init enable_mrs_emulation(void)
+{
+ register_undef_hook(&mrs_hook);
+ return 0;
+}
+
+late_initcall(enable_mrs_emulation);
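With the hook registered, an EL0 MRS of an emulated ID register traps as undefined and returns the sanitised, visibility-filtered value. A hedged userspace sketch; the S3_0_C0_C6_0 spelling of ID_AA64ISAR0_EL1 avoids requiring a newer assembler, and a real program should check the cpuid hwcap first:

	#include <stdio.h>

	int main(void)
	{
		unsigned long isar0;

		/* Traps to EL1 and is handled by emulate_mrs() above. */
		asm volatile("mrs %0, S3_0_C0_C6_0" : "=r" (isar0));
		printf("ID_AA64ISAR0_EL1: %#lx\n", isar0);
		return 0;
	}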
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 7b7be71e87bf..5b22c687f02a 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -63,6 +63,8 @@ static const char *const hwcap_str[] = {
"atomics",
"fphp",
"asimdhp",
+ "cpuid",
+ "asimdrdm",
NULL
};
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index e88c064b845c..4e6ad355bd05 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -46,8 +46,7 @@ ENTRY(entry)
* efi_system_table_t *sys_table,
* unsigned long *image_addr) ;
*/
- adrp x8, _text
- add x8, x8, #:lo12:_text
+ adr_l x8, _text
add x2, sp, 16
str x8, [x2]
bl efi_entry
@@ -68,10 +67,8 @@ ENTRY(entry)
/*
* Calculate size of the kernel Image (same for original and copy).
*/
- adrp x1, _text
- add x1, x1, #:lo12:_text
- adrp x2, _edata
- add x2, x2, #:lo12:_edata
+ adr_l x1, _text
+ adr_l x2, _edata
sub x1, x2, x1
/*
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index aef02d2af3b5..e1be42e11ff5 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
@@ -98,8 +99,7 @@
ENTRY(_mcount)
mcount_enter
- adrp x0, ftrace_trace_function
- ldr x2, [x0, #:lo12:ftrace_trace_function]
+ ldr_l x2, ftrace_trace_function
adr x0, ftrace_stub
cmp x0, x2 // if (ftrace_trace_function
b.eq skip_ftrace_call // != ftrace_stub) {
@@ -115,15 +115,12 @@ skip_ftrace_call: // return;
mcount_exit // return;
// }
skip_ftrace_call:
- adrp x1, ftrace_graph_return
- ldr x2, [x1, #:lo12:ftrace_graph_return]
+ ldr_l x2, ftrace_graph_return
cmp x0, x2 // if ((ftrace_graph_return
b.ne ftrace_graph_caller // != ftrace_stub)
- adrp x1, ftrace_graph_entry // || (ftrace_graph_entry
- adrp x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
- ldr x2, [x1, #:lo12:ftrace_graph_entry]
- add x0, x0, #:lo12:ftrace_graph_entry_stub
+ ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
+ adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
cmp x0, x2
b.ne ftrace_graph_caller // ftrace_graph_caller();
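The adrp/add and adrp/ldr pairs replaced here and in efi-entry.S are assumed to come from the <asm/assembler.h> helpers, roughly (simplified; the real ldr_l also accepts an optional scratch register):

	.macro	adr_l, dst, sym		// dst = address of sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	.macro	ldr_l, dst, sym		// dst = *sym
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.endm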
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4b1abac3485a..4fb6ccd886d1 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -149,7 +149,7 @@ extra_header_fields:
.quad 0 // SizeOfHeapReserve
.quad 0 // SizeOfHeapCommit
.long 0 // LoaderFlags
- .long 0x6 // NumberOfRvaAndSizes
+ .long (section_table - .) / 8 // NumberOfRvaAndSizes
.quad 0 // ExportTable
.quad 0 // ImportTable
@@ -158,6 +158,11 @@ extra_header_fields:
.quad 0 // CertificationTable
.quad 0 // BaseRelocationTable
+#ifdef CONFIG_DEBUG_EFI
+ .long efi_debug_table - _head // DebugTable
+ .long efi_debug_table_size
+#endif
+
// Section table
section_table:
@@ -195,6 +200,46 @@ section_table:
.short 0 // NumberOfLineNumbers (0 for executables)
.long 0xe0500020 // Characteristics (section flags)
+#ifdef CONFIG_DEBUG_EFI
+ /*
+ * The debug table is referenced via its Relative Virtual Address (RVA),
+ * which is only defined for those parts of the image that are covered
+ * by a section declaration. Since this header is not covered by any
+ * section, the debug table must be emitted elsewhere. So stick it in
+ * the .init.rodata section instead.
+ *
+ * Note that the EFI debug entry itself may legally have a zero RVA,
+ * which means we can simply put it right after the section headers.
+ */
+ __INITRODATA
+
+ .align 2
+efi_debug_table:
+ // EFI_IMAGE_DEBUG_DIRECTORY_ENTRY
+ .long 0 // Characteristics
+ .long 0 // TimeDateStamp
+ .short 0 // MajorVersion
+ .short 0 // MinorVersion
+ .long 2 // Type == EFI_IMAGE_DEBUG_TYPE_CODEVIEW
+ .long efi_debug_entry_size // SizeOfData
+ .long 0 // RVA
+ .long efi_debug_entry - _head // FileOffset
+
+ .set efi_debug_table_size, . - efi_debug_table
+ .previous
+
+efi_debug_entry:
+ // EFI_IMAGE_DEBUG_CODEVIEW_NB10_ENTRY
+ .ascii "NB10" // Signature
+ .long 0 // Unknown
+ .long 0 // Unknown2
+ .long 0 // Unknown3
+
+ .asciz VMLINUX_PATH
+
+ .set efi_debug_entry_size, . - efi_debug_entry
+#endif
+
/*
* EFI will load .text onwards at the 4k section alignment
* described in the PE/COFF header. To ensure that instruction
@@ -483,7 +528,7 @@ ENTRY(kimage_vaddr)
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
*
- * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
* booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
@@ -592,15 +637,26 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
#endif
/* EL2 debug */
- mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx x0, x0, #8, #4
+ mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx x0, x1, #8, #4
cmp x0, #1
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
4:
- csel x0, xzr, x0, lt // all PMU counters from EL1
- msr mdcr_el2, x0 // (if they exist)
+ csel x3, xzr, x0, lt // all PMU counters from EL1
+
+ /* Statistical profiling */
+ ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
+ cbz x0, 6f // Skip if SPE not present
+ cbnz x2, 5f // VHE?
+ mov x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ orr x3, x3, x1 // If we don't have VHE, then
+ b 6f // use EL1&0 translation.
+5: // For VHE, use EL2 translation
+ orr x3, x3, #MDCR_EL2_TPMS // and disable access from EL1
+6:
+ msr mdcr_el2, x3 // Configure debug traps
/* Stage-2 translation */
msr vttbr_el2, xzr
@@ -613,8 +669,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
install_el2_stub:
/* Hypervisor stub */
- adrp x0, __hyp_stub_vectors
- add x0, x0, #:lo12:__hyp_stub_vectors
+ adr_l x0, __hyp_stub_vectors
msr vbar_el2, x0
/* spsr */
@@ -628,7 +683,7 @@ ENDPROC(el2_setup)
/*
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
- * in x20. See arch/arm64/include/asm/virt.h for more info.
+ * in w0. See arch/arm64/include/asm/virt.h for more info.
*/
set_cpu_boot_mode_flag:
adr_l x1, __boot_cpu_mode
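The statistical-profiling hunk above relies on three MDCR_EL2 fields. Their assumed definitions, matching the ARMv8.2 SPE register layout:

	/* Assumed <asm/kvm_arm.h> values used by el2_setup above */
	#define MDCR_EL2_TPMS		(1 << 14)	/* trap SPE accesses from EL1 */
	#define MDCR_EL2_E2PB_SHIFT	(UL(12))	/* profiling-buffer owning EL */
	#define MDCR_EL2_E2PB_MASK	(UL(0x3))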
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index fe301cbcb442..97a7384100f3 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -50,9 +50,6 @@
*/
extern int in_suspend;
-/* Find a symbols alias in the linear map */
-#define LMADDR(x) phys_to_virt(virt_to_phys(x))
-
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
@@ -102,8 +99,8 @@ static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
int pfn_is_nosave(unsigned long pfn)
{
- unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
- unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+ unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
+ unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
@@ -125,12 +122,12 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
return -EOVERFLOW;
arch_hdr_invariants(&hdr->invariants);
- hdr->ttbr1_el1 = virt_to_phys(swapper_pg_dir);
+ hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
hdr->reenter_kernel = _cpu_resume;
/* We can't use __hyp_get_vectors() because kvm may still be loaded */
if (el2_reset_needed())
- hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+ hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
else
hdr->__hyp_stub_vectors = 0;
@@ -460,7 +457,6 @@ int swsusp_arch_resume(void)
void *zero_page;
size_t exit_size;
pgd_t *tmp_pg_dir;
- void *lm_restore_pblist;
phys_addr_t phys_hibernate_exit;
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
@@ -472,7 +468,7 @@ int swsusp_arch_resume(void)
*/
tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.");
+ pr_err("Failed to allocate memory for temporary page tables.\n");
rc = -ENOMEM;
goto out;
}
@@ -481,18 +477,12 @@ int swsusp_arch_resume(void)
goto out;
/*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
- /*
* We need a zero page that is zero before & after resume in order
* to break before make on the ttbr1 page tables.
*/
zero_page = (void *)get_safe_page(GFP_ATOMIC);
if (!zero_page) {
- pr_err("Failed to allocate zero page.");
+ pr_err("Failed to allocate zero page.\n");
rc = -ENOMEM;
goto out;
}
@@ -512,7 +502,7 @@ int swsusp_arch_resume(void)
&phys_hibernate_exit,
(void *)get_safe_page, GFP_ATOMIC);
if (rc) {
- pr_err("Failed to create safe executable page for hibernate_exit code.");
+ pr_err("Failed to create safe executable page for hibernate_exit code.\n");
goto out;
}
@@ -537,7 +527,7 @@ int swsusp_arch_resume(void)
}
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
- resume_hdr.reenter_kernel, lm_restore_pblist,
+ resume_hdr.reenter_kernel, restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
out:
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 94b62c1fa4df..3a63954a8b14 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -31,6 +31,7 @@
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
+#include <asm/kprobes.h>
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
@@ -93,10 +94,10 @@ static void __kprobes *patch_map(void *addr, int fixmap)
bool module = !core_kernel_text(uintaddr);
struct page *page;
- if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+ if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
else if (!module)
- page = pfn_to_page(PHYS_PFN(__pa(addr)));
+ page = phys_to_page(__pa_symbol(addr));
else
return addr;
@@ -417,6 +418,35 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
return insn;
}
+u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
+ u32 insn)
+{
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_REGTYPE_RT:
+ case AARCH64_INSN_REGTYPE_RD:
+ shift = 0;
+ break;
+ case AARCH64_INSN_REGTYPE_RN:
+ shift = 5;
+ break;
+ case AARCH64_INSN_REGTYPE_RT2:
+ case AARCH64_INSN_REGTYPE_RA:
+ shift = 10;
+ break;
+ case AARCH64_INSN_REGTYPE_RM:
+ shift = 16;
+ break;
+ default:
+ pr_err("%s: unknown register type encoding %d\n", __func__,
+ type);
+ return 0;
+ }
+
+ return (insn >> shift) & GENMASK(4, 0);
+}
+
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
u32 insn,
enum aarch64_insn_register reg)
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
index 76d3f315407f..192ab007bacb 100644
--- a/arch/arm64/kernel/probes/decode-insn.h
+++ b/arch/arm64/kernel/probes/decode-insn.h
@@ -16,6 +16,8 @@
#ifndef _ARM_KERNEL_KPROBES_ARM64_H
#define _ARM_KERNEL_KPROBES_ARM64_H
+#include <asm/kprobes.h>
+
/*
* ARM strongly recommends a limit of 128 bytes between LoadExcl and
* StoreExcl instructions in a single thread of execution. So keep the
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 357d3efe1366..be05868418ee 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -17,6 +17,8 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
+#include <asm/ptrace.h>
+
#include "simulate-insn.h"
#define bbl_displacement(insn) \
@@ -36,30 +38,22 @@
static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
{
- if (reg < 31)
- regs->regs[reg] = val;
+ pt_regs_write_reg(regs, reg, val);
}
static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val)
{
- if (reg < 31)
- regs->regs[reg] = lower_32_bits(val);
+ pt_regs_write_reg(regs, reg, lower_32_bits(val));
}
static inline u64 get_x_reg(struct pt_regs *regs, int reg)
{
- if (reg < 31)
- return regs->regs[reg];
- else
- return 0;
+ return pt_regs_read_reg(regs, reg);
}
static inline u32 get_w_reg(struct pt_regs *regs, int reg)
{
- if (reg < 31)
- return lower_32_bits(regs->regs[reg]);
- else
- return 0;
+ return lower_32_bits(pt_regs_read_reg(regs, reg));
}
static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
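The open-coded accessors above are replaced by pt_regs helpers, which are assumed to encode the XZR convention (register 31 reads as zero and ignores writes):

	/* Assumed <asm/ptrace.h> semantics relied on by the new helpers */
	static inline u64 pt_regs_read_reg(struct pt_regs *regs, int r)
	{
		return r == 31 ? 0 : regs->regs[r];
	}

	static inline void pt_regs_write_reg(struct pt_regs *regs, int r, u64 val)
	{
		if (r != 31)
			regs->regs[r] = val;
	}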
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a3a2816ba73a..1ad48f93abdd 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -339,7 +339,7 @@ static void entry_task_switch(struct task_struct *next)
/*
* Thread switching.
*/
-struct task_struct *__switch_to(struct task_struct *prev,
+__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next)
{
struct task_struct *last;
@@ -407,7 +407,7 @@ unsigned long arch_align_stack(unsigned long sp)
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
if (is_compat_task())
- return randomize_page(mm->brk, 0x02000000);
+ return randomize_page(mm->brk, SZ_32M);
else
- return randomize_page(mm->brk, 0x40000000);
+ return randomize_page(mm->brk, SZ_1G);
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 42816bebb1e0..e8edbf13302a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -20,6 +20,7 @@
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/psci.h>
+#include <linux/mm.h>
#include <uapi/linux/psci.h>
@@ -45,7 +46,7 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b051367e2149..952e2c0dabd5 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -42,6 +42,7 @@
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
+#include <linux/mm.h>
#include <asm/acpi.h>
#include <asm/fixmap.h>
@@ -199,16 +200,16 @@ static void __init request_standard_resources(void)
struct memblock_region *region;
struct resource *res;
- kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(__init_begin - 1);
- kernel_data.start = virt_to_phys(_sdata);
- kernel_data.end = virt_to_phys(_end - 1);
+ kernel_code.start = __pa_symbol(_text);
+ kernel_code.end = __pa_symbol(__init_begin - 1);
+ kernel_data.start = __pa_symbol(_sdata);
+ kernel_data.end = __pa_symbol(_end - 1);
for_each_memblock(memory, region) {
res = alloc_bootmem_low(sizeof(*res));
if (memblock_is_nomap(region)) {
res->name = "reserved";
- res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->flags = IORESOURCE_MEM;
} else {
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
@@ -297,7 +298,7 @@ void __init setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+ init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
#endif
#ifdef CONFIG_VT
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index ae0496fa4235..62522342e1e4 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -12,6 +12,7 @@
*
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
.macro SMCCC instr
@@ -20,24 +21,32 @@
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
- ret
+ ldr x4, [sp, #8]
+ cbz x4, 1f /* no quirk structure */
+ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6
+ b.ne 1f
+ str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS]
+1: ret
.cfi_endproc
.endm
/*
* void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_smc)
+ENTRY(__arm_smccc_smc)
SMCCC smc
-ENDPROC(arm_smccc_smc)
+ENDPROC(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
* unsigned long a3, unsigned long a4, unsigned long a5,
- * unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ * unsigned long a6, unsigned long a7, struct arm_smccc_res *res,
+ * struct arm_smccc_quirk *quirk)
*/
-ENTRY(arm_smccc_hvc)
+ENTRY(__arm_smccc_hvc)
SMCCC hvc
-ENDPROC(arm_smccc_hvc)
+ENDPROC(__arm_smccc_hvc)
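Since the entry points gained a quirk argument and a leading underscore, existing callers are assumed to be kept working by wrapper macros in include/linux/arm-smccc.h along these lines:

	/* Assumed wrappers: legacy callers pass no quirk structure. */
	#define arm_smccc_smc(...)		__arm_smccc_smc(__VA_ARGS__, NULL)
	#define arm_smccc_hvc(...)		__arm_smccc_hvc(__VA_ARGS__, NULL)

	/* Quirk-aware callers supply a struct arm_smccc_quirk explicitly. */
	#define arm_smccc_smc_quirk(...)	__arm_smccc_smc(__VA_ARGS__)
	#define arm_smccc_hvc_quirk(...)	__arm_smccc_hvc(__VA_ARGS__)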
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index cb87234cfcf2..827d52d78b67 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
/*
@@ -603,9 +603,9 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
*/
static void __init of_parse_and_init_cpus(void)
{
- struct device_node *dn = NULL;
+ struct device_node *dn;
- while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ for_each_node_by_type(dn, "cpu") {
u64 hwid = of_get_cpu_mpidr(dn);
if (hwid == INVALID_HWID)
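mmgrab() is assumed to be the new spelling of the open-coded mm_count increment, added so callers stop touching the counter directly:

	/* Assumed helper; equivalent to the removed atomic_inc(). */
	static inline void mmgrab(struct mm_struct *mm)
	{
		atomic_inc(&mm->mm_count);
	}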
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 9a00eee9acc8..93034651c87e 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -21,6 +21,7 @@
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
@@ -98,7 +99,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
- writeq_relaxed(__pa(secondary_holding_pen), release_addr);
+ writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
__flush_dcache_area((__force void *)release_addr,
sizeof(*release_addr));
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 23e9e13bd2aa..565dd69888cc 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,6 +11,7 @@
* for more details.
*/
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -40,7 +41,6 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
-#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -97,7 +97,6 @@ static int register_cpu_capacity_sysctl(void)
return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
-#endif
static u32 capacity_scale;
static u32 *raw_capacity;
@@ -209,7 +208,12 @@ static struct notifier_block init_cpu_capacity_notifier = {
static int __init register_cpufreq_notifier(void)
{
- if (cap_parsing_failed)
+ /*
+ * on ACPI-based systems we need to use the default cpu capacity
+ * until we have the necessary code to parse the cpu capacity, so
+ * skip registering cpufreq notifier.
+ */
+ if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;
if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 659b2e6b6cf7..7d47c2cdfd93 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -466,7 +466,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
- address = (rt == 31) ? 0 : regs->regs[rt];
+ address = pt_regs_read_reg(regs, rt);
switch (crm) {
case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */
@@ -495,8 +495,10 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+
+ pt_regs_write_reg(regs, rt, val);
- regs->regs[rt] = arm64_ftr_reg_ctrel0.sys_val;
regs->pc += 4;
}
@@ -531,7 +533,12 @@ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
return;
}
- force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ /*
+ * New SYS instructions may previously have been undefined at EL0. Fall
+ * back to our usual undefined instruction handler so that we handle
+ * these consistently.
+ */
+ do_undefinstr(regs);
}
long compat_arm_syscall(struct pt_regs *regs);
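ctr_read_handler() now reports the user-visible CTR_EL0 value built from the visibility masks in cpufeature.c rather than the raw system-wide value. A hedged EL0 sketch of the trapped read:

	static inline unsigned long read_ctr_el0(void)
	{
		unsigned long ctr;

		/* Traps when SCTLR_EL1.UCT is clear; the kernel then supplies
		 * arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0). */
		asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
		return ctr;
	}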
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a2c2478e7d78..41b6e31f8f55 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -123,6 +123,7 @@ static int __init vdso_init(void)
{
int i;
struct page **vdso_pagelist;
+ unsigned long pfn;
if (memcmp(&vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
@@ -140,11 +141,14 @@ static int __init vdso_init(void)
return -ENOMEM;
/* Grab the vDSO data page. */
- vdso_pagelist[0] = pfn_to_page(PHYS_PFN(__pa(vdso_data)));
+ vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));
+
/* Grab the vDSO code pages. */
+ pfn = sym_to_pfn(&vdso_start);
+
for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = pfn_to_page(PHYS_PFN(__pa(&vdso_start)) + i);
+ vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
vdso_spec[0].pages = &vdso_pagelist[0];
vdso_spec[1].pages = &vdso_pagelist[1];