Diffstat (limited to 'arch/arm64/include')
25 files changed, 260 insertions(+), 355 deletions(-)
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index c39f2437e08e..980d1dd8e1a3 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -2,12 +2,27 @@ #ifndef __ASM_ASM_EXTABLE_H #define __ASM_ASM_EXTABLE_H +#include <linux/bits.h> +#include <asm/gpr-num.h> + #define EX_TYPE_NONE 0 -#define EX_TYPE_FIXUP 1 -#define EX_TYPE_BPF 2 -#define EX_TYPE_UACCESS_ERR_ZERO 3 +#define EX_TYPE_BPF 1 +#define EX_TYPE_UACCESS_ERR_ZERO 2 +#define EX_TYPE_KACCESS_ERR_ZERO 3 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 +/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */ +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(4, 0) +#define EX_DATA_REG_ZERO_SHIFT 5 +#define EX_DATA_REG_ZERO GENMASK(9, 5) + +/* Data fields for EX_TYPE_LOAD_UNALIGNED_ZEROPAD */ +#define EX_DATA_REG_DATA_SHIFT 0 +#define EX_DATA_REG_DATA GENMASK(4, 0) +#define EX_DATA_REG_ADDR_SHIFT 5 +#define EX_DATA_REG_ADDR GENMASK(9, 5) + #ifdef __ASSEMBLY__ #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ @@ -19,31 +34,45 @@ .short (data); \ .popsection; +#define EX_DATA_REG(reg, gpr) \ + (.L__gpr_num_##gpr << EX_DATA_REG_##reg##_SHIFT) + +#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ + __ASM_EXTABLE_RAW(insn, fixup, \ + EX_TYPE_UACCESS_ERR_ZERO, \ + ( \ + EX_DATA_REG(ERR, err) | \ + EX_DATA_REG(ZERO, zero) \ + )) + +#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr) + +#define _ASM_EXTABLE_UACCESS(insn, fixup) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) + /* - * Create an exception table entry for `insn`, which will branch to `fixup` + * Create an exception table entry for uaccess `insn`, which will branch to `fixup` * when an unhandled fault is taken. */ - .macro _asm_extable, insn, fixup - __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) + .macro _asm_extable_uaccess, insn, fixup + _ASM_EXTABLE_UACCESS(\insn, \fixup) .endm /* * Create an exception table entry for `insn` if `fixup` is provided. Otherwise * do nothing. 
*/ - .macro _cond_extable, insn, fixup - .ifnc \fixup, - _asm_extable \insn, \fixup + .macro _cond_uaccess_extable, insn, fixup + .ifnc \fixup, + _asm_extable_uaccess \insn, \fixup .endif .endm #else /* __ASSEMBLY__ */ -#include <linux/bits.h> #include <linux/stringify.h> -#include <asm/gpr-num.h> - #define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ ".pushsection __ex_table, \"a\"\n" \ ".align 2\n" \ @@ -53,14 +82,6 @@ ".short (" data ")\n" \ ".popsection\n" -#define _ASM_EXTABLE(insn, fixup) \ - __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0") - -#define EX_DATA_REG_ERR_SHIFT 0 -#define EX_DATA_REG_ERR GENMASK(4, 0) -#define EX_DATA_REG_ZERO_SHIFT 5 -#define EX_DATA_REG_ZERO GENMASK(9, 5) - #define EX_DATA_REG(reg, gpr) \ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" @@ -73,13 +94,23 @@ EX_DATA_REG(ZERO, zero) \ ")") +#define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_KACCESS_ERR_ZERO), \ + "(" \ + EX_DATA_REG(ERR, err) " | " \ + EX_DATA_REG(ZERO, zero) \ + ")") + #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr) -#define EX_DATA_REG_DATA_SHIFT 0 -#define EX_DATA_REG_DATA GENMASK(4, 0) -#define EX_DATA_REG_ADDR_SHIFT 5 -#define EX_DATA_REG_ADDR GENMASK(9, 5) +#define _ASM_EXTABLE_UACCESS(insn, fixup) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) + +#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \ + _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr) #define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \ __DEFINE_ASM_GPR_NUMS \ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 0557af834e03..75b211c98dea 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -61,7 +61,7 @@ alternative_else_nop_endif #define USER(l, x...) \ 9999: x; \ - _asm_extable 9999b, l + _asm_extable_uaccess 9999b, l /* * Generate the assembly for LDTR/STTR with exception table entries. 
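[Editorial note between hunks: the asm-extable.h changes above replace the old generic EX_TYPE_FIXUP entries with typed entries whose 16-bit data field packs two 5-bit GPR numbers (the error register and the zero register) via the EX_DATA_REG_* shifts. As a rough standalone illustration of that packing — not part of the patch, userspace-only, with a hypothetical helper name — the following C sketch mirrors the EX_DATA_REG(ERR, ...) | EX_DATA_REG(ZERO, ...) computation used by _ASM_EXTABLE_UACCESS_ERR_ZERO:

/* illustrative only: mirrors the EX_DATA_REG_{ERR,ZERO} packing above */
#include <stdint.h>
#include <stdio.h>

#define EX_DATA_REG_ERR_SHIFT   0
#define EX_DATA_REG_ZERO_SHIFT  5

/* pack two 5-bit GPR numbers into the 16-bit extable data word */
static uint16_t ex_data_uaccess_err_zero(unsigned int err_gpr, unsigned int zero_gpr)
{
        return (uint16_t)((err_gpr << EX_DATA_REG_ERR_SHIFT) |
                          (zero_gpr << EX_DATA_REG_ZERO_SHIFT));
}

int main(void)
{
        /* err in w0 (GPR 0), zero register wzr (GPR 31), as _ASM_EXTABLE_UACCESS_ERR would emit */
        printf("data = 0x%04x\n", ex_data_uaccess_err_zero(0, 31));
        return 0;
}
]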
@@ -73,8 +73,8 @@ alternative_else_nop_endif 8889: ldtr \reg2, [\addr, #8]; add \addr, \addr, \post_inc; - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; + _asm_extable_uaccess 8888b, \l; + _asm_extable_uaccess 8889b, \l; .endm .macro user_stp l, reg1, reg2, addr, post_inc @@ -82,14 +82,14 @@ alternative_else_nop_endif 8889: sttr \reg2, [\addr, #8]; add \addr, \addr, \post_inc; - _asm_extable 8888b,\l; - _asm_extable 8889b,\l; + _asm_extable_uaccess 8888b,\l; + _asm_extable_uaccess 8889b,\l; .endm .macro user_ldst l, inst, reg, addr, post_inc 8888: \inst \reg, [\addr]; add \addr, \addr, \post_inc; - _asm_extable 8888b,\l; + _asm_extable_uaccess 8888b, \l; .endm #endif diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h index ead62f7dd269..13ecc79854ee 100644 --- a/arch/arm64/include/asm/asm_pointer_auth.h +++ b/arch/arm64/include/asm/asm_pointer_auth.h @@ -59,9 +59,9 @@ alternative_else_nop_endif .macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3 mrs \tmp1, id_aa64isar1_el1 - ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8 + ubfx \tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8 mrs_s \tmp2, SYS_ID_AA64ISAR2_EL1 - ubfx \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4 + ubfx \tmp2, \tmp2, #ID_AA64ISAR2_EL1_APA3_SHIFT, #4 orr \tmp1, \tmp1, \tmp2 cbz \tmp1, .Lno_addr_auth\@ mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 8c5a61aeaf8e..5846145be523 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -360,6 +360,20 @@ alternative_cb_end .endm /* + * idmap_get_t0sz - get the T0SZ value needed to cover the ID map + * + * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the + * entire ID map region can be mapped. As T0SZ == (64 - #bits used), + * this number conveniently equals the number of leading zeroes in + * the physical address of _end. 
+ */ + .macro idmap_get_t0sz, reg + adrp \reg, _end + orr \reg, \reg, #(1 << VA_BITS_MIN) - 1 + clz \reg, \reg + .endm + +/* * tcr_compute_pa_size - set TCR.(I)PS to the highest supported * ID_AA64MMFR0_EL1.PARange value * @@ -423,7 +437,7 @@ alternative_endif b.lo .Ldcache_op\@ dsb \domain - _cond_extable .Ldcache_op\@, \fixup + _cond_uaccess_extable .Ldcache_op\@, \fixup .endm /* @@ -462,7 +476,19 @@ alternative_endif dsb ish isb - _cond_extable .Licache_op\@, \fixup + _cond_uaccess_extable .Licache_op\@, \fixup + .endm + +/* + * load_ttbr1 - install @pgtbl as a TTBR1 page table + * pgtbl preserved + * tmp1/tmp2 clobbered, either may overlap with pgtbl + */ + .macro load_ttbr1, pgtbl, tmp1, tmp2 + phys_to_ttbr \tmp1, \pgtbl + offset_ttbr1 \tmp1, \tmp2 + msr ttbr1_el1, \tmp1 + isb .endm /* @@ -478,10 +504,7 @@ alternative_endif isb tlbi vmalle1 dsb nsh - phys_to_ttbr \tmp, \page_table - offset_ttbr1 \tmp, \tmp2 - msr ttbr1_el1, \tmp - isb + load_ttbr1 \page_table, \tmp, \tmp2 .endm /* diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 9f3e2c3d2ca0..2cfc4245d2e2 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -50,13 +50,13 @@ #define pmr_sync() do {} while (0) #endif -#define mb() dsb(sy) -#define rmb() dsb(ld) -#define wmb() dsb(st) +#define __mb() dsb(sy) +#define __rmb() dsb(ld) +#define __wmb() dsb(st) -#define dma_mb() dmb(osh) -#define dma_rmb() dmb(oshld) -#define dma_wmb() dmb(oshst) +#define __dma_mb() dmb(osh) +#define __dma_rmb() dmb(oshld) +#define __dma_wmb() dmb(oshst) #define io_stop_wc() dgh() diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 7c2181c72116..ca9b487112cc 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -5,34 +5,9 @@ #ifndef __ASM_CACHE_H #define __ASM_CACHE_H -#include <asm/cputype.h> -#include <asm/mte-def.h> - -#define CTR_L1IP_SHIFT 14 -#define CTR_L1IP_MASK 3 -#define CTR_DMINLINE_SHIFT 16 -#define CTR_IMINLINE_SHIFT 0 -#define CTR_IMINLINE_MASK 0xf -#define CTR_ERG_SHIFT 20 -#define CTR_CWG_SHIFT 24 -#define CTR_CWG_MASK 15 -#define CTR_IDC_SHIFT 28 -#define CTR_DIC_SHIFT 29 - -#define CTR_CACHE_MINLINE_MASK \ - (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT) - -#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK) - -#define ICACHE_POLICY_VPIPT 0 -#define ICACHE_POLICY_RESERVED 1 -#define ICACHE_POLICY_VIPT 2 -#define ICACHE_POLICY_PIPT 3 - #define L1_CACHE_SHIFT (6) #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - #define CLIDR_LOUU_SHIFT 27 #define CLIDR_LOC_SHIFT 24 #define CLIDR_LOUIS_SHIFT 21 @@ -55,6 +30,10 @@ #include <linux/bitops.h> #include <linux/kasan-enabled.h> +#include <asm/cputype.h> +#include <asm/mte-def.h> +#include <asm/sysreg.h> + #ifdef CONFIG_KASAN_SW_TAGS #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT) #elif defined(CONFIG_KASAN_HW_TAGS) @@ -66,6 +45,12 @@ static inline unsigned int arch_slab_minalign(void) #define arch_slab_minalign() arch_slab_minalign() #endif +#define CTR_CACHE_MINLINE_MASK \ + (0xf << CTR_EL0_DMINLINE_SHIFT | \ + CTR_EL0_IMINLINE_MASK << CTR_EL0_IMINLINE_SHIFT) + +#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr) + #define ICACHEF_ALIASING 0 #define ICACHEF_VPIPT 1 extern unsigned long __icache_flags; @@ -86,7 +71,7 @@ static __always_inline int icache_is_vpipt(void) static inline u32 cache_type_cwg(void) { - return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; + return (read_cpuid_cachetype() >> 
CTR_EL0_CWG_SHIFT) & CTR_EL0_CWG_MASK; } #define __read_mostly __section(".data..read_mostly") @@ -120,12 +105,12 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void) { u32 ctr = read_cpuid_cachetype(); - if (!(ctr & BIT(CTR_IDC_SHIFT))) { + if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) { u64 clidr = read_sysreg(clidr_el1); if (CLIDR_LOC(clidr) == 0 || (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0)) - ctr |= BIT(CTR_IDC_SHIFT); + ctr |= BIT(CTR_EL0_IDC_SHIFT); } return ctr; diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 5a228e203ef9..37185e978aeb 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -105,13 +105,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) #define flush_icache_range flush_icache_range /* - * Cache maintenance functions used by the DMA API. No to be used directly. - */ -extern void __dma_map_area(const void *, size_t, int); -extern void __dma_unmap_area(const void *, size_t, int); -extern void __dma_flush_area(const void *, size_t); - -/* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 115cdec1ae87..fd7a92219eea 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -46,6 +46,7 @@ struct cpuinfo_arm64 { u64 reg_midr; u64 reg_revidr; u64 reg_gmid; + u64 reg_smidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index e95c4df83911..a444c8915e88 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -31,11 +31,6 @@ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the * cpu being killed. * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. - * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for - * a proposed logical id. - * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing - * to wrong parameters or error conditions. Called from the - * CPU being suspended. Must be called with IRQs disabled. 
*/ struct cpu_operations { const char *name; @@ -49,10 +44,6 @@ struct cpu_operations { void (*cpu_die)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu); #endif -#ifdef CONFIG_CPU_IDLE - int (*cpu_init_idle)(unsigned int); - int (*cpu_suspend)(unsigned long); -#endif }; int __init init_cpu_ops(int cpu); diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 14a8f3d93add..fd7d75a275f6 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -11,7 +11,7 @@ #include <asm/hwcap.h> #include <asm/sysreg.h> -#define MAX_CPU_FEATURES 64 +#define MAX_CPU_FEATURES 128 #define cpu_feature(x) KERNEL_HWCAP_ ## x #ifndef __ASSEMBLY__ @@ -673,7 +673,7 @@ static inline bool supports_clearbhb(int scope) isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); return cpuid_feature_extract_unsigned_field(isar2, - ID_AA64ISAR2_CLEARBHB_SHIFT); + ID_AA64ISAR2_EL1_BC_SHIFT); } const struct cpumask *system_32bit_el0_cpumask(void); @@ -908,7 +908,10 @@ static inline unsigned int get_vmid_bits(u64 mmfr1) } extern struct arm64_ftr_override id_aa64mmfr1_override; +extern struct arm64_ftr_override id_aa64pfr0_override; extern struct arm64_ftr_override id_aa64pfr1_override; +extern struct arm64_ftr_override id_aa64zfr0_override; +extern struct arm64_ftr_override id_aa64smfr0_override; extern struct arm64_ftr_override id_aa64isar1_override; extern struct arm64_ftr_override id_aa64isar2_override; diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 14a19d1141bd..2047713e097d 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -4,21 +4,6 @@ #include <asm/proc-fns.h> -#ifdef CONFIG_CPU_IDLE -extern int arm_cpuidle_init(unsigned int cpu); -extern int arm_cpuidle_suspend(int index); -#else -static inline int arm_cpuidle_init(unsigned int cpu) -{ - return -EOPNOTSUPP; -} - -static inline int arm_cpuidle_suspend(int index) -{ - return -EOPNOTSUPP; -} -#endif - #ifdef CONFIG_ARM64_PSEUDO_NMI #include <asm/arch_gicv3.h> diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 34ceff08cac4..2630faa5bc08 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -129,64 +129,6 @@ msr cptr_el2, x0 // Disable copro. traps to EL2 .endm -/* SVE register access */ -.macro __init_el2_nvhe_sve - mrs x1, id_aa64pfr0_el1 - ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4 - cbz x1, .Lskip_sve_\@ - - bic x0, x0, #CPTR_EL2_TZ // Also disable SVE traps - msr cptr_el2, x0 // Disable copro. traps to EL2 - isb - mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector - msr_s SYS_ZCR_EL2, x1 // length for EL1. -.Lskip_sve_\@: -.endm - -/* SME register access and priority mapping */ -.macro __init_el2_nvhe_sme - mrs x1, id_aa64pfr1_el1 - ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4 - cbz x1, .Lskip_sme_\@ - - bic x0, x0, #CPTR_EL2_TSM // Also disable SME traps - msr cptr_el2, x0 // Disable copro. traps to EL2 - isb - - mrs x1, sctlr_el2 - orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps - msr sctlr_el2, x1 - isb - - mov x1, #0 // SMCR controls - - mrs_s x2, SYS_ID_AA64SMFR0_EL1 - ubfx x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM? - cbz x2, .Lskip_sme_fa64_\@ - - orr x1, x1, SMCR_ELx_FA64_MASK -.Lskip_sme_fa64_\@: - - orr x1, x1, #SMCR_ELx_LEN_MASK // Enable full SME vector - msr_s SYS_SMCR_EL2, x1 // length for EL1. - - mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported? 
- ubfx x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1 - cbz x1, .Lskip_sme_\@ - - msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal - - mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present? - ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4 - cbz x1, .Lskip_sme_\@ - - mrs_s x1, SYS_HCRX_EL2 - orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping - msr_s SYS_HCRX_EL2, x1 - -.Lskip_sme_\@: -.endm - /* Disable any fine grained traps */ .macro __init_el2_fgt mrs x1, id_aa64mmfr0_el1 @@ -250,8 +192,6 @@ __init_el2_hstr __init_el2_nvhe_idregs __init_el2_nvhe_cptr - __init_el2_nvhe_sve - __init_el2_nvhe_sme __init_el2_fgt __init_el2_nvhe_prepare_eret .endm diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index daff882883f9..71ed5fdf718b 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -62,10 +62,12 @@ enum fixed_addresses { #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +#ifdef CONFIG_RELOCATABLE + FIX_ENTRY_TRAMP_TEXT4, /* one extra slot for the data page */ +#endif FIX_ENTRY_TRAMP_TEXT3, FIX_ENTRY_TRAMP_TEXT2, FIX_ENTRY_TRAMP_TEXT1, - FIX_ENTRY_TRAMP_DATA, #define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1)) #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ __end_of_permanent_fixed_addresses, diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index aa443d8f8cfb..cef4ae7a3d8b 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -85,7 +85,7 @@ #define KERNEL_HWCAP_PACA __khwcap_feature(PACA) #define KERNEL_HWCAP_PACG __khwcap_feature(PACG) -#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 32) +#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 64) #define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP) #define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2) #define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES) @@ -118,6 +118,7 @@ #define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32) #define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64) #define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT) +#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16) /* * This yields a mask that user programs can use to figure out what diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 3995652daf81..87dd42d74afe 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -163,13 +163,16 @@ extern void __memset_io(volatile void __iomem *, int, size_t); /* * I/O memory mapping functions. 
*/ -extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); -extern void iounmap(volatile void __iomem *addr); -extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); -#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) -#define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE)) +bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot); +#define ioremap_allowed ioremap_allowed + +#define _PAGE_IOREMAP PROT_DEVICE_nGnRE + +#define ioremap_wc(addr, size) \ + ioremap_prot((addr), (size), PROT_NORMAL_NC) +#define ioremap_np(addr, size) \ + ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE) /* * io{read,write}{16,32,64}be() macros @@ -184,6 +187,15 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #include <asm-generic/io.h> +#define ioremap_cache ioremap_cache +static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size) +{ + if (pfn_is_map_memory(__phys_to_pfn(addr))) + return (void __iomem *)__phys_to_virt(addr); + + return ioremap_prot(addr, size, PROT_NORMAL); +} + /* * More restrictive address range checking than the default implementation * (PHYS_OFFSET and PHYS_MASK taken into account). diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 96dc0f7da258..02e59fa8f293 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h @@ -8,6 +8,7 @@ #ifndef __ASM_KERNEL_PGTABLE_H #define __ASM_KERNEL_PGTABLE_H +#include <asm/boot.h> #include <asm/pgtable-hwdef.h> #include <asm/sparsemem.h> @@ -35,10 +36,8 @@ */ #if ARM64_KERNEL_USES_PMD_MAPS #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1) -#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1) #else #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS) -#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT)) #endif @@ -87,7 +86,14 @@ + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \ + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */ #define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end)) -#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE) + +/* the initial ID map may need two extra pages if it needs to be extended */ +#if VA_BITS < 48 +#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE) +#else +#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE) +#endif +#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE) /* Initial memory map size */ #if ARM64_KERNEL_USES_PMD_MAPS @@ -107,9 +113,11 @@ #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) #if ARM64_KERNEL_USES_PMD_MAPS -#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) +#define SWAPPER_RW_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) +#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY) #else -#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) +#define SWAPPER_RW_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS) +#define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY) #endif /* diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 0af70d9abede..227d256cd4b9 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -174,7 +174,11 
@@ #include <linux/types.h> #include <asm/bug.h> +#if VA_BITS > 48 extern u64 vabits_actual; +#else +#define vabits_actual ((u64)VA_BITS) +#endif extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. */ @@ -351,6 +355,11 @@ static inline void *phys_to_virt(phys_addr_t x) }) void dump_mem_limit(void); + +static inline bool defer_reserve_crashkernel(void) +{ + return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32); +} #endif /* !ASSEMBLY */ /* diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 6770667b34a3..c7ccd82db1d2 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -60,8 +60,7 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in * physical memory, in which case it will be smaller. */ -extern u64 idmap_t0sz; -extern u64 idmap_ptrs_per_pgd; +extern int idmap_t0sz; /* * Ensure TCR.T0SZ is set to the provided value. @@ -106,13 +105,18 @@ static inline void cpu_uninstall_idmap(void) cpu_switch_mm(mm->pgd, mm); } -static inline void cpu_install_idmap(void) +static inline void __cpu_install_idmap(pgd_t *idmap) { cpu_set_reserved_ttbr0(); local_flush_tlb_all(); cpu_set_idmap_tcr_t0sz(); - cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm); + cpu_switch_mm(lm_alias(idmap), &init_mm); +} + +static inline void cpu_install_idmap(void) +{ + __cpu_install_idmap(idmap_pg_dir); } /* @@ -143,7 +147,7 @@ static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz) * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD, * avoiding the possibility of conflicting TLB entries being allocated. */ -static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp) +static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap) { typedef void (ttbr_replace_func)(phys_addr_t); extern ttbr_replace_func idmap_cpu_replace_ttbr1; @@ -166,7 +170,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp) replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1)); - cpu_install_idmap(); + __cpu_install_idmap(idmap); replace_phys(ttbr1); cpu_uninstall_idmap(); } diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index dd3d12bce07b..5ab8d163198f 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -281,10 +281,9 @@ */ #ifdef CONFIG_ARM64_PA_BITS_52 /* - * This should be GENMASK_ULL(47, 2). * TTBR_ELx[1] is RES0 in this configuration. */ -#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) +#define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2) #endif #ifdef CONFIG_ARM64_VA_BITS_52 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0b6632f18364..b5df82aa99e6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -45,6 +45,12 @@ __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +static inline bool arch_thp_swp_supported(void) +{ + return !system_supports_mte(); +} +#define arch_thp_swp_supported arch_thp_swp_supported + /* * Outside of a few very special situations (e.g. 
hibernation), we always * use broadcast TLB invalidation instructions, therefore a spurious page @@ -427,6 +433,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); } +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/linux/pgtable.h diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 9e58749db21d..86eb0bfe3b38 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -272,8 +272,9 @@ void tls_preserve_current_state(void); static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) { + s32 previous_syscall = regs->syscallno; memset(regs, 0, sizeof(*regs)); - forget_syscall(regs); + regs->syscallno = previous_syscall; regs->pc = pc; if (system_uses_irq_prio_masking()) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 42ff95dba6da..7c71358d44c4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -192,8 +192,6 @@ #define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) #define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) -#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) -#define SYS_ID_AA64SMFR0_EL1 sys_reg(3, 0, 0, 4, 5) #define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) #define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) @@ -201,9 +199,6 @@ #define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) #define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) -#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) -#define SYS_ID_AA64ISAR2_EL1 sys_reg(3, 0, 0, 6, 2) - #define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) #define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) #define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) @@ -410,12 +405,6 @@ #define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) -#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0) -#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1) -#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2) -#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3) -#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7) - #define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) #define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1) @@ -454,16 +443,12 @@ #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) #define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) -#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) #define SMIDR_EL1_IMPLEMENTER_SHIFT 24 #define SMIDR_EL1_SMPS_SHIFT 15 #define SMIDR_EL1_AFFINITY_SHIFT 0 -#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) -#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) - #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) @@ -704,66 +689,6 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64isar1 */ -#define ID_AA64ISAR1_I8MM_SHIFT 52 -#define ID_AA64ISAR1_DGH_SHIFT 48 -#define ID_AA64ISAR1_BF16_SHIFT 44 -#define ID_AA64ISAR1_SPECRES_SHIFT 40 -#define ID_AA64ISAR1_SB_SHIFT 36 -#define ID_AA64ISAR1_FRINTTS_SHIFT 32 -#define ID_AA64ISAR1_GPI_SHIFT 28 -#define ID_AA64ISAR1_GPA_SHIFT 24 -#define ID_AA64ISAR1_LRCPC_SHIFT 20 -#define ID_AA64ISAR1_FCMA_SHIFT 16 -#define ID_AA64ISAR1_JSCVT_SHIFT 12 -#define ID_AA64ISAR1_API_SHIFT 8 -#define ID_AA64ISAR1_APA_SHIFT 4 -#define ID_AA64ISAR1_DPB_SHIFT 0 - -#define 
ID_AA64ISAR1_APA_NI 0x0 -#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 -#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_API_NI 0x0 -#define ID_AA64ISAR1_API_IMP_DEF 0x1 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_GPA_NI 0x0 -#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_GPI_NI 0x0 -#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 - -/* id_aa64isar2 */ -#define ID_AA64ISAR2_CLEARBHB_SHIFT 28 -#define ID_AA64ISAR2_APA3_SHIFT 12 -#define ID_AA64ISAR2_GPA3_SHIFT 8 -#define ID_AA64ISAR2_RPRES_SHIFT 4 -#define ID_AA64ISAR2_WFXT_SHIFT 0 - -#define ID_AA64ISAR2_RPRES_8BIT 0x0 -#define ID_AA64ISAR2_RPRES_12BIT 0x1 -/* - * Value 0x1 has been removed from the architecture, and is - * reserved, but has not yet been removed from the ARM ARM - * as of ARM DDI 0487G.b. - */ -#define ID_AA64ISAR2_WFXT_NI 0x0 -#define ID_AA64ISAR2_WFXT_SUPPORTED 0x2 - -#define ID_AA64ISAR2_APA3_NI 0x0 -#define ID_AA64ISAR2_APA3_ARCHITECTED 0x1 -#define ID_AA64ISAR2_APA3_ARCH_EPAC 0x2 -#define ID_AA64ISAR2_APA3_ARCH_EPAC2 0x3 -#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC 0x4 -#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC_CMB 0x5 - -#define ID_AA64ISAR2_GPA3_NI 0x0 -#define ID_AA64ISAR2_GPA3_ARCHITECTED 0x1 - /* id_aa64pfr0 */ #define ID_AA64PFR0_CSV3_SHIFT 60 #define ID_AA64PFR0_CSV2_SHIFT 56 @@ -811,45 +736,6 @@ #define ID_AA64PFR1_MTE 0x2 #define ID_AA64PFR1_MTE_ASYMM 0x3 -/* id_aa64zfr0 */ -#define ID_AA64ZFR0_F64MM_SHIFT 56 -#define ID_AA64ZFR0_F32MM_SHIFT 52 -#define ID_AA64ZFR0_I8MM_SHIFT 44 -#define ID_AA64ZFR0_SM4_SHIFT 40 -#define ID_AA64ZFR0_SHA3_SHIFT 32 -#define ID_AA64ZFR0_BF16_SHIFT 20 -#define ID_AA64ZFR0_BITPERM_SHIFT 16 -#define ID_AA64ZFR0_AES_SHIFT 4 -#define ID_AA64ZFR0_SVEVER_SHIFT 0 - -#define ID_AA64ZFR0_F64MM 0x1 -#define ID_AA64ZFR0_F32MM 0x1 -#define ID_AA64ZFR0_I8MM 0x1 -#define ID_AA64ZFR0_BF16 0x1 -#define ID_AA64ZFR0_SM4 0x1 -#define ID_AA64ZFR0_SHA3 0x1 -#define ID_AA64ZFR0_BITPERM 0x1 -#define ID_AA64ZFR0_AES 0x1 -#define ID_AA64ZFR0_AES_PMULL 0x2 -#define ID_AA64ZFR0_SVEVER_SVE2 0x1 - -/* id_aa64smfr0 */ -#define ID_AA64SMFR0_FA64_SHIFT 63 -#define ID_AA64SMFR0_I16I64_SHIFT 52 -#define ID_AA64SMFR0_F64F64_SHIFT 48 -#define ID_AA64SMFR0_I8I32_SHIFT 36 -#define ID_AA64SMFR0_F16F32_SHIFT 35 -#define ID_AA64SMFR0_B16F32_SHIFT 34 -#define ID_AA64SMFR0_F32F32_SHIFT 32 - -#define ID_AA64SMFR0_FA64 0x1 -#define ID_AA64SMFR0_I16I64 0xf -#define ID_AA64SMFR0_F64F64 0x1 -#define ID_AA64SMFR0_I8I32 0xf -#define ID_AA64SMFR0_F16F32 0x1 -#define ID_AA64SMFR0_B16F32 0x1 -#define ID_AA64SMFR0_F32F32 0x1 - /* id_aa64mmfr0 */ #define ID_AA64MMFR0_ECV_SHIFT 60 #define ID_AA64MMFR0_FGT_SHIFT 56 @@ -902,6 +788,7 @@ /* id_aa64mmfr1 */ #define ID_AA64MMFR1_ECBHB_SHIFT 60 +#define ID_AA64MMFR1_TIDCP1_SHIFT 52 #define ID_AA64MMFR1_HCX_SHIFT 40 #define ID_AA64MMFR1_AFP_SHIFT 44 #define ID_AA64MMFR1_ETS_SHIFT 36 @@ -918,6 +805,9 @@ #define ID_AA64MMFR1_VMIDBITS_8 0 #define ID_AA64MMFR1_VMIDBITS_16 2 +#define ID_AA64MMFR1_TIDCP1_NI 0 +#define ID_AA64MMFR1_TIDCP1_IMP 1 + /* id_aa64mmfr2 */ #define ID_AA64MMFR2_E0PD_SHIFT 60 #define ID_AA64MMFR2_EVT_SHIFT 56 @@ -1084,9 +974,6 @@ #define MVFR2_FPMISC_SHIFT 4 #define MVFR2_SIMDMISC_SHIFT 0 -#define DCZID_DZP_SHIFT 4 -#define DCZID_BS_SHIFT 0 - #define 
CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ #define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ @@ -1121,8 +1008,8 @@ #define SYS_RGSR_EL1_SEED_MASK 0xffffUL /* GMID_EL1 field definitions */ -#define SYS_GMID_EL1_BS_SHIFT 0 -#define SYS_GMID_EL1_BS_SIZE 4 +#define GMID_EL1_BS_SHIFT 0 +#define GMID_EL1_BS_SIZE 4 /* TFSR{,E0}_EL1 bit definitions */ #define SYS_TFSR_EL1_TF0_SHIFT 0 @@ -1324,6 +1211,9 @@ #endif +#define SYS_FIELD_GET(reg, field, val) \ + FIELD_GET(reg##_##field##_MASK, val) + #define SYS_FIELD_PREP(reg, field, val) \ FIELD_PREP(reg##_##field##_MASK, val) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 63f9c828f1a7..2fc9f0861769 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -232,34 +232,34 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) * The "__xxx_error" versions set the third argument to -EFAULT if an error * occurs, and leave it unchanged on success. */ -#define __get_mem_asm(load, reg, x, addr, err) \ +#define __get_mem_asm(load, reg, x, addr, err, type) \ asm volatile( \ "1: " load " " reg "1, [%2]\n" \ "2:\n" \ - _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \ + _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \ : "+r" (err), "=&r" (x) \ : "r" (addr)) -#define __raw_get_mem(ldr, x, ptr, err) \ -do { \ - unsigned long __gu_val; \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \ - break; \ - case 2: \ - __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \ - break; \ - case 4: \ - __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \ - break; \ - case 8: \ - __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ +#define __raw_get_mem(ldr, x, ptr, err, type) \ +do { \ + unsigned long __gu_val; \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \ + break; \ + case 2: \ + __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \ + break; \ + case 4: \ + __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \ + break; \ + case 8: \ + __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) /* @@ -274,7 +274,7 @@ do { \ __chk_user_ptr(ptr); \ \ uaccess_ttbr0_enable(); \ - __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \ + __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \ uaccess_ttbr0_disable(); \ \ (x) = __rgu_val; \ @@ -314,40 +314,40 @@ do { \ \ __uaccess_enable_tco_async(); \ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \ - (__force type *)(__gkn_src), __gkn_err); \ + (__force type *)(__gkn_src), __gkn_err, K); \ __uaccess_disable_tco_async(); \ \ if (unlikely(__gkn_err)) \ goto err_label; \ } while (0) -#define __put_mem_asm(store, reg, x, addr, err) \ +#define __put_mem_asm(store, reg, x, addr, err, type) \ asm volatile( \ "1: " store " " reg "1, [%2]\n" \ "2:\n" \ - _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \ + _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \ : "+r" (err) \ : "r" (x), "r" (addr)) -#define __raw_put_mem(str, x, ptr, err) \ -do { \ - __typeof__(*(ptr)) __pu_val = (x); \ - switch (sizeof(*(ptr))) { \ - case 1: \ - __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \ - break; \ - case 2: \ - __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \ - break; \ - case 4: \ - __put_mem_asm(str, "%w", 
__pu_val, (ptr), (err)); \ - break; \ - case 8: \ - __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \ - break; \ - default: \ - BUILD_BUG(); \ - } \ +#define __raw_put_mem(str, x, ptr, err, type) \ +do { \ + __typeof__(*(ptr)) __pu_val = (x); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \ + break; \ + case 2: \ + __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \ + break; \ + case 4: \ + __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \ + break; \ + case 8: \ + __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ } while (0) /* @@ -362,7 +362,7 @@ do { \ __chk_user_ptr(__rpu_ptr); \ \ uaccess_ttbr0_enable(); \ - __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \ + __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \ uaccess_ttbr0_disable(); \ } while (0) @@ -400,7 +400,7 @@ do { \ \ __uaccess_enable_tco_async(); \ __raw_put_mem("str", *((type *)(__pkn_src)), \ - (__force type *)(__pkn_dst), __pkn_err); \ + (__force type *)(__pkn_dst), __pkn_err, K); \ __uaccess_disable_tco_async(); \ \ if (unlikely(__pkn_err)) \ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 0e80db4327b6..4eb601e7de50 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -36,9 +36,9 @@ #define HVC_RESET_VECTORS 2 /* - * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible + * HVC_FINALISE_EL2 - Upgrade the CPU from EL1 to EL2, if possible */ -#define HVC_VHE_RESTART 3 +#define HVC_FINALISE_EL2 3 /* Max number of HYP stub hypercalls */ #define HVC_STUB_HCALL_NR 4 @@ -49,6 +49,13 @@ #define BOOT_CPU_MODE_EL1 (0xe11) #define BOOT_CPU_MODE_EL2 (0xe12) +/* + * Flags returned together with the boot mode, but not preserved in + * __boot_cpu_mode. Used by the idreg override code to work out the + * boot state. + */ +#define BOOT_CPU_FLAG_E2H BIT_ULL(32) + #ifndef __ASSEMBLY__ #include <asm/ptrace.h> diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h index 4bb2cc8ac446..1ad2568a2569 100644 --- a/arch/arm64/include/uapi/asm/hwcap.h +++ b/arch/arm64/include/uapi/asm/hwcap.h @@ -19,6 +19,9 @@ /* * HWCAP flags - for AT_HWCAP + * + * Bits 62 and 63 are reserved for use by libc. + * Bits 32-61 are unallocated for potential use by libc. */ #define HWCAP_FP (1 << 0) #define HWCAP_ASIMD (1 << 1) @@ -88,5 +91,6 @@ #define HWCAP2_SME_F32F32 (1 << 29) #define HWCAP2_SME_FA64 (1 << 30) #define HWCAP2_WFXT (1UL << 31) +#define HWCAP2_EBF16 (1UL << 32) #endif /* _UAPI__ASM_HWCAP_H */ |