Diffstat (limited to 'arch/arm64/include/asm')
35 files changed, 392 insertions, 325 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 3a9b84d39d71..6cd5d77b6b44 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += preempt.h
 generic-y += qrwlock.h
+generic-y += qspinlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0db62a4cbce2..709208dfdc8b 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,10 +12,12 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
+#include <linux/efi.h>
 #include <linux/memblock.h>
 #include <linux/psci.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -29,18 +31,22 @@
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+
 /* ACPI table mapping after acpi_permanent_mmap is set */
 static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
+	/* For normal memory we already have a cacheable mapping. */
+	if (memblock_is_map_memory(phys))
+		return (void __iomem *)__phys_to_virt(phys);
+
 	/*
-	 * EFI's reserve_regions() call adds memory with the WB attribute
-	 * to memblock via early_init_dt_add_memory_arch().
+	 * We should still honor the memory's attribute here because
+	 * crash dump kernel possibly excludes some ACPI (reclaim)
+	 * regions from memblock list.
 	 */
-	if (!memblock_is_memory(phys))
-		return ioremap(phys, size);
-
-	return ioremap_cache(phys, size);
+	return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
 }
 #define acpi_os_ioremap acpi_os_ioremap
@@ -129,15 +135,20 @@ static inline const char *acpi_get_enable_method(int cpu)
  * for compatibility.
  */
 #define acpi_disable_cmcff 1
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+	return __acpi_get_mem_attribute(addr);
+}
 #endif /* CONFIG_ACPI_APEI */
 #ifdef CONFIG_ACPI_NUMA
 int arm64_acpi_numa_init(void);
-int acpi_numa_get_nid(unsigned int cpu, u64 hwid);
+int acpi_numa_get_nid(unsigned int cpu);
+void acpi_map_cpus_to_nodes(void);
 #else
 static inline int arm64_acpi_numa_init(void) { return -ENOSYS; }
-static inline int acpi_numa_get_nid(unsigned int cpu, u64 hwid) { return NUMA_NO_NODE; }
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+static inline void acpi_map_cpus_to_nodes(void) { }
 #endif /* CONFIG_ACPI_NUMA */
 #define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index a91933b1e2e6..4b650ec1d7dd 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst);
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
 #define ALTINSTR_ENTRY(feature,cb) \
 " .word 661b - .\n" /* label */ \
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index c0235e0ff849..9bca54dda75c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -40,17 +40,6 @@
 #include <asm/cmpxchg.h>
-#define ___atomic_add_unless(v, a, u, sfx) \
-({ \
-	typeof((v)->counter) c, old; \
-	\
-	c = atomic##sfx##_read(v); \
-	while (c != (u) && \
-	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
-		c = old; \
-	c; \
- })
-
 #define ATOMIC_INIT(i) { (i) }
 #define atomic_read(v) READ_ONCE((v)->counter)
@@ -61,21 +50,11 @@
 #define atomic_add_return_release atomic_add_return_release
 #define atomic_add_return atomic_add_return
-#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
-#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
-#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
 #define atomic_sub_return_acquire atomic_sub_return_acquire
 #define atomic_sub_return_release atomic_sub_return_release
 #define atomic_sub_return atomic_sub_return
-#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
-#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
-#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-
 #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
 #define atomic_fetch_add_acquire atomic_fetch_add_acquire
 #define atomic_fetch_add_release atomic_fetch_add_release
@@ -119,13 +98,6 @@
 	cmpxchg_release(&((v)->counter), (old), (new))
 #define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
-#define atomic_inc(v) atomic_add(1, (v))
-#define atomic_dec(v) atomic_sub(1, (v))
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
-#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
 #define atomic_andnot atomic_andnot
 /*
@@ -140,21 +112,11 @@
 #define atomic64_add_return_release atomic64_add_return_release
 #define atomic64_add_return atomic64_add_return
-#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
-#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
-#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1, (v))
-
 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
 #define atomic64_sub_return_acquire atomic64_sub_return_acquire
 #define atomic64_sub_return_release atomic64_sub_return_release
 #define atomic64_sub_return atomic64_sub_return
-#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
-#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
-#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
-
 #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
 #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
 #define atomic64_fetch_add_release atomic64_fetch_add_release
@@ -195,16 +157,9 @@
 #define atomic64_cmpxchg_release atomic_cmpxchg_release
 #define atomic64_cmpxchg atomic_cmpxchg
-#define atomic64_inc(v) atomic64_add(1, (v))
-#define atomic64_dec(v) atomic64_sub(1, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
-#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
-#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
 #define atomic64_andnot atomic64_andnot
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#define atomic64_dec_if_positive atomic64_dec_if_positive
 #endif
 #endif
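Note: the removed ___atomic_add_unless() wrapper was a plain cmpxchg loop, and the generic helpers that replace it follow the same shape. A minimal user-space C11 sketch of that loop (names hypothetical, not part of the patch): add a to v unless its current value is u, returning the old value.

#include <stdatomic.h>

static int fetch_add_unless_sketch(atomic_int *v, int a, int u)
{
	int c = atomic_load_explicit(v, memory_order_relaxed);

	/* on failure, compare_exchange reloads c with the current value */
	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;

	return c;
}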
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f11518af96a9..822a9192c551 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -128,6 +128,19 @@ do { \
 	__u.__val; \
 })
+#define smp_cond_load_relaxed(ptr, cond_expr) \
+({ \
+	typeof(ptr) __PTR = (ptr); \
+	typeof(*ptr) VAL; \
+	for (;;) { \
+		VAL = READ_ONCE(*__PTR); \
+		if (cond_expr) \
+			break; \
+		__cmpwait_relaxed(__PTR, VAL); \
+	} \
+	VAL; \
+})
+
 #define smp_cond_load_acquire(ptr, cond_expr) \
 ({ \
 	typeof(ptr) __PTR = (ptr); \
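Note: smp_cond_load_relaxed() polls with READ_ONCE() and parks the CPU in WFE via __cmpwait_relaxed() between polls. A hypothetical caller (not from the patch) waiting for a flag to become non-zero:

static inline unsigned long wait_for_flag(unsigned long *flag)
{
	/* VAL is the variable the macro supplies to the condition expression */
	return smp_cond_load_relaxed(flag, VAL != 0);
}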
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 9c19594ce7cb..10d536b1af74 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -17,22 +17,11 @@
 #define __ASM_BITOPS_H
 #include <linux/compiler.h>
-#include <asm/barrier.h>
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
-/*
- * Little endian assembly atomic bitops.
- */
-extern void set_bit(int nr, volatile unsigned long *p);
-extern void clear_bit(int nr, volatile unsigned long *p);
-extern void change_bit(int nr, volatile unsigned long *p);
-extern int test_and_set_bit(int nr, volatile unsigned long *p);
-extern int test_and_clear_bit(int nr, volatile unsigned long *p);
-extern int test_and_change_bit(int nr, volatile unsigned long *p);
-
 #include <asm-generic/bitops/builtin-__ffs.h>
 #include <asm-generic/bitops/builtin-ffs.h>
 #include <asm-generic/bitops/builtin-__fls.h>
@@ -44,15 +33,11 @@ extern int test_and_change_bit(int nr, volatile unsigned long *p);
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/non-atomic.h>
 #include <asm-generic/bitops/le.h>
-
-/*
- * Ext2 is defined to use little-endian byte ordering.
- */
-#define ext2_set_bit_atomic(lock, nr, p) test_and_set_bit_le(nr, p)
-#define ext2_clear_bit_atomic(lock, nr, p) test_and_clear_bit_le(nr, p)
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
 #endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5df5cfe1c143..5ee5bca8c24b 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -21,12 +21,16 @@
 #define CTR_L1IP_SHIFT 14
 #define CTR_L1IP_MASK 3
 #define CTR_DMINLINE_SHIFT 16
+#define CTR_IMINLINE_SHIFT 0
 #define CTR_ERG_SHIFT 20
 #define CTR_CWG_SHIFT 24
 #define CTR_CWG_MASK 15
 #define CTR_IDC_SHIFT 28
 #define CTR_DIC_SHIFT 29
+#define CTR_CACHE_MINLINE_MASK \
+	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
 #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
 #define ICACHE_POLICY_VPIPT 0
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d264a7274811..19844211a4e6 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_CACHEFLUSH_H
 #define __ASM_CACHEFLUSH_H
+#include <linux/kgdb.h>
 #include <linux/mm.h>
 /*
@@ -71,7 +72,7 @@
  * - kaddr - page address
  * - size - region size
  */
-extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void __flush_icache_range(unsigned long start, unsigned long end);
 extern int invalidate_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
@@ -81,6 +82,30 @@ extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+	__flush_icache_range(start, end);
+
+	/*
+	 * IPI all online CPUs so that they undergo a context synchronization
+	 * event and are forced to refetch the new instructions.
+	 */
+#ifdef CONFIG_KGDB
+	/*
+	 * KGDB performs cache maintenance with interrupts disabled, so we
+	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
+	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
+	 * just means that KGDB will elide the maintenance altogether! As it
+	 * turns out, KGDB uses IPIs to round-up the secondary CPUs during
+	 * the patching operation, so we don't need extra IPIs here anyway.
+	 * In which case, add a KGDB-specific bodge and return early.
+	 */
+	if (kgdb_connected && irqs_disabled())
+		return;
+#endif
+	kick_all_cpus_sync();
+}
+
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
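Note: a hypothetical text-patching sequence (a sketch, not the kernel's actual insn-patching helper) showing the contract of the new flush_icache_range(): write the instruction, then flush, and the flush itself now IPIs the other CPUs so they context-synchronise and refetch.

static void patch_u32(u32 *addr, u32 insn)
{
	WRITE_ONCE(*addr, insn);
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(*addr));
}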
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 8a699c708fc9..be3bf3d08916 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -49,7 +49,8 @@
 #define ARM64_HAS_CACHE_DIC 28
 #define ARM64_HW_DBM 29
 #define ARM64_SSBD 30
+#define ARM64_MISMATCHED_CACHE_TYPE 31
-#define ARM64_NCAPS 31
+#define ARM64_NCAPS 32
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 192d791f1103..7ed320895d1f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 #define efi_call_runtime(f, ...) sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit() (true)
+#define efi_table_attr(table, attr, instance) \
+	((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...) \
 	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index fa92747a49c8..dd1ad3950ef5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -16,13 +16,15 @@
 #ifndef __ASM_FP_H
 #define __ASM_FP_H
-#include <asm/ptrace.h>
 #include <asm/errno.h>
+#include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
+#include <asm/sysreg.h>
 #ifndef __ASSEMBLY__
+#include <linux/build_bug.h>
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/stddef.h>
@@ -102,6 +104,16 @@ extern int sve_set_vector_length(struct task_struct *task,
 extern int sve_set_current_vl(unsigned long arg);
 extern int sve_get_current_vl(void);
+static inline void sve_user_disable(void)
+{
+	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
+}
+
+static inline void sve_user_enable(void)
+{
+	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
+}
+
 /*
  * Probing and setup functions.
  * Calls to these functions must be serialised with one another.
  */
@@ -128,6 +140,9 @@ static inline int sve_get_current_vl(void)
 	return -EINVAL;
 }
+static inline void sve_user_disable(void) { BUILD_BUG(); }
+static inline void sve_user_enable(void) { BUILD_BUG(); }
+
 static inline void sve_init_vq_map(void) { }
 static inline void sve_update_vq_map(void) { }
 static inline int sve_verify_vq_map(void) { return 0; }
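Note: sve_user_enable()/sve_user_disable() are single sysreg_clear_set() calls on the CPACR_EL1.ZEN field. A sketch of an equivalent toggle (hypothetical helper, assuming the same names as the patch):

static inline void sve_user_set(bool enable)
{
	if (enable)
		sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
	else
		sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}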
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 41770766d964..6a53e59ced95 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -119,13 +119,16 @@ static inline void decode_ctrl_reg(u32 reg,
 struct task_struct;
 struct notifier_block;
+struct perf_event_attr;
 struct perf_event;
 struct pmu;
 extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
 				  int *gen_len, int *gen_type, int *offset);
-extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
+extern int hw_breakpoint_arch_parse(struct perf_event *bp,
+				    const struct perf_event_attr *attr,
+				    struct arch_hw_breakpoint *hw);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
 					   unsigned long val, void *data);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f62c56b1793f..c6802dea6cab 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -446,8 +446,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
-
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index a0fee6985e6a..b2b0c6405eb0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,8 +8,6 @@
 struct pt_regs;
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 static inline int nr_legacy_irqs(void)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index 6deb8d726041..d5a44cf859e9 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -48,7 +48,6 @@ struct kprobe_ctlblk {
 	unsigned long saved_irqflag;
 	struct prev_kprobe prev_kprobe;
 	struct kprobe_step_ctx ss_ctx;
-	struct pt_regs jprobe_saved_regs;
 };
 void arch_remove_kprobe(struct kprobe *);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 1dab3a984608..0c97e45d1dc3 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -140,7 +140,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
-	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
 }
 /*
@@ -190,8 +190,8 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	u32 mode;
 	if (vcpu_mode_is_32bit(vcpu)) {
-		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
-		return mode > COMPAT_PSR_MODE_USR;
+		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
+		return mode > PSR_AA32_MODE_USR;
 	}
 	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
@@ -329,7 +329,7 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu)) {
-		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
+		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
 	} else {
 		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
 		sctlr |= (1 << 25);
@@ -340,7 +340,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
-		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
+		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
 	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
 }
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9a8ca48be..fe8777b12f86 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index f922eaf780f9..fb9d137256a6 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -19,11 +19,4 @@
 void kernel_neon_begin(void);
 void kernel_neon_end(void);
-/*
- * Temporary macro to allow the crypto code to compile. Note that the
- * semantics of kernel_neon_begin_partial() are now different from the
- * original as it does not allow being called in an interrupt context.
- */
-#define kernel_neon_begin_partial(num_regs) kernel_neon_begin()
-
 #endif /* ! __ASM_NEON_H */
diff --git a/arch/arm64/include/asm/numa.h b/arch/arm64/include/asm/numa.h
index 01bc46d5b43a..626ad01e83bf 100644
--- a/arch/arm64/include/asm/numa.h
+++ b/arch/arm64/include/asm/numa.h
@@ -35,10 +35,14 @@ void __init numa_set_distance(int from, int to, int distance);
 void __init numa_free_distance(void);
 void __init early_map_cpu_to_node(unsigned int cpu, int nid);
 void numa_store_cpu_info(unsigned int cpu);
+void numa_add_cpu(unsigned int cpu);
+void numa_remove_cpu(unsigned int cpu);
 #else /* CONFIG_NUMA */
 static inline void numa_store_cpu_info(unsigned int cpu) { }
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
 static inline void arm64_numa_init(void) { }
 static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9f82d6b53851..1bdeca8918a6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
 	 * or update_mmu_cache() have the necessary barriers.
 	 */
-	if (pte_valid_not_user(pte)) {
+	if (pte_valid_not_user(pte))
 		dsb(ishst);
-		isb();
-	}
 }
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
 	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
-	isb();
 }
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
 	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
-	isb();
 }
 static inline void pud_clear(pud_t *pudp)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index a73ae1e49200..79657ad91397 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -182,12 +182,12 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 				       unsigned long sp)
 {
 	start_thread_common(regs, pc);
-	regs->pstate = COMPAT_PSR_MODE_USR;
+	regs->pstate = PSR_AA32_MODE_USR;
 	if (pc & 1)
-		regs->pstate |= COMPAT_PSR_T_BIT;
+		regs->pstate |= PSR_AA32_T_BIT;
 #ifdef __AARCH64EB__
-	regs->pstate |= COMPAT_PSR_E_BIT;
+	regs->pstate |= PSR_AA32_E_BIT;
 #endif
 	regs->compat_sp = sp;
@@ -266,5 +266,20 @@ extern void __init minsigstksz_setup(void);
 #define SVE_SET_VL(arg) sve_set_current_vl(arg)
 #define SVE_GET_VL() sve_get_current_vl()
+/*
+ * For CONFIG_GCC_PLUGIN_STACKLEAK
+ *
+ * These need to be macros because otherwise we get stuck in a nightmare
+ * of header definitions for the use of task_stack_page.
+ */
+
+#define current_top_of_stack() \
+({ \
+	struct stack_info _info; \
+	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
+	_info.high; \
+})
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
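Note: a sketch of the kind of bound the STACKLEAK plugin runtime can derive from these macros (hypothetical helper, not part of the patch; current_stack_pointer is the arm64 SP accessor):

static inline unsigned long stack_bytes_in_use(void)
{
	/* distance from the live SP to the top of the task stack */
	return current_top_of_stack() - current_stack_pointer;
}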
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 6069d66e0bc2..177b851ca6d9 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -35,36 +35,39 @@
 #define COMPAT_PTRACE_GETHBPREGS 29
 #define COMPAT_PTRACE_SETHBPREGS 30
-/* AArch32 CPSR bits */
-#define COMPAT_PSR_MODE_MASK 0x0000001f
-#define COMPAT_PSR_MODE_USR 0x00000010
-#define COMPAT_PSR_MODE_FIQ 0x00000011
-#define COMPAT_PSR_MODE_IRQ 0x00000012
-#define COMPAT_PSR_MODE_SVC 0x00000013
-#define COMPAT_PSR_MODE_ABT 0x00000017
-#define COMPAT_PSR_MODE_HYP 0x0000001a
-#define COMPAT_PSR_MODE_UND 0x0000001b
-#define COMPAT_PSR_MODE_SYS 0x0000001f
-#define COMPAT_PSR_T_BIT 0x00000020
-#define COMPAT_PSR_F_BIT 0x00000040
-#define COMPAT_PSR_I_BIT 0x00000080
-#define COMPAT_PSR_A_BIT 0x00000100
-#define COMPAT_PSR_E_BIT 0x00000200
-#define COMPAT_PSR_J_BIT 0x01000000
-#define COMPAT_PSR_Q_BIT 0x08000000
-#define COMPAT_PSR_V_BIT 0x10000000
-#define COMPAT_PSR_C_BIT 0x20000000
-#define COMPAT_PSR_Z_BIT 0x40000000
-#define COMPAT_PSR_N_BIT 0x80000000
-#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
-#define COMPAT_PSR_GE_MASK 0x000f0000
+/* SPSR_ELx bits for exceptions taken from AArch32 */
+#define PSR_AA32_MODE_MASK 0x0000001f
+#define PSR_AA32_MODE_USR 0x00000010
+#define PSR_AA32_MODE_FIQ 0x00000011
+#define PSR_AA32_MODE_IRQ 0x00000012
+#define PSR_AA32_MODE_SVC 0x00000013
+#define PSR_AA32_MODE_ABT 0x00000017
+#define PSR_AA32_MODE_HYP 0x0000001a
+#define PSR_AA32_MODE_UND 0x0000001b
+#define PSR_AA32_MODE_SYS 0x0000001f
+#define PSR_AA32_T_BIT 0x00000020
+#define PSR_AA32_F_BIT 0x00000040
+#define PSR_AA32_I_BIT 0x00000080
+#define PSR_AA32_A_BIT 0x00000100
+#define PSR_AA32_E_BIT 0x00000200
+#define PSR_AA32_DIT_BIT 0x01000000
+#define PSR_AA32_Q_BIT 0x08000000
+#define PSR_AA32_V_BIT 0x10000000
+#define PSR_AA32_C_BIT 0x20000000
+#define PSR_AA32_Z_BIT 0x40000000
+#define PSR_AA32_N_BIT 0x80000000
+#define PSR_AA32_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+#define PSR_AA32_GE_MASK 0x000f0000
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
+#define PSR_AA32_ENDSTATE PSR_AA32_E_BIT
 #else
-#define COMPAT_PSR_ENDSTATE 0
+#define PSR_AA32_ENDSTATE 0
 #endif
+/* AArch32 CPSR bits, as seen in AArch32 */
+#define COMPAT_PSR_DIT_BIT 0x00200000
+
 /*
  * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
  * process is located in memory.
@@ -111,6 +114,30 @@
 #define compat_sp_fiq regs[29]
 #define compat_lr_fiq regs[30]
+static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
+{
+	unsigned long pstate;
+
+	pstate = psr & ~COMPAT_PSR_DIT_BIT;
+
+	if (psr & COMPAT_PSR_DIT_BIT)
+		pstate |= PSR_AA32_DIT_BIT;
+
+	return pstate;
+}
+
+static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
+{
+	unsigned long psr;
+
+	psr = pstate & ~PSR_AA32_DIT_BIT;
+
+	if (pstate & PSR_AA32_DIT_BIT)
+		psr |= COMPAT_PSR_DIT_BIT;
+
+	return psr;
+}
+
 /*
  * This struct defines the way the registers are stored on the stack during an
  * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
@@ -156,7 +183,7 @@ static inline void forget_syscall(struct pt_regs *regs)
 #ifdef CONFIG_COMPAT
 #define compat_thumb_mode(regs) \
-	(((regs)->pstate & COMPAT_PSR_T_BIT))
+	(((regs)->pstate & PSR_AA32_T_BIT))
 #else
 #define compat_thumb_mode(regs) (0)
 #endif
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index e073e6886685..ffe47d766c25 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -40,15 +40,18 @@ asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
 unsigned long sdei_arch_get_entry_point(int conduit);
 #define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
-bool _on_sdei_stack(unsigned long sp);
-static inline bool on_sdei_stack(unsigned long sp)
+struct stack_info;
+
+bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp,
+				 struct stack_info *info)
 {
 	if (!IS_ENABLED(CONFIG_VMAP_STACK))
 		return false;
 	if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
 		return false;
 	if (in_nmi())
-		return _on_sdei_stack(sp);
+		return _on_sdei_stack(sp, info);
 	return false;
 }
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
 	/*
-	 * The raw_cpu_read() is racy if called with preemption enabled.
-	 * This is not a bug: kernel_neon_busy is only set when
-	 * preemption is disabled, so we cannot migrate to another CPU
-	 * while it is set, nor can we migrate to a CPU where it is set.
-	 * So, if we find it clear on some CPU then we're guaranteed to
-	 * find it clear on any CPU we could migrate to.
-	 *
-	 * If we are in between kernel_neon_begin()...kernel_neon_end(),
-	 * the flag will be set, but preemption is also disabled, so we
-	 * can't migrate to another CPU and spuriously see it become
-	 * false.
+	 * kernel_neon_busy is only set while preemption is disabled,
+	 * and is clear whenever preemption is enabled. Since
+	 * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+	 * cannot change under our feet -- if it's set we cannot be
+	 * migrated, and if it's clear we cannot be migrated to a CPU
+	 * where it is set.
 	 */
 	return !in_irq() && !irqs_disabled() && !in_nmi() &&
-	       !raw_cpu_read(kernel_neon_busy);
+	       !this_cpu_read(kernel_neon_busy);
 }
 #else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 26c5bd7d88d8..38116008d18b 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,123 +16,8 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
-#include <asm/lse.h>
-#include <asm/spinlock_types.h>
-#include <asm/processor.h>
-
-/*
- * Spinlock implementation.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval, newval;
-
-	asm volatile(
-	/* Atomically increment the next ticket. */
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-"	prfm	pstl1strm, %3\n"
-"1:	ldaxr	%w0, %3\n"
-"	add	%w1, %w0, %w5\n"
-"	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n",
-	/* LSE atomics */
-"	mov	%w2, %w5\n"
-"	ldadda	%w2, %w0, %3\n"
-	__nops(3)
-	)
-
-	/* Did we get the lock? */
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbz	%w1, 3f\n"
-	/*
-	 * No: spin on the owner. Send a local event to avoid missing an
-	 * unlock before the exclusive load.
-	 */
-"	sevl\n"
-"2:	wfe\n"
-"	ldaxrh	%w2, %4\n"
-"	eor	%w1, %w2, %w0, lsr #16\n"
-"	cbnz	%w1, 2b\n"
-	/* We got the lock. Critical section starts here. */
-"3:"
-	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
-	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
-	: "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned int tmp;
-	arch_spinlock_t lockval;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	prfm	pstl1strm, %2\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	eor	%w1, %w0, %w0, ror #16\n"
-	"	cbnz	%w1, 2f\n"
-	"	add	%w0, %w0, %3\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	eor	%w1, %w0, %w0, ror #16\n"
-	"	cbnz	%w1, 1f\n"
-	"	add	%w1, %w0, %3\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sub	%w1, %w1, %3\n"
-	"	eor	%w1, %w1, %w0\n"
-	"1:")
-	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-	: "I" (1 << TICKET_SHIFT)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	ldrh	%w1, %0\n"
-	"	add	%w1, %w1, #1\n"
-	"	stlrh	%w1, %0",
-	/* LSE atomics */
-	"	mov	%w1, #1\n"
-	"	staddlh	%w1, %0\n"
-	__nops(1))
-	: "=Q" (lock->owner), "=&r" (tmp)
-	:
-	: "memory");
-}
-
-static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
-	return lock.owner == lock.next;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return !arch_spin_value_unlocked(READ_ONCE(*lock));
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	arch_spinlock_t lockval = READ_ONCE(*lock);
-	return (lockval.next - lockval.owner) > 1;
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
 #include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock() smp_mb()
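Note: the deleted assembly implemented a classic ticket lock; the file now pulls in the generic qspinlock instead. A user-space C11 sketch of the ticket-lock algorithm the asm encoded (illustrative only, not the replacement code):

#include <stdatomic.h>

typedef struct {
	_Atomic unsigned short owner;
	_Atomic unsigned short next;
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	/* take a ticket, then wait until 'owner' reaches it */
	unsigned short t = atomic_fetch_add(&l->next, 1);

	while (atomic_load_explicit(&l->owner, memory_order_acquire) != t)
		; /* the arm64 asm parked in WFE here instead of spinning */
}

static void ticket_unlock(ticket_lock_t *l)
{
	unsigned short o = atomic_load_explicit(&l->owner, memory_order_relaxed);

	atomic_store_explicit(&l->owner, o + 1, memory_order_release);
}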
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 6b856012c51b..a157ff465e27 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,22 +20,7 @@
 # error "please don't include this file directly"
 #endif
-#include <linux/types.h>
-
-#define TICKET_SHIFT 16
-
-typedef struct {
-#ifdef __AARCH64EB__
-	u16 next;
-	u16 owner;
-#else
-	u16 owner;
-	u16 next;
-#endif
-} __aligned(4) arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
-
+#include <asm-generic/qspinlock_types.h>
 #include <asm-generic/qrwlock_types.h>
 #endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 902f9edacbea..e86737b7c924 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -32,6 +32,21 @@ struct stackframe {
 #endif
 };
+enum stack_type {
+	STACK_TYPE_UNKNOWN,
+	STACK_TYPE_TASK,
+	STACK_TYPE_IRQ,
+	STACK_TYPE_OVERFLOW,
+	STACK_TYPE_SDEI_NORMAL,
+	STACK_TYPE_SDEI_CRITICAL,
+};
+
+struct stack_info {
+	unsigned long low;
+	unsigned long high;
+	enum stack_type type;
+};
+
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			    int (*fn)(struct stackframe *, void *), void *data);
@@ -39,7 +54,8 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_irq_stack(unsigned long sp)
+static inline bool on_irq_stack(unsigned long sp,
+				struct stack_info *info)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
 	unsigned long high = low + IRQ_STACK_SIZE;
@@ -47,46 +63,79 @@ static inline bool on_irq_stack(unsigned long sp)
 	if (!low)
 		return false;
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_IRQ;
+	}
+
+	return true;
 }
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
+				 struct stack_info *info)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_TASK;
+	}
+
+	return true;
 }
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp)
+static inline bool on_overflow_stack(unsigned long sp,
+				     struct stack_info *info)
 {
 	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
-	return (low <= sp && sp < high);
+	if (sp < low || sp >= high)
+		return false;
+
+	if (info) {
+		info->low = low;
+		info->high = high;
+		info->type = STACK_TYPE_OVERFLOW;
+	}
+
+	return true;
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp) { return false; }
+static inline bool on_overflow_stack(unsigned long sp,
+				     struct stack_info *info) { return false; }
 #endif
+
 /*
  * We can only safely access per-cpu stacks from current in a non-preemptible
  * context.
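Note: each on_*_stack() predicate now fills an optional struct stack_info out-parameter. A hypothetical consumer (not from the patch) that classifies an SP before trusting it, e.g. as an unwinder sanity check:

static bool sp_on_known_stack(struct task_struct *tsk, unsigned long sp)
{
	struct stack_info info;

	if (!on_accessible_stack(tsk, sp, &info))
		return false;

	/* info.low/info.high bound the containing stack */
	return info.type != STACK_TYPE_UNKNOWN;
}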
  */
-static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
+static inline bool on_accessible_stack(struct task_struct *tsk,
+				       unsigned long sp,
+				       struct stack_info *info)
 {
-	if (on_task_stack(tsk, sp))
+	if (on_task_stack(tsk, sp, info))
 		return true;
 	if (tsk != current || preemptible())
 		return false;
-	if (on_irq_stack(sp))
+	if (on_irq_stack(sp, info))
 		return true;
-	if (on_overflow_stack(sp))
+	if (on_overflow_stack(sp, info))
 		return true;
-	if (on_sdei_stack(sp))
+	if (on_sdei_stack(sp, info))
 		return true;
 	return false;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 709a574468f0..ad8be16a39c9 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -20,7 +20,13 @@
 #include <linux/compat.h>
 #include <linux/err.h>
-extern const void *sys_call_table[];
+typedef long (*syscall_fn_t)(struct pt_regs *regs);
+
+extern const syscall_fn_t sys_call_table[];
+
+#ifdef CONFIG_COMPAT
+extern const syscall_fn_t compat_sys_call_table[];
+#endif
 static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
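Note: typing the table as syscall_fn_t means every entry takes pt_regs and unpacks its own arguments. A simplified dispatch sketch modelled on the kernel's syscall entry path (not the literal implementation):

static long do_syscall_sketch(struct pt_regs *regs, unsigned int nr,
			      const syscall_fn_t table[], unsigned int max)
{
	if (nr >= max)
		return -ENOSYS;

	/* the wrapper generated for this entry pulls args from regs */
	return table[nr](regs);
}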
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..a4477e515b79
--- /dev/null
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - arm64 specific wrappers to syscall definitions
+ *
+ * Based on arch/x86/include/asm_syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#define SC_ARM64_REGS_TO_ARGS(x, ...) \
+	__MAP(x,__SC_ARGS \
+	      ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
+	      ,,regs->regs[3],,regs->regs[4],,regs->regs[5])
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+	asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs); \
+	ALLOW_ERROR_INJECTION(__arm64_compat_sys##name, ERRNO); \
+	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+	asmlinkage long __arm64_compat_sys##name(const struct pt_regs *regs) \
+	{ \
+		return __se_compat_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+	} \
+	static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+	{ \
+		return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+	} \
+	static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+	asmlinkage long __arm64_compat_sys_##sname(void); \
+	ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
+	asmlinkage long __arm64_compat_sys_##sname(void)
+
+#define COND_SYSCALL_COMPAT(name) \
+	cond_syscall(__arm64_compat_sys_##name);
+
+#define COMPAT_SYS_NI(name) \
+	SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+	asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
+	ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
+	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+	asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
+	{ \
+		return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
+	} \
+	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+	{ \
+		long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+		__MAP(x,__SC_TEST,__VA_ARGS__); \
+		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+		return ret; \
+	} \
+	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#ifndef SYSCALL_DEFINE0
+#define SYSCALL_DEFINE0(sname) \
+	SYSCALL_METADATA(_##sname, 0); \
+	asmlinkage long __arm64_sys_##sname(void); \
+	ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
+	asmlinkage long __arm64_sys_##sname(void)
+#endif
+
+#ifndef COND_SYSCALL
+#define COND_SYSCALL(name) cond_syscall(__arm64_sys_##name)
+#endif
+
+#ifndef SYS_NI
+#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
+#endif
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
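Note: roughly what the outer wrapper generated by SYSCALL_DEFINE1(close, unsigned int, fd) looks like under this scheme (simplified sketch; the __se_/__do_ type-checking glue is elided, and __do_sys_close is the author-written body the macro wraps):

asmlinkage long __arm64_sys_close(const struct pt_regs *regs)
{
	/* argument 0 arrives in regs->regs[0] */
	return __do_sys_close((unsigned int)regs->regs[0]);
}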
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6171178075dc..e205ec8489e9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -436,7 +436,8 @@
 #define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
 			(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
 			(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
-			(1 << 27) | (1 << 30) | (1 << 31))
+			(1 << 27) | (1 << 30) | (1 << 31) | \
+			(0xffffffffUL << 32))
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL2 SCTLR_ELx_EE
@@ -452,9 +453,9 @@
 			 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
 			 ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL2 set/clear bits"
+#endif
 /* SCTLR_EL1 specific flags. */
 #define SCTLR_EL1_UCI (1 << 26)
@@ -473,7 +474,8 @@
 #define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
 			(1 << 29))
 #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
-			(1 << 27) | (1 << 30) | (1 << 31))
+			(1 << 27) | (1 << 30) | (1 << 31) | \
+			(0xffffffffUL << 32))
 #ifdef CONFIG_CPU_BIG_ENDIAN
 #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -492,8 +494,9 @@
 			 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
 			 SCTLR_EL1_RES0)
-/* Check all the bits are accounted for */
-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#error "Inconsistent SCTLR_EL1 set/clear bits"
+#endif
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_TS_SHIFT 52
@@ -728,18 +731,16 @@ asm(
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
-	u32 val;
-
-	SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
-	SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
-
-	val = read_sysreg(sctlr_el1);
-	val &= ~clear;
-	val |= set;
-	write_sysreg(val, sctlr_el1);
-}
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+	u64 __scs_val = read_sysreg(sysreg); \
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+	if (__scs_new != __scs_val) \
+		write_sysreg(__scs_new, sysreg); \
+} while (0)
 #endif
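Note: unlike the old config_sctlr_el1(), sysreg_clear_set() works on any named system register and skips the write entirely when the value is unchanged. A former caller now reads as, e.g. (illustrative register/bit choice):

static inline void enable_el0_cache_maintenance(void)
{
	/* set SCTLR_EL1.UCI, clear nothing */
	sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_UCI);
}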
compat_sys_rt_sigqueueinfo)
 #define __NR_rt_sigsuspend 179
 __SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
 #define __NR_pread64 180
-__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+__SYSCALL(__NR_pread64, compat_sys_aarch32_pread64)
 #define __NR_pwrite64 181
-__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+__SYSCALL(__NR_pwrite64, compat_sys_aarch32_pwrite64)
 #define __NR_chown 182
 __SYSCALL(__NR_chown, sys_chown16)
 #define __NR_getcwd 183
@@ -406,11 +406,11 @@ __SYSCALL(__NR_vfork, sys_vfork)
 #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
 __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */
 #define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
+__SYSCALL(__NR_mmap2, compat_sys_aarch32_mmap2)
 #define __NR_truncate64 193
-__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+__SYSCALL(__NR_truncate64, compat_sys_aarch32_truncate64)
 #define __NR_ftruncate64 194
-__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+__SYSCALL(__NR_ftruncate64, compat_sys_aarch32_ftruncate64)
 #define __NR_stat64 195
 __SYSCALL(__NR_stat64, sys_stat64)
 #define __NR_lstat64 196
@@ -472,7 +472,7 @@ __SYSCALL(223, sys_ni_syscall)
 #define __NR_gettid 224
 __SYSCALL(__NR_gettid, sys_gettid)
 #define __NR_readahead 225
-__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+__SYSCALL(__NR_readahead, compat_sys_aarch32_readahead)
 #define __NR_setxattr 226
 __SYSCALL(__NR_setxattr, sys_setxattr)
 #define __NR_lsetxattr 227
@@ -554,15 +554,15 @@ __SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
 #define __NR_clock_nanosleep 265
 __SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
 #define __NR_statfs64 266
-__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+__SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64)
 #define __NR_fstatfs64 267
-__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+__SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64)
 #define __NR_tgkill 268
 __SYSCALL(__NR_tgkill, sys_tgkill)
 #define __NR_utimes 269
 __SYSCALL(__NR_utimes, compat_sys_utimes)
 #define __NR_arm_fadvise64_64 270
-__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64)
 #define __NR_pciconfig_iobase 271
 __SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
 #define __NR_pciconfig_read 272
@@ -704,7 +704,7 @@ __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
 #define __NR_splice 340
 __SYSCALL(__NR_splice, sys_splice)
 #define __NR_sync_file_range2 341
-__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+__SYSCALL(__NR_sync_file_range2, compat_sys_aarch32_sync_file_range2)
 #define __NR_tee 342
 __SYSCALL(__NR_tee, sys_tee)
 #define __NR_vmsplice 343
@@ -726,7 +726,7 @@ __SYSCALL(__NR_timerfd_create, sys_timerfd_create)
 #define __NR_eventfd 351
 __SYSCALL(__NR_eventfd, sys_eventfd)
 #define __NR_fallocate 352
-__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+__SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate)
 #define __NR_timerfd_settime 353
 __SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
 #define __NR_timerfd_gettime 354
@@ -817,6 +817,8 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
 __SYSCALL(__NR_pkey_free, sys_pkey_free)
 #define __NR_statx 397
 __SYSCALL(__NR_statx, sys_statx)
+#define __NR_rseq 398
+__SYSCALL(__NR_rseq, sys_rseq)
 /*
  * Please add new compat syscalls above this comment and update
  * __NR_compat_syscalls in asm/unistd.h.
  */
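Note: unistd32.h is consumed with __SYSCALL redefined per pass. A sketch of how the compat table is typically assembled from it (modelled on arch/arm64/kernel/sys32.c; simplified):

/* pass 1: declare a pt_regs-based wrapper for every entry */
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>

/* pass 2: build the table, defaulting every slot to ni_syscall */
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,

const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
	[0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};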