Diffstat (limited to 'arch/arm64/include/asm')
25 files changed, 543 insertions, 89 deletions
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h new file mode 100644 index 000000000000..919a67855b63 --- /dev/null +++ b/arch/arm64/include/asm/alternative-asm.h @@ -0,0 +1,29 @@ +#ifndef __ASM_ALTERNATIVE_ASM_H +#define __ASM_ALTERNATIVE_ASM_H + +#ifdef __ASSEMBLY__ + +.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len + .word \orig_offset - . + .word \alt_offset - . + .hword \feature + .byte \orig_len + .byte \alt_len +.endm + +.macro alternative_insn insn1 insn2 cap +661: \insn1 +662: .pushsection .altinstructions, "a" + altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f + .popsection + .pushsection .altinstr_replacement, "ax" +663: \insn2 +664: .popsection + .if ((664b-663b) != (662b-661b)) + .error "Alternatives instruction length mismatch" + .endif +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ALTERNATIVE_ASM_H */ diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h new file mode 100644 index 000000000000..d261f01e2bae --- /dev/null +++ b/arch/arm64/include/asm/alternative.h @@ -0,0 +1,44 @@ +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#include <linux/types.h> +#include <linux/stddef.h> +#include <linux/stringify.h> + +struct alt_instr { + s32 orig_offset; /* offset to original instruction */ + s32 alt_offset; /* offset to replacement instruction */ + u16 cpufeature; /* cpufeature bit set for replacement */ + u8 orig_len; /* size of original instruction(s) */ + u8 alt_len; /* size of new instruction(s), <= orig_len */ +}; + +void apply_alternatives_all(void); +void apply_alternatives(void *start, size_t length); +void free_alternatives_memory(void); + +#define ALTINSTR_ENTRY(feature) \ + " .word 661b - .\n" /* label */ \ + " .word 663f - .\n" /* new instruction */ \ + " .hword " __stringify(feature) "\n" /* feature bit */ \ + " .byte 662b-661b\n" /* source len */ \ + " .byte 664f-663f\n" /* replacement len */ + +/* alternative assembly primitive: */ +#define ALTERNATIVE(oldinstr, newinstr, feature) \ + "661:\n\t" \ + oldinstr "\n" \ + "662:\n" \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feature) \ + ".popsection\n" \ + ".pushsection .altinstr_replacement, \"a\"\n" \ + "663:\n\t" \ + newinstr "\n" \ + "664:\n\t" \ + ".popsection\n\t" \ + ".if ((664b-663b) != (662b-661b))\n\t" \ + " .error \"Alternatives instruction length mismatch\"\n\t"\ + ".endif\n" + +#endif /* __ASM_ALTERNATIVE_H */ diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 88cc05b5f3ac..bde449936e2f 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -32,6 +32,8 @@ #ifndef __ASSEMBLY__ +#define __read_mostly __attribute__((__section__(".data..read_mostly"))) + static inline int cache_line_size(void) { u32 cwg = cache_type_cwg(); diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index 689b6379188c..7ae31a2cc6c0 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -73,7 +73,7 @@ extern void flush_cache_all(void); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); -extern void __flush_cache_user_range(unsigned long start, unsigned long end); +extern long __flush_cache_user_range(unsigned long start, unsigned long end); static inline void 
flush_cache_mm(struct mm_struct *mm) { diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index ddb9d7830558..cb9593079f29 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -19,6 +19,7 @@ #define __ASM_CMPXCHG_H #include <linux/bug.h> +#include <linux/mmdebug.h> #include <asm/barrier.h> @@ -152,6 +153,51 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, return oldval; } +#define system_has_cmpxchg_double() 1 + +static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2, + unsigned long old1, unsigned long old2, + unsigned long new1, unsigned long new2, int size) +{ + unsigned long loop, lost; + + switch (size) { + case 8: + VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1); + do { + asm volatile("// __cmpxchg_double8\n" + " ldxp %0, %1, %2\n" + " eor %0, %0, %3\n" + " eor %1, %1, %4\n" + " orr %1, %0, %1\n" + " mov %w0, #0\n" + " cbnz %1, 1f\n" + " stxp %w0, %5, %6, %2\n" + "1:\n" + : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1) + : "r" (old1), "r"(old2), "r"(new1), "r"(new2)); + } while (loop); + break; + default: + BUILD_BUG(); + } + + return !lost; +} + +static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2, + unsigned long old1, unsigned long old2, + unsigned long new1, unsigned long new2, int size) +{ + int ret; + + smp_mb(); + ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size); + smp_mb(); + + return ret; +} + static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, unsigned long new, int size) { @@ -182,6 +228,33 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, __ret; \ }) +#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ +({\ + int __ret;\ + __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \ + (unsigned long)(o2), (unsigned long)(n1), \ + (unsigned long)(n2), sizeof(*(ptr1)));\ + __ret; \ +}) + +#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ +({\ + int __ret;\ + __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \ + (unsigned long)(o2), (unsigned long)(n1), \ + (unsigned long)(n2), sizeof(*(ptr1)));\ + __ret; \ +}) + +#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) +#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) +#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) +#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n) + +#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ + cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \ + o1, o2, n1, n2) + #define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n)) #define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n)) diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 56de5aadede2..3fb053fa6e98 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -205,6 +205,13 @@ typedef struct compat_siginfo { compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ int _fd; } _sigpoll; + + /* SIGSYS */ + struct { + compat_uptr_t _call_addr; /* calling user insn */ + int _syscall; /* triggering system call number */ + compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */ + } _sigsys; } _sifields; } compat_siginfo_t; diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 056443086019..ace70682499b 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ 
-30,6 +30,8 @@ struct cpuinfo_arm64 { u32 reg_dczid; u32 reg_midr; + u64 reg_id_aa64dfr0; + u64 reg_id_aa64dfr1; u64 reg_id_aa64isar0; u64 reg_id_aa64isar1; u64 reg_id_aa64mmfr0; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index cd4ac0516488..07547ccc1f2b 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -21,9 +21,38 @@ #define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap)) #define cpu_feature(x) ilog2(HWCAP_ ## x) +#define ARM64_WORKAROUND_CLEAN_CACHE 0 +#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 + +#define ARM64_NCAPS 2 + +#ifndef __ASSEMBLY__ + +extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); + static inline bool cpu_have_feature(unsigned int num) { return elf_hwcap & (1UL << num); } +static inline bool cpus_have_cap(unsigned int num) +{ + if (num >= ARM64_NCAPS) + return false; + return test_bit(num, cpu_hwcaps); +} + +static inline void cpus_set_cap(unsigned int num) +{ + if (num >= ARM64_NCAPS) + pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n", + num, ARM64_NCAPS); + else + __set_bit(num, cpu_hwcaps); +} + +void check_local_cpu_errata(void); + +#endif /* __ASSEMBLY__ */ + #endif diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 379d0b874328..8adb986a3086 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -57,6 +57,11 @@ #define MIDR_IMPLEMENTOR(midr) \ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) +#define MIDR_CPU_PART(imp, partnum) \ + (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ + (0xf << MIDR_ARCHITECTURE_SHIFT) | \ + ((partnum) << MIDR_PARTNUM_SHIFT)) + #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 diff --git a/arch/arm64/include/asm/dmi.h b/arch/arm64/include/asm/dmi.h new file mode 100644 index 000000000000..69d37d87b159 --- /dev/null +++ b/arch/arm64/include/asm/dmi.h @@ -0,0 +1,31 @@ +/* + * arch/arm64/include/asm/dmi.h + * + * Copyright (C) 2013 Linaro Limited. + * Written by: Yi Li (yi.li@linaro.org) + * + * based on arch/ia64/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef __ASM_DMI_H +#define __ASM_DMI_H + +#include <linux/io.h> +#include <linux/slab.h> + +/* + * According to section 2.3.6 of the UEFI spec, the firmware should not + * request a virtual mapping for configuration tables such as SMBIOS. + * This means we have to map them before use. 
+ */ +#define dmi_early_remap(x, l) ioremap_cache(x, l) +#define dmi_early_unmap(x, l) iounmap(x) +#define dmi_remap(x, l) ioremap_cache(x, l) +#define dmi_unmap(x) iounmap(x) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index 5f7bfe6df723..9ef6eca905ca 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -31,6 +31,7 @@ * */ enum fixed_addresses { + FIX_HOLE, FIX_EARLYCON_MEM_BASE, __end_of_permanent_fixed_addresses, @@ -56,10 +57,11 @@ enum fixed_addresses { #define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE) -extern void __early_set_fixmap(enum fixed_addresses idx, - phys_addr_t phys, pgprot_t flags); +void __init early_fixmap_init(void); -#define __set_fixmap __early_set_fixmap +#define __early_set_fixmap __set_fixmap + +extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); #include <asm-generic/fixmap.h> diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h index 024c46183c3c..0ad735166d9f 100644 --- a/arch/arm64/include/asm/hwcap.h +++ b/arch/arm64/include/asm/hwcap.h @@ -30,6 +30,7 @@ #define COMPAT_HWCAP_IDIVA (1 << 17) #define COMPAT_HWCAP_IDIVT (1 << 18) #define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT) +#define COMPAT_HWCAP_LPAE (1 << 20) #define COMPAT_HWCAP_EVTSTRM (1 << 21) #define COMPAT_HWCAP2_AES (1 << 0) diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 56a9e63b6c33..e2ff32a93b5c 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -354,6 +354,16 @@ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); int aarch64_insn_patch_text_nosync(void *addr, u32 insn); int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); + +bool aarch32_insn_is_wide(u32 insn); + +#define A32_RN_OFFSET 16 +#define A32_RT_OFFSET 12 +#define A32_RT2_OFFSET 0 + +u32 aarch32_insn_extract_reg_num(u32 insn, int offset); +u32 aarch32_insn_mcr_extract_opc2(u32 insn); +u32 aarch32_insn_mcr_extract_crm(u32 insn); #endif /* __ASSEMBLY__ */ #endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 79f1d519221f..75825b63464d 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -28,6 +28,8 @@ #include <asm/barrier.h> #include <asm/pgtable.h> #include <asm/early_ioremap.h> +#include <asm/alternative.h> +#include <asm/cpufeature.h> #include <xen/xen.h> @@ -57,28 +59,41 @@ static inline void __raw_writeq(u64 val, volatile void __iomem *addr) static inline u8 __raw_readb(const volatile void __iomem *addr) { u8 val; - asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr)); + asm volatile(ALTERNATIVE("ldrb %w0, [%1]", + "ldarb %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); return val; } static inline u16 __raw_readw(const volatile void __iomem *addr) { u16 val; - asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr)); + + asm volatile(ALTERNATIVE("ldrh %w0, [%1]", + "ldarh %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); return val; } static inline u32 __raw_readl(const volatile void __iomem *addr) { u32 val; - asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); + asm volatile(ALTERNATIVE("ldr %w0, [%1]", + "ldar %w0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); return val; } static inline u64 
__raw_readq(const volatile void __iomem *addr) { u64 val; - asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr)); + asm volatile(ALTERNATIVE("ldr %0, [%1]", + "ldar %0, [%1]", + ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) + : "=r" (val) : "r" (addr)); return val; } diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h index e1f7ecdde11f..94c53674a31d 100644 --- a/arch/arm64/include/asm/irq.h +++ b/arch/arm64/include/asm/irq.h @@ -3,7 +3,8 @@ #include <asm-generic/irq.h> -extern void (*handle_arch_irq)(struct pt_regs *); +struct pt_regs; + extern void migrate_irqs(void); extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 7fd3e27e3ccc..8afb863f5a9e 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -18,6 +18,7 @@ #ifndef __ARM64_KVM_ARM_H__ #define __ARM64_KVM_ARM_H__ +#include <asm/memory.h> #include <asm/types.h> /* Hyp Configuration Register (HCR) bits */ @@ -160,9 +161,9 @@ #endif #define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT (UL(48)) +#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT) /* Hyp System Trap Register */ #define HSTR_EL2_TTEE (1 << 16) @@ -185,13 +186,13 @@ /* Exception Syndrome Register (ESR) bits */ #define ESR_EL2_EC_SHIFT (26) -#define ESR_EL2_EC (0x3fU << ESR_EL2_EC_SHIFT) -#define ESR_EL2_IL (1U << 25) +#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT) +#define ESR_EL2_IL (UL(1) << 25) #define ESR_EL2_ISS (ESR_EL2_IL - 1) #define ESR_EL2_ISV_SHIFT (24) -#define ESR_EL2_ISV (1U << ESR_EL2_ISV_SHIFT) +#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT) #define ESR_EL2_SAS_SHIFT (22) -#define ESR_EL2_SAS (3U << ESR_EL2_SAS_SHIFT) +#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT) #define ESR_EL2_SSE (1 << 21) #define ESR_EL2_SRT_SHIFT (16) #define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT) @@ -205,16 +206,16 @@ #define ESR_EL2_FSC_TYPE (0x3c) #define ESR_EL2_CV_SHIFT (24) -#define ESR_EL2_CV (1U << ESR_EL2_CV_SHIFT) +#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT) #define ESR_EL2_COND_SHIFT (20) -#define ESR_EL2_COND (0xfU << ESR_EL2_COND_SHIFT) +#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT) #define FSC_FAULT (0x04) #define FSC_PERM (0x0c) /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ -#define HPFAR_MASK (~0xFUL) +#define HPFAR_MASK (~UL(0xf)) #define ESR_EL2_EC_UNKNOWN (0x00) #define ESR_EL2_EC_WFI (0x01) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ccc7087d3c4e..a62cd077457b 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x) * virt_to_page(k) convert a _valid_ virtual address to struct page * * virt_addr_valid(k) indicates whether a virtual address is valid */ -#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET +#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h new file mode 100644 index 000000000000..4e603ea36ad3 --- /dev/null +++ b/arch/arm64/include/asm/opcodes.h 
@@ -0,0 +1 @@ +#include <../../arm/include/asm/opcodes.h> diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 5279e5733386..09da25bc596f 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -44,6 +44,221 @@ static inline unsigned long __my_cpu_offset(void) #endif /* CONFIG_SMP */ +#define PERCPU_OP(op, asm_op) \ +static inline unsigned long __percpu_##op(void *ptr, \ + unsigned long val, int size) \ +{ \ + unsigned long loop, ret; \ + \ + switch (size) { \ + case 1: \ + do { \ + asm ("//__per_cpu_" #op "_1\n" \ + "ldxrb %w[ret], %[ptr]\n" \ + #asm_op " %w[ret], %w[ret], %w[val]\n" \ + "stxrb %w[loop], %w[ret], %[ptr]\n" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u8 *)ptr) \ + : [val] "Ir" (val)); \ + } while (loop); \ + break; \ + case 2: \ + do { \ + asm ("//__per_cpu_" #op "_2\n" \ + "ldxrh %w[ret], %[ptr]\n" \ + #asm_op " %w[ret], %w[ret], %w[val]\n" \ + "stxrh %w[loop], %w[ret], %[ptr]\n" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u16 *)ptr) \ + : [val] "Ir" (val)); \ + } while (loop); \ + break; \ + case 4: \ + do { \ + asm ("//__per_cpu_" #op "_4\n" \ + "ldxr %w[ret], %[ptr]\n" \ + #asm_op " %w[ret], %w[ret], %w[val]\n" \ + "stxr %w[loop], %w[ret], %[ptr]\n" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u32 *)ptr) \ + : [val] "Ir" (val)); \ + } while (loop); \ + break; \ + case 8: \ + do { \ + asm ("//__per_cpu_" #op "_8\n" \ + "ldxr %[ret], %[ptr]\n" \ + #asm_op " %[ret], %[ret], %[val]\n" \ + "stxr %w[loop], %[ret], %[ptr]\n" \ + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ + [ptr] "+Q"(*(u64 *)ptr) \ + : [val] "Ir" (val)); \ + } while (loop); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + \ + return ret; \ +} + +PERCPU_OP(add, add) +PERCPU_OP(and, and) +PERCPU_OP(or, orr) +#undef PERCPU_OP + +static inline unsigned long __percpu_read(void *ptr, int size) +{ + unsigned long ret; + + switch (size) { + case 1: + ret = ACCESS_ONCE(*(u8 *)ptr); + break; + case 2: + ret = ACCESS_ONCE(*(u16 *)ptr); + break; + case 4: + ret = ACCESS_ONCE(*(u32 *)ptr); + break; + case 8: + ret = ACCESS_ONCE(*(u64 *)ptr); + break; + default: + BUILD_BUG(); + } + + return ret; +} + +static inline void __percpu_write(void *ptr, unsigned long val, int size) +{ + switch (size) { + case 1: + ACCESS_ONCE(*(u8 *)ptr) = (u8)val; + break; + case 2: + ACCESS_ONCE(*(u16 *)ptr) = (u16)val; + break; + case 4: + ACCESS_ONCE(*(u32 *)ptr) = (u32)val; + break; + case 8: + ACCESS_ONCE(*(u64 *)ptr) = (u64)val; + break; + default: + BUILD_BUG(); + } +} + +static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, + int size) +{ + unsigned long ret, loop; + + switch (size) { + case 1: + do { + asm ("//__percpu_xchg_1\n" + "ldxrb %w[ret], %[ptr]\n" + "stxrb %w[loop], %w[val], %[ptr]\n" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u8 *)ptr) + : [val] "r" (val)); + } while (loop); + break; + case 2: + do { + asm ("//__percpu_xchg_2\n" + "ldxrh %w[ret], %[ptr]\n" + "stxrh %w[loop], %w[val], %[ptr]\n" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u16 *)ptr) + : [val] "r" (val)); + } while (loop); + break; + case 4: + do { + asm ("//__percpu_xchg_4\n" + "ldxr %w[ret], %[ptr]\n" + "stxr %w[loop], %w[val], %[ptr]\n" + : [loop] "=&r"(loop), [ret] "=&r"(ret), + [ptr] "+Q"(*(u32 *)ptr) + : [val] "r" (val)); + } while (loop); + break; + case 8: + do { + asm ("//__percpu_xchg_8\n" + "ldxr %[ret], %[ptr]\n" + "stxr %w[loop], %[val], %[ptr]\n" + : [loop] "=&r"(loop), [ret] 
"=&r"(ret), + [ptr] "+Q"(*(u64 *)ptr) + : [val] "r" (val)); + } while (loop); + break; + default: + BUILD_BUG(); + } + + return ret; +} + +#define _percpu_add(pcp, val) \ + __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + +#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val)) + +#define _percpu_and(pcp, val) \ + __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + +#define _percpu_or(pcp, val) \ + __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp)) + +#define _percpu_read(pcp) (typeof(pcp)) \ + (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp))) + +#define _percpu_write(pcp, val) \ + __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)) + +#define _percpu_xchg(pcp, val) (typeof(pcp)) \ + (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))) + +#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) +#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) +#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) +#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) + +#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val) +#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val) +#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) +#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) + +#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val) +#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val) +#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) +#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) + +#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val) +#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val) +#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) +#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) + +#define this_cpu_read_1(pcp) _percpu_read(pcp) +#define this_cpu_read_2(pcp) _percpu_read(pcp) +#define this_cpu_read_4(pcp) _percpu_read(pcp) +#define this_cpu_read_8(pcp) _percpu_read(pcp) + +#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) +#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) + +#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) +#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) + #include <asm-generic/percpu.h> #endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index d5bed02073d6..e20df38a8ff3 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -26,11 +26,13 @@ #define check_pgt_cache() do { } while (0) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) + #if CONFIG_ARM64_PGTABLE_LEVELS > 2 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { - return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); + return (pmd_t *)__get_free_page(PGALLOC_GFP); } static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) @@ -50,7 +52,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); + return (pud_t *)__get_free_page(PGALLOC_GFP); } static inline void pud_free(struct mm_struct *mm, pud_t *pud) @@ -69,8 +71,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t 
*pgd, pud_t *pud) extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) - static inline pte_t * pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) { diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h new file mode 100644 index 000000000000..c76fac979629 --- /dev/null +++ b/arch/arm64/include/asm/seccomp.h @@ -0,0 +1,25 @@ +/* + * arch/arm64/include/asm/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm/unistd.h> + +#ifdef CONFIG_COMPAT +#define __NR_seccomp_read_32 __NR_compat_read +#define __NR_seccomp_write_32 __NR_compat_write +#define __NR_seccomp_exit_32 __NR_compat_exit +#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn +#endif /* CONFIG_COMPAT */ + +#include <asm-generic/seccomp.h> + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index a82c0c5c8b52..c028fe37456f 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -19,10 +19,6 @@ #ifndef __ASM_TLB_H #define __ASM_TLB_H -#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry - -#include <asm-generic/tlb.h> - #include <linux/pagemap.h> #include <linux/swap.h> @@ -37,71 +33,22 @@ static inline void __tlb_remove_table(void *_table) #define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry) #endif /* CONFIG_HAVE_RCU_TABLE_FREE */ -/* - * There's three ways the TLB shootdown code is used: - * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). - * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. - * 2. Unmapping all vmas. See exit_mmap(). - * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. - * Page tables will be freed. - * 3. Unmapping argument pages. See shift_arg_pages(). - * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. - */ +#include <asm-generic/tlb.h> + static inline void tlb_flush(struct mmu_gather *tlb) { if (tlb->fullmm) { flush_tlb_mm(tlb->mm); - } else if (tlb->end > 0) { + } else { struct vm_area_struct vma = { .vm_mm = tlb->mm, }; flush_tlb_range(&vma, tlb->start, tlb->end); - tlb->start = TASK_SIZE; - tlb->end = 0; - } -} - -static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) -{ - if (!tlb->fullmm) { - tlb->start = min(tlb->start, addr); - tlb->end = max(tlb->end, addr + PAGE_SIZE); - } -} - -/* - * Memorize the range for the TLB flush. - */ -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, - unsigned long addr) -{ - tlb_add_flush(tlb, addr); -} - -/* - * In the case of tlb vma handling, we can optimise these away in the - * case where we're doing a full MM flush. When we're doing a munmap, - * the vmas are adjusted to only cover the region to be torn down. 
- */ -static inline void tlb_start_vma(struct mmu_gather *tlb, - struct vm_area_struct *vma) -{ - if (!tlb->fullmm) { - tlb->start = TASK_SIZE; - tlb->end = 0; } } -static inline void tlb_end_vma(struct mmu_gather *tlb, - struct vm_area_struct *vma) -{ - if (!tlb->fullmm) - tlb_flush(tlb); -} - static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { pgtable_page_dtor(pte); - tlb_add_flush(tlb, addr); tlb_remove_entry(tlb, pte); } @@ -109,7 +56,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) { - tlb_add_flush(tlb, addr); tlb_remove_entry(tlb, virt_to_page(pmdp)); } #endif @@ -118,15 +64,8 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, unsigned long addr) { - tlb_add_flush(tlb, addr); tlb_remove_entry(tlb, virt_to_page(pudp)); } #endif -static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, - unsigned long address) -{ - tlb_add_flush(tlb, address); -} - #endif diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index 10ca8ff93cc2..232e4ba5d314 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -18,6 +18,22 @@ #ifndef __ASM_TRAP_H #define __ASM_TRAP_H +#include <linux/list.h> + +struct pt_regs; + +struct undef_hook { + struct list_head node; + u32 instr_mask; + u32 instr_val; + u64 pstate_mask; + u64 pstate_val; + int (*fn)(struct pt_regs *regs, u32 instr); +}; + +void register_undef_hook(struct undef_hook *hook); +void unregister_undef_hook(struct undef_hook *hook); + static inline int in_exception_text(unsigned long ptr) { extern char __exception_text_start[]; diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 6d2bf419431d..49c9aefd24a5 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -31,6 +31,9 @@ * Compat syscall numbers used by the AArch64 kernel. */ #define __NR_compat_restart_syscall 0 +#define __NR_compat_exit 1 +#define __NR_compat_read 3 +#define __NR_compat_write 4 #define __NR_compat_sigreturn 119 #define __NR_compat_rt_sigreturn 173 diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index da1f06b535e3..8893cebcea5b 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -787,8 +787,11 @@ __SYSCALL(__NR_sched_setattr, sys_sched_setattr) __SYSCALL(__NR_sched_getattr, sys_sched_getattr) #define __NR_renameat2 382 __SYSCALL(__NR_renameat2, sys_renameat2) - /* 383 for seccomp */ +#define __NR_seccomp 383 +__SYSCALL(__NR_seccomp, sys_seccomp) #define __NR_getrandom 384 __SYSCALL(__NR_getrandom, sys_getrandom) #define __NR_memfd_create 385 __SYSCALL(__NR_memfd_create, sys_memfd_create) +#define __NR_bpf 386 +__SYSCALL(__NR_bpf, sys_bpf) |
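
A brief illustration of the runtime-alternatives API this series introduces (see alternative.h and cpufeature.h above): code emits the default instruction inline, records a replacement in the .altinstructions section, and apply_alternatives_all() rewrites the instruction at boot on CPUs that set the named capability bit. This is a minimal sketch only — the macro and the ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE capability come from the patch, but the example_read_device_reg() helper is hypothetical and mirrors the __raw_readl() change made in io.h.

```c
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* Hypothetical helper, not part of the patch. */
static inline u32 example_read_device_reg(const volatile void __iomem *addr)
{
	u32 val;

	/*
	 * Emit a plain LDR by default. On CPUs that advertise the
	 * ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE capability, the alternatives
	 * framework patches this site to a load-acquire (LDAR) at boot.
	 */
	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
				 "ldar %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));

	return val;
}
```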