author    | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-22 21:46:44 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-02-22 21:46:44 +0300
commit    | ca78d3173cff3503bcd15723b049757f75762d15 (patch)
tree      | 8e5db7a3adbe3bcd3b08e6983a6e0f6becb0aaa0 /arch/arm64/include/asm
parent    | a4ee7bacd6c08479d56738456c07e4f32fc8e523 (diff)
parent    | ffe7afd1713558d73483834c2e2d03a1e39a4062 (diff)
download  | linux-ca78d3173cff3503bcd15723b049757f75762d15.tar.xz
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
- Errata workarounds for Qualcomm's Falkor CPU
- Qualcomm L2 Cache PMU driver
- Qualcomm SMCCC firmware quirk
- Support for DEBUG_VIRTUAL
- CPU feature detection for userspace via MRS emulation (a user-space sketch follows this list)
- Preliminary work for the Statistical Profiling Extension
- Misc cleanups and non-critical fixes
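The MRS emulation item is the main user-visible feature of this pull: EL0 reads of the AArch64 ID registers trap to the kernel, which returns a sanitised value built from the per-field visibility masks added to cpufeature.h in the diff below. A minimal user-space sketch of how a program might consume it, assuming a kernel with this series; the HWCAP_CPUID fallback define and the AES field probe are illustrative assumptions, not part of this merge:

```c
/* Probe ID_AA64ISAR0_EL1 from user space via the kernel's MRS emulation.
 * Sketch only: HWCAP_CPUID is assumed to advertise that the emulation is
 * available, and the AES field (bits [7:4]) is just an example field.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/auxv.h>

#ifndef HWCAP_CPUID
#define HWCAP_CPUID	(1 << 11)
#endif

int main(void)
{
	uint64_t isar0;

	if (!(getauxval(AT_HWCAP) & HWCAP_CPUID)) {
		puts("MRS emulation not advertised by this kernel");
		return 1;
	}

	/* Traps to EL1; the kernel emulates the read with a sanitised value. */
	asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));

	printf("ID_AA64ISAR0_EL1 = %#llx (AES field = %llu)\n",
	       (unsigned long long)isar0,
	       (unsigned long long)((isar0 >> 4) & 0xf));
	return 0;
}
```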
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (74 commits)
arm64/kprobes: consistently handle MRS/MSR with XZR
arm64: cpufeature: correctly handle MRS to XZR
arm64: traps: correctly handle MRS/MSR with XZR
arm64: ptrace: add XZR-safe regs accessors
arm64: include asm/assembler.h in entry-ftrace.S
arm64: fix warning about swapper_pg_dir overflow
arm64: Work around Falkor erratum 1003
arm64: head.S: Enable EL1 (host) access to SPE when entered at EL2
arm64: arch_timer: document Hisilicon erratum 161010101
arm64: use is_vmalloc_addr
arm64: use linux/sizes.h for constants
arm64: uaccess: consistently check object sizes
perf: add qcom l2 cache perf events driver
arm64: remove wrong CONFIG_PROC_SYSCTL ifdef
ARM: smccc: Update HVC comment to describe new quirk parameter
arm64: do not trace atomic operations
ACPI/IORT: Fix the error return code in iort_add_smmu_platform_device()
ACPI/IORT: Fix iort_node_get_id() mapping entries indexing
arm64: mm: enable CONFIG_HOLES_IN_ZONE for NUMA
perf: xgene: Include module.h
...
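Several of the commits above ("consistently handle MRS/MSR with XZR", the XZR-safe ptrace accessors) revolve around one convention: in an MRS/MSR encoding, register index 31 names XZR rather than SP, so reads must return zero and writes must be discarded. A standalone sketch that mirrors the pt_regs_read_reg()/pt_regs_write_reg() helpers added in the ptrace.h hunk below; the struct pt_regs here is a simplified stand-in for illustration, not the kernel layout:

```c
/* User-space model of the XZR convention used by the new pt_regs accessors:
 * index 31 is the zero register, so reads yield 0 and writes are dropped.
 */
#include <assert.h>

struct pt_regs {
	unsigned long regs[31];		/* x0..x30; "x31" is never stored */
};

static unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
{
	return (r == 31) ? 0 : regs->regs[r];
}

static void pt_regs_write_reg(struct pt_regs *regs, int r, unsigned long val)
{
	if (r != 31)			/* writes to XZR are silently discarded */
		regs->regs[r] = val;
}

int main(void)
{
	struct pt_regs regs = { .regs = { 0 } };

	pt_regs_write_reg(&regs, 0, 0xdead);	/* x0 is written normally */
	pt_regs_write_reg(&regs, 31, 0xbeef);	/* "x31" is XZR: discarded */

	assert(pt_regs_read_reg(&regs, 0) == 0xdead);
	assert(pt_regs_read_reg(&regs, 31) == 0);
	return 0;
}
```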
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r-- | arch/arm64/include/asm/assembler.h   | 23
-rw-r--r-- | arch/arm64/include/asm/cpucaps.h     | 4
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h  | 26
-rw-r--r-- | arch/arm64/include/asm/cputype.h     | 7
-rw-r--r-- | arch/arm64/include/asm/insn.h        | 2
-rw-r--r-- | arch/arm64/include/asm/kvm_arm.h     | 3
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h    | 7
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h     | 4
-rw-r--r-- | arch/arm64/include/asm/lse.h         | 2
-rw-r--r-- | arch/arm64/include/asm/memory.h      | 66
-rw-r--r-- | arch/arm64/include/asm/mmu_context.h | 14
-rw-r--r-- | arch/arm64/include/asm/pgtable.h     | 17
-rw-r--r-- | arch/arm64/include/asm/processor.h   | 1
-rw-r--r-- | arch/arm64/include/asm/ptrace.h      | 20
-rw-r--r-- | arch/arm64/include/asm/sysreg.h      | 26
-rw-r--r-- | arch/arm64/include/asm/tlbflush.h    | 18
-rw-r--r-- | arch/arm64/include/asm/uaccess.h     | 4
17 files changed, 200 insertions, 44 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3a4301163e04..1b67c3782d00 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -25,6 +25,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
@@ -441,6 +442,28 @@ alternative_endif
 	.endm

 /*
+ * Errata workaround prior to TTBR0_EL1 update
+ *
+ * 	val:   TTBR value with new BADDR, preserved
+ * 	tmp0:  temporary register, clobbered
+ * 	tmp1:  other temporary register, clobbered
+ */
+	.macro	pre_ttbr0_update_workaround, val, tmp0, tmp1
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+	mrs	\tmp0, ttbr0_el1
+	mov	\tmp1, #FALKOR_RESERVED_ASID
+	bfi	\tmp0, \tmp1, #48, #16		// reserved ASID + old BADDR
+	msr	ttbr0_el1, \tmp0
+	isb
+	bfi	\tmp0, \val, #0, #48		// reserved ASID + new BADDR
+	msr	ttbr0_el1, \tmp0
+	isb
+alternative_else_nop_endif
+#endif
+	.endm
+
+/*
  * Errata workaround post TTBR0_EL1 update.
  */
 	.macro	post_ttbr0_update_workaround
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 4174f09678c4..fb78a5d3b60b 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -35,7 +35,9 @@
 #define ARM64_HYP_OFFSET_LOW			14
 #define ARM64_MISMATCHED_CACHE_LINE_SIZE	15
 #define ARM64_HAS_NO_FPSIMD			16
+#define ARM64_WORKAROUND_REPEAT_TLBI		17
+#define ARM64_WORKAROUND_QCOM_FALKOR_E1003	18

-#define ARM64_NCAPS				17
+#define ARM64_NCAPS				19

 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b4989df48670..4ce82ed3e7c3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,7 +29,20 @@
 #include <linux/jump_label.h>
 #include <linux/kernel.h>

-/* CPU feature register tracking */
+/*
+ * CPU feature register tracking
+ *
+ * The safe value of a CPUID feature field is dependent on the implications
+ * of the values assigned to it by the architecture. Based on the relationship
+ * between the values, the features are classified into 3 types - LOWER_SAFE,
+ * HIGHER_SAFE and EXACT.
+ *
+ * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
+ * for HIGHER_SAFE. It is expected that all CPUs have the same value for
+ * a field when EXACT is specified, failing which, the safe value specified
+ * in the table is chosen.
+ */
+
 enum ftr_type {
 	FTR_EXACT,	/* Use a predefined safe value */
 	FTR_LOWER_SAFE,	/* Smaller value is safe */
@@ -42,8 +55,12 @@ enum ftr_type {
 #define FTR_SIGNED	true	/* Value should be treated as signed */
 #define FTR_UNSIGNED	false	/* Value should be treated as unsigned */

+#define FTR_VISIBLE	true	/* Feature visible to the user space */
+#define FTR_HIDDEN	false	/* Feature is hidden from the user */
+
 struct arm64_ftr_bits {
 	bool		sign;	/* Value is signed ? */
+	bool		visible;
 	bool		strict;	/* CPU Sanity check: strict matching required ? */
 	enum ftr_type	type;
 	u8		shift;
@@ -59,7 +76,9 @@ struct arm64_ftr_bits {
 struct arm64_ftr_reg {
 	const char			*name;
 	u64				strict_mask;
+	u64				user_mask;
 	u64				sys_val;
+	u64				user_val;
 	const struct arm64_ftr_bits	*ftr_bits;
 };

@@ -159,6 +178,11 @@ static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
 }

+static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
+{
+	return (reg->user_val | (reg->sys_val & reg->user_mask));
+}
+
 static inline int __attribute_const__
 cpuid_feature_extract_field(u64 features, int field, bool sign)
 {
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 26a68ddb11c1..fc502713ab37 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -56,6 +56,9 @@
	 (0xf			<< MIDR_ARCHITECTURE_SHIFT) | \
	 ((partnum)		<< MIDR_PARTNUM_SHIFT))

+#define MIDR_CPU_VAR_REV(var, rev) \
+	(((var)	<< MIDR_VARIANT_SHIFT) | (rev))
+
 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
			     MIDR_ARCHITECTURE_MASK)

@@ -71,6 +74,7 @@
 #define ARM_CPU_IMP_APM			0x50
 #define ARM_CPU_IMP_CAVIUM		0x43
 #define ARM_CPU_IMP_BRCM		0x42
+#define ARM_CPU_IMP_QCOM		0x51

 #define ARM_CPU_PART_AEM_V8		0xD0F
 #define ARM_CPU_PART_FOUNDATION		0xD00
@@ -84,10 +88,13 @@

 #define BRCM_CPU_PART_VULCAN		0x516

+#define QCOM_CPU_PART_FALKOR_V1		0x800
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)

 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index bc853663dd51..aecc07e09a18 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -332,6 +332,8 @@ bool aarch64_insn_is_branch(u32 insn);
 u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
 u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm);
+u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
+				 u32 insn);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
				enum aarch64_insn_branch_type type);
 u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 2a2752b5b6aa..6e99978e83bd 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -188,6 +188,9 @@
 #define CPTR_EL2_DEFAULT	0x000033ff

 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TPMS		(1 << 14)
+#define MDCR_EL2_E2PB_MASK	(UL(0x3))
+#define MDCR_EL2_E2PB_SHIFT	(UL(12))
 #define MDCR_EL2_TDRA		(1 << 11)
 #define MDCR_EL2_TDOSA		(1 << 10)
 #define MDCR_EL2_TDA		(1 << 9)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e5050388e062..443b387021f2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -229,7 +229,12 @@ struct kvm_vcpu_arch {
 	/* Pointer to host CPU context */
 	kvm_cpu_context_t *host_cpu_context;
-	struct kvm_guest_debug_arch host_debug_state;
+	struct {
+		/* {Break,watch}point registers */
+		struct kvm_guest_debug_arch regs;
+		/* Statistical profiling extension */
+		u64 pmscr_el1;
+	} host_debug_state;

 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6f72fe8b0e3e..55772c13a375 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -47,7 +47,7 @@
  * If the page is in the bottom half, we have to use the top half. If
  * the page is in the top half, we have to use the bottom half:
  *
- * T = __virt_to_phys(__hyp_idmap_text_start)
+ * T = __pa_symbol(__hyp_idmap_text_start)
  * if (T & BIT(VA_BITS - 1))
  *	HYP_VA_MIN = 0  //idmap in upper half
  * else
@@ -271,7 +271,7 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
 }

-#define kvm_virt_to_phys(x)		__virt_to_phys((unsigned long)(x))
+#define kvm_virt_to_phys(x)		__pa_symbol(x)

 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index fc756e22c84c..606b20910a5c 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -19,7 +19,7 @@
 __asm__(".arch_extension	lse");

 /* Move the ll/sc atomics out-of-line */
-#define __LL_SC_INLINE
+#define __LL_SC_INLINE		notrace
 #define __LL_SC_PREFIX(x)	__ll_sc_##x
 #define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 90c39a662379..32f82723338a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -102,25 +102,6 @@
 #endif

 /*
- * Physical vs virtual RAM address space conversion. These are
- * private definitions which should NOT be used outside memory.h
- * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#define __virt_to_phys(x) ({					\
-	phys_addr_t __x = (phys_addr_t)(x);			\
-	__x & BIT(VA_BITS - 1) ? (__x & ~PAGE_OFFSET) + PHYS_OFFSET : \
-				 (__x - kimage_voffset); })
-
-#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
-#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
-
-/*
- * Convert a page to/from a physical address
- */
-#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
-#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
-
-/*
  * Memory types available.
  */
 #define MT_DEVICE_nGnRnE	0
@@ -187,6 +168,48 @@ static inline unsigned long kaslr_offset(void)
 #define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)

 /*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+
+
+/*
+ * The linear kernel range starts in the middle of the virtual adddress
+ * space. Testing the top bit for the start of the region is a
+ * sufficient check.
+ */
+#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))
+
+#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __kimg_to_phys(addr)	((addr) - kimage_voffset)
+
+#define __virt_to_phys_nodebug(x) ({					\
+	phys_addr_t __x = (phys_addr_t)(x);				\
+	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
+			       __kimg_to_phys(__x);			\
+})
+
+#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
+#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
+#endif
+
+#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
+#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
+
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
+
+/*
  * Note: Drivers should NOT use these. They are the wrong
  * translation for translating DMA addresses. Use the driver
  * DMA support - see dma-mapping.h.
@@ -207,9 +230,12 @@ static inline void *phys_to_virt(phys_addr_t x)
  * Drivers should NOT use these either.
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
+#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
-#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys(x))
+#define virt_to_pfn(x)      __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
+#define sym_to_pfn(x)	    __phys_to_pfn(__pa_symbol(x))

 /*
  *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 0363fe80455c..1ef40d82cfd3 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -19,6 +19,10 @@
 #ifndef __ASM_MMU_CONTEXT_H
 #define __ASM_MMU_CONTEXT_H

+#define FALKOR_RESERVED_ASID	1
+
+#ifndef __ASSEMBLY__
+
 #include <linux/compiler.h>
 #include <linux/sched.h>

@@ -45,7 +49,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbr = virt_to_phys(empty_zero_page);
+	unsigned long ttbr = __pa_symbol(empty_zero_page);

 	write_sysreg(ttbr, ttbr0_el1);
 	isb();
@@ -114,7 +118,7 @@ static inline void cpu_install_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();

-	cpu_switch_mm(idmap_pg_dir, &init_mm);
+	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }

 /*
@@ -129,7 +133,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgd)

 	phys_addr_t pgd_phys = virt_to_phys(pgd);

-	replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

 	cpu_install_idmap();
 	replace_phys(pgd_phys);
@@ -220,4 +224,6 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,

 void verify_cpu_asid_bits(void);

-#endif
+#endif /* !__ASSEMBLY__ */
+
+#endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index ffbb9a520563..0eef6064bf3b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -52,7 +52,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * for zero-mapped memory areas etc..
  */
 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr)	pfn_to_page(PHYS_PFN(__pa(empty_zero_page)))
+#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

@@ -71,9 +71,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
-#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
+#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
-#define pte_ng(pte)		(!!(pte_val(pte) & PTE_NG))

 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -84,8 +83,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_global(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_NG)) == PTE_VALID)
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
+#define pte_valid_not_user(pte) \
+	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
 #define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))

@@ -178,7 +181,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
-	if (pte_valid_global(pte)) {
+	if (pte_valid_not_user(pte)) {
 		dsb(ishst);
 		isb();
 	}
@@ -212,7 +215,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
-		if (pte_ng(pte) && pte_exec(pte) && !pte_special(pte))
+		if (pte_user_exec(pte) && !pte_special(pte))
 			__sync_icache_dcache(pte, addr);
 	}
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 747c65a616ed..c97b8bd2acba 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -187,7 +187,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 #endif

 int cpu_enable_pan(void *__unused);
-int cpu_enable_uao(void *__unused);
 int cpu_enable_cache_maint_trap(void *__unused);

 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 513daf050e84..11403fdd0a50 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -194,6 +194,26 @@ static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
 	return val;
 }

+/*
+ * Read a register given an architectural register index r.
+ * This handles the common case where 31 means XZR, not SP.
+ */
+static inline unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
+{
+	return (r == 31) ? 0 : regs->regs[r];
+}
+
+/*
+ * Write a register given an architectural register index r.
+ * This handles the common case where 31 means XZR, not SP.
+ */
+static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
+				     unsigned long val)
+{
+	if (r != 31)
+		regs->regs[r] = val;
+}
+
 /* Valid only for Kernel mode traps. */
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 98ae03f8eedd..ac24b6e798b1 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -32,8 +32,27 @@
  *	[11-8]  : CRm
  *	[7-5]   : Op2
  */
+#define Op0_shift	19
+#define Op0_mask	0x3
+#define Op1_shift	16
+#define Op1_mask	0x7
+#define CRn_shift	12
+#define CRn_mask	0xf
+#define CRm_shift	8
+#define CRm_mask	0xf
+#define Op2_shift	5
+#define Op2_mask	0x7
+
 #define sys_reg(op0, op1, crn, crm, op2) \
-	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+	(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
+	 ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
+	 ((op2) << Op2_shift))
+
+#define sys_reg_Op0(id)	(((id) >> Op0_shift) & Op0_mask)
+#define sys_reg_Op1(id)	(((id) >> Op1_shift) & Op1_mask)
+#define sys_reg_CRn(id)	(((id) >> CRn_shift) & CRn_mask)
+#define sys_reg_CRm(id)	(((id) >> CRm_shift) & CRm_mask)
+#define sys_reg_Op2(id)	(((id) >> Op2_shift) & Op2_mask)

 #ifndef CONFIG_BROKEN_GAS_INST

@@ -190,6 +209,7 @@
 #define ID_AA64MMFR2_CNP_SHIFT		0

 /* id_aa64dfr0 */
+#define ID_AA64DFR0_PMSVER_SHIFT	32
 #define ID_AA64DFR0_CTX_CMPS_SHIFT	28
 #define ID_AA64DFR0_WRPS_SHIFT		20
 #define ID_AA64DFR0_BRPS_SHIFT		12
@@ -245,6 +265,10 @@
 #define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN64_SUPPORTED
 #endif

+
+/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
+#define SYS_MPIDR_SAFE_VAL		(1UL << 31)
+
 #ifdef __ASSEMBLY__

	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index deab52374119..af1c76981911 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -36,9 +36,21 @@
  * not. The macros handles invoking the asm with or without the
  * register argument as appropriate.
  */
-#define __TLBI_0(op, arg)		asm ("tlbi " #op)
-#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0" : : "r" (arg))
-#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)
+#define __TLBI_0(op, arg)		asm ("tlbi " #op "\n"		       \
+		   ALTERNATIVE("nop\n			nop",		       \
+			       "dsb ish\n		tlbi " #op,	       \
+			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
+			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
+			    : : )
+
+#define __TLBI_1(op, arg)		asm ("tlbi " #op ", %0\n"	       \
+		   ALTERNATIVE("nop\n			nop",		       \
+			       "dsb ish\n		tlbi " #op ", %0",     \
+			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
+			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
+			    : : "r" (arg))
+
+#define __TLBI_N(op, arg, n, ...)	__TLBI_##n(op, arg)

 #define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 46da3ea638bb..5308d696311b 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -379,9 +379,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 {
 	unsigned long res = n;
 	kasan_check_write(to, n);
+	check_object_size(to, n, false);

 	if (access_ok(VERIFY_READ, from, n)) {
-		check_object_size(to, n, false);
 		res = __arch_copy_from_user(to, from, n);
 	}
 	if (unlikely(res))
@@ -392,9 +392,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	kasan_check_read(from, n);
+	check_object_size(from, n, true);

 	if (access_ok(VERIFY_WRITE, to, n)) {
-		check_object_size(from, n, true);
 		n = __arch_copy_to_user(to, from, n);
 	}
 	return n;
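As a usage note on the sysreg.h hunk above: the new sys_reg_Op0() through sys_reg_Op2() accessors simply undo the packing done by sys_reg(). A self-contained model of that encode/decode round trip; the shifts and masks are copied from the hunk, and the example encoding (op0=3, op1=0, CRn=0, CRm=6, op2=0, i.e. ID_AA64ISAR0_EL1) is used only for illustration:

```c
/* Standalone model of the sys_reg() encode/decode helpers added to sysreg.h. */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define Op0_shift	19
#define Op0_mask	0x3
#define Op1_shift	16
#define Op1_mask	0x7
#define CRn_shift	12
#define CRn_mask	0xf
#define CRm_shift	8
#define CRm_mask	0xf
#define Op2_shift	5
#define Op2_mask	0x7

#define sys_reg(op0, op1, crn, crm, op2) \
	(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
	 ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
	 ((op2) << Op2_shift))

#define sys_reg_Op0(id)	(((id) >> Op0_shift) & Op0_mask)
#define sys_reg_CRm(id)	(((id) >> CRm_shift) & CRm_mask)

int main(void)
{
	uint32_t id = sys_reg(3, 0, 0, 6, 0);	/* ID_AA64ISAR0_EL1 */

	/* Decoding recovers the individual fields from the packed value. */
	assert(sys_reg_Op0(id) == 3);
	assert(sys_reg_CRm(id) == 6);
	printf("encoded: %#x\n", id);
	return 0;
}
```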