Diffstat (limited to 'arch/arm64')
30 files changed, 202 insertions(+), 128 deletions(-)
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 2e25de0800b9..83578e766b94 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -45,6 +45,10 @@
 	status = "ok";
 };
 
+&sgenet1 {
+	status = "ok";
+};
+
 &xgenet {
 	status = "ok";
 };
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2ab2e9..e74f6e0a208c 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -186,6 +186,16 @@
 				clock-output-names = "sge0clk";
 			};
 
+			sge1clk: sge1clk@1f21c000 {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				reg = <0x0 0x1f21c000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				csr-mask = <0xc>;
+				clock-output-names = "sge1clk";
+			};
+
 			xge0clk: xge0clk@1f61c000 {
 				compatible = "apm,xgene-device-clock";
 				#clock-cells = <1>;
@@ -622,27 +632,45 @@
 		};
 
 		sgenet0: ethernet@1f210000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-sgenet";
 			status = "disabled";
 			reg = <0x0 0x1f210000 0x0 0xd100>,
 			      <0x0 0x1f200000 0x0 0Xc300>,
 			      <0x0 0x1B000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
-			interrupts = <0x0 0xA0 0x4>;
+			interrupts = <0x0 0xA0 0x4>,
+				     <0x0 0xA1 0x4>;
 			dma-coherent;
 			clocks = <&sge0clk 0>;
 			local-mac-address = [00 00 00 00 00 00];
 			phy-connection-type = "sgmii";
 		};
 
+		sgenet1: ethernet@1f210030 {
+			compatible = "apm,xgene1-sgenet";
+			status = "disabled";
+			reg = <0x0 0x1f210030 0x0 0xd100>,
+			      <0x0 0x1f200000 0x0 0Xc300>,
+			      <0x0 0x1B000000 0x0 0X8000>;
+			reg-names = "enet_csr", "ring_csr", "ring_cmd";
+			interrupts = <0x0 0xAC 0x4>,
+				     <0x0 0xAD 0x4>;
+			port-id = <1>;
+			dma-coherent;
+			clocks = <&sge1clk 0>;
+			local-mac-address = [00 00 00 00 00 00];
+			phy-connection-type = "sgmii";
+		};
+
 		xgenet: ethernet@1f610000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-xgenet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
 			      <0x0 0x1f600000 0x0 0Xc300>,
 			      <0x0 0x18000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
-			interrupts = <0x0 0x60 0x4>;
+			interrupts = <0x0 0x60 0x4>,
+				     <0x0 0x61 0x4>;
 			dma-coherent;
 			clocks = <&xge0clk 0>;
 			/* mac address will be overwritten by the bootloader */
diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts
index 27f32962e55c..4eac8dcea423 100644
--- a/arch/arm64/boot/dts/arm/foundation-v8.dts
+++ b/arch/arm64/boot/dts/arm/foundation-v8.dts
@@ -34,6 +34,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
@@ -41,6 +42,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@2 {
 			device_type = "cpu";
@@ -48,6 +50,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@3 {
 			device_type = "cpu";
@@ -55,6 +58,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index d429129ecb3d..133ee59de2d7 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -39,6 +39,7 @@
 			reg = <0x0 0x0>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A57_L2>;
 		};
 
 		A57_1: cpu@1 {
@@ -46,6 +47,7 @@
 			reg = <0x0 0x1>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A57_L2>;
 		};
 
 		A53_0: cpu@100 {
@@ -53,6 +55,7 @@
 			reg = <0x0 0x100>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_1: cpu@101 {
@@ -60,6 +63,7 @@
 			reg = <0x0 0x101>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_2: cpu@102 {
@@ -67,6 +71,7 @@
 			reg = <0x0 0x102>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
 		};
 
 		A53_3: cpu@103 {
@@ -74,6 +79,15 @@
 			reg = <0x0 0x103>;
 			device_type = "cpu";
 			enable-method = "psci";
+			next-level-cache = <&A53_L2>;
+		};
+
+		A57_L2: l2-cache0 {
+			compatible = "cache";
+		};
+
+		A53_L2: l2-cache1 {
+			compatible = "cache";
 		};
 	};
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
index efc59b3baf63..20addabbd127 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
@@ -37,6 +37,7 @@
 			reg = <0x0 0x0>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@1 {
 			device_type = "cpu";
@@ -44,6 +45,7 @@
 			reg = <0x0 0x1>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@2 {
 			device_type = "cpu";
@@ -51,6 +53,7 @@
 			reg = <0x0 0x2>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
 		};
 		cpu@3 {
 			device_type = "cpu";
@@ -58,6 +61,11 @@
 			reg = <0x0 0x3>;
 			enable-method = "spin-table";
 			cpu-release-addr = <0x0 0x8000fff8>;
+			next-level-cache = <&L2_0>;
+		};
+
+		L2_0: l2-cache0 {
+			compatible = "cache";
 		};
 	};
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 5720608c50b1..abb79b3cfcfe 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o
 obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
 aes-neon-blk-y := aes-glue-neon.o aes-neon.o
 
-AFLAGS_aes-ce.o		:= -DINTERLEAVE=2 -DINTERLEAVE_INLINE
+AFLAGS_aes-ce.o		:= -DINTERLEAVE=4
 AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5901480bfdca..750bac4e637e 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -20,6 +20,9 @@
 #error "Only include this from assembly code"
 #endif
 
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 
@@ -155,3 +158,5 @@ lr	.req	x30		// link register
 #endif
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
+
+#endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 0710654631e7..c60643f14cda 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -1,6 +1,8 @@
 #ifndef __ASM_CPUIDLE_H
 #define __ASM_CPUIDLE_H
 
+#include <asm/proc-fns.h>
+
 #ifdef CONFIG_CPU_IDLE
 extern int cpu_init_idle(unsigned int cpu);
 extern int cpu_suspend(unsigned long arg);
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index e2ff32a93b5c..d2f49423c5dc 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000)
 __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000)
 __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
-__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000)
-__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000)
+__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000)
+__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000)
+__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000)
+__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000)
 __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000)
 __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
 __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb7e7bb..54bb4ba97441 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
  * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
+ * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
+ * (see hyp-init.S).
+ *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together.
  *
@@ -138,7 +141,6 @@
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
 #else
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input  (T0SZ = 24)
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b5373142..bbfb600fa822 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define KVM_PREALLOC_LEVEL	(0)
 #endif
 
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm:	The KVM struct pointer for the VM.
- * @pgd:	The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	unsigned int i;
-	unsigned long hwpgd;
-
-	if (KVM_PREALLOC_LEVEL == 0)
-		return 0;
-
-	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
-	if (!hwpgd)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-		if (KVM_PREALLOC_LEVEL == 1)
-			pgd_populate(NULL, pgd + i,
-				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-		else if (KVM_PREALLOC_LEVEL == 2)
-			pud_populate(NULL, pud_offset(pgd, 0) + i,
-				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-	}
-
-	return 0;
-}
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
 	return pmd_offset(pud, 0);
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-	if (KVM_PREALLOC_LEVEL > 0) {
-		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
-		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
-	}
+	if (KVM_PREALLOC_LEVEL > 0)
+		return PTRS_PER_S2_PGD * PAGE_SIZE;
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 static inline bool kvm_page_empty(void *ptr)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 16449c535e50..800ec0e87ed9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84f8fb2..941c375616e2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)				\
+do {							\
+	BUG_ON(pgd == swapper_pg_dir);			\
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
+} while (0)
 
 #define cpu_get_pgd()					\
 ({							\
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index f9be30ea1cbd..20e9591a60cf 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -45,7 +45,8 @@
 #define STACK_TOP		STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
 
-#define ARCH_LOW_ADDRESS_LIMIT	PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
 struct debug_info {
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe37456f..53d9c354219f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 73f0ce570fb3..c3bb05b98616 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -24,11 +24,6 @@
 #include <linux/sched.h>
 #include <asm/cputype.h>
 
-extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
-extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
-
-extern struct cpu_tlb_fns cpu_tlb;
-
 /*
  * TLB Management
  * ==============
@@ -149,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 }
 
 /*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 3bf8f4e99a51..07e1ba449bf1 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs)
 	current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a,b)	((a) == (b))
+#define segment_eq(a, b)	((a) == (b))
 
 /*
  * Return 1 if addr < current->addr_limit, 0 otherwise.
@@ -147,7 +147,7 @@ do {									\
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	(x) = (__typeof__(*(ptr)))__gu_val;				\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 } while (0)
 
 #define __get_user(x, ptr)						\
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index bef04afd6031..5ee07eee80c2 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
-			   cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
+			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
+			   return_address.o cpuinfo.o cpu_errata.o		\
+			   alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)	+= sys32.o kuser32.o signal32.o		\
 					   sys_compat.o entry32.o		\
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b42c7b480e1e..ab21e0d58278 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
@@ -354,3 +358,12 @@ void efi_virtmap_unload(void)
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
+
+/*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES);
+}
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index cf8556ae09d0..c851be795080 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable)
 
 	branch = aarch64_insn_gen_branch_imm(pc,
 					     (unsigned long)ftrace_graph_caller,
-					     AARCH64_INSN_BRANCH_LINK);
+					     AARCH64_INSN_BRANCH_NOLINK);
 	nop = aarch64_insn_gen_nop();
 
 	if (enable)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8ce88e08c030..07f930540f4a 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag)
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
-ENTRY(__boot_cpu_mode)
	.align	L1_CACHE_SHIFT
+ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	0
	.popsection
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 27d4864577e5..c8eca88f12e6 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap)
 
 	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
 		page = vmalloc_to_page(addr);
-	else
+	else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
 		page = virt_to_page(addr);
+	else
+		return addr;
 
 	BUG_ON(!page);
 	set_fixmap(fixmap, page_to_phys(page));
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923af859..c6b1f3b96f45 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
 #include <stdarg.h>
 
 #include <linux/compat.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -150,6 +151,13 @@ void machine_restart(char *cmd)
 	local_irq_disable();
 	smp_send_stop();
 
+	/*
+	 * UpdateCapsule() depends on the system being reset via
+	 * ResetSystem().
+	 */
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_reboot(reboot_mode, NULL);
+
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
 		arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
new file mode 100644
index 000000000000..cf83e61cd3b5
--- /dev/null
+++ b/arch/arm64/kernel/psci-call.S
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+
+/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_hvc)
+	hvc	#0
+	ret
+ENDPROC(__invoke_psci_fn_hvc)
+
+/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
+ENTRY(__invoke_psci_fn_smc)
+	smc	#0
+	ret
+ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 3425f311c49e..9b8a70ae64a1 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -57,6 +57,9 @@ static struct psci_operations psci_ops;
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
 typedef int (*psci_initcall_t)(const struct device_node *);
 
+asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64);
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+
 enum psci_function {
 	PSCI_FN_CPU_SUSPEND,
 	PSCI_FN_CPU_ON,
@@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state,
 		PSCI_0_2_POWER_STATE_AFFL_SHIFT;
 }
 
-/*
- * The following two functions are invoked via the invoke_psci_fn pointer
- * and will not be inlined, allowing us to piggyback on the AAPCS.
- */
-static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1,
-					 u64 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "x0")
-			__asmeq("%1", "x1")
-			__asmeq("%2", "x2")
-			__asmeq("%3", "x3")
-			"hvc	#0\n"
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
-static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
-					 u64 arg2)
-{
-	asm volatile(
-			__asmeq("%0", "x0")
-			__asmeq("%1", "x1")
-			__asmeq("%2", "x2")
-			__asmeq("%3", "x3")
-			"smc	#0\n"
-		: "+r" (function_id)
-		: "r" (arg0), "r" (arg1), "r" (arg2));
-
-	return function_id;
-}
-
 static int psci_get_version(void)
 {
 	int err;
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index c20a300e2213..d26fcd4cd6e6 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 	case __SI_TIMER:
 		err |= __put_user(from->si_tid, &to->si_tid);
 		err |= __put_user(from->si_overrun, &to->si_overrun);
-		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
-				  &to->si_ptr);
+		err |= __put_user(from->si_int, &to->si_int);
 		break;
 	case __SI_POLL:
 		err |= __put_user(from->si_band, &to->si_band);
@@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 	case __SI_MESGQ: /* But this is */
 		err |= __put_user(from->si_pid, &to->si_pid);
 		err |= __put_user(from->si_uid, &to->si_uid);
-		err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+		err |= __put_user(from->si_int, &to->si_int);
 		break;
 	case __SI_SYS:
 		err |= __put_user((compat_uptr_t)(unsigned long)
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index fe652ffd34c2..efa79e8d4196 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime)
 /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
 ENTRY(__kernel_clock_getres)
 	.cfi_startproc
-	cbz	w1, 3f
-
 	cmp	w0, #CLOCK_REALTIME
 	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
 	b.ne	1f
@@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres)
 	b.ne	4f
 	ldr	x2, 6f
 2:
+	cbz	w1, 3f
 	stp	xzr, x2, [x1]
 3:	/* res == NULL. */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0a24b9b8c698..ef7d112f5ce0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
@@ -348,8 +354,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
 static int __init atomic_pool_init(void)
 {
 	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +415,13 @@ out:
 	return -ENOMEM;
 }
 
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
 {
-	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+	int ret;
 
 	dma_ops = &swiotlb_dma_ops;
 
-	return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
-	int ret = 0;
-
-	ret |= swiotlb_late_init();
-	ret |= atomic_pool_init();
+	ret = atomic_pool_init();
 
 	return ret;
 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 71145f952070..ae85da6307bb 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
+#include <linux/swiotlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/memory.h>
@@ -45,6 +46,7 @@
 #include "mm.h"
 
 phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		max_dma = PFN_DOWN(max_zone_dma_phys());
+		max_dma = PFN_DOWN(arm64_dma_phys_limit);
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-	phys_addr_t dma_phys_limit = 0;
-
 	memblock_enforce_memory_limit(memory_limit);
 
 	/*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = max_zone_dma_phys();
-	dma_contiguous_reserve(dma_phys_limit);
+		arm64_dma_phys_limit = max_zone_dma_phys();
+	else
+		arm64_dma_phys_limit = PHYS_MASK + 1;
+	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
 	memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
+	swiotlb_init(1);
+
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index bb0ea94c4ba1..1d3ec3ddd84b 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -51,7 +51,10 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (!is_module_address(start) || !is_module_address(end - 1))
+	if (start < MODULES_VADDR || start >= MODULES_END)
+		return -EINVAL;
+
+	if (end < MODULES_VADDR || end >= MODULES_END)
 		return -EINVAL;
 
 	data.set_mask = set_mask;