Diffstat (limited to 'arch/arm64')
62 files changed, 1576 insertions, 1029 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ef54a59a9e89..fd70a68387eb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1,6 +1,13 @@ config ARM64 def_bool y select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE + select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_WANT_COMPAT_IPC_PARSE_VERSION + select ARCH_WANT_FRAME_POINTERS + select ARM_AMBA + select ARM_ARCH_TIMER + select CLONE_BACKWARDS + select COMMON_CLK select GENERIC_CLOCKEVENTS select GENERIC_HARDIRQS_NO_DEPRECATED select GENERIC_IOMAP @@ -17,10 +24,8 @@ config ARM64 select HAVE_GENERIC_DMA_COHERENT select HAVE_GENERIC_HARDIRQS select HAVE_HW_BREAKPOINT if PERF_EVENTS - select HAVE_IRQ_WORK select HAVE_MEMBLOCK select HAVE_PERF_EVENTS - select HAVE_SPARSE_IRQ select IRQ_DOMAIN select MODULES_USE_ELF_RELA select NO_BOOTMEM @@ -88,6 +93,9 @@ config SWIOTLB config IOMMU_HELPER def_bool SWIOTLB +config GENERIC_GPIO + bool + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -198,6 +206,8 @@ config COMPAT depends on !ARM64_64K_PAGES select COMPAT_BINFMT_ELF select HAVE_UID16 + select OLD_SIGSUSPEND3 + select COMPAT_OLD_SIGACTION help This option enables support for a 32-bit EL0 running under a 64-bit kernel at EL1. AArch32-specific components such as system calls, diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index d7553f2bda66..51493430f142 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -24,4 +24,21 @@ config DEBUG_STACK_USAGE Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T output. +config EARLY_PRINTK + bool "Early printk support" + default y + help + Say Y here if you want to have an early console using the + earlyprintk=<name>[,<addr>][,<options>] kernel parameter. It + is assumed that the early console device has been initialised + by the boot loader prior to starting the Linux kernel. + +config PID_IN_CONTEXTIDR + bool "Write the current PID to the CONTEXTIDR register" + help + Enabling this option causes the kernel to write the current PID to + the CONTEXTIDR register, at the expense of some additional + instructions during context switch. Say Y here only if you are + planning to use hardware trace tools with this kernel. 
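The EARLY_PRINTK help text above describes the earlyprintk=<name>[,<addr>][,<options>] format. As an illustration only (the UART base address is board-specific and used here purely as a placeholder), booting with the pl011 back-end added by this diff would look like:

    earlyprintk=pl011,0x1c090000

The <name> field must match a back-end known to arch/arm64/kernel/early_printk.c; per the parser added later in this diff, the trailing <options> field is accepted but not interpreted yet.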
+ endmenu diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 364191f3be43..c95c5cb212fd 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -41,20 +41,24 @@ libs-y := arch/arm64/lib/ $(libs-y) libs-y += $(LIBGCC) # Default target when executing plain make -KBUILD_IMAGE := Image.gz +KBUILD_IMAGE := Image.gz +KBUILD_DTBS := dtbs -all: $(KBUILD_IMAGE) +all: $(KBUILD_IMAGE) $(KBUILD_DTBS) boot := arch/arm64/boot Image Image.gz: vmlinux - $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ zinstall install: vmlinux - $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ + $(Q)$(MAKE) $(build)=$(boot) $@ -%.dtb: - $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ +%.dtb: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ + +dtbs: scripts + $(Q)$(MAKE) $(build)=$(boot)/dts dtbs # We use MRPROPER_FILES and CLEAN_FILES now archclean: @@ -63,6 +67,7 @@ archclean: define archhelp echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' + echo '* dtbs - Build device tree blobs for enabled boards' echo ' install - Install uncompressed kernel' echo ' zinstall - Install compressed kernel' echo ' Install using (your) ~/bin/installkernel or' diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile index eca209b2b0bf..5a0e3ab854a5 100644 --- a/arch/arm64/boot/Makefile +++ b/arch/arm64/boot/Makefile @@ -22,9 +22,6 @@ $(obj)/Image: vmlinux FORCE $(obj)/Image.gz: $(obj)/Image FORCE $(call if_changed,gzip) -$(obj)/%.dtb: $(src)/dts/%.dts - $(call cmd,dtc) - install: $(obj)/Image $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" @@ -32,5 +29,3 @@ install: $(obj)/Image zinstall: $(obj)/Image.gz $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image.gz System.map "$(INSTALL_PATH)" - -clean-files += *.dtb diff --git a/arch/arm64/boot/dts/.gitignore b/arch/arm64/boot/dts/.gitignore new file mode 100644 index 000000000000..b60ed208c779 --- /dev/null +++ b/arch/arm64/boot/dts/.gitignore @@ -0,0 +1 @@ +*.dtb diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile new file mode 100644 index 000000000000..32ac0aef0068 --- /dev/null +++ b/arch/arm64/boot/dts/Makefile @@ -0,0 +1,6 @@ +targets += dtbs +targets += $(dtb-y) + +dtbs: $(addprefix $(obj)/, $(dtb-y)) + +clean-files := *.dtb diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index a581a2205938..e5fe4f99fe10 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -3,6 +3,7 @@ generic-y += bug.h generic-y += bugs.h generic-y += checksum.h +generic-y += clkdev.h generic-y += cputime.h generic-y += current.h generic-y += delay.h @@ -18,6 +19,7 @@ generic-y += ipcbuf.h generic-y += irq_regs.h generic-y += kdebug.h generic-y += kmap_types.h +generic-y += kvm_para.h generic-y += local.h generic-y += local64.h generic-y += mman.h @@ -43,6 +45,8 @@ generic-y += swab.h generic-y += termbits.h generic-y += termios.h generic-y += topology.h +generic-y += trace_clock.h generic-y += types.h generic-y += unaligned.h generic-y += user.h +generic-y += xor.h diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h new file mode 100644 index 000000000000..91e2a6a6fcd4 --- /dev/null +++ b/arch/arm64/include/asm/arch_timer.h @@ -0,0 +1,133 @@ +/* + * arch/arm64/include/asm/arch_timer.h + * + * Copyright (C) 2012 ARM Ltd. 
+ * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ARCH_TIMER_H +#define __ASM_ARCH_TIMER_H + +#include <asm/barrier.h> + +#include <linux/init.h> +#include <linux/types.h> + +#include <clocksource/arm_arch_timer.h> + +static inline void arch_timer_reg_write(int access, int reg, u32 val) +{ + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("msr cntp_ctl_el0, %0" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); + break; + default: + BUILD_BUG(); + } + } else if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("msr cntv_ctl_el0, %0" : : "r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); + break; + default: + BUILD_BUG(); + } + } else { + BUILD_BUG(); + } + + isb(); +} + +static inline u32 arch_timer_reg_read(int access, int reg) +{ + u32 val; + + if (access == ARCH_TIMER_PHYS_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); + break; + default: + BUILD_BUG(); + } + } else if (access == ARCH_TIMER_VIRT_ACCESS) { + switch (reg) { + case ARCH_TIMER_REG_CTRL: + asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val)); + break; + case ARCH_TIMER_REG_TVAL: + asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); + break; + default: + BUILD_BUG(); + } + } else { + BUILD_BUG(); + } + + return val; +} + +static inline u32 arch_timer_get_cntfrq(void) +{ + u32 val; + asm volatile("mrs %0, cntfrq_el0" : "=r" (val)); + return val; +} + +static inline void __cpuinit arch_counter_set_user_access(void) +{ + u32 cntkctl; + + /* Disable user access to the timers and the physical counter. */ + asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl)); + cntkctl &= ~((3 << 8) | (1 << 0)); + + /* Enable user access to the virtual counter and frequency. */ + cntkctl |= (1 << 1); + asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); +} + +static inline u64 arch_counter_get_cntpct(void) +{ + u64 cval; + + isb(); + asm volatile("mrs %0, cntpct_el0" : "=r" (cval)); + + return cval; +} + +static inline u64 arch_counter_get_cntvct(void) +{ + u64 cval; + + isb(); + asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); + + return cval; +} + +#endif diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h deleted file mode 100644 index e4cec9d30f27..000000000000 --- a/arch/arm64/include/asm/arm_generic.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * arch/arm64/include/asm/arm_generic.h - * - * Copyright (C) 2012 ARM Ltd. - * Author: Marc Zyngier <marc.zyngier@arm.com> - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
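As a minimal sketch of how the arch_timer.h accessors introduced above might be used to program a one-shot event on the virtual timer: the function name below is hypothetical, and the control-register bits are written out explicitly (bit 0 enables the timer, bit 1 masks its interrupt); the symbolic names for these bits live in <clocksource/arm_arch_timer.h>, which is not shown in this diff.

    /*
     * Illustrative only: write the downcounter, then enable the virtual
     * timer with its interrupt unmasked, using the accessors above.
     */
    static void example_set_next_event(unsigned long evt)
    {
        u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS,
                                       ARCH_TIMER_REG_CTRL);

        ctrl |= 1 << 0;     /* enable */
        ctrl &= ~(1 << 1);  /* unmask the interrupt */
        arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_TVAL, evt);
        arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
    }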
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ -#ifndef __ASM_ARM_GENERIC_H -#define __ASM_ARM_GENERIC_H - -#include <linux/clocksource.h> - -#define ARCH_TIMER_CTRL_ENABLE (1 << 0) -#define ARCH_TIMER_CTRL_IMASK (1 << 1) -#define ARCH_TIMER_CTRL_ISTATUS (1 << 2) - -#define ARCH_TIMER_REG_CTRL 0 -#define ARCH_TIMER_REG_FREQ 1 -#define ARCH_TIMER_REG_TVAL 2 - -static inline void arch_timer_reg_write(int reg, u32 val) -{ - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("msr cntp_ctl_el0, %0" : : "r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); - break; - default: - BUILD_BUG(); - } - - isb(); -} - -static inline u32 arch_timer_reg_read(int reg) -{ - u32 val; - - switch (reg) { - case ARCH_TIMER_REG_CTRL: - asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val)); - break; - case ARCH_TIMER_REG_FREQ: - asm volatile("mrs %0, cntfrq_el0" : "=r" (val)); - break; - case ARCH_TIMER_REG_TVAL: - asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); - break; - default: - BUILD_BUG(); - } - - return val; -} - -static inline void __cpuinit arch_counter_enable_user_access(void) -{ - u32 cntkctl; - - /* Disable user access to the timers and the virtual counter. */ - asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl)); - cntkctl &= ~((3 << 8) | (1 << 1)); - - /* Enable user access to the physical counter and frequency. */ - cntkctl |= 1; - asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl)); -} - -static inline cycle_t arch_counter_get_cntpct(void) -{ - cycle_t cval; - - asm volatile("mrs %0, cntpct_el0" : "=r" (cval)); - - return cval; -} - -static inline cycle_t arch_counter_get_cntvct(void) -{ - cycle_t cval; - - asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); - - return cval; -} - -#endif diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index da2a13e8f1e6..c8eedc604984 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -107,3 +107,11 @@ * Register aliases. 
*/ lr .req x30 // link register + +/* + * Vector entry + */ + .macro ventry label + .align 7 + b \label + .endm diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 407717ba060e..836364468571 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -49,12 +49,12 @@ static inline void atomic_add(int i, atomic_t *v) int result; asm volatile("// atomic_add\n" -"1: ldxr %w0, [%3]\n" -" add %w0, %w0, %w4\n" -" stxr %w1, %w0, [%3]\n" +"1: ldxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -64,13 +64,13 @@ static inline int atomic_add_return(int i, atomic_t *v) int result; asm volatile("// atomic_add_return\n" -"1: ldaxr %w0, [%3]\n" -" add %w0, %w0, %w4\n" -" stlxr %w1, %w0, [%3]\n" +"1: ldaxr %w0, %2\n" +" add %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -81,12 +81,12 @@ static inline void atomic_sub(int i, atomic_t *v) int result; asm volatile("// atomic_sub\n" -"1: ldxr %w0, [%3]\n" -" sub %w0, %w0, %w4\n" -" stxr %w1, %w0, [%3]\n" +"1: ldxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -96,13 +96,13 @@ static inline int atomic_sub_return(int i, atomic_t *v) int result; asm volatile("// atomic_sub_return\n" -"1: ldaxr %w0, [%3]\n" -" sub %w0, %w0, %w4\n" -" stlxr %w1, %w0, [%3]\n" +"1: ldaxr %w0, %2\n" +" sub %w0, %w0, %w3\n" +" stlxr %w1, %w0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -113,15 +113,15 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) int oldval; asm volatile("// atomic_cmpxchg\n" -"1: ldaxr %w1, [%3]\n" -" cmp %w1, %w4\n" +"1: ldaxr %w1, %2\n" +" cmp %w1, %w3\n" " b.ne 2f\n" -" stlxr %w0, %w5, [%3]\n" +" stlxr %w0, %w4, %2\n" " cbnz %w0, 1b\n" "2:" - : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); + : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc", "memory"); return oldval; } @@ -131,12 +131,12 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) unsigned long tmp, tmp2; asm volatile("// atomic_clear_mask\n" -"1: ldxr %0, [%3]\n" -" bic %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" bic %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr) - : "r" (addr), "Ir" (mask) + : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr) + : "Ir" (mask) : "cc"); } @@ -182,12 +182,12 @@ static inline void atomic64_add(u64 i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add\n" -"1: ldxr %0, [%3]\n" -" add %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" add %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); 
} @@ -197,13 +197,13 @@ static inline long atomic64_add_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_add_return\n" -"1: ldaxr %0, [%3]\n" -" add %0, %0, %4\n" -" stlxr %w1, %0, [%3]\n" +"1: ldaxr %0, %2\n" +" add %0, %0, %3\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -214,12 +214,12 @@ static inline void atomic64_sub(u64 i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub\n" -"1: ldxr %0, [%3]\n" -" sub %0, %0, %4\n" -" stxr %w1, %0, [%3]\n" +"1: ldxr %0, %2\n" +" sub %0, %0, %3\n" +" stxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) : "cc"); } @@ -229,13 +229,13 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_sub_return\n" -"1: ldaxr %0, [%3]\n" -" sub %0, %0, %4\n" -" stlxr %w1, %0, [%3]\n" +"1: ldaxr %0, %2\n" +" sub %0, %0, %3\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : "Ir" (i) + : "cc", "memory"); return result; } @@ -246,15 +246,15 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) unsigned long res; asm volatile("// atomic64_cmpxchg\n" -"1: ldaxr %1, [%3]\n" -" cmp %1, %4\n" +"1: ldaxr %1, %2\n" +" cmp %1, %3\n" " b.ne 2f\n" -" stlxr %w0, %5, [%3]\n" +" stlxr %w0, %4, %2\n" " cbnz %w0, 1b\n" "2:" - : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter) - : "r" (&ptr->counter), "Ir" (old), "r" (new) - : "cc"); + : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) + : "Ir" (old), "r" (new) + : "cc", "memory"); return oldval; } @@ -267,15 +267,15 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" -"1: ldaxr %0, [%3]\n" +"1: ldaxr %0, %2\n" " subs %0, %0, #1\n" " b.mi 2f\n" -" stlxr %w1, %0, [%3]\n" +" stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" "2:" - : "=&r" (result), "=&r" (tmp), "+o" (v->counter) - : "r" (&v->counter) - : "cc"); + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : + : "cc", "memory"); return result; } diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h index aa3132ab7f29..3300cbd18a89 100644 --- a/arch/arm64/include/asm/cacheflush.h +++ b/arch/arm64/include/asm/cacheflush.h @@ -70,13 +70,20 @@ * - size - region size */ extern void flush_cache_all(void); -extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); extern void flush_icache_range(unsigned long start, unsigned long end); extern void __flush_dcache_area(void *addr, size_t len); extern void __flush_cache_user_range(unsigned long start, unsigned long end); +static inline void flush_cache_mm(struct mm_struct *mm) +{ +} + +static inline void flush_cache_page(struct vm_area_struct *vma, + unsigned long user_addr, unsigned long pfn) +{ +} + /* * Copy user data from/to a page which is mapped into a different * processes address space. 
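The atomic.h conversion above replaces the old "+o" operand plus a separate base register (the "[%3]" addressing) with a "+Q" memory operand, so the exclusive load/store templates can reference the counter directly as "%2" and the extra input operand disappears. A standalone sketch of the same pattern follows; the function itself is hypothetical and not part of the patch.

    /* Illustrative only: load/store-exclusive loop using the "+Q"
     * constraint, mirroring the converted atomic_clear_mask() above. */
    static inline void example_atomic_or(unsigned long mask, unsigned long *addr)
    {
        unsigned long tmp, val;

        asm volatile("// example_atomic_or\n"
    "1: ldxr    %0, %2\n"
    "   orr     %0, %0, %3\n"
    "   stxr    %w1, %0, %2\n"
    "   cbnz    %w1, 1b"
        : "=&r" (val), "=&r" (tmp), "+Q" (*addr)
        : "r" (mask)
        : "cc");
    }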
Really, we want to allow our "user diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index e0e65b069d9e..968b5cbfc260 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -29,39 +29,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: asm volatile("// __xchg1\n" - "1: ldaxrb %w0, [%3]\n" - " stlxrb %w1, %w2, [%3]\n" + "1: ldaxrb %w0, %2\n" + " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 2: asm volatile("// __xchg2\n" - "1: ldaxrh %w0, [%3]\n" - " stlxrh %w1, %w2, [%3]\n" + "1: ldaxrh %w0, %2\n" + " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 4: asm volatile("// __xchg4\n" - "1: ldaxr %w0, [%3]\n" - " stlxr %w1, %w2, [%3]\n" + "1: ldaxr %w0, %2\n" + " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) + : "r" (x) + : "cc", "memory"); break; case 8: asm volatile("// __xchg8\n" - "1: ldaxr %0, [%3]\n" - " stlxr %w1, %2, [%3]\n" + "1: ldaxr %0, %2\n" + " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" - : "=&r" (ret), "=&r" (tmp) - : "r" (x), "r" (ptr) - : "memory", "cc"); + : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) + : "r" (x) + : "cc", "memory"); break; default: BUILD_BUG(); @@ -82,14 +82,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 1: do { asm volatile("// __cmpxchg1\n" - " ldxrb %w1, [%2]\n" + " ldxrb %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxrb %w0, %w4, [%2]\n" + " stxrb %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; @@ -97,29 +97,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 2: do { asm volatile("// __cmpxchg2\n" - " ldxrh %w1, [%2]\n" + " ldxrh %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxrh %w0, %w4, [%2]\n" + " stxrh %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) - : "memory", "cc"); + : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) + : "Ir" (old), "r" (new) + : "cc"); } while (res); break; case 4: do { asm volatile("// __cmpxchg4\n" - " ldxr %w1, [%2]\n" + " ldxr %w1, %2\n" " mov %w0, #0\n" " cmp %w1, %w3\n" " b.ne 1f\n" - " stxr %w0, %w4, [%2]\n" + " stxr %w0, %w4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; @@ -127,14 +127,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, case 8: do { asm volatile("// __cmpxchg8\n" - " ldxr %1, [%2]\n" + " ldxr %1, %2\n" " mov %w0, #0\n" " cmp %1, %3\n" " b.ne 1f\n" - " stxr %w0, %4, [%2]\n" + " stxr %w0, %4, %2\n" "1:\n" - : "=&r" (res), "=&r" (oldval) - : "r" (ptr), "Ir" (old), "r" (new) + : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) + : "Ir" (old), "r" (new) : "cc"); } while (res); break; diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index 
37e610dc084e..618b450e5a1d 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -23,6 +23,7 @@ */ #include <linux/types.h> #include <linux/sched.h> +#include <linux/ptrace.h> #define COMPAT_USER_HZ 100 #define COMPAT_UTS_MACHINE "armv8l\0\0" @@ -209,10 +210,11 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } +#define compat_user_stack_pointer() (current_pt_regs()->compat_sp) + static inline void __user *arch_compat_alloc_user_space(long len) { - struct pt_regs *regs = task_pt_regs(current); - return (void __user *)regs->compat_sp - len; + return (void __user *)compat_user_stack_pointer() - len; } struct compat_ipc64_perm { diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 538f4b44db5d..994776894198 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -50,6 +50,7 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) { struct dma_map_ops *ops = get_dma_ops(dev); + debug_dma_mapping_error(dev, dev_addr); return ops->mapping_error(dev, dev_addr); } diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index cf284649dfcb..fe32c0e4ac01 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -25,12 +25,13 @@ #include <asm/user.h> typedef unsigned long elf_greg_t; -typedef unsigned long elf_freg_t[3]; -#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) +#define ELF_CORE_COPY_REGS(dest, regs) \ + *(struct user_pt_regs *)&(dest) = (regs)->user_regs; -typedef struct user_fp elf_fpregset_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +typedef struct user_fpsimd_state elf_fpregset_t; #define EM_AARCH64 183 @@ -87,7 +88,6 @@ typedef struct user_fp elf_fpregset_t; #define R_AARCH64_MOVW_PREL_G2_NC 292 #define R_AARCH64_MOVW_PREL_G3 293 - /* * These are used to set parameters in the core dumps. */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b42fab9f62a9..c43b4ac13008 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -25,9 +25,8 @@ * - FPSR and FPCR * - 32 128-bit data registers * - * Note that user_fp forms a prefix of this structure, which is relied - * upon in the ptrace FP/SIMD accessors. struct user_fpsimd_state must - * form a prefix of struct fpsimd_state. + * Note that user_fpsimd forms a prefix of this structure, which is + * relied upon in the ptrace FP/SIMD accessors. */ struct fpsimd_state { union { diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h new file mode 100644 index 000000000000..bbec599c96bd --- /dev/null +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -0,0 +1,64 @@ +/* + * FP/SIMD state saving and restoring macros + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +.macro fpsimd_save state, tmpnr + stp q0, q1, [\state, #16 * 0] + stp q2, q3, [\state, #16 * 2] + stp q4, q5, [\state, #16 * 4] + stp q6, q7, [\state, #16 * 6] + stp q8, q9, [\state, #16 * 8] + stp q10, q11, [\state, #16 * 10] + stp q12, q13, [\state, #16 * 12] + stp q14, q15, [\state, #16 * 14] + stp q16, q17, [\state, #16 * 16] + stp q18, q19, [\state, #16 * 18] + stp q20, q21, [\state, #16 * 20] + stp q22, q23, [\state, #16 * 22] + stp q24, q25, [\state, #16 * 24] + stp q26, q27, [\state, #16 * 26] + stp q28, q29, [\state, #16 * 28] + stp q30, q31, [\state, #16 * 30]! + mrs x\tmpnr, fpsr + str w\tmpnr, [\state, #16 * 2] + mrs x\tmpnr, fpcr + str w\tmpnr, [\state, #16 * 2 + 4] +.endm + +.macro fpsimd_restore state, tmpnr + ldp q0, q1, [\state, #16 * 0] + ldp q2, q3, [\state, #16 * 2] + ldp q4, q5, [\state, #16 * 4] + ldp q6, q7, [\state, #16 * 6] + ldp q8, q9, [\state, #16 * 8] + ldp q10, q11, [\state, #16 * 10] + ldp q12, q13, [\state, #16 * 12] + ldp q14, q15, [\state, #16 * 14] + ldp q16, q17, [\state, #16 * 16] + ldp q18, q19, [\state, #16 * 18] + ldp q20, q21, [\state, #16 * 20] + ldp q22, q23, [\state, #16 * 22] + ldp q24, q25, [\state, #16 * 24] + ldp q26, q27, [\state, #16 * 26] + ldp q28, q29, [\state, #16 * 28] + ldp q30, q31, [\state, #16 * 30]! + ldr w\tmpnr, [\state, #16 * 2] + msr fpsr, x\tmpnr + ldr w\tmpnr, [\state, #16 * 2 + 4] + msr fpcr, x\tmpnr +.endm diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 3468ae8439fa..c582fa316366 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -39,7 +39,7 @@ " .popsection\n" \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ - : "cc") + : "cc", "memory") static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 74a2a7d304a9..57f12c991de2 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -114,7 +114,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * I/O port access primitives. 
*/ #define IO_SPACE_LIMIT 0xffff -#define PCI_IOBASE ((void __iomem *)0xffffffbbfffe0000UL) +#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) static inline u8 inb(unsigned long addr) { @@ -222,14 +222,17 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot extern void __iounmap(volatile void __iomem *addr); #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) -#define ioremap(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) -#define ioremap_nocache(addr, size) __ioremap((addr), (size), PROT_DEVICE_nGnRE) -#define ioremap_wc(addr, size) __ioremap((addr), (size), PROT_NORMAL_NC) +#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define iounmap __iounmap +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) +#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define ARCH_HAS_IOREMAP_WC #include <asm-generic/iomap.h> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 1cac16a001cb..381f556b664e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -43,6 +43,7 @@ #define PAGE_OFFSET UL(0xffffffc000000000) #define MODULES_END (PAGE_OFFSET) #define MODULES_VADDR (MODULES_END - SZ_64M) +#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) #define VA_BITS (39) #define TASK_SIZE_64 (UL(1) << VA_BITS) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index d4f7fd5b9e33..2494fc01896a 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -26,5 +26,6 @@ typedef struct { extern void paging_init(void); extern void setup_mm_for_reboot(void); +extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); #endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index f68465dee026..e2bc385adb6b 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -35,6 +35,21 @@ extern unsigned int cpu_last_asid; void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); +#ifdef CONFIG_PID_IN_CONTEXTIDR +static inline void contextidr_thread_switch(struct task_struct *next) +{ + asm( + " msr contextidr_el1, %0\n" + " isb" + : + : "r" (task_pid_nr(next))); +} +#else +static inline void contextidr_thread_switch(struct task_struct *next) +{ +} +#endif + /* * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. */ diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index a6fffd511c5e..d26d1d53c0d7 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -17,6 +17,11 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H -/* It's quiet around here... 
*/ +#ifdef CONFIG_HW_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif #endif diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 0f3b4581d925..75fd13d289b9 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -38,7 +38,8 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) +#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). @@ -57,7 +58,8 @@ #define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ #define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ #define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ -#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ +#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ +#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ /* * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 8960239be722..e333a243bfcc 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -24,7 +24,8 @@ /* * Software defined PTE bits definition. */ -#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */ +#define PTE_VALID (_AT(pteval_t, 1) << 0) +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) @@ -60,25 +61,28 @@ extern void __pgd_error(const char *file, int line, unsigned long val); extern pgprot_t pgprot_default; -#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) - -#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN) -#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG) -#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY) -#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY) -#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY) - -#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN) -#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG) -#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) -#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY) -#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY) +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) + +#define PAGE_NONE __pgprot_modify(pgprot_default, 
PTE_TYPE_MASK, PTE_PROT_NONE) +#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) +#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) + +#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) +#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) #endif /* __ASSEMBLY__ */ @@ -125,16 +129,15 @@ extern struct page *empty_zero_page; /* * The following only work if pte_present(). Undefined behaviour otherwise. */ -#define pte_present(pte) (pte_val(pte) & PTE_VALID) +#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & PTE_AF) #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) -#define pte_exec(pte) (!(pte_val(pte) & PTE_XN)) +#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) -#define pte_present_exec_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \ - (PTE_VALID | PTE_USER)) +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -157,8 +160,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - if (pte_present_exec_user(pte)) - __sync_icache_dcache(pte, addr); + if (pte_valid_user(pte)) { + if (pte_exec(pte)) + __sync_icache_dcache(pte, addr); + if (!pte_dirty(pte)) + pte = pte_wrprotect(pte); + } + set_pte(ptep, pte); } @@ -168,9 +176,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - #define __HAVE_ARCH_PTE_SPECIAL /* @@ -262,7 +267,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY; + const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | + PTE_PROT_NONE | PTE_VALID; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/arch/arm64/include/asm/processor.h 
b/arch/arm64/include/asm/processor.h index 5d810044feda..ab239b2c456f 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -43,6 +43,8 @@ #else #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ + +#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK #endif /* __KERNEL__ */ struct debug_info { @@ -126,11 +128,6 @@ unsigned long get_wchan(struct task_struct *p); extern struct task_struct *cpu_switch_to(struct task_struct *prev, struct task_struct *next); -/* - * Create a new kernel thread - */ -extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); - #define task_pt_regs(p) \ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h new file mode 100644 index 000000000000..0604237ecd99 --- /dev/null +++ b/arch/arm64/include/asm/psci.h @@ -0,0 +1,38 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2013 ARM Limited + */ + +#ifndef __ASM_PSCI_H +#define __ASM_PSCI_H + +#define PSCI_POWER_STATE_TYPE_STANDBY 0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 + +struct psci_power_state { + u16 id; + u8 type; + u8 affinity_level; +}; + +struct psci_operations { + int (*cpu_suspend)(struct psci_power_state state, + unsigned long entry_point); + int (*cpu_off)(struct psci_power_state state); + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); + int (*migrate)(unsigned long cpuid); +}; + +extern struct psci_operations psci_ops; + +int psci_init(void); + +#endif /* __ASM_PSCI_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index b04d3404f0d1..41a71ee4c3df 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -30,8 +30,28 @@ #define COMPAT_PTRACE_SETVFPREGS 28 #define COMPAT_PTRACE_GETHBPREGS 29 #define COMPAT_PTRACE_SETHBPREGS 30 + +/* AArch32 CPSR bits */ +#define COMPAT_PSR_MODE_MASK 0x0000001f #define COMPAT_PSR_MODE_USR 0x00000010 +#define COMPAT_PSR_MODE_FIQ 0x00000011 +#define COMPAT_PSR_MODE_IRQ 0x00000012 +#define COMPAT_PSR_MODE_SVC 0x00000013 +#define COMPAT_PSR_MODE_ABT 0x00000017 +#define COMPAT_PSR_MODE_HYP 0x0000001a +#define COMPAT_PSR_MODE_UND 0x0000001b +#define COMPAT_PSR_MODE_SYS 0x0000001f #define COMPAT_PSR_T_BIT 0x00000020 +#define COMPAT_PSR_F_BIT 0x00000040 +#define COMPAT_PSR_I_BIT 0x00000080 +#define COMPAT_PSR_A_BIT 0x00000100 +#define COMPAT_PSR_E_BIT 0x00000200 +#define COMPAT_PSR_J_BIT 0x01000000 +#define COMPAT_PSR_Q_BIT 0x08000000 +#define COMPAT_PSR_V_BIT 0x10000000 +#define COMPAT_PSR_C_BIT 0x20000000 +#define COMPAT_PSR_Z_BIT 0x40000000 +#define COMPAT_PSR_N_BIT 0x80000000 #define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ /* * These are 'magic' values for PTRACE_PEEKUSR that return info about where a @@ -44,10 +64,27 @@ /* sizeof(struct user) for AArch32 */ #define COMPAT_USER_SZ 296 -/* AArch32 uses x13 as the stack pointer... */ + +/* Architecturally defined mapping between AArch32 and AArch64 registers */ +#define compat_usr(x) regs[(x)] #define compat_sp regs[13] -/* ... and x14 as the link register. 
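The new asm/psci.h above exposes firmware CPU control through the psci_ops table. A hedged sketch of how a caller could use it to power on a secondary CPU; the function and its arguments are placeholders, and the real wiring is done by the PSCI SMP enable ops added elsewhere in this diff.

    /* Illustrative only: bring a CPU up via PSCI if the firmware provides
     * CPU_ON. 'hwid' is the target CPU's hardware id and 'entry' the
     * physical address it should start executing at. */
    static int example_psci_boot_cpu(unsigned long hwid, unsigned long entry)
    {
        if (!psci_ops.cpu_on)
            return -ENODEV;
        return psci_ops.cpu_on(hwid, entry);
    }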
*/ #define compat_lr regs[14] +#define compat_sp_hyp regs[15] +#define compat_sp_irq regs[16] +#define compat_lr_irq regs[17] +#define compat_sp_svc regs[18] +#define compat_lr_svc regs[19] +#define compat_sp_abt regs[20] +#define compat_lr_abt regs[21] +#define compat_sp_und regs[22] +#define compat_lr_und regs[23] +#define compat_r8_fiq regs[24] +#define compat_r9_fiq regs[25] +#define compat_r10_fiq regs[26] +#define compat_r11_fiq regs[27] +#define compat_r12_fiq regs[28] +#define compat_sp_fiq regs[29] +#define compat_lr_fiq regs[30] /* * This struct defines the way the registers are stored on the stack during an diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 7e34295f78e3..4b8023c5d146 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h @@ -66,4 +66,15 @@ extern volatile unsigned long secondary_holding_pen_release; extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); +struct device_node; + +struct smp_enable_ops { + const char *name; + int (*init_cpu)(struct device_node *, int); + int (*prepare_cpu)(int); +}; + +extern const struct smp_enable_ops smp_spin_table_ops; +extern const struct smp_enable_ops smp_psci_ops; + #endif /* ifndef __ASM_SMP_H */ diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index 41112fe2f8b1..7065e920149d 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h @@ -45,13 +45,13 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) asm volatile( " sevl\n" "1: wfe\n" - "2: ldaxr %w0, [%1]\n" + "2: ldaxr %w0, %1\n" " cbnz %w0, 1b\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" " cbnz %w0, 2b\n" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) - : "memory"); + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -59,13 +59,13 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) unsigned int tmp; asm volatile( - " ldaxr %w0, [%1]\n" + " ldaxr %w0, %1\n" " cbnz %w0, 1f\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" "1:\n" - : "=&r" (tmp) - : "r" (&lock->lock), "r" (1) - : "memory"); + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); return !tmp; } @@ -73,8 +73,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) static inline void arch_spin_unlock(arch_spinlock_t *lock) { asm volatile( - " stlr %w1, [%0]\n" - : : "r" (&lock->lock), "r" (0) : "memory"); + " stlr %w1, %0\n" + : "=Q" (lock->lock) : "r" (0) : "memory"); } /* @@ -94,13 +94,13 @@ static inline void arch_write_lock(arch_rwlock_t *rw) asm volatile( " sevl\n" "1: wfe\n" - "2: ldaxr %w0, [%1]\n" + "2: ldaxr %w0, %1\n" " cbnz %w0, 1b\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" " cbnz %w0, 2b\n" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "memory"); + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); } static inline int arch_write_trylock(arch_rwlock_t *rw) @@ -108,13 +108,13 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) unsigned int tmp; asm volatile( - " ldaxr %w0, [%1]\n" + " ldaxr %w0, %1\n" " cbnz %w0, 1f\n" - " stxr %w0, %w2, [%1]\n" + " stxr %w0, %w2, %1\n" "1:\n" - : "=&r" (tmp) - : "r" (&rw->lock), "r" (0x80000000) - : "memory"); + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); return !tmp; } @@ -122,8 +122,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) static inline void 
arch_write_unlock(arch_rwlock_t *rw) { asm volatile( - " stlr %w1, [%0]\n" - : : "r" (&rw->lock), "r" (0) : "memory"); + " stlr %w1, %0\n" + : "=Q" (rw->lock) : "r" (0) : "memory"); } /* write_can_lock - would write_trylock() succeed? */ @@ -148,14 +148,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw) asm volatile( " sevl\n" "1: wfe\n" - "2: ldaxr %w0, [%2]\n" + "2: ldaxr %w0, %2\n" " add %w0, %w0, #1\n" " tbnz %w0, #31, 1b\n" - " stxr %w1, %w0, [%2]\n" + " stxr %w1, %w0, %2\n" " cbnz %w1, 2b\n" - : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); } static inline void arch_read_unlock(arch_rwlock_t *rw) @@ -163,13 +163,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) unsigned int tmp, tmp2; asm volatile( - "1: ldxr %w0, [%2]\n" + "1: ldxr %w0, %2\n" " sub %w0, %w0, #1\n" - " stlxr %w1, %w0, [%2]\n" + " stlxr %w1, %w0, %2\n" " cbnz %w1, 1b\n" - : "=&r" (tmp), "=&r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); } static inline int arch_read_trylock(arch_rwlock_t *rw) @@ -177,14 +177,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) unsigned int tmp, tmp2 = 1; asm volatile( - " ldaxr %w0, [%2]\n" + " ldaxr %w0, %2\n" " add %w0, %w0, #1\n" " tbnz %w0, #31, 1f\n" - " stxr %w1, %w0, [%2]\n" + " stxr %w1, %w0, %2\n" "1:\n" - : "=&r" (tmp), "+r" (tmp2) - : "r" (&rw->lock) - : "memory"); + : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); return !tmp2; } diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h index 09ff33572aab..48fe7c600e98 100644 --- a/arch/arm64/include/asm/syscalls.h +++ b/arch/arm64/include/asm/syscalls.h @@ -23,17 +23,7 @@ /* * System call wrappers implemented in kernel/entry.S. */ -asmlinkage long sys_execve_wrapper(const char __user *filename, - const char __user *const __user *argv, - const char __user *const __user *envp); -asmlinkage long sys_clone_wrapper(unsigned long clone_flags, - unsigned long newsp, - void __user *parent_tid, - unsigned long tls_val, - void __user *child_tid); asmlinkage long sys_rt_sigreturn_wrapper(void); -asmlinkage long sys_sigaltstack_wrapper(const stack_t __user *uss, - stack_t __user *uoss); #include <asm-generic/syscalls.h> diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 63f853f8b718..82ce217e94cf 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -14,7 +14,6 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ #ifdef CONFIG_COMPAT -#define __ARCH_WANT_COMPAT_IPC_PARSE_VERSION #define __ARCH_WANT_COMPAT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE @@ -23,7 +22,9 @@ #define __ARCH_WANT_SYS_NICE #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND #define __ARCH_WANT_COMPAT_SYS_SENDFILE +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK #endif +#define __ARCH_WANT_SYS_CLONE #include <uapi/asm/unistd.h> diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d909faebf28..12f22492df4c 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -23,7 +23,7 @@ __SYSCALL(0, sys_restart_syscall) __SYSCALL(1, sys_exit) -__SYSCALL(2, compat_sys_fork_wrapper) +__SYSCALL(2, sys_fork) __SYSCALL(3, sys_read) __SYSCALL(4, sys_write) __SYSCALL(5, compat_sys_open) @@ -32,7 +32,7 @@ __SYSCALL(7, sys_ni_syscall) /* 7 was sys_waitpid */ __SYSCALL(8, sys_creat) __SYSCALL(9, sys_link) __SYSCALL(10, sys_unlink) -__SYSCALL(11, compat_sys_execve_wrapper) +__SYSCALL(11, compat_sys_execve) __SYSCALL(12, sys_chdir) __SYSCALL(13, sys_ni_syscall) /* 13 was sys_time */ __SYSCALL(14, sys_mknod) @@ -40,7 +40,7 @@ __SYSCALL(15, sys_chmod) __SYSCALL(16, sys_lchown16) __SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */ __SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */ -__SYSCALL(19, compat_sys_lseek_wrapper) +__SYSCALL(19, compat_sys_lseek) __SYSCALL(20, sys_getpid) __SYSCALL(21, compat_sys_mount) __SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */ @@ -93,7 +93,7 @@ __SYSCALL(68, sys_ni_syscall) /* 68 was sys_sgetmask */ __SYSCALL(69, sys_ni_syscall) /* 69 was sys_ssetmask */ __SYSCALL(70, sys_setreuid16) __SYSCALL(71, sys_setregid16) -__SYSCALL(72, compat_sys_sigsuspend) +__SYSCALL(72, sys_sigsuspend) __SYSCALL(73, compat_sys_sigpending) __SYSCALL(74, sys_sethostname) __SYSCALL(75, compat_sys_setrlimit) @@ -113,8 +113,8 @@ __SYSCALL(88, sys_reboot) __SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */ __SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */ __SYSCALL(91, sys_munmap) -__SYSCALL(92, sys_truncate) -__SYSCALL(93, sys_ftruncate) +__SYSCALL(92, compat_sys_truncate) +__SYSCALL(93, compat_sys_ftruncate) __SYSCALL(94, sys_fchmod) __SYSCALL(95, sys_fchown16) __SYSCALL(96, sys_getpriority) @@ -141,7 +141,7 @@ __SYSCALL(116, compat_sys_sysinfo) __SYSCALL(117, sys_ni_syscall) /* 117 was sys_ipc */ __SYSCALL(118, sys_fsync) __SYSCALL(119, compat_sys_sigreturn_wrapper) -__SYSCALL(120, compat_sys_clone_wrapper) +__SYSCALL(120, sys_clone) __SYSCALL(121, sys_setdomainname) __SYSCALL(122, sys_newuname) __SYSCALL(123, sys_ni_syscall) /* 123 was sys_modify_ldt */ @@ -207,11 +207,11 @@ __SYSCALL(182, sys_chown16) __SYSCALL(183, sys_getcwd) __SYSCALL(184, sys_capget) __SYSCALL(185, sys_capset) -__SYSCALL(186, compat_sys_sigaltstack_wrapper) +__SYSCALL(186, compat_sys_sigaltstack) __SYSCALL(187, compat_sys_sendfile) __SYSCALL(188, sys_ni_syscall) /* 188 reserved */ __SYSCALL(189, sys_ni_syscall) /* 189 reserved */ -__SYSCALL(190, compat_sys_vfork_wrapper) +__SYSCALL(190, sys_vfork) __SYSCALL(191, compat_sys_getrlimit) /* SuS compliant getrlimit */ __SYSCALL(192, sys_mmap_pgoff) __SYSCALL(193, compat_sys_truncate64_wrapper) @@ -392,11 +392,16 @@ __SYSCALL(367, sys_fanotify_init) __SYSCALL(368, compat_sys_fanotify_mark_wrapper) __SYSCALL(369, sys_prlimit64) __SYSCALL(370, sys_name_to_handle_at) -__SYSCALL(371, sys_open_by_handle_at) -__SYSCALL(372, sys_clock_adjtime) 
+__SYSCALL(371, compat_sys_open_by_handle_at) +__SYSCALL(372, compat_sys_clock_adjtime) __SYSCALL(373, sys_syncfs) +__SYSCALL(374, compat_sys_sendmmsg) +__SYSCALL(375, sys_setns) +__SYSCALL(376, compat_sys_process_vm_readv) +__SYSCALL(377, compat_sys_process_vm_writev) +__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ -#define __NR_compat_syscalls 374 +#define __NR_compat_syscalls 379 /* * Compat syscall numbers used by the AArch64 kernel. diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h new file mode 100644 index 000000000000..439827271e3d --- /dev/null +++ b/arch/arm64/include/asm/virt.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM__VIRT_H +#define __ASM__VIRT_H + +#define BOOT_CPU_MODE_EL2 (0x0e12b007) + +#ifndef __ASSEMBLY__ + +/* + * __boot_cpu_mode records what mode CPUs were booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * In this case, both 32bit halves of __boot_cpu_mode will contain the + * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2). + * + * Should the bootloader fail to do this, the two values will be different. + * This allows the kernel to flag an error when the secondaries have come up. + */ +extern u32 __boot_cpu_mode[2]; + +void __hyp_set_vectors(phys_addr_t phys_vector_base); +phys_addr_t __hyp_get_vectors(void); + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ + return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && + __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ + return __boot_cpu_mode[0] != __boot_cpu_mode[1]; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* ! __ASM__VIRT_H */ diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild index ca5b65f75c7b..e4b78bdca19e 100644 --- a/arch/arm64/include/uapi/asm/Kbuild +++ b/arch/arm64/include/uapi/asm/Kbuild @@ -1,11 +1,14 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm +generic-y += kvm_para.h + header-y += auxvec.h header-y += bitsperlong.h header-y += byteorder.h header-y += fcntl.h header-y += hwcap.h +header-y += kvm_para.h header-y += param.h header-y += ptrace.h header-y += setup.h diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index e2caff1b812a..7b4b564961d4 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -8,14 +8,16 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # Object file lists. 
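The asm/virt.h header above records which exception level the CPUs booted in. A minimal sketch of how later code could gate EL2-dependent initialisation on the helpers it declares; the function name is hypothetical.

    /* Illustrative only: refuse hypervisor-mode setup unless every CPU
     * entered the kernel in EL2, as recorded in __boot_cpu_mode. */
    static int __init example_hyp_mode_check(void)
    {
        if (is_hyp_mode_mismatched())
            return -EINVAL;     /* CPUs booted in different modes */
        if (!is_hyp_mode_available())
            return -ENODEV;     /* booted in EL1, no EL2 available */
        return 0;
    }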
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ - sys.o stacktrace.o time.o traps.o io.o vdso.o + sys.o stacktrace.o time.o traps.o io.o vdso.o \ + hyp-stub.o psci.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o -arm64-obj-$(CONFIG_SMP) += smp.o +arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o +arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c new file mode 100644 index 000000000000..7e320a2edb9b --- /dev/null +++ b/arch/arm64/kernel/early_printk.c @@ -0,0 +1,118 @@ +/* + * Earlyprintk support. + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/kernel.h> +#include <linux/console.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/io.h> + +#include <linux/amba/serial.h> + +static void __iomem *early_base; +static void (*printch)(char ch); + +/* + * PL011 single character TX. + */ +static void pl011_printch(char ch) +{ + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_TXFF) + ; + writeb_relaxed(ch, early_base + UART01x_DR); + while (readl_relaxed(early_base + UART01x_FR) & UART01x_FR_BUSY) + ; +} + +struct earlycon_match { + const char *name; + void (*printch)(char ch); +}; + +static const struct earlycon_match earlycon_match[] __initconst = { + { .name = "pl011", .printch = pl011_printch, }, + {} +}; + +static void early_write(struct console *con, const char *s, unsigned n) +{ + while (n-- > 0) { + if (*s == '\n') + printch('\r'); + printch(*s); + s++; + } +} + +static struct console early_console = { + .name = "earlycon", + .write = early_write, + .flags = CON_PRINTBUFFER | CON_BOOT, + .index = -1, +}; + +/* + * Parse earlyprintk=... parameter in the format: + * + * <name>[,<addr>][,<options>] + * + * and register the early console. It is assumed that the UART has been + * initialised by the bootloader already. 
+ */ +static int __init setup_early_printk(char *buf) +{ + const struct earlycon_match *match = earlycon_match; + phys_addr_t paddr = 0; + + if (!buf) { + pr_warning("No earlyprintk arguments passed.\n"); + return 0; + } + + while (match->name) { + size_t len = strlen(match->name); + if (!strncmp(buf, match->name, len)) { + buf += len; + break; + } + match++; + } + if (!match->name) { + pr_warning("Unknown earlyprintk arguments: %s\n", buf); + return 0; + } + + /* I/O address */ + if (!strncmp(buf, ",0x", 3)) { + char *e; + paddr = simple_strtoul(buf + 1, &e, 16); + buf = e; + } + /* no options parsing yet */ + + if (paddr) + early_base = early_io_map(paddr, EARLYCON_IOBASE); + + printch = match->printch; + register_console(&early_console); + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index 17988a6e7ea2..6a27cd6dbfa6 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S @@ -20,6 +20,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> +#include <asm/fpsimdmacros.h> /* * Save the FP registers. @@ -27,26 +28,7 @@ * x0 - pointer to struct fpsimd_state */ ENTRY(fpsimd_save_state) - stp q0, q1, [x0, #16 * 0] - stp q2, q3, [x0, #16 * 2] - stp q4, q5, [x0, #16 * 4] - stp q6, q7, [x0, #16 * 6] - stp q8, q9, [x0, #16 * 8] - stp q10, q11, [x0, #16 * 10] - stp q12, q13, [x0, #16 * 12] - stp q14, q15, [x0, #16 * 14] - stp q16, q17, [x0, #16 * 16] - stp q18, q19, [x0, #16 * 18] - stp q20, q21, [x0, #16 * 20] - stp q22, q23, [x0, #16 * 22] - stp q24, q25, [x0, #16 * 24] - stp q26, q27, [x0, #16 * 26] - stp q28, q29, [x0, #16 * 28] - stp q30, q31, [x0, #16 * 30]! - mrs x8, fpsr - str w8, [x0, #16 * 2] - mrs x8, fpcr - str w8, [x0, #16 * 2 + 4] + fpsimd_save x0, 8 ret ENDPROC(fpsimd_save_state) @@ -56,25 +38,6 @@ ENDPROC(fpsimd_save_state) * x0 - pointer to struct fpsimd_state */ ENTRY(fpsimd_load_state) - ldp q0, q1, [x0, #16 * 0] - ldp q2, q3, [x0, #16 * 2] - ldp q4, q5, [x0, #16 * 4] - ldp q6, q7, [x0, #16 * 6] - ldp q8, q9, [x0, #16 * 8] - ldp q10, q11, [x0, #16 * 10] - ldp q12, q13, [x0, #16 * 12] - ldp q14, q15, [x0, #16 * 14] - ldp q16, q17, [x0, #16 * 16] - ldp q18, q19, [x0, #16 * 18] - ldp q20, q21, [x0, #16 * 20] - ldp q22, q23, [x0, #16 * 22] - ldp q24, q25, [x0, #16 * 24] - ldp q26, q27, [x0, #16 * 26] - ldp q28, q29, [x0, #16 * 28] - ldp q30, q31, [x0, #16 * 30]! - ldr w8, [x0, #16 * 2] - ldr w9, [x0, #16 * 2 + 4] - msr fpsr, x8 - msr fpcr, x9 + fpsimd_restore x0, 8 ret ENDPROC(fpsimd_load_state) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a6f3f7da6880..514d6098dbee 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -148,10 +148,6 @@ tsk .req x28 // current thread_info /* * Exception vectors. */ - .macro ventry label - .align 7 - b \label - .endm .align 11 ENTRY(vectors) @@ -594,7 +590,7 @@ work_resched: /* * "slow" syscall return path. */ -ENTRY(ret_to_user) +ret_to_user: disable_irq // disable interrupts ldr x1, [tsk, #TI_FLAGS] and x2, x1, #_TIF_WORK_MASK @@ -611,7 +607,10 @@ ENDPROC(ret_to_user) */ ENTRY(ret_from_fork) bl schedule_tail - get_thread_info tsk + cbz x19, 1f // not a kernel thread + mov x0, x20 + blr x19 +1: get_thread_info tsk b ret_to_user ENDPROC(ret_from_fork) @@ -673,25 +672,10 @@ __sys_trace_return: /* * Special system call wrappers. 
*/ -ENTRY(sys_execve_wrapper) - mov x3, sp - b sys_execve -ENDPROC(sys_execve_wrapper) - -ENTRY(sys_clone_wrapper) - mov x5, sp - b sys_clone -ENDPROC(sys_clone_wrapper) - ENTRY(sys_rt_sigreturn_wrapper) mov x0, sp b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) -ENTRY(sys_sigaltstack_wrapper) - ldr x2, [sp, #S_SP] - b sys_sigaltstack -ENDPROC(sys_sigaltstack_wrapper) - ENTRY(handle_arch_irq) .quad 0 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a2f02b63eae9..0a0a49756826 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -31,6 +31,7 @@ #include <asm/pgtable-hwdef.h> #include <asm/pgtable.h> #include <asm/page.h> +#include <asm/virt.h> /* * swapper_pg_dir is the virtual address of the initial page table. We place @@ -81,10 +82,8 @@ #ifdef CONFIG_ARM64_64K_PAGES #define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS -#define IO_MMUFLAGS PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS #else #define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS -#define IO_MMUFLAGS PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS #endif /* @@ -115,13 +114,13 @@ ENTRY(stext) mov x21, x0 // x21=FDT + bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl el2_setup // Drop to EL1 mrs x22, midr_el1 // x22=cpuid mov x0, x22 bl lookup_processor_type mov x23, x0 // x23=current cpu_table cbz x23, __error_p // invalid processor (x23=0)? - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl __vet_fdt bl __create_page_tables // x25=TTBR0, x26=TTBR1 /* @@ -147,17 +146,23 @@ ENTRY(el2_setup) mrs x0, CurrentEL cmp x0, #PSR_MODE_EL2t ccmp x0, #PSR_MODE_EL2h, #0x4, ne + ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode + add x0, x0, x28 b.eq 1f + str wzr, [x0] // Remember we don't have EL2... ret /* Hyp configuration. */ -1: mov x0, #(1 << 31) // 64-bit EL1 +1: ldr w1, =BOOT_CPU_MODE_EL2 + str w1, [x0, #4] // This CPU has EL2 + mov x0, #(1 << 31) // 64-bit EL1 msr hcr_el2, x0 /* Generic timers. */ mrs x0, cnthctl_el2 orr x0, x0, #3 // Enable EL1 physical timers msr cnthctl_el2, x0 + msr cntvoff_el2, xzr // Clear virtual offset /* Populate ID registers. */ mrs x0, midr_el1 @@ -178,6 +183,13 @@ ENTRY(el2_setup) msr hstr_el2, xzr // Disable CP15 traps to EL2 #endif + /* Stage-2 translation */ + msr vttbr_el2, xzr + + /* Hypervisor stub */ + adr x0, __hyp_stub_vectors + msr vbar_el2, x0 + /* spsr */ mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ PSR_MODE_EL1h) @@ -186,6 +198,19 @@ ENTRY(el2_setup) eret ENDPROC(el2_setup) +/* + * We need to find out the CPU boot mode long after boot, so we need to + * store it in a writable variable. + * + * This is not in .bss, because we set it sufficiently early that the boot-time + * zeroing of .bss would clobber it. + */ + .pushsection .data +ENTRY(__boot_cpu_mode) + .long BOOT_CPU_MODE_EL2 + .long 0 + .popsection + .align 3 2: .quad . .quad PAGE_OFFSET @@ -201,6 +226,7 @@ ENDPROC(el2_setup) * cores are held until we're ready for them to initialise. */ ENTRY(secondary_holding_pen) + bl __calc_phys_offset // x24=phys offset bl el2_setup // Drop to EL1 mrs x0, mpidr_el1 and x0, x0, #15 // CPU number @@ -226,7 +252,6 @@ ENTRY(secondary_startup) mov x23, x0 // x23=current cpu_table cbz x23, __error_p // invalid processor (x23=0)? 
- bl __calc_phys_offset // x24=phys offset pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1 ldr x12, [x23, #CPU_INFO_SETUP] add x12, x12, x28 // __virt_to_phys @@ -341,6 +366,7 @@ ENDPROC(__calc_phys_offset) * - identity mapping to enable the MMU (low address, TTBR0) * - first few MB of the kernel linear mapping to jump to once the MMU has * been enabled, including the FDT blob (TTBR1) + * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1) */ __create_page_tables: pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses @@ -393,6 +419,15 @@ __create_page_tables: sub x6, x6, #1 // inclusive range create_block_map x0, x7, x3, x5, x6 1: +#ifdef CONFIG_EARLY_PRINTK + /* + * Create the pgd entry for the UART mapping. The full mapping is done + * later based earlyprintk kernel parameter. + */ + ldr x5, =EARLYCON_IOBASE // UART virtual address + add x0, x26, #2 * PAGE_SIZE // section table address + create_pgd_entry x26, x0, x5, x6, x7 +#endif ret ENDPROC(__create_page_tables) .ltorg diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S new file mode 100644 index 000000000000..0959611d9ff1 --- /dev/null +++ b/arch/arm64/kernel/hyp-stub.S @@ -0,0 +1,109 @@ +/* + * Hypervisor stub + * + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/linkage.h> + +#include <asm/assembler.h> +#include <asm/ptrace.h> +#include <asm/virt.h> + + .text + .align 11 + +ENTRY(__hyp_stub_vectors) + ventry el2_sync_invalid // Synchronous EL2t + ventry el2_irq_invalid // IRQ EL2t + ventry el2_fiq_invalid // FIQ EL2t + ventry el2_error_invalid // Error EL2t + + ventry el2_sync_invalid // Synchronous EL2h + ventry el2_irq_invalid // IRQ EL2h + ventry el2_fiq_invalid // FIQ EL2h + ventry el2_error_invalid // Error EL2h + + ventry el1_sync // Synchronous 64-bit EL1 + ventry el1_irq_invalid // IRQ 64-bit EL1 + ventry el1_fiq_invalid // FIQ 64-bit EL1 + ventry el1_error_invalid // Error 64-bit EL1 + + ventry el1_sync_invalid // Synchronous 32-bit EL1 + ventry el1_irq_invalid // IRQ 32-bit EL1 + ventry el1_fiq_invalid // FIQ 32-bit EL1 + ventry el1_error_invalid // Error 32-bit EL1 +ENDPROC(__hyp_stub_vectors) + + .align 11 + +el1_sync: + mrs x1, esr_el2 + lsr x1, x1, #26 + cmp x1, #0x16 + b.ne 2f // Not an HVC trap + cbz x0, 1f + msr vbar_el2, x0 // Set vbar_el2 + b 2f +1: mrs x0, vbar_el2 // Return vbar_el2 +2: eret +ENDPROC(el1_sync) + +.macro invalid_vector label +\label: + b \label +ENDPROC(\label) +.endm + + invalid_vector el2_sync_invalid + invalid_vector el2_irq_invalid + invalid_vector el2_fiq_invalid + invalid_vector el2_error_invalid + invalid_vector el1_sync_invalid + invalid_vector el1_irq_invalid + invalid_vector el1_fiq_invalid + invalid_vector el1_error_invalid + +/* + * __hyp_set_vectors: Call this after boot to set the initial hypervisor + * vectors as part of hypervisor installation. 
On an SMP system, this should + * be called on each CPU. + * + * x0 must be the physical address of the new vector table, and must be + * 2KB aligned. + * + * Before calling this, you must check that the stub hypervisor is installed + * everywhere, by waiting for any secondary CPUs to be brought up and then + * checking that is_hyp_mode_available() is true. + * + * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or + * something else went wrong... in such cases, trying to install a new + * hypervisor is unlikely to work as desired. + * + * When you call into your shiny new hypervisor, sp_el2 will contain junk, + * so you will need to set that to something sensible at the new hypervisor's + * initialisation entry point. + */ + +ENTRY(__hyp_get_vectors) + mov x0, xzr + // fall through +ENTRY(__hyp_set_vectors) + hvc #0 + ret +ENDPROC(__hyp_get_vectors) +ENDPROC(__hyp_set_vectors) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index ecbf2d81ec5c..1e49e5eb81e9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types { ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, - - /* - * This isn't an architected event. - * We detect this event number and use the cycle counter instead. - */ - ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF, }; /* PMUv3 HW events mapping. */ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, @@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; /* Always place a cycle counter into the cycle counter. 
*/ - if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) { if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) return -EAGAIN; @@ -1227,7 +1221,7 @@ static struct of_device_id armpmu_of_device_ids[] = { {}, }; -static int __devinit armpmu_device_probe(struct platform_device *pdev) +static int armpmu_device_probe(struct platform_device *pdev) { if (!cpu_pmu) return -ENODEV; @@ -1337,6 +1331,11 @@ void perf_callchain_user(struct perf_callchain_entry *entry, { struct frame_tail __user *tail; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } + tail = (struct frame_tail __user *)regs->regs[29]; while (entry->nr < PERF_MAX_STACK_DEPTH && @@ -1361,8 +1360,40 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry, { struct stackframe frame; + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + /* We don't support guest os callchain now */ + return; + } + frame.fp = regs->regs[29]; frame.sp = regs->sp; frame.pc = regs->pc; walk_stackframe(&frame, callchain_trace, entry); } + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) + return perf_guest_cbs->get_guest_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + int misc = 0; + + if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { + if (perf_guest_cbs->is_user_mode()) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f22965ea1cfc..0337cdb0667b 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -45,9 +45,10 @@ #include <asm/compat.h> #include <asm/cacheflush.h> +#include <asm/fpsimd.h> +#include <asm/mmu_context.h> #include <asm/processor.h> #include <asm/stacktrace.h> -#include <asm/fpsimd.h> static void setup_restart(void) { @@ -97,14 +98,9 @@ static void default_idle(void) local_irq_enable(); } -void (*pm_idle)(void) = default_idle; -EXPORT_SYMBOL_GPL(pm_idle); - /* - * The idle thread, has rather strange semantics for calling pm_idle, - * but this is what x86 does and we need to do the same, so that - * things like cpuidle get called in the same way. The only difference - * is that we always respect 'hlt_counter' to prevent low power idle. + * The idle thread. + * We always respect 'hlt_counter' to prevent low power idle. */ void cpu_idle(void) { @@ -122,10 +118,10 @@ void cpu_idle(void) local_irq_disable(); if (!need_resched()) { stop_critical_timings(); - pm_idle(); + default_idle(); start_critical_timings(); /* - * pm_idle functions should always return + * default_idle functions should always return * with IRQs enabled. 
*/ WARN_ON(irqs_disabled()); @@ -234,33 +230,46 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) asmlinkage void ret_from_fork(void) asm("ret_from_fork"); int copy_thread(unsigned long clone_flags, unsigned long stack_start, - unsigned long stk_sz, struct task_struct *p, - struct pt_regs *regs) + unsigned long stk_sz, struct task_struct *p) { struct pt_regs *childregs = task_pt_regs(p); unsigned long tls = p->thread.tp_value; - *childregs = *regs; - childregs->regs[0] = 0; + memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - if (is_compat_thread(task_thread_info(p))) - childregs->compat_sp = stack_start; - else { + if (likely(!(p->flags & PF_KTHREAD))) { + *childregs = *current_pt_regs(); + childregs->regs[0] = 0; + if (is_compat_thread(task_thread_info(p))) { + if (stack_start) + childregs->compat_sp = stack_start; + } else { + /* + * Read the current TLS pointer from tpidr_el0 as it may be + * out-of-sync with the saved value. + */ + asm("mrs %0, tpidr_el0" : "=r" (tls)); + if (stack_start) { + /* 16-byte aligned stack mandatory on AArch64 */ + if (stack_start & 15) + return -EINVAL; + childregs->sp = stack_start; + } + } /* - * Read the current TLS pointer from tpidr_el0 as it may be - * out-of-sync with the saved value. + * If a TLS pointer was passed to clone (4th argument), use it + * for the new thread. */ - asm("mrs %0, tpidr_el0" : "=r" (tls)); - childregs->sp = stack_start; + if (clone_flags & CLONE_SETTLS) + tls = childregs->regs[3]; + } else { + memset(childregs, 0, sizeof(struct pt_regs)); + childregs->pstate = PSR_MODE_EL1h; + p->thread.cpu_context.x19 = stack_start; + p->thread.cpu_context.x20 = stk_sz; } - - memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); - p->thread.cpu_context.sp = (unsigned long)childregs; p->thread.cpu_context.pc = (unsigned long)ret_from_fork; - - /* If a TLS pointer was passed to clone, use that for the new thread. */ - if (clone_flags & CLONE_SETTLS) - tls = regs->regs[3]; + p->thread.cpu_context.sp = (unsigned long)childregs; p->thread.tp_value = tls; ptrace_hw_copy_thread(p); @@ -306,64 +315,10 @@ struct task_struct *__switch_to(struct task_struct *prev, /* the actual thread switch */ last = cpu_switch_to(prev, next); + contextidr_thread_switch(next); return last; } -/* - * Fill in the task's elfregs structure for a core dump. - */ -int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs) -{ - elf_core_copy_regs(elfregs, task_pt_regs(t)); - return 1; -} - -/* - * fill in the fpe structure for a core dump... - */ -int dump_fpu (struct pt_regs *regs, struct user_fp *fp) -{ - return 0; -} -EXPORT_SYMBOL(dump_fpu); - -/* - * Shuffle the argument into the correct register before calling the - * thread function. x1 is the thread argument, x2 is the pointer to - * the thread function, and x3 points to the exit function. - */ -extern void kernel_thread_helper(void); -asm( ".section .text\n" -" .align\n" -" .type kernel_thread_helper, #function\n" -"kernel_thread_helper:\n" -" mov x0, x1\n" -" mov x30, x3\n" -" br x2\n" -" .size kernel_thread_helper, . - kernel_thread_helper\n" -" .previous"); - -#define kernel_thread_exit do_exit - -/* - * Create a kernel thread. 
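With the custom kernel_thread()/kernel_thread_helper implementation being deleted here, arm64 follows the generic kernel-thread convention: for a PF_KTHREAD task the thread function reaches copy_thread() in stack_start and its argument in stk_sz, they are parked in x19/x20 of the new task's cpu_context, and ret_from_fork (see the entry.S hunk earlier in this diff) branches to the function with x0 set to the argument. A hedged kernel-context sketch of code that exercises this path; demo_thread_fn and demo_init are hypothetical names, not part of this patch:

/* Sketch only: an ordinary kthread user goes through the new PF_KTHREAD branch above. */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/printk.h>

static int demo_thread_fn(void *data)
{
	/* Entered via ret_from_fork: x19 held this function, x20 held data. */
	pr_info("demo kthread running, arg=%p\n", data);
	return 0;
}

static int __init demo_init(void)
{
	struct task_struct *task = kthread_run(demo_thread_fn, NULL, "demo");

	return IS_ERR(task) ? PTR_ERR(task) : 0;
}
device_initcall(demo_init);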
- */ -pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) -{ - struct pt_regs regs; - - memset(®s, 0, sizeof(regs)); - - regs.regs[1] = (unsigned long)arg; - regs.regs[2] = (unsigned long)fn; - regs.regs[3] = (unsigned long)kernel_thread_exit; - regs.pc = (unsigned long)kernel_thread_helper; - regs.pstate = PSR_MODE_EL1h; - - return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); -} -EXPORT_SYMBOL(kernel_thread); - unsigned long get_wchan(struct task_struct *p) { struct stackframe frame; diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c new file mode 100644 index 000000000000..14f73c445ff5 --- /dev/null +++ b/arch/arm64/kernel/psci.c @@ -0,0 +1,211 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2013 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include <linux/init.h> +#include <linux/of.h> + +#include <asm/compiler.h> +#include <asm/errno.h> +#include <asm/psci.h> + +struct psci_operations psci_ops; + +static int (*invoke_psci_fn)(u64, u64, u64, u64); + +enum psci_function { + PSCI_FN_CPU_SUSPEND, + PSCI_FN_CPU_ON, + PSCI_FN_CPU_OFF, + PSCI_FN_MIGRATE, + PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_RET_SUCCESS 0 +#define PSCI_RET_EOPNOTSUPP -1 +#define PSCI_RET_EINVAL -2 +#define PSCI_RET_EPERM -3 + +static int psci_to_linux_errno(int errno) +{ + switch (errno) { + case PSCI_RET_SUCCESS: + return 0; + case PSCI_RET_EOPNOTSUPP: + return -EOPNOTSUPP; + case PSCI_RET_EINVAL: + return -EINVAL; + case PSCI_RET_EPERM: + return -EPERM; + }; + + return -EINVAL; +} + +#define PSCI_POWER_STATE_ID_MASK 0xffff +#define PSCI_POWER_STATE_ID_SHIFT 0 +#define PSCI_POWER_STATE_TYPE_MASK 0x1 +#define PSCI_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_POWER_STATE_AFFL_MASK 0x3 +#define PSCI_POWER_STATE_AFFL_SHIFT 24 + +static u32 psci_power_state_pack(struct psci_power_state state) +{ + return ((state.id & PSCI_POWER_STATE_ID_MASK) + << PSCI_POWER_STATE_ID_SHIFT) | + ((state.type & PSCI_POWER_STATE_TYPE_MASK) + << PSCI_POWER_STATE_TYPE_SHIFT) | + ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) + << PSCI_POWER_STATE_AFFL_SHIFT); +} + +/* + * The following two functions are invoked via the invoke_psci_fn pointer + * and will not be inlined, allowing us to piggyback on the AAPCS. 
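As a worked example of psci_power_state_pack() above: an id of 1, a type of 1 and an affinity level of 2 encode to 0x02010001. The standalone program below reproduces the arithmetic with the same masks and shifts; the field values are arbitrary, chosen only for illustration:

/* Standalone model of the PSCI power_state encoding above; values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define PSCI_ID_MASK	0xffff
#define PSCI_ID_SHIFT	0
#define PSCI_TYPE_MASK	0x1
#define PSCI_TYPE_SHIFT	16
#define PSCI_AFFL_MASK	0x3
#define PSCI_AFFL_SHIFT	24

int main(void)
{
	uint32_t id = 0x0001, type = 1, affinity_level = 2;

	uint32_t power_state = ((id & PSCI_ID_MASK) << PSCI_ID_SHIFT) |
			       ((type & PSCI_TYPE_MASK) << PSCI_TYPE_SHIFT) |
			       ((affinity_level & PSCI_AFFL_MASK) << PSCI_AFFL_SHIFT);

	printf("power_state = 0x%08x\n", (unsigned int)power_state);	/* 0x02010001 */
	return 0;
}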
+ */ +static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, + u64 arg2) +{ + asm volatile( + __asmeq("%0", "x0") + __asmeq("%1", "x1") + __asmeq("%2", "x2") + __asmeq("%3", "x3") + "hvc #0\n" + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, + u64 arg2) +{ + asm volatile( + __asmeq("%0", "x0") + __asmeq("%1", "x1") + __asmeq("%2", "x2") + __asmeq("%3", "x3") + "smc #0\n" + : "+r" (function_id) + : "r" (arg0), "r" (arg1), "r" (arg2)); + + return function_id; +} + +static int psci_cpu_suspend(struct psci_power_state state, + unsigned long entry_point) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_off(struct psci_power_state state) +{ + int err; + u32 fn, power_state; + + fn = psci_function_id[PSCI_FN_CPU_OFF]; + power_state = psci_power_state_pack(state); + err = invoke_psci_fn(fn, power_state, 0, 0); + return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_CPU_ON]; + err = invoke_psci_fn(fn, cpuid, entry_point, 0); + return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ + int err; + u32 fn; + + fn = psci_function_id[PSCI_FN_MIGRATE]; + err = invoke_psci_fn(fn, cpuid, 0, 0); + return psci_to_linux_errno(err); +} + +static const struct of_device_id psci_of_match[] __initconst = { + { .compatible = "arm,psci", }, + {}, +}; + +int __init psci_init(void) +{ + struct device_node *np; + const char *method; + u32 id; + int err = 0; + + np = of_find_matching_node(NULL, psci_of_match); + if (!np) + return -ENODEV; + + pr_info("probing function IDs from device-tree\n"); + + if (of_property_read_string(np, "method", &method)) { + pr_warning("missing \"method\" property\n"); + err = -ENXIO; + goto out_put_node; + } + + if (!strcmp("hvc", method)) { + invoke_psci_fn = __invoke_psci_fn_hvc; + } else if (!strcmp("smc", method)) { + invoke_psci_fn = __invoke_psci_fn_smc; + } else { + pr_warning("invalid \"method\" property: %s\n", method); + err = -EINVAL; + goto out_put_node; + } + + if (!of_property_read_u32(np, "cpu_suspend", &id)) { + psci_function_id[PSCI_FN_CPU_SUSPEND] = id; + psci_ops.cpu_suspend = psci_cpu_suspend; + } + + if (!of_property_read_u32(np, "cpu_off", &id)) { + psci_function_id[PSCI_FN_CPU_OFF] = id; + psci_ops.cpu_off = psci_cpu_off; + } + + if (!of_property_read_u32(np, "cpu_on", &id)) { + psci_function_id[PSCI_FN_CPU_ON] = id; + psci_ops.cpu_on = psci_cpu_on; + } + + if (!of_property_read_u32(np, "migrate", &id)) { + psci_function_id[PSCI_FN_MIGRATE] = id; + psci_ops.migrate = psci_migrate; + } + +out_put_node: + of_node_put(np); + return err; +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 7665a9bfdb1e..113db863f832 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -39,6 +39,7 @@ #include <linux/proc_fs.h> #include <linux/memblock.h> #include <linux/of_fdt.h> +#include <linux/of_platform.h> #include <asm/cputype.h> #include <asm/elf.h> @@ -49,6 +50,7 @@ #include <asm/tlbflush.h> #include <asm/traps.h> #include <asm/memblock.h> +#include <asm/psci.h> unsigned int processor_id; EXPORT_SYMBOL(processor_id); @@ -260,6 +262,8 @@ void __init 
setup_arch(char **cmdline_p) unflatten_device_tree(); + psci_init(); + #ifdef CONFIG_SMP smp_init_cpus(); #endif @@ -289,6 +293,13 @@ static int __init topology_init(void) } subsys_initcall(topology_init); +static int __init arm64_device_probe(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +device_initcall(arm64_device_probe); + static const char *hwcap_str[] = { "fp", "asimd", diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 8807ba2cf262..890a591f75dd 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -41,6 +41,8 @@ struct rt_sigframe { struct siginfo info; struct ucontext uc; + u64 fp; + u64 lr; }; static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) @@ -147,8 +149,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) if (restore_sigframe(regs, frame)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, - NULL, regs->sp) == -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return regs->regs[0]; @@ -162,12 +163,6 @@ badframe: return 0; } -asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, - unsigned long sp) -{ - return do_sigaltstack(uss, uoss, sp); -} - static int setup_sigframe(struct rt_sigframe __user *sf, struct pt_regs *regs, sigset_t *set) { @@ -175,6 +170,10 @@ static int setup_sigframe(struct rt_sigframe __user *sf, struct aux_context __user *aux = (struct aux_context __user *)sf->uc.uc_mcontext.__reserved; + /* set up the stack frame for unwinding */ + __put_user_error(regs->regs[29], &sf->fp, err); + __put_user_error(regs->regs[30], &sf->lr, err); + for (i = 0; i < 31; i++) __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], err); @@ -196,11 +195,11 @@ static int setup_sigframe(struct rt_sigframe __user *sf, return err; } -static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, - int framesize) +static struct rt_sigframe __user *get_sigframe(struct k_sigaction *ka, + struct pt_regs *regs) { unsigned long sp, sp_top; - void __user *frame; + struct rt_sigframe __user *frame; sp = sp_top = regs->sp; @@ -210,11 +209,8 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) sp = sp_top = current->sas_ss_sp + current->sas_ss_size; - /* room for stack frame (FP, LR) */ - sp -= 16; - - sp = (sp - framesize) & ~15; - frame = (void __user *)sp; + sp = (sp - sizeof(struct rt_sigframe)) & ~15; + frame = (struct rt_sigframe __user *)sp; /* * Check that we can actually write to the signal frame. 
@@ -225,20 +221,14 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, return frame; } -static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, - void __user *frame, int usig) +static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, + void __user *frame, int usig) { - int err = 0; __sigrestore_t sigtramp; - unsigned long __user *sp = (unsigned long __user *)regs->sp; - - /* set up the stack frame */ - __put_user_error(regs->regs[29], sp - 2, err); - __put_user_error(regs->regs[30], sp - 1, err); regs->regs[0] = usig; - regs->regs[29] = regs->sp - 16; regs->sp = (unsigned long)frame; + regs->regs[29] = regs->sp + offsetof(struct rt_sigframe, fp); regs->pc = (unsigned long)ka->sa.sa_handler; if (ka->sa.sa_flags & SA_RESTORER) @@ -247,38 +237,30 @@ static int setup_return(struct pt_regs *regs, struct k_sigaction *ka, sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp); regs->regs[30] = (unsigned long)sigtramp; - - return err; } static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; - stack_t stack; int err = 0; - frame = get_sigframe(ka, regs, sizeof(*frame)); + frame = get_sigframe(ka, regs); if (!frame) return 1; __put_user_error(0, &frame->uc.uc_flags, err); __put_user_error(NULL, &frame->uc.uc_link, err); - memset(&stack, 0, sizeof(stack)); - stack.ss_sp = (void __user *)current->sas_ss_sp; - stack.ss_flags = sas_ss_flags(regs->sp); - stack.ss_size = current->sas_ss_size; - err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack)); - + err |= __save_altstack(&frame->uc.uc_stack, regs->sp); err |= setup_sigframe(frame, regs, set); - if (err == 0) - err = setup_return(regs, ka, frame, usig); - - if (err == 0 && ka->sa.sa_flags & SA_SIGINFO) { - err |= copy_siginfo_to_user(&frame->info, info); - regs->regs[1] = (unsigned long)&frame->info; - regs->regs[2] = (unsigned long)&frame->uc; + if (err == 0) { + setup_return(regs, ka, frame, usig); + if (ka->sa.sa_flags & SA_SIGINFO) { + err |= copy_siginfo_to_user(&frame->info, info); + regs->regs[1] = (unsigned long)&frame->info; + regs->regs[2] = (unsigned long)&frame->uc; + } } return err; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 4654824747a4..7f4f3673f2bc 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -28,26 +28,6 @@ #include <asm/uaccess.h> #include <asm/unistd32.h> -struct compat_sigaction { - compat_uptr_t sa_handler; - compat_ulong_t sa_flags; - compat_uptr_t sa_restorer; - compat_sigset_t sa_mask; -}; - -struct compat_old_sigaction { - compat_uptr_t sa_handler; - compat_old_sigset_t sa_mask; - compat_ulong_t sa_flags; - compat_uptr_t sa_restorer; -}; - -typedef struct compat_sigaltstack { - compat_uptr_t ss_sp; - int ss_flags; - compat_size_t ss_size; -} compat_stack_t; - struct compat_sigcontext { /* We always set these two fields to 0 */ compat_ulong_t trap_no; @@ -76,7 +56,7 @@ struct compat_sigcontext { struct compat_ucontext { compat_ulong_t uc_flags; - struct compat_ucontext *uc_link; + compat_uptr_t uc_link; compat_stack_t uc_stack; struct compat_sigcontext uc_mcontext; compat_sigset_t uc_sigmask; @@ -339,127 +319,6 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) return err ? -EFAULT : 0; } -/* - * atomically swap in the new signal mask, and wait for a signal. 
- */ -asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask, - compat_old_sigset_t mask) -{ - sigset_t blocked; - - siginitset(¤t->blocked, mask); - return sigsuspend(&blocked); -} - -asmlinkage int compat_sys_sigaction(int sig, - const struct compat_old_sigaction __user *act, - struct compat_old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - compat_old_sigset_t mask; - compat_uptr_t handler, restorer; - - if (act) { - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(handler, &act->sa_handler) || - __get_user(restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - - new_ka.sa.sa_handler = compat_ptr(handler); - new_ka.sa.sa_restorer = compat_ptr(restorer); - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(ptr_to_compat(old_ka.sa.sa_handler), - &oact->sa_handler) || - __put_user(ptr_to_compat(old_ka.sa.sa_restorer), - &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -asmlinkage int compat_sys_rt_sigaction(int sig, - const struct compat_sigaction __user *act, - struct compat_sigaction __user *oact, - compat_size_t sigsetsize) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - /* XXX: Don't preclude handling different sized sigset_t's. */ - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - - if (act) { - compat_uptr_t handler, restorer; - - ret = get_user(handler, &act->sa_handler); - new_ka.sa.sa_handler = compat_ptr(handler); - ret |= get_user(restorer, &act->sa_restorer); - new_ka.sa.sa_restorer = compat_ptr(restorer); - ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - if (ret) - return -EFAULT; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - if (!ret && oact) { - ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); - ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - } - return ret; -} - -int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss, - compat_ulong_t sp) -{ - compat_stack_t __user *newstack = compat_ptr(compat_uss); - compat_stack_t __user *oldstack = compat_ptr(compat_uoss); - compat_uptr_t ss_sp; - int ret; - mm_segment_t old_fs; - stack_t uss, uoss; - - /* Marshall the compat new stack into a stack_t */ - if (newstack) { - if (get_user(ss_sp, &newstack->ss_sp) || - __get_user(uss.ss_flags, &newstack->ss_flags) || - __get_user(uss.ss_size, &newstack->ss_size)) - return -EFAULT; - uss.ss_sp = compat_ptr(ss_sp); - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* The __user pointer casts are valid because of the set_fs() */ - ret = do_sigaltstack( - newstack ? (stack_t __user *) &uss : NULL, - oldstack ? (stack_t __user *) &uoss : NULL, - (unsigned long)sp); - set_fs(old_fs); - - /* Convert the old stack_t into a compat stack. 
*/ - if (!ret && oldstack && - (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) || - __put_user(uoss.ss_flags, &oldstack->ss_flags) || - __put_user(uoss.ss_size, &oldstack->ss_size))) - return -EFAULT; - return ret; -} - static int compat_restore_sigframe(struct pt_regs *regs, struct compat_sigframe __user *sf) { @@ -562,9 +421,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) if (compat_restore_sigframe(regs, &frame->sig)) goto badframe; - if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack), - ptr_to_compat((void __user *)NULL), - regs->compat_sp) == -EFAULT) + if (compat_restore_altstack(&frame->sig.uc.uc_stack)) goto badframe; return regs->regs[0]; @@ -578,9 +435,9 @@ badframe: return 0; } -static inline void __user *compat_get_sigframe(struct k_sigaction *ka, - struct pt_regs *regs, - int framesize) +static void __user *compat_get_sigframe(struct k_sigaction *ka, + struct pt_regs *regs, + int framesize) { compat_ulong_t sp = regs->compat_sp; void __user *frame; @@ -605,9 +462,9 @@ static inline void __user *compat_get_sigframe(struct k_sigaction *ka, return frame; } -static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, - compat_ulong_t __user *rc, void __user *frame, - int usig) +static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, + compat_ulong_t __user *rc, void __user *frame, + int usig) { compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); compat_ulong_t retcode; @@ -643,8 +500,6 @@ static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, regs->compat_lr = retcode; regs->pc = handler; regs->pstate = spsr; - - return 0; } static int compat_setup_sigframe(struct compat_sigframe __user *sf, @@ -705,20 +560,14 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, err |= copy_siginfo_to_user32(&frame->info, info); __put_user_error(0, &frame->sig.uc.uc_flags, err); - __put_user_error(NULL, &frame->sig.uc.uc_link, err); + __put_user_error(0, &frame->sig.uc.uc_link, err); - memset(&stack, 0, sizeof(stack)); - stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; - stack.ss_flags = sas_ss_flags(regs->compat_sp); - stack.ss_size = current->sas_ss_size; - err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); + err |= __compat_save_altstack(&frame->sig.uc.uc_stack, regs->compat_sp); err |= compat_setup_sigframe(&frame->sig, regs, set); - if (err == 0) - err = compat_setup_return(regs, ka, frame->sig.retcode, frame, - usig); if (err == 0) { + compat_setup_return(regs, ka, frame->sig.retcode, frame, usig); regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; } @@ -741,80 +590,11 @@ int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, err |= compat_setup_sigframe(frame, regs, set); if (err == 0) - err = compat_setup_return(regs, ka, frame->retcode, frame, usig); + compat_setup_return(regs, ka, frame->retcode, frame, usig); return err; } -/* - * RT signals don't have generic compat wrappers. - * See arch/powerpc/kernel/signal_32.c - */ -asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, - compat_sigset_t __user *oset, - compat_size_t sigsetsize) -{ - sigset_t s; - sigset_t __user *up; - int ret; - mm_segment_t old_fs = get_fs(); - - if (set) { - if (get_sigset_t(&s, set)) - return -EFAULT; - } - - set_fs(KERNEL_DS); - /* This is valid because of the set_fs() */ - up = (sigset_t __user *) &s; - ret = sys_rt_sigprocmask(how, set ? 
up : NULL, oset ? up : NULL, - sigsetsize); - set_fs(old_fs); - if (ret) - return ret; - if (oset) { - if (put_sigset_t(oset, &s)) - return -EFAULT; - } - return 0; -} - -asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set, - compat_size_t sigsetsize) -{ - sigset_t s; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - /* The __user pointer cast is valid because of the set_fs() */ - ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); - set_fs(old_fs); - if (!ret) { - if (put_sigset_t(set, &s)) - return -EFAULT; - } - return ret; -} - -asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig, - compat_siginfo_t __user *uinfo) -{ - siginfo_t info; - int ret; - mm_segment_t old_fs = get_fs(); - - ret = copy_siginfo_from_user32(&info, uinfo); - if (unlikely(ret)) - return ret; - - set_fs (KERNEL_DS); - /* The __user pointer cast is valid because of the set_fs() */ - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); - set_fs (old_fs); - return ret; -} - void compat_setup_restart_syscall(struct pt_regs *regs) { regs->regs[7] = __NR_compat_restart_syscall; diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 226b6bf6e9c2..bdd34597254b 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -211,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) * before we continue. */ set_cpu_online(cpu, true); - while (!cpu_active(cpu)) - cpu_relax(); + complete(&cpu_running); /* * OK, it's off to the idle thread for us @@ -234,7 +233,28 @@ void __init smp_prepare_boot_cpu(void) } static void (*smp_cross_call)(const struct cpumask *, unsigned int); -static phys_addr_t cpu_release_addr[NR_CPUS]; + +static const struct smp_enable_ops *enable_ops[] __initconst = { + &smp_spin_table_ops, + &smp_psci_ops, + NULL, +}; + +static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; + +static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) +{ + const struct smp_enable_ops **ops = enable_ops; + + while (*ops) { + if (!strcmp(name, (*ops)->name)) + return *ops; + + ops++; + } + + return NULL; +} /* * Enumerate the possible CPU set from the device tree. @@ -253,22 +273,22 @@ void __init smp_init_cpus(void) * We currently support only the "spin-table" enable-method. */ enable_method = of_get_property(dn, "enable-method", NULL); - if (!enable_method || strcmp(enable_method, "spin-table")) { - pr_err("CPU %d: missing or invalid enable-method property: %s\n", - cpu, enable_method); + if (!enable_method) { + pr_err("CPU %d: missing enable-method property\n", cpu); goto next; } - /* - * Determine the address from which the CPU is polling.
- */ - if (of_property_read_u64(dn, "cpu-release-addr", - &cpu_release_addr[cpu])) { - pr_err("CPU %d: missing or invalid cpu-release-addr property\n", - cpu); + smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); + + if (!smp_enable_ops[cpu]) { + pr_err("CPU %d: invalid enable-method property: %s\n", + cpu, enable_method); goto next; } + if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) + goto next; + set_cpu_possible(cpu, true); next: cpu++; @@ -282,8 +302,7 @@ next: void __init smp_prepare_cpus(unsigned int max_cpus) { - int cpu; - void **release_addr; + int cpu, err; unsigned int ncores = num_possible_cpus(); /* @@ -292,30 +311,35 @@ void __init smp_prepare_cpus(unsigned int max_cpus) if (max_cpus > ncores) max_cpus = ncores; + /* Don't bother if we're effectively UP */ + if (max_cpus <= 1) + return; + /* * Initialise the present map (which describes the set of CPUs * actually populated at the present time) and release the * secondaries from the bootloader. + * + * Make sure we online at most (max_cpus - 1) additional CPUs. */ + max_cpus--; for_each_possible_cpu(cpu) { if (max_cpus == 0) break; - if (!cpu_release_addr[cpu]) + if (cpu == smp_processor_id()) continue; - release_addr = __va(cpu_release_addr[cpu]); - release_addr[0] = (void *)__pa(secondary_holding_pen); - __flush_dcache_area(release_addr, sizeof(release_addr[0])); + if (!smp_enable_ops[cpu]) + continue; + + err = smp_enable_ops[cpu]->prepare_cpu(cpu); + if (err) + continue; set_cpu_present(cpu, true); max_cpus--; } - - /* - * Send an event to wake up the secondaries. - */ - sev(); } diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c new file mode 100644 index 000000000000..112091684c22 --- /dev/null +++ b/arch/arm64/kernel/smp_psci.c @@ -0,0 +1,52 @@ +/* + * PSCI SMP initialisation + * + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/of.h> +#include <linux/smp.h> + +#include <asm/psci.h> + +static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) +{ + return 0; +} + +static int __init smp_psci_prepare_cpu(int cpu) +{ + int err; + + if (!psci_ops.cpu_on) { + pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu); + return -ENODEV; + } + + err = psci_ops.cpu_on(cpu, __pa(secondary_holding_pen)); + if (err) { + pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); + return err; + } + + return 0; +} + +const struct smp_enable_ops smp_psci_ops __initconst = { + .name = "psci", + .init_cpu = smp_psci_init_cpu, + .prepare_cpu = smp_psci_prepare_cpu, +}; diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c new file mode 100644 index 000000000000..7c35fa682f76 --- /dev/null +++ b/arch/arm64/kernel/smp_spin_table.c @@ -0,0 +1,66 @@ +/* + * Spin Table SMP initialisation + * + * Copyright (C) 2013 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/init.h> +#include <linux/of.h> +#include <linux/smp.h> + +#include <asm/cacheflush.h> + +static phys_addr_t cpu_release_addr[NR_CPUS]; + +static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) +{ + /* + * Determine the address from which the CPU is polling. + */ + if (of_property_read_u64(dn, "cpu-release-addr", + &cpu_release_addr[cpu])) { + pr_err("CPU %d: missing or invalid cpu-release-addr property\n", + cpu); + + return -1; + } + + return 0; +} + +static int __init smp_spin_table_prepare_cpu(int cpu) +{ + void **release_addr; + + if (!cpu_release_addr[cpu]) + return -ENODEV; + + release_addr = __va(cpu_release_addr[cpu]); + release_addr[0] = (void *)__pa(secondary_holding_pen); + __flush_dcache_area(release_addr, sizeof(release_addr[0])); + + /* + * Send an event to wake up the secondary CPU. + */ + sev(); + + return 0; +} + +const struct smp_enable_ops smp_spin_table_ops __initconst = { + .name = "spin-table", + .init_cpu = smp_spin_table_init_cpu, + .prepare_cpu = smp_spin_table_prepare_cpu, +}; diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c index b120df37de35..3fa98ff14f0e 100644 --- a/arch/arm64/kernel/sys.c +++ b/arch/arm64/kernel/sys.c @@ -26,85 +26,6 @@ #include <linux/slab.h> #include <linux/syscalls.h> -/* - * Clone a task - this clones the calling program thread. - */ -asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, - int __user *parent_tidptr, unsigned long tls_val, - int __user *child_tidptr, struct pt_regs *regs) -{ - if (!newsp) - newsp = regs->sp; - /* 16-byte aligned stack mandatory on AArch64 */ - if (newsp & 15) - return -EINVAL; - return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); -} - -/* - * sys_execve() executes a new program. - */ -asmlinkage long sys_execve(const char __user *filenamei, - const char __user *const __user *argv, - const char __user *const __user *envp, - struct pt_regs *regs) -{ - long error; - struct filename *filename; - - filename = getname(filenamei); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename->name, argv, envp, regs); - putname(filename); -out: - return error; -} - -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) -{ - struct pt_regs regs; - int ret; - - memset(®s, 0, sizeof(struct pt_regs)); - ret = do_execve(filename, - (const char __user *const __user *)argv, - (const char __user *const __user *)envp, ®s); - if (ret < 0) - goto out; - - /* - * Save argc to the register structure for userspace. - */ - regs.regs[0] = ret; - - /* - * We were successful. We won't be returning to our caller, but - * instead to user space by manipulating the kernel stack. 
- */ - asm( "add x0, %0, %1\n\t" - "mov x1, %2\n\t" - "mov x2, %3\n\t" - "bl memmove\n\t" /* copy regs to top of stack */ - "mov x27, #0\n\t" /* not a syscall */ - "mov x28, %0\n\t" /* thread structure */ - "mov sp, x0\n\t" /* reposition stack pointer */ - "b ret_to_user" - : - : "r" (current_thread_info()), - "Ir" (THREAD_START_SP - sizeof(regs)), - "r" (®s), - "Ir" (sizeof(regs)) - : "x0", "x1", "x2", "x27", "x28", "x30", "memory"); - - out: - return ret; -} -EXPORT_SYMBOL(kernel_execve); - asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, off_t off) @@ -118,10 +39,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len, /* * Wrappers to pass the pt_regs argument. */ -#define sys_execve sys_execve_wrapper -#define sys_clone sys_clone_wrapper #define sys_rt_sigreturn sys_rt_sigreturn_wrapper -#define sys_sigaltstack sys_sigaltstack_wrapper #include <asm/syscalls.h> diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S index 54c4aec47a08..9416d045a687 100644 --- a/arch/arm64/kernel/sys32.S +++ b/arch/arm64/kernel/sys32.S @@ -26,25 +26,6 @@ /* * System call wrappers for the AArch32 compatibility layer. */ -compat_sys_fork_wrapper: - mov x0, sp - b compat_sys_fork -ENDPROC(compat_sys_fork_wrapper) - -compat_sys_vfork_wrapper: - mov x0, sp - b compat_sys_vfork -ENDPROC(compat_sys_vfork_wrapper) - -compat_sys_execve_wrapper: - mov x3, sp - b compat_sys_execve -ENDPROC(compat_sys_execve_wrapper) - -compat_sys_clone_wrapper: - mov x5, sp - b compat_sys_clone -ENDPROC(compat_sys_clone_wrapper) compat_sys_sigreturn_wrapper: mov x0, sp @@ -58,11 +39,6 @@ compat_sys_rt_sigreturn_wrapper: b compat_sys_rt_sigreturn ENDPROC(compat_sys_rt_sigreturn_wrapper) -compat_sys_sigaltstack_wrapper: - ldr x2, [sp, #S_COMPAT_SP] - b compat_do_sigaltstack -ENDPROC(compat_sys_sigaltstack_wrapper) - compat_sys_statfs64_wrapper: mov w3, #84 cmp w1, #88 @@ -82,11 +58,6 @@ ENDPROC(compat_sys_fstatfs64_wrapper) * in registers or that take 32-bit parameters which require sign * extension. 
*/ -compat_sys_lseek_wrapper: - sxtw x1, w1 - b sys_lseek -ENDPROC(compat_sys_lseek_wrapper) - compat_sys_pread64_wrapper: orr x3, x4, x5, lsl #32 b sys_pread64 diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 906e3bd270b0..26e9c4eeaba8 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -28,60 +28,6 @@ #include <asm/cacheflush.h> #include <asm/unistd32.h> -asmlinkage int compat_sys_fork(struct pt_regs *regs) -{ - return do_fork(SIGCHLD, regs->compat_sp, regs, 0, NULL, NULL); -} - -asmlinkage int compat_sys_clone(unsigned long clone_flags, unsigned long newsp, - int __user *parent_tidptr, int tls_val, - int __user *child_tidptr, struct pt_regs *regs) -{ - if (!newsp) - newsp = regs->compat_sp; - - return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr); -} - -asmlinkage int compat_sys_vfork(struct pt_regs *regs) -{ - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->compat_sp, - regs, 0, NULL, NULL); -} - -asmlinkage int compat_sys_execve(const char __user *filenamei, - compat_uptr_t argv, compat_uptr_t envp, - struct pt_regs *regs) -{ - int error; - struct filename *filename; - - filename = getname(filenamei); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = compat_do_execve(filename->name, compat_ptr(argv), - compat_ptr(envp), regs); - putname(filename); -out: - return error; -} - -asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid, - struct compat_timespec __user *interval) -{ - struct timespec t; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs(KERNEL_DS); - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); - set_fs(old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; -} - static inline void do_compat_cache_op(unsigned long start, unsigned long end, int flags) { diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 3b4b7258f492..b0ef18d14c3b 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c @@ -31,8 +31,9 @@ #include <linux/syscore_ops.h> #include <linux/timer.h> #include <linux/irq.h> +#include <linux/delay.h> -#include <clocksource/arm_generic.h> +#include <clocksource/arm_arch_timer.h> #include <asm/thread_info.h> #include <asm/stacktrace.h> @@ -59,7 +60,31 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif +static u64 sched_clock_mult __read_mostly; + +unsigned long long notrace sched_clock(void) +{ + return arch_timer_read_counter() * sched_clock_mult; +} + +int read_current_timer(unsigned long *timer_value) +{ + *timer_value = arch_timer_read_counter(); + return 0; +} + void __init time_init(void) { - arm_generic_timer_init(); + u32 arch_timer_rate; + + if (arch_timer_init()) + panic("Unable to initialise architected timer.\n"); + + arch_timer_rate = arch_timer_get_rate(); + + /* Cache the sched_clock multiplier to save a divide in the hot path. 
*/ + sched_clock_mult = NSEC_PER_SEC / arch_timer_rate; + + /* Calibrate the delay loop directly */ + lpj_fine = arch_timer_rate / HZ; } diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 3883f842434f..b3c5f628bdb4 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -242,7 +242,7 @@ void die(const char *str, struct pt_regs *regs, int err) crash_kexec(regs); bust_spinlocks(0); - add_taint(TAINT_DIE); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); raw_spin_unlock_irq(&die_lock); oops_exit(); diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index ba457943a16b..6a389dc1bd49 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -239,7 +239,7 @@ void update_vsyscall(struct timekeeper *tk) if (!use_syscall) { vdso_data->cs_cycle_last = tk->clock->cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->xtime_nsec >> tk->shift; + vdso_data->xtime_clock_nsec = tk->xtime_nsec; vdso_data->cs_mult = tk->mult; vdso_data->cs_shift = tk->shift; vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; @@ -252,10 +252,6 @@ void update_vsyscall(struct timekeeper *tk) void update_vsyscall_tz(void) { - ++vdso_data->tb_seq_count; - smp_wmb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; - smp_wmb(); - ++vdso_data->tb_seq_count; } diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index dcb8c203a3b2..f0a6d10b5211 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -62,18 +62,17 @@ ENTRY(__kernel_gettimeofday) /* If tv is NULL, skip to the timezone code. */ cbz x0, 2f bl __do_get_tspec - seqcnt_check w13, 1b + seqcnt_check w9, 1b /* Convert ns to us. */ - mov x11, #1000 - udiv x10, x10, x11 - stp x9, x10, [x0, #TVAL_TV_SEC] + mov x13, #1000 + lsl x13, x13, x12 + udiv x11, x11, x13 + stp x10, x11, [x0, #TVAL_TV_SEC] 2: /* If tz is NULL, return 0. */ cbz x1, 3f ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] - seqcnt_read w13 - seqcnt_check w13, 1b stp w4, w5, [x1, #TZ_MINWEST] 3: mov x0, xzr @@ -102,17 +101,17 @@ ENTRY(__kernel_clock_gettime) cbnz use_syscall, 7f bl __do_get_tspec - seqcnt_check w13, 1b + seqcnt_check w9, 1b cmp w0, #CLOCK_MONOTONIC b.ne 6f /* Get wtm timespec. */ - ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] + ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] /* Check the sequence counter. */ - seqcnt_read w13 - seqcnt_check w13, 1b + seqcnt_read w9 + seqcnt_check w9, 1b b 4f 2: cmp w0, #CLOCK_REALTIME_COARSE @@ -122,37 +121,40 @@ ENTRY(__kernel_clock_gettime) /* Get coarse timespec. */ adr vdso_data, _vdso_data 3: seqcnt_acquire - ldp x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC] - - cmp w0, #CLOCK_MONOTONIC_COARSE - b.ne 6f + ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC] /* Get wtm timespec. */ - ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] + ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC] /* Check the sequence counter. */ - seqcnt_read w13 - seqcnt_check w13, 3b + seqcnt_read w9 + seqcnt_check w9, 3b + + cmp w0, #CLOCK_MONOTONIC_COARSE + b.ne 6f 4: /* Add on wtm timespec. */ - add x9, x9, x14 - add x10, x10, x15 + add x10, x10, x13 + lsl x14, x14, x12 + add x11, x11, x14 /* Normalise the new timespec. 
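time_init() above caches sched_clock_mult = NSEC_PER_SEC / arch_timer_rate so that sched_clock() becomes a single multiply of the architected counter; because this is an integer division, timer rates that do not divide one billion evenly lose a little resolution. A standalone illustration with a hypothetical 100 MHz timer; the rate and the CONFIG_HZ value are assumptions, not from the patch:

/* Standalone model of the sched_clock()/lpj_fine setup above; the rate is hypothetical. */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000UL
#define HZ		100		/* assumption: CONFIG_HZ=100 */

int main(void)
{
	unsigned long rate = 100000000UL;			/* hypothetical cntfrq: 100 MHz */
	unsigned long sched_clock_mult = NSEC_PER_SEC / rate;	/* 10 ns per counter tick */
	unsigned long lpj_fine = rate / HZ;			/* delay-loop calibration seed */
	unsigned long long counter = 123456789ULL;		/* sample counter reading */

	printf("sched_clock() ~ %llu ns (mult=%lu, lpj_fine=%lu)\n",
	       counter * sched_clock_mult, sched_clock_mult, lpj_fine);
	return 0;
}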
*/ - mov x14, #NSEC_PER_SEC_LO16 - movk x14, #NSEC_PER_SEC_HI16, lsl #16 - cmp x10, x14 + mov x15, #NSEC_PER_SEC_LO16 + movk x15, #NSEC_PER_SEC_HI16, lsl #16 + lsl x15, x15, x12 + cmp x11, x15 b.lt 5f - sub x10, x10, x14 - add x9, x9, #1 + sub x11, x11, x15 + add x10, x10, #1 5: - cmp x10, #0 + cmp x11, #0 b.ge 6f - add x10, x10, x14 - sub x9, x9, #1 + add x11, x11, x15 + sub x10, x10, #1 6: /* Store to the user timespec. */ - stp x9, x10, [x1, #TSPEC_TV_SEC] + lsr x11, x11, x12 + stp x10, x11, [x1, #TSPEC_TV_SEC] mov x0, xzr ret x2 7: @@ -203,39 +205,39 @@ ENDPROC(__kernel_clock_getres) * Expects vdso_data to be initialised. * Clobbers the temporary registers (x9 - x15). * Returns: - * - (x9, x10) = (ts->tv_sec, ts->tv_nsec) - * - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec) - * - w13 = vDSO sequence counter + * - w9 = vDSO sequence counter + * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec) + * - w12 = cs_shift */ ENTRY(__do_get_tspec) .cfi_startproc /* Read from the vDSO data page. */ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] - ldp x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC] - ldp w14, w15, [vdso_data, #VDSO_CS_MULT] - seqcnt_read w13 + ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC] + ldp w11, w12, [vdso_data, #VDSO_CS_MULT] + seqcnt_read w9 - /* Read the physical counter. */ + /* Read the virtual counter. */ isb - mrs x9, cntpct_el0 + mrs x15, cntvct_el0 /* Calculate cycle delta and convert to ns. */ - sub x10, x9, x10 + sub x10, x15, x10 /* We can only guarantee 56 bits of precision. */ - movn x9, #0xff0, lsl #48 - and x10, x9, x10 - mul x10, x10, x14 - lsr x10, x10, x15 + movn x15, #0xff00, lsl #48 + and x10, x15, x10 + mul x10, x10, x11 /* Use the kernel time to calculate the new timespec. */ - add x10, x12, x10 - mov x14, #NSEC_PER_SEC_LO16 - movk x14, #NSEC_PER_SEC_HI16, lsl #16 - udiv x15, x10, x14 - add x9, x15, x11 - mul x14, x14, x15 - sub x10, x10, x14 + mov x11, #NSEC_PER_SEC_LO16 + movk x11, #NSEC_PER_SEC_HI16, lsl #16 + lsl x11, x11, x12 + add x15, x10, x14 + udiv x14, x15, x11 + add x10, x13, x14 + mul x13, x14, x11 + sub x11, x15, x13 ret .cfi_endproc diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 1909a69983ca..afadae6682ed 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -36,6 +36,8 @@ #include <asm/pgtable.h> #include <asm/tlbflush.h> +static const char *fault_name(unsigned int esr); + /* * Dump out the page tables associated with 'addr' in mm 'mm'. */ @@ -112,8 +114,9 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr, struct siginfo si; if (show_unhandled_signals) { - pr_info("%s[%d]: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", - tsk->comm, task_pid_nr(tsk), sig, addr, esr); + pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n", + tsk->comm, task_pid_nr(tsk), fault_name(esr), sig, + addr, esr); show_pte(tsk->mm, addr); show_regs(regs); } @@ -450,6 +453,12 @@ static struct fault_info { { do_bad, SIGBUS, 0, "unknown 63" }, }; +static const char *fault_name(unsigned int esr) +{ + const struct fault_info *inf = fault_info + (esr & 63); + return inf->name; +} + /* * Dispatch a data abort to the relevant handler. 
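The reworked __do_get_tspec above keeps nanoseconds left-shifted by cs_shift all the way through and only shifts right when the final timespec is stored, which matches the update_vsyscall() change earlier that now publishes tk->xtime_nsec in its shifted fixed-point form. A small C model of that arithmetic; the mult/shift and sample values are illustrative, not real clocksource parameters:

/* Standalone model of the shifted-nanosecond arithmetic in __do_get_tspec above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cs_mult = 2560, cs_shift = 8;	/* illustrative: 2560 >> 8 == 10 ns per tick */
	uint64_t cycle_delta = 1000;		/* counter ticks since cs_cycle_last */
	uint64_t xtime_sec = 100;
	uint64_t xtime_nsec = 0;		/* already left-shifted by cs_shift in the data page */

	uint64_t nsec_per_sec_shifted = 1000000000ULL << cs_shift;
	uint64_t ns = cycle_delta * cs_mult + xtime_nsec;	/* shifted nanoseconds */

	uint64_t sec  = xtime_sec + ns / nsec_per_sec_shifted;
	uint64_t nsec = (ns % nsec_per_sec_shifted) >> cs_shift;	/* shift only at the end */

	printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)nsec);	/* 100.000010000 */
	return 0;
}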
*/ diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index c144adb1682f..88611c3a421a 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -27,10 +27,6 @@ #include "mm.h" -void flush_cache_mm(struct mm_struct *mm) -{ -} - void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -38,11 +34,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, __flush_icache_all(); } -void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, - unsigned long pfn) -{ -} - static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *kaddr, unsigned long len) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index efbf7df05d3f..800aac306a08 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -79,7 +79,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) #ifdef CONFIG_ZONE_DMA32 /* 4GB maximum for 32-bit only capable devices */ - max_dma32 = min(max, MAX_DMA32_PFN); + max_dma32 = max(min, min(max, MAX_DMA32_PFN)); zone_size[ZONE_DMA32] = max_dma32 - min; #endif zone_size[ZONE_NORMAL] = max - max_dma32; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a6885d896ab6..224b44ab534e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -25,6 +25,7 @@ #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/fs.h> +#include <linux/io.h> #include <asm/cputype.h> #include <asm/sections.h> @@ -251,6 +252,47 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, } while (pgd++, addr = next, addr != end); } +#ifdef CONFIG_EARLY_PRINTK +/* + * Create an early I/O mapping using the pgd/pmd entries already populated + * in head.S as this function is called too early to allocated any memory. The + * mapping size is 2MB with 4KB pages or 64KB or 64KB pages. + */ +void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) +{ + unsigned long size, mask; + bool page64k = IS_ENABLED(ARM64_64K_PAGES); + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + /* + * No early pte entries with !ARM64_64K_PAGES configuration, so using + * sections (pmd). + */ + size = page64k ? PAGE_SIZE : SECTION_SIZE; + mask = ~(size - 1); + + pgd = pgd_offset_k(virt); + pud = pud_offset(pgd, virt); + if (pud_none(*pud)) + return NULL; + pmd = pmd_offset(pud, virt); + + if (page64k) { + if (pmd_none(*pmd)) + return NULL; + pte = pte_offset_kernel(pmd, virt); + set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE)); + } else { + set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE)); + } + + return (void __iomem *)((virt & mask) + (phys & ~mask)); +} +#endif + static void __init map_mem(void) { struct memblock_region *reg; @@ -392,4 +434,7 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } #endif /* CONFIG_ARM64_64K_PAGES */ +void vmemmap_free(struct page *memmap, unsigned long nr_pages) +{ +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
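early_io_map() above reuses the pgd/pmd entry prepared for EARLYCON_IOBASE in head.S earlier in this diff, so it can map only a single 2MB section (or a single 64KB page when 64KB pages are configured) and folds the physical offset within that block back into the returned pointer. Booting with something like earlyprintk=pl011,0x1c090000 (the UART address here is a board-specific assumption) would exercise the arithmetic modelled below:

/* Standalone model of the early_io_map() address arithmetic, assuming 4KB pages. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 2 * 1024 * 1024;		/* SECTION_SIZE with 4KB pages */
	uint64_t mask = ~(size - 1);

	uint64_t phys = 0x1c090000ULL;			/* hypothetical UART physical address */
	uint64_t virt = 0xffffffbffbc00000ULL;		/* hypothetical EARLYCON_IOBASE */

	/* The pmd maps the 2MB block containing phys; the offset inside that
	 * block is added back so the returned pointer hits the UART registers. */
	uint64_t mapped = (virt & mask) + (phys & ~mask);

	printf("block base 0x%llx + offset 0x%llx -> 0x%llx\n",
	       (unsigned long long)(phys & mask),
	       (unsigned long long)(phys & ~mask),
	       (unsigned long long)mapped);
	return 0;
}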